Working as objectid identifier #1

Open · wants to merge 1 commit into base: master
1 change: 1 addition & 0 deletions __init__.py
@@ -1,3 +1,4 @@
import mongodb2
import orm_mongodb
import osv_mongodb
import ir_attachment
2 changes: 1 addition & 1 deletion __terp__.py
@@ -9,7 +9,7 @@
"author": "Joan M. Grande",
"license" : "GPL-3",
"category": "Generic Modules",
"depends": [],
"depends": ['base'],
"init_xml": [],
"demo_xml": [],
"update_xml": [],
52 changes: 52 additions & 0 deletions ir_attachment.py
@@ -0,0 +1,52 @@
from osv import osv, fields
from mongodb_backend.osv_mongodb import osv_mongodb


def check_use_objectid(pool, obj_str):
    return isinstance(pool.get(obj_str), osv_mongodb)


class IrAttachment(osv.osv):
    _name = 'ir.attachment'
    _inherit = 'ir.attachment'

    def search(self, cr, user, args, offset=0, limit=None, order=None,
               context=None, count=False):
        # [['res_model', '=', 'giscedata.sips.ps'], ['res_id', '=', '556bfea9e838b75c51d1f14c']]
        use_objectid = False
        newargs = []
        for search in args:
            if len(search) != 3:
                newargs.append(search)
                continue
            k, op, value = search
            if k == 'res_model':
                if check_use_objectid(self.pool, value):
                    use_objectid = True
            if k == 'res_id' and use_objectid:
                k = 'res_objectid'
            newargs.append([k, op, value])
        return super(IrAttachment, self).search(cr, user, newargs, offset,
                                                limit, order, context, count)

    def create(self, cr, user, vals, context=None):
        # {'lang': 'es_ES', 'tz': False, 'default_res_model': 'giscedata.sips.ps', 'active_ids': [], 'default_res_id': '556bfea9e838b75c51d1f14c', 'active_id': False}
        if context is None:
            context = {}
        use_objectid = False
        for k, v in context.iteritems():
            if k == 'default_res_model':
                if check_use_objectid(self.pool, v):
                    use_objectid = True
                    break
        if use_objectid and 'default_res_id' in context:
            context['default_res_objectid'] = context['default_res_id']
            del context['default_res_id']

        return super(IrAttachment, self).create(cr, user, vals, context)

    _columns = {
        'res_objectid': fields.char('Ref ObjectId', size=24)
    }


IrAttachment()
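
Taken together, these overrides reroute attachment lookups for Mongo-backed models. A minimal usage sketch, assuming the model named in the inline comments above is an osv_mongodb model; the hex id is illustrative:

    attachment_obj = pool.get('ir.attachment')
    # Because 'giscedata.sips.ps' is Mongo-backed, the res_id clause
    # is rewritten to res_objectid before the query runs:
    ids = attachment_obj.search(cr, uid, [
        ('res_model', '=', 'giscedata.sips.ps'),
        ('res_id', '=', '556bfea9e838b75c51d1f14c'),
    ])
    # Effective domain: [['res_model', '=', 'giscedata.sips.ps'],
    #                    ['res_objectid', '=', '556bfea9e838b75c51d1f14c']]

Note that the rewrite only fires when res_model precedes res_id in the domain, since use_objectid is set while iterating left to right.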
124 changes: 54 additions & 70 deletions orm_mongodb.py
@@ -49,27 +49,17 @@ def _auto_init(self, cr, context=None):

        db = mdbpool.get_db()

        #Create the model counters document in order to
        #have incremental ids the way postgresql does
        # Create the model counters document in order to
        # have incremental ids the way postgresql does
        collection = db['counters']

        if not collection.find({'_id': self._table}).count():
            vals = {'_id': self._table,
                    'counter': 1}
            collection.save(vals)

        collection = db[self._table]
        #Create index for the id field
        collection.ensure_index([('id', pymongo.ASCENDING)],
                                deprecated_unique=None,
                                ttl=300,
                                unique=True)

        if db.error():
            raise except_orm('MongoDB create id field index error', db.error())
        #Update docs with new default values if they do not exist
        #If we find at least one document with this field
        #we assume that the field is present in the collection
        # Update docs with new default values if they do not exist
        # If we find at least one document with this field
        # we assume that the field is present in the collection
        def_fields = filter(lambda a: not collection.find_one(
            {a: {'$exists': True}}),
            self._defaults.keys())
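
For context, the counters document maintained above gives Postgres-style incremental ids. A standalone sketch of the pattern, assuming a pymongo 2.x connection (the model name is illustrative); this is the mechanism create() below stops relying on in favour of the native ObjectId:

    counters = db['counters']
    if not counters.find({'_id': 'some.model'}).count():
        counters.save({'_id': 'some.model', 'counter': 1})
    # Atomically claim the next id; find_and_modify returns the
    # document as it was *before* the $inc by default.
    doc = counters.find_and_modify({'_id': 'some.model'},
                                   {'$inc': {'counter': 1}})
    next_id = doc['counter']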
@@ -222,21 +212,15 @@ def read(self, cr, user, ids, fields=None, context=None,
        result = self._read_flat(cr, user, select, fields, context, load)

        for r in result:
            for key, v in r.items():
                #remove the '_id' field from the response
            for key, v in r.iteritems():
                # remove the '_id' field from the response
                if key == '_id':
                    r['id'] = str(v)
                    del r[key]
                    continue
                #WTF. id field is not always readed as int
                if key == 'id':
                    r[key] = int(v)
                    continue
                if v is None:
                    r[key] = False
                else:
                    continue

        if isinstance(ids, (int, long)):
        if isinstance(ids, basestring):
            return result and result[0] or False
        return result

@@ -250,6 +234,8 @@ def _read_flat(self, cr, user, ids, fields_to_read, context=None,
        if not ids:
            return []

        ids = [ObjectId(_x) for _x in ids]

        if fields_to_read is None:
            fields_to_read = self._columns.keys()
@@ -261,15 +247,14 @@
                      '_classic_write'))
        ]

        res = []
        if len(fields_pre):
        if fields_pre:
            order = self._compute_order(cr, user)
            mongo_cr = collection.find({'id': {'$in': ids}},
                                       fields_pre + ['id'],
            mongo_cr = collection.find({'_id': {'$in': ids}},
                                       fields_pre,
                                       sort=order)
            res = [x for x in mongo_cr]
        else:
            res = map(lambda x: {'id': x}, ids)
            res = map(lambda x: {'_id': x}, ids)
        #Post process date and datetime fields
        self.read_date_fields(fields_to_read, res)
        self.read_binary_gridfs_fields(fields_to_read, res)
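
The string ids handed out by search() have to be rehydrated before querying, since a bare hex string never matches an ObjectId-typed _id; hence the conversion added at the top of _read_flat. A minimal sketch (the bson import path is the pymongo 2.x one; the hex id is illustrative):

    from bson.objectid import ObjectId

    str_ids = ['556bfea9e838b75c51d1f14c']
    oids = [ObjectId(_x) for _x in str_ids]
    # _id is returned by default, so it no longer needs listing in the projection
    docs = collection.find({'_id': {'$in': oids}}, fields_pre)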
@@ -284,19 +269,21 @@ def write(self, cr, user, ids, vals, context=None):
        if not ids:
            return True

        ids = [ObjectId(_x) for _x in ids]

        self.pool.get('ir.model.access').check(cr, user, self._name,
                                               'write', context=context)
        #Pre process date and datetime fields
        # Pre process date and datetime fields
        self.preformat_write_fields(vals)
        self.write_binary_gridfs_fields(vals)

        #Log access
        # Log access
        vals.update({'write_uid': user,
                     'write_date': datetime.now(),
                     })

        #bulk update with modifiers, and safe mode
        collection.update({'id': {'$in': ids}},
        collection.update({'_id': {'$in': ids}},
                          {'$set': vals},
                          False, False, True, True)
@@ -324,34 +311,29 @@ def create(self, cr, user, vals, context=None):
        default_values = self.default_get(cr, user, default, context)
        vals.update(default_values)

        #Add incremental id to store vals
        counter = mdbpool.get_collection('counters').find_and_modify(
            {'_id': self._table},
            {'$inc': {'counter': 1}})
        vals.update({'id': counter['counter']})
        #Pre proces date fields
        # Pre process date fields
        self.preformat_write_fields(vals)
        self.write_binary_gridfs_fields(vals)
        #Log access
        # Log access
        vals.update({'create_uid': user,
                     'create_date': datetime.now(),
                     })

        #Effectively create the record
        collection.insert(vals)
        # Effectively create the record
        oid = collection.insert(vals)

        return vals['id']
        return str(oid)
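
With the counter gone, the record id is whatever MongoDB generates. A small round-trip sketch (field values illustrative):

    oid = collection.insert({'name': 'demo'})  # pymongo returns the new ObjectId
    new_id = str(oid)                          # 24-char hex string handed to the ORM
    assert ObjectId(new_id) == oid             # converts back losslessly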

    def _compute_order(self, cr, user, order=None, context=None):
        #Parse the order of the object to addapt it to MongoDB
        # Parse the order of the object to adapt it to MongoDB

        if not order:
            order = self._order

        mongo_order = order.split(',')
        #If we only have one order field
        #it can contain asc or desc
        #Otherwise is not allowed
        # If we only have one order field
        # it can contain asc or desc
        # Otherwise it is not allowed
        if len(mongo_order) == 1:
            reg_expr = '^(([a-z0-9_]+|"[a-z0-9_]+")( *desc)+( *, *|))+$'
            order_desc = re.compile(reg_expr, re.I)
@@ -375,8 +357,8 @@

    def search(self, cr, user, args, offset=0, limit=0, order=None,
               context=None, count=False):
        #Make a copy of args for working
        #Domain has to be list of lists
        # Make a copy of args for working
        # Domain has to be list of lists
        tmp_args = [isinstance(arg, tuple) and list(arg)
                    or arg for arg in args]
        collection = mdbpool.get_collection(self._table)
@@ -387,22 +369,22 @@ def search(self, cr, user, args, offset=0, limit=0, order=None,
            context = {}
        self.pool.get('ir.model.access').check(cr, user,
                                               self._name, 'read', context=context)
        #Performance problems for counting in mongodb
        #Only count when forcing. Else return limit
        #https://jira.mongodb.org/browse/SERVER-1752
        # Counting is expensive in MongoDB, so only count
        # when explicitly forced; otherwise return the limit
        # https://jira.mongodb.org/browse/SERVER-1752
        if not context.get('force_count', False) and count:
            return limit
        #In very large collections when no args
        #orders all documents prior to return a result
        #so when no filters, order by id that is sure that
        #has an individual index and works very fast
        # In very large collections a query with no args
        # orders all documents before returning a result,
        # so when there are no filters order by _id, which
        # has an individual index and works very fast
        if not args:
            order = 'id'
            order = '_id'

        if count:
            return collection.find(
                new_args,
                {'id': 1},
                {'_id': 1},
                skip=int(offset),
                limit=int(limit),
                timeout=True,
@@ -412,15 +394,15 @@

mongo_cr = collection.find(
new_args,
{'id': 1},
{'_id': 1},
skip=int(offset),
limit=int(limit),
timeout=True,
snapshot=False,
tailable=False,
sort=self._compute_order(cr, user, order))

res = [x['id'] for x in mongo_cr]
res = [str(x['_id']) for x in mongo_cr]

return res
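
Because of the counting cost tracked in SERVER-1752, an exact count is opt-in. A usage sketch (the model name is hypothetical):

    model = pool.get('some.mongo.model')
    # Default: count=True just echoes the limit as a cheap upper bound.
    approx = model.search(cr, uid, [], limit=80, count=True)   # -> 80
    # Opt in to a real server-side count; may be slow on large collections.
    exact = model.search(cr, uid, [], limit=80, count=True,
                         context={'force_count': True})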

@@ -431,16 +413,18 @@ def unlink(self, cr, uid, ids, context=None):

        if not ids:
            return True
        if isinstance(ids, (int, long)):
        if isinstance(ids, basestring):
            ids = [ids]

        ids = [ObjectId(_x) for _x in ids]

        self.pool.get('ir.model.access').check(cr, uid, self._name,
                                               'unlink', context=context)

        # Remove binary fields (files in gridfs)
        self.unlink_binary_gridfs_fields(collection, ids)
        #Remove with safe mode
        collection.remove({'id': {'$in': ids}}, True)
        # Remove with safe mode
        collection.remove({'_id': {'$in': ids}}, True)

        if db.error():
            raise except_orm('MongoDB unlink error', db.error())
@@ -459,26 +443,26 @@ def perm_read(self, cr, user, ids, context=None, details=True):
        if isinstance(ids, (int, long)):
            ids = [ids]

        ids = [ObjectId(x) for x in ids]

        collection = mdbpool.get_collection(self._table)

        fields = ['id', 'create_uid', 'create_date',
        fields = ['_id', 'create_uid', 'create_date',
                  'write_uid', 'write_date']

        res = []
        mongo_cr = collection.find({'id': {'$in': ids}}, fields)
        mongo_cr = collection.find({'_id': {'$in': ids}}, fields)
        res = [x for x in mongo_cr]
        for doc in res:
            docfields = doc.keys()
            for field in fields:
                if field not in docfields:
                    doc[field] = False
                if field in ['create_date', 'write_date']\
                        and doc[field]:
                if field in ['create_date', 'write_date'] and doc[field]:
                    doc[field] = doc[field].strftime('%Y-%m-%d %H:%M:%S')
                if field in ['create_uid', 'write_uid']\
                        and doc[field]:
                if field in ['create_uid', 'write_uid'] and doc[field]:
                    doc[field] = self.pool.get('res.users').name_get(cr,
                                                                     user, [doc[field]])[0]
            doc['id'] = str(doc['_id'])
            del doc['_id']

        return res
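
End to end, ObjectIds stay native inside MongoDB and cross the ORM boundary as plain strings. A compact sketch of the whole cycle under that convention (the model name is hypothetical):

    model = pool.get('some.mongo.model')
    new_id = model.create(cr, uid, {'name': 'x'})        # 24-char hex string
    found = model.search(cr, uid, [('name', '=', 'x')])  # list of hex strings
    record = model.read(cr, uid, new_id, ['name'])       # single dict for a string id
    meta = model.perm_read(cr, uid, [new_id])[0]         # 'id' is the hex string again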