[IMP] improve cache system

[FIX] cache now takes default parameters into account
[IMP] new way to clear the cache
[FIX] clearing the cache is now done per database

bzr revid: christophe@tinyerp.com-20090105211746-o1fapuu319ozi7nv
This commit is contained in:
Christophe Simonis 2009-01-05 22:17:46 +01:00
parent 390780c216
commit 5a20219f9b
6 changed files with 112 additions and 97 deletions

View File

@ -376,18 +376,18 @@ class ir_model_access(osv.osv):
def write(self, cr, uid, *args, **argv):
self.call_cache_clearing_methods()
res = super(ir_model_access, self).write(cr, uid, *args, **argv)
self.check() # clear the cache of check function
self.check.clear_cache(cr.dbname) # clear the cache of check function
return res
def create(self, cr, uid, *args, **argv):
res = super(ir_model_access, self).create(cr, uid, *args, **argv)
self.check()
self.check.clear_cache(cr.dbname) # clear the cache of check function
return res
def unlink(self, cr, uid, *args, **argv):
self.call_cache_clearing_methods()
res = super(ir_model_access, self).unlink(cr, uid, *args, **argv)
self.check()
self.check.clear_cache(cr.dbname) # clear the cache of check function
return res
ir_model_access()

View File

@ -46,21 +46,19 @@ class ir_rule_group(osv.osv):
def unlink(self, cr, uid, ids, context=None):
res = super(ir_rule_group, self).unlink(cr, uid, ids, context=context)
# Restart the cache on the domain_get method of ir.rule
self.pool.get('ir.rule').domain_get()
self.pool.get('ir.rule').domain_get.clear_cache(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
res = super(ir_rule_group, self).create(cr, user, vals, context=context)
# Restart the cache on the domain_get method of ir.rule
self.pool.get('ir.rule').domain_get()
self.pool.get('ir.rule').domain_get.clear_cache(cr.dbname)
return res
def write(self, cr, uid, ids, vals, context=None):
if not context:
context={}
res = super(ir_rule_group, self).write(cr, uid, ids, vals, context=context)
# Restart the cache on the domain_get method of ir.rule
self.pool.get('ir.rule').domain_get()
self.pool.get('ir.rule').domain_get.clear_cache(cr.dbname)
return res
ir_rule_group()
@ -213,13 +211,13 @@ class ir_rule(osv.osv):
def unlink(self, cr, uid, ids, context=None):
res = super(ir_rule, self).unlink(cr, uid, ids, context=context)
# Restart the cache on the domain_get method of ir.rule
self.domain_get()
self.domain_get.clear_cache(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
res = super(ir_rule, self).create(cr, user, vals, context=context)
# Restart the cache on the domain_get method of ir.rule
self.domain_get()
self.domain_get.clear_cache(cr.dbname)
return res
def write(self, cr, uid, ids, vals, context=None):
@ -227,7 +225,7 @@ class ir_rule(osv.osv):
context={}
res = super(ir_rule, self).write(cr, uid, ids, vals, context=context)
# Restart the cache on the domain_get method
self.domain_get()
self.domain_get.clear_cache(cr.dbname)
return res
ir_rule()

View File

@ -68,24 +68,21 @@ class res_company(osv.osv):
descendance = self._get_partner_descendance(cr, uid, child_id, descendance)
return descendance
def __init__(self, *args, **argv):
return super(res_company, self).__init__(*args, **argv)
#
# This function restart the cache on the _get_company_children method
#
def cache_restart(self, uid=None):
self._get_company_children()
def cache_restart(self, cr):
self._get_company_children.clear_cache(cr.dbname)
def create(self, *args, **argv):
self.cache_restart()
return super(res_company, self).create(*args, **argv)
def create(self, cr, *args, **argv):
self.cache_restart(cr)
return super(res_company, self).create(cr, *args, **argv)
def write(self, *args, **argv):
self.cache_restart()
def write(self, cr, *args, **argv):
self.cache_restart(cr)
# Restart the cache on the company_get method
self.pool.get('ir.rule').domain_get()
return super(res_company, self).write(*args, **argv)
self.pool.get('ir.rule').domain_get.clear_cache(cr.dbname)
return super(res_company, self).write(cr, *args, **argv)
def _get_euro(self, cr, uid, context={}):
try:

View File

@ -47,8 +47,8 @@ class groups(osv.osv):
_('The name of the group can not start with "-"'))
res = super(groups, self).write(cr, uid, ids, vals, context=context)
# Restart the cache on the company_get method
self.pool.get('ir.rule').domain_get()
self.pool.get('ir.model.access').check()
self.pool.get('ir.rule').domain_get.clear_cache(cr.dbname)
self.pool.get('ir.model.access').check.clear_cache(cr.dbname)
return res
def create(self, cr, uid, vals, context=None):
@ -165,9 +165,9 @@ class users(osv.osv):
if ok:
uid = 1
res = super(users, self).write(cr, uid, ids, values, *args, **argv)
self.company_get()
self.company_get.clear_cache(cr.dbname)
# Restart the cache on the company_get method
self.pool.get('ir.rule').domain_get()
self.pool.get('ir.rule').domain_get.clear_cache(cr.dbname)
return res
def unlink(self, cr, uid, ids):

View File

@ -228,7 +228,7 @@ def db_connect(db_name, serialize=0):
def close_db(db_name):
PoolManager.close(db_name)
tools.cache.clean_cache_for_db(db_name)
tools.cache.clean_caches_for_db(db_name)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

View File

@ -549,26 +549,74 @@ class cache(object):
self.multi = multi
self.lasttime = time.time()
self.cache = {}
self.fun = None
cache.__caches.append(self)
@classmethod
def clean_cache_for_db(cls, dbname):
def get_dbname_from_key(key):
for e in key:
if e[0] == 'dbname':
return e[1]
return None
def _generate_keys(self, dbname, kwargs2):
"""
Generate keys depending of the arguments and the self.mutli value
"""
def to_tuple(d):
i = d.items()
i.sort()
return tuple(i)
for cache in cls.__caches:
keys_to_del = [key for key in cache.cache if get_dbname_from_key(key) == dbname]
for key in keys_to_del:
del cache.cache[key]
if not self.multi:
key = (('dbname', dbname),) + to_tuple(kwargs2)
yield key, None
else:
multis = kwargs2[self.multi][:]
for id in multis:
kwargs2[self.multi] = [id]
key = (('dbname', dbname),) + to_tuple(kwargs2)
yield key, id
def _unify_args(self, *args, **kwargs):
# Update named arguments with positional argument values (without self and cr)
kwargs2 = self.fun_default_values.copy()
kwargs2.update(kwargs)
kwargs2.update(dict(zip(self.fun_arg_names, args[self.skiparg-2:])))
for k in kwargs2:
if isinstance(kwargs2[k], (list, dict, set)):
kwargs2[k] = tuple(kwargs2[k])
elif not is_hashable(kwargs2[k]):
kwargs2[k] = repr(kwargs2[k])
return kwargs2
def clear(self, dbname, *args, **kwargs):
"""clear the cache for database dbname
if *args and **kwargs are both empty, clear all the keys related to this database
"""
if not args and not kwargs:
keys_to_del = [key for key in self.cache if key[0][1] == dbname]
else:
kwargs2 = self._unify_args(*args, **kwargs)
keys_to_del = [key for key, _ in self._generate_keys(dbname, kwargs2) if key in self.cache]
for key in keys_to_del:
del self.cache[key]
@classmethod
def clean_caches_for_db(cls, dbname):
for c in cls.__caches:
c.clear(dbname)
def __call__(self, fn):
arg_names = inspect.getargspec(fn)[0][self.skiparg:]
if self.fun is not None:
raise Exception("Can not use a cache instance on more than one function")
self.fun = fn
def cached_result(self2, cr=None, *args, **kwargs):
argspec = inspect.getargspec(fn)
self.fun_arg_names = argspec[0][self.skiparg:]
self.fun_default_values = {}
if argspec[3]:
self.fun_default_values = dict(zip(self.fun_arg_names[-len(argspec[3]):], argspec[3]))
debug(self.fun_default_values)
def cached_result(self2, cr, *args, **kwargs):
if time.time()-self.timeout > self.lasttime:
self.lasttime = time.time()
t = time.time()-self.timeout
@ -576,64 +624,36 @@ class cache(object):
if self.cache[key][1]<t:
del self.cache[key]
if cr is None:
self.cache = {}
return True
if ('clear_keys' in kwargs):
if (kwargs['clear_keys'] in self.cache):
del self.cache[kwargs['clear_keys']]
return True
kwargs2 = self._unify_args(*args, **kwargs)
# Update named arguments with positional argument values (without self and cr)
kwargs2 = kwargs.copy()
kwargs2.update(dict(zip(arg_names, args[self.skiparg-2:])))
for k in kwargs2:
if isinstance(kwargs2[k], (list, dict, set)):
kwargs2[k] = tuple(kwargs2[k])
elif not is_hashable(kwargs2[k]):
kwargs2[k] = repr(kwargs2[k])
if self.multi:
kwargs3 = kwargs2.copy()
notincache = []
result = {}
for id in kwargs3[self.multi]:
kwargs2[self.multi] = [id]
kwargs4 = kwargs2.items()
kwargs4.sort()
# Work out key as a tuple of ('argname', value) pairs
key = (('dbname', cr.dbname),) + tuple(kwargs4)
if key in self.cache:
result[id] = self.cache[key][0]
else:
notincache.append(id)
if notincache:
kwargs2[self.multi] = notincache
result2 = fn(self2, cr, *args[2:self.skip], **kwargs3)
result = {}
notincache = {}
for key, id in self._generate_keys(cr.dbname, kwargs2):
if key in self.cache:
result[id] = self.cache[key][0]
else:
notincache[id] = key
if notincache:
if self.multi:
kwargs2[self.multi] = notincache.keys()
result2 = fn(self2, cr, *args[2:self.skiparg], **kwargs2)
if not self.multi:
key = notincache[None]
self.cache[key] = (result2, time.time())
result[None] = result2
else:
for id in result2:
kwargs2[self.multi] = [id]
kwargs4 = kwargs2.items()
kwargs4.sort()
key = (('dbname', cr.dbname),) + tuple(kwargs4)
self.cache[key] = result2[id]
result.updat(result2)
return result
kwargs2 = kwargs2.items()
kwargs2.sort()
key = (('dbname', cr.dbname),) + tuple(kwargs2)
if key in self.cache:
return self.cache[key][0]
result = fn(self2, cr, *args, **kwargs)
self.cache[key] = (result, time.time())
key = notincache[id]
self.cache[key] = (result2[id], time.time())
result.update(result2)
if not self.multi:
return result[None]
return result
cached_result.clear_cache = self.clear
return cached_result
def to_xml(s):