diff --git a/addons/audittrail/audittrail.py b/addons/audittrail/audittrail.py index 7364ea505f6..0debdf11492 100644 --- a/addons/audittrail/audittrail.py +++ b/addons/audittrail/audittrail.py @@ -20,7 +20,7 @@ ############################################################################## from openerp.osv import fields, osv -from openerp.osv.osv import object_proxy +import openerp.service.model from openerp.tools.translate import _ from openerp import pooler import time @@ -171,355 +171,360 @@ class audittrail_log_line(osv.osv): 'field_description': fields.char('Field Description', size=64), } -class audittrail_objects_proxy(object_proxy): - """ Uses Object proxy for auditing changes on object of subscribed Rules""" +# Monkeypatch the model RPC endpoint for auditing changes. - def get_value_text(self, cr, uid, pool, resource_pool, method, field, value): - """ - Gets textual values for the fields. - If the field is a many2one, it returns the name. - If it's a one2many or a many2many, it returns a list of name. - In other cases, it just returns the value. - :param cr: the current row, from the database cursor, - :param uid: the current user’s ID for security checks, - :param pool: current db's pooler object. - :param resource_pool: pooler object of the model which values are being changed. - :param field: for which the text value is to be returned. - :param value: value of the field. - :param recursive: True or False, True will repeat the process recursively - :return: string value or a list of values(for O2M/M2M) - """ +def get_value_text(cr, uid, pool, resource_pool, method, field, value): + """ + Gets textual values for the fields. + If the field is a many2one, it returns the name. + If it's a one2many or a many2many, it returns a list of name. + In other cases, it just returns the value. + :param cr: the current row, from the database cursor, + :param uid: the current user’s ID for security checks, + :param pool: current db's pooler object. 
+ :param resource_pool: pooler object of the model which values are being changed. + :param field: for which the text value is to be returned. + :param value: value of the field. + :param recursive: True or False, True will repeat the process recursively + :return: string value or a list of values(for O2M/M2M) + """ - field_obj = (resource_pool._all_columns.get(field)).column - if field_obj._type in ('one2many','many2many'): - data = pool.get(field_obj._obj).name_get(cr, uid, value) - #return the modifications on x2many fields as a list of names - res = map(lambda x:x[1], data) - elif field_obj._type == 'many2one': - #return the modifications on a many2one field as its value returned by name_get() - res = value and value[1] or value - else: - res = value - return res + field_obj = (resource_pool._all_columns.get(field)).column + if field_obj._type in ('one2many','many2many'): + data = pool.get(field_obj._obj).name_get(cr, uid, value) + #return the modifications on x2many fields as a list of names + res = map(lambda x:x[1], data) + elif field_obj._type == 'many2one': + #return the modifications on a many2one field as its value returned by name_get() + res = value and value[1] or value + else: + res = value + return res - def create_log_line(self, cr, uid, log_id, model, lines=None): - """ - Creates lines for changed fields with its old and new values +def create_log_line(cr, uid, log_id, model, lines=None): + """ + Creates lines for changed fields with its old and new values - @param cr: the current row, from the database cursor, - @param uid: the current user’s ID for security checks, - @param model: Object which values are being changed - @param lines: List of values for line is to be created - """ - if lines is None: - lines = [] - pool = pooler.get_pool(cr.dbname) - obj_pool = pool.get(model.model) - model_pool = pool.get('ir.model') - field_pool = pool.get('ir.model.fields') - log_line_pool = pool.get('audittrail.log.line') - for line in lines: - field_obj = 
obj_pool._all_columns.get(line['name']) - assert field_obj, _("'%s' field does not exist in '%s' model" %(line['name'], model.model)) - field_obj = field_obj.column - old_value = line.get('old_value', '') - new_value = line.get('new_value', '') - search_models = [model.id] - if obj_pool._inherits: - search_models += model_pool.search(cr, uid, [('model', 'in', obj_pool._inherits.keys())]) - field_id = field_pool.search(cr, uid, [('name', '=', line['name']), ('model_id', 'in', search_models)]) - if field_obj._type == 'many2one': - old_value = old_value and old_value[0] or old_value - new_value = new_value and new_value[0] or new_value - vals = { - "log_id": log_id, - "field_id": field_id and field_id[0] or False, - "old_value": old_value, - "new_value": new_value, - "old_value_text": line.get('old_value_text', ''), - "new_value_text": line.get('new_value_text', ''), - "field_description": field_obj.string - } - line_id = log_line_pool.create(cr, uid, vals) - return True - - def log_fct(self, cr, uid_orig, model, method, fct_src, *args, **kw): - """ - Logging function: This function is performing the logging operation - @param model: Object whose values are being changed - @param method: method to log: create, read, write, unlink, action or workflow action - @param fct_src: execute method of Object proxy - - @return: Returns result as per method of Object proxy - """ - pool = pooler.get_pool(cr.dbname) - resource_pool = pool.get(model) - model_pool = pool.get('ir.model') - model_ids = model_pool.search(cr, SUPERUSER_ID, [('model', '=', model)]) - model_id = model_ids and model_ids[0] or False - assert model_id, _("'%s' Model does not exist..." %(model)) - model = model_pool.browse(cr, SUPERUSER_ID, model_id) - - # fields to log. 
currently only used by log on read() - field_list = [] - old_values = new_values = {} - - if method == 'create': - res = fct_src(cr, uid_orig, model.model, method, *args, **kw) - if res: - res_ids = [res] - new_values = self.get_data(cr, uid_orig, pool, res_ids, model, method) - elif method == 'read': - res = fct_src(cr, uid_orig, model.model, method, *args, **kw) - # build the res_ids and the old_values dict. Here we don't use get_data() to - # avoid performing an additional read() - res_ids = [] - for record in res: - res_ids.append(record['id']) - old_values[(model.id, record['id'])] = {'value': record, 'text': record} - # log only the fields read - field_list = args[1] - elif method == 'unlink': - res_ids = args[0] - old_values = self.get_data(cr, uid_orig, pool, res_ids, model, method) - res = fct_src(cr, uid_orig, model.model, method, *args, **kw) - else: # method is write, action or workflow action - res_ids = [] - if args: - res_ids = args[0] - if isinstance(res_ids, (long, int)): - res_ids = [res_ids] - if res_ids: - # store the old values into a dictionary - old_values = self.get_data(cr, uid_orig, pool, res_ids, model, method) - # process the original function, workflow trigger... - res = fct_src(cr, uid_orig, model.model, method, *args, **kw) - if method == 'copy': - res_ids = [res] - if res_ids: - # check the new values and store them into a dictionary - new_values = self.get_data(cr, uid_orig, pool, res_ids, model, method) - # compare the old and new values and create audittrail log if needed - self.process_data(cr, uid_orig, pool, res_ids, model, method, old_values, new_values, field_list) - return res - - def get_data(self, cr, uid, pool, res_ids, model, method): - """ - This function simply read all the fields of the given res_ids, and also recurisvely on - all records of a x2m fields read that need to be logged. Then it returns the result in - convenient structure that will be used as comparison basis. 
- - :param cr: the current row, from the database cursor, - :param uid: the current user’s ID. This parameter is currently not used as every - operation to get data is made as super admin. Though, it could be usefull later. - :param pool: current db's pooler object. - :param res_ids: Id's of resource to be logged/compared. - :param model: Object whose values are being changed - :param method: method to log: create, read, unlink, write, actions, workflow actions - :return: dict mapping a tuple (model_id, resource_id) with its value and textual value - { (model_id, resource_id): { 'value': ... - 'textual_value': ... - }, + @param cr: the current row, from the database cursor, + @param uid: the current user’s ID for security checks, + @param model: Object which values are being changed + @param lines: List of values for line is to be created + """ + if lines is None: + lines = [] + pool = pooler.get_pool(cr.dbname) + obj_pool = pool.get(model.model) + model_pool = pool.get('ir.model') + field_pool = pool.get('ir.model.fields') + log_line_pool = pool.get('audittrail.log.line') + for line in lines: + field_obj = obj_pool._all_columns.get(line['name']) + assert field_obj, _("'%s' field does not exist in '%s' model" %(line['name'], model.model)) + field_obj = field_obj.column + old_value = line.get('old_value', '') + new_value = line.get('new_value', '') + search_models = [model.id] + if obj_pool._inherits: + search_models += model_pool.search(cr, uid, [('model', 'in', obj_pool._inherits.keys())]) + field_id = field_pool.search(cr, uid, [('name', '=', line['name']), ('model_id', 'in', search_models)]) + if field_obj._type == 'many2one': + old_value = old_value and old_value[0] or old_value + new_value = new_value and new_value[0] or new_value + vals = { + "log_id": log_id, + "field_id": field_id and field_id[0] or False, + "old_value": old_value, + "new_value": new_value, + "old_value_text": line.get('old_value_text', ''), + "new_value_text": line.get('new_value_text', 
''), + "field_description": field_obj.string } - """ - data = {} - resource_pool = pool.get(model.model) - # read all the fields of the given resources in super admin mode - for resource in resource_pool.read(cr, SUPERUSER_ID, res_ids): - values = {} - values_text = {} - resource_id = resource['id'] - # loop on each field on the res_ids we just have read - for field in resource: - if field in ('__last_update', 'id'): - continue - values[field] = resource[field] - # get the textual value of that field for this record - values_text[field] = self.get_value_text(cr, SUPERUSER_ID, pool, resource_pool, method, field, resource[field]) + line_id = log_line_pool.create(cr, uid, vals) + return True - field_obj = resource_pool._all_columns.get(field).column - if field_obj._type in ('one2many','many2many'): - # check if an audittrail rule apply in super admin mode - if self.check_rules(cr, SUPERUSER_ID, field_obj._obj, method): - # check if the model associated to a *2m field exists, in super admin mode - x2m_model_ids = pool.get('ir.model').search(cr, SUPERUSER_ID, [('model', '=', field_obj._obj)]) - x2m_model_id = x2m_model_ids and x2m_model_ids[0] or False - assert x2m_model_id, _("'%s' Model does not exist..." 
%(field_obj._obj)) - x2m_model = pool.get('ir.model').browse(cr, SUPERUSER_ID, x2m_model_id) - field_resource_ids = list(set(resource[field])) - if model.model == x2m_model.model: - # we need to remove current resource_id from the many2many to prevent an infinit loop - if resource_id in field_resource_ids: - field_resource_ids.remove(resource_id) - data.update(self.get_data(cr, SUPERUSER_ID, pool, field_resource_ids, x2m_model, method)) - - data[(model.id, resource_id)] = {'text':values_text, 'value': values} - return data +def log_fct(cr, uid_orig, model, method, fct_src, *args, **kw): + """ + Logging function: This function is performing the logging operation + @param model: Object whose values are being changed + @param method: method to log: create, read, write, unlink, action or workflow action + @param fct_src: execute method of Object proxy - def prepare_audittrail_log_line(self, cr, uid, pool, model, resource_id, method, old_values, new_values, field_list=None): - """ - This function compares the old data (i.e before the method was executed) and the new data - (after the method was executed) and returns a structure with all the needed information to - log those differences. + @return: Returns result as per method of Object proxy + """ + pool = pooler.get_pool(cr.dbname) + resource_pool = pool.get(model) + model_pool = pool.get('ir.model') + model_ids = model_pool.search(cr, SUPERUSER_ID, [('model', '=', model)]) + model_id = model_ids and model_ids[0] or False + assert model_id, _("'%s' Model does not exist..." %(model)) + model = model_pool.browse(cr, SUPERUSER_ID, model_id) + + # fields to log. 
currently only used by log on read() + field_list = [] + old_values = new_values = {} + + if method == 'create': + res = fct_src(cr, uid_orig, model.model, method, *args, **kw) + if res: + res_ids = [res] + new_values = get_data(cr, uid_orig, pool, res_ids, model, method) + elif method == 'read': + res = fct_src(cr, uid_orig, model.model, method, *args, **kw) + # build the res_ids and the old_values dict. Here we don't use get_data() to + # avoid performing an additional read() + res_ids = [] + for record in res: + res_ids.append(record['id']) + old_values[(model.id, record['id'])] = {'value': record, 'text': record} + # log only the fields read + field_list = args[1] + elif method == 'unlink': + res_ids = args[0] + old_values = get_data(cr, uid_orig, pool, res_ids, model, method) + res = fct_src(cr, uid_orig, model.model, method, *args, **kw) + else: # method is write, action or workflow action + res_ids = [] + if args: + res_ids = args[0] + if isinstance(res_ids, (long, int)): + res_ids = [res_ids] + if res_ids: + # store the old values into a dictionary + old_values = get_data(cr, uid_orig, pool, res_ids, model, method) + # process the original function, workflow trigger... + res = fct_src(cr, uid_orig, model.model, method, *args, **kw) + if method == 'copy': + res_ids = [res] + if res_ids: + # check the new values and store them into a dictionary + new_values = get_data(cr, uid_orig, pool, res_ids, model, method) + # compare the old and new values and create audittrail log if needed + process_data(cr, uid_orig, pool, res_ids, model, method, old_values, new_values, field_list) + return res + +def get_data(cr, uid, pool, res_ids, model, method): + """ + This function simply read all the fields of the given res_ids, and also recurisvely on + all records of a x2m fields read that need to be logged. Then it returns the result in + convenient structure that will be used as comparison basis. 
:param cr: the current row, from the database cursor, :param uid: the current user’s ID. This parameter is currently not used as every operation to get data is made as super admin. Though, it could be usefull later. :param pool: current db's pooler object. - :param model: model object which values are being changed - :param resource_id: ID of record to which values are being changed + :param res_ids: Id's of resource to be logged/compared. + :param model: Object whose values are being changed :param method: method to log: create, read, unlink, write, actions, workflow actions - :param old_values: dict of values read before execution of the method - :param new_values: dict of values read after execution of the method - :param field_list: optional argument containing the list of fields to log. Currently only - used when performing a read, it could be usefull later on if we want to log the write - on specific fields only. - - :return: dictionary with - * keys: tuples build as ID of model object to log and ID of resource to log - * values: list of all the changes in field values for this couple (model, resource) - return { - (model.id, resource_id): [] - } - - The reason why the structure returned is build as above is because when modifying an existing - record, we may have to log a change done in a x2many field of that object - """ - if field_list is None: - field_list = [] - key = (model.id, resource_id) - lines = { - key: [] - } - # loop on all the fields - for field_name, field_definition in pool.get(model.model)._all_columns.items(): - if field_name in ('__last_update', 'id'): + :return: dict mapping a tuple (model_id, resource_id) with its value and textual value + { (model_id, resource_id): { 'value': ... + 'textual_value': ... 
+ }, + } + """ + data = {} + resource_pool = pool.get(model.model) + # read all the fields of the given resources in super admin mode + for resource in resource_pool.read(cr, SUPERUSER_ID, res_ids): + values = {} + values_text = {} + resource_id = resource['id'] + # loop on each field on the res_ids we just have read + for field in resource: + if field in ('__last_update', 'id'): continue - #if the field_list param is given, skip all the fields not in that list - if field_list and field_name not in field_list: - continue - field_obj = field_definition.column + values[field] = resource[field] + # get the textual value of that field for this record + values_text[field] = get_value_text(cr, SUPERUSER_ID, pool, resource_pool, method, field, resource[field]) + + field_obj = resource_pool._all_columns.get(field).column if field_obj._type in ('one2many','many2many'): - # checking if an audittrail rule apply in super admin mode - if self.check_rules(cr, SUPERUSER_ID, field_obj._obj, method): - # checking if the model associated to a *2m field exists, in super admin mode + # check if an audittrail rule apply in super admin mode + if check_rules(cr, SUPERUSER_ID, field_obj._obj, method): + # check if the model associated to a *2m field exists, in super admin mode x2m_model_ids = pool.get('ir.model').search(cr, SUPERUSER_ID, [('model', '=', field_obj._obj)]) x2m_model_id = x2m_model_ids and x2m_model_ids[0] or False assert x2m_model_id, _("'%s' Model does not exist..." %(field_obj._obj)) x2m_model = pool.get('ir.model').browse(cr, SUPERUSER_ID, x2m_model_id) - # the resource_ids that need to be checked are the sum of both old and previous values (because we - # need to log also creation or deletion in those lists). - x2m_old_values_ids = old_values.get(key, {'value': {}})['value'].get(field_name, []) - x2m_new_values_ids = new_values.get(key, {'value': {}})['value'].get(field_name, []) - # We use list(set(...)) to remove duplicates. 
- res_ids = list(set(x2m_old_values_ids + x2m_new_values_ids)) + field_resource_ids = list(set(resource[field])) if model.model == x2m_model.model: # we need to remove current resource_id from the many2many to prevent an infinit loop - if resource_id in res_ids: - res_ids.remove(resource_id) - for res_id in res_ids: - lines.update(self.prepare_audittrail_log_line(cr, SUPERUSER_ID, pool, x2m_model, res_id, method, old_values, new_values, field_list)) - # if the value value is different than the old value: record the change - if key not in old_values or key not in new_values or old_values[key]['value'][field_name] != new_values[key]['value'][field_name]: - data = { - 'name': field_name, - 'new_value': key in new_values and new_values[key]['value'].get(field_name), - 'old_value': key in old_values and old_values[key]['value'].get(field_name), - 'new_value_text': key in new_values and new_values[key]['text'].get(field_name), - 'old_value_text': key in old_values and old_values[key]['text'].get(field_name) - } - lines[key].append(data) - return lines + if resource_id in field_resource_ids: + field_resource_ids.remove(resource_id) + data.update(get_data(cr, SUPERUSER_ID, pool, field_resource_ids, x2m_model, method)) - def process_data(self, cr, uid, pool, res_ids, model, method, old_values=None, new_values=None, field_list=None): - """ - This function processes and iterates recursively to log the difference between the old - data (i.e before the method was executed) and the new data and creates audittrail log - accordingly. + data[(model.id, resource_id)] = {'text':values_text, 'value': values} + return data - :param cr: the current row, from the database cursor, - :param uid: the current user’s ID, - :param pool: current db's pooler object. - :param res_ids: Id's of resource to be logged/compared. 
- :param model: model object which values are being changed - :param method: method to log: create, read, unlink, write, actions, workflow actions - :param old_values: dict of values read before execution of the method - :param new_values: dict of values read after execution of the method - :param field_list: optional argument containing the list of fields to log. Currently only - used when performing a read, it could be usefull later on if we want to log the write - on specific fields only. - :return: True - """ - if field_list is None: - field_list = [] - # loop on all the given ids - for res_id in res_ids: - # compare old and new values and get audittrail log lines accordingly - lines = self.prepare_audittrail_log_line(cr, uid, pool, model, res_id, method, old_values, new_values, field_list) +def prepare_audittrail_log_line(cr, uid, pool, model, resource_id, method, old_values, new_values, field_list=None): + """ + This function compares the old data (i.e before the method was executed) and the new data + (after the method was executed) and returns a structure with all the needed information to + log those differences. 
- # if at least one modification has been found - for model_id, resource_id in lines: - name = pool.get(model.model).name_get(cr, uid, [resource_id])[0][1] - vals = { - 'method': method, - 'object_id': model_id, - 'user_id': uid, - 'res_id': resource_id, - 'name': name, - } - if (model_id, resource_id) not in old_values and method not in ('copy', 'read'): - # the resource was not existing so we are forcing the method to 'create' - # (because it could also come with the value 'write' if we are creating - # new record through a one2many field) - vals.update({'method': 'create'}) - if (model_id, resource_id) not in new_values and method not in ('copy', 'read'): - # the resource is not existing anymore so we are forcing the method to 'unlink' - # (because it could also come with the value 'write' if we are deleting the - # record through a one2many field) - vals.update({'method': 'unlink'}) - # create the audittrail log in super admin mode, only if a change has been detected - if lines[(model_id, resource_id)]: - log_id = pool.get('audittrail.log').create(cr, SUPERUSER_ID, vals) - model = pool.get('ir.model').browse(cr, uid, model_id) - self.create_log_line(cr, SUPERUSER_ID, log_id, model, lines[(model_id, resource_id)]) - return True + :param cr: the current row, from the database cursor, + :param uid: the current user’s ID. This parameter is currently not used as every + operation to get data is made as super admin. Though, it could be usefull later. + :param pool: current db's pooler object. + :param model: model object which values are being changed + :param resource_id: ID of record to which values are being changed + :param method: method to log: create, read, unlink, write, actions, workflow actions + :param old_values: dict of values read before execution of the method + :param new_values: dict of values read after execution of the method + :param field_list: optional argument containing the list of fields to log. 
Currently only + used when performing a read, it could be usefull later on if we want to log the write + on specific fields only. - def check_rules(self, cr, uid, model, method): - """ - Checks if auditrails is installed for that db and then if one rule match - @param cr: the current row, from the database cursor, - @param uid: the current user’s ID, - @param model: value of _name of the object which values are being changed - @param method: method to log: create, read, unlink,write,actions,workflow actions - @return: True or False - """ - pool = pooler.get_pool(cr.dbname) - if 'audittrail.rule' in pool.models: - model_ids = pool.get('ir.model').search(cr, SUPERUSER_ID, [('model', '=', model)]) - model_id = model_ids and model_ids[0] or False - if model_id: - rule_ids = pool.get('audittrail.rule').search(cr, SUPERUSER_ID, [('object_id', '=', model_id), ('state', '=', 'subscribed')]) - for rule in pool.get('audittrail.rule').read(cr, SUPERUSER_ID, rule_ids, ['user_id','log_read','log_write','log_create','log_unlink','log_action','log_workflow']): - if len(rule['user_id']) == 0 or uid in rule['user_id']: - if rule.get('log_'+method,0): + :return: dictionary with + * keys: tuples build as ID of model object to log and ID of resource to log + * values: list of all the changes in field values for this couple (model, resource) + return { + (model.id, resource_id): [] + } + + The reason why the structure returned is build as above is because when modifying an existing + record, we may have to log a change done in a x2many field of that object + """ + if field_list is None: + field_list = [] + key = (model.id, resource_id) + lines = { + key: [] + } + # loop on all the fields + for field_name, field_definition in pool.get(model.model)._all_columns.items(): + if field_name in ('__last_update', 'id'): + continue + #if the field_list param is given, skip all the fields not in that list + if field_list and field_name not in field_list: + continue + field_obj = 
field_definition.column + if field_obj._type in ('one2many','many2many'): + # checking if an audittrail rule apply in super admin mode + if check_rules(cr, SUPERUSER_ID, field_obj._obj, method): + # checking if the model associated to a *2m field exists, in super admin mode + x2m_model_ids = pool.get('ir.model').search(cr, SUPERUSER_ID, [('model', '=', field_obj._obj)]) + x2m_model_id = x2m_model_ids and x2m_model_ids[0] or False + assert x2m_model_id, _("'%s' Model does not exist..." %(field_obj._obj)) + x2m_model = pool.get('ir.model').browse(cr, SUPERUSER_ID, x2m_model_id) + # the resource_ids that need to be checked are the sum of both old and previous values (because we + # need to log also creation or deletion in those lists). + x2m_old_values_ids = old_values.get(key, {'value': {}})['value'].get(field_name, []) + x2m_new_values_ids = new_values.get(key, {'value': {}})['value'].get(field_name, []) + # We use list(set(...)) to remove duplicates. + res_ids = list(set(x2m_old_values_ids + x2m_new_values_ids)) + if model.model == x2m_model.model: + # we need to remove current resource_id from the many2many to prevent an infinit loop + if resource_id in res_ids: + res_ids.remove(resource_id) + for res_id in res_ids: + lines.update(prepare_audittrail_log_line(cr, SUPERUSER_ID, pool, x2m_model, res_id, method, old_values, new_values, field_list)) + # if the value value is different than the old value: record the change + if key not in old_values or key not in new_values or old_values[key]['value'][field_name] != new_values[key]['value'][field_name]: + data = { + 'name': field_name, + 'new_value': key in new_values and new_values[key]['value'].get(field_name), + 'old_value': key in old_values and old_values[key]['value'].get(field_name), + 'new_value_text': key in new_values and new_values[key]['text'].get(field_name), + 'old_value_text': key in old_values and old_values[key]['text'].get(field_name) + } + lines[key].append(data) + return lines + +def process_data(cr, 
uid, pool, res_ids, model, method, old_values=None, new_values=None, field_list=None): + """ + This function processes and iterates recursively to log the difference between the old + data (i.e before the method was executed) and the new data and creates audittrail log + accordingly. + + :param cr: the current row, from the database cursor, + :param uid: the current user’s ID, + :param pool: current db's pooler object. + :param res_ids: Id's of resource to be logged/compared. + :param model: model object which values are being changed + :param method: method to log: create, read, unlink, write, actions, workflow actions + :param old_values: dict of values read before execution of the method + :param new_values: dict of values read after execution of the method + :param field_list: optional argument containing the list of fields to log. Currently only + used when performing a read, it could be usefull later on if we want to log the write + on specific fields only. + :return: True + """ + if field_list is None: + field_list = [] + # loop on all the given ids + for res_id in res_ids: + # compare old and new values and get audittrail log lines accordingly + lines = prepare_audittrail_log_line(cr, uid, pool, model, res_id, method, old_values, new_values, field_list) + + # if at least one modification has been found + for model_id, resource_id in lines: + name = pool.get(model.model).name_get(cr, uid, [resource_id])[0][1] + vals = { + 'method': method, + 'object_id': model_id, + 'user_id': uid, + 'res_id': resource_id, + 'name': name, + } + if (model_id, resource_id) not in old_values and method not in ('copy', 'read'): + # the resource was not existing so we are forcing the method to 'create' + # (because it could also come with the value 'write' if we are creating + # new record through a one2many field) + vals.update({'method': 'create'}) + if (model_id, resource_id) not in new_values and method not in ('copy', 'read'): + # the resource is not existing anymore so we 
are forcing the method to 'unlink' + # (because it could also come with the value 'write' if we are deleting the + # record through a one2many field) + vals.update({'method': 'unlink'}) + # create the audittrail log in super admin mode, only if a change has been detected + if lines[(model_id, resource_id)]: + log_id = pool.get('audittrail.log').create(cr, SUPERUSER_ID, vals) + model = pool.get('ir.model').browse(cr, uid, model_id) + create_log_line(cr, SUPERUSER_ID, log_id, model, lines[(model_id, resource_id)]) + return True + +def check_rules(cr, uid, model, method): + """ + Checks if auditrails is installed for that db and then if one rule match + @param cr: the current row, from the database cursor, + @param uid: the current user’s ID, + @param model: value of _name of the object which values are being changed + @param method: method to log: create, read, unlink,write,actions,workflow actions + @return: True or False + """ + pool = pooler.get_pool(cr.dbname) + if 'audittrail.rule' in pool.models: + model_ids = pool.get('ir.model').search(cr, SUPERUSER_ID, [('model', '=', model)]) + model_id = model_ids and model_ids[0] or False + if model_id: + rule_ids = pool.get('audittrail.rule').search(cr, SUPERUSER_ID, [('object_id', '=', model_id), ('state', '=', 'subscribed')]) + for rule in pool.get('audittrail.rule').read(cr, SUPERUSER_ID, rule_ids, ['user_id','log_read','log_write','log_create','log_unlink','log_action','log_workflow']): + if len(rule['user_id']) == 0 or uid in rule['user_id']: + if rule.get('log_'+method,0): + return True + elif method not in ('default_get','read','fields_view_get','fields_get','search','search_count','name_search','name_get','get','request_get', 'get_sc', 'unlink', 'write', 'create', 'read_group', 'import_data'): + if rule['log_action']: return True - elif method not in ('default_get','read','fields_view_get','fields_get','search','search_count','name_search','name_get','get','request_get', 'get_sc', 'unlink', 'write', 'create', 
'read_group', 'import_data'): - if rule['log_action']: - return True - def execute_cr(self, cr, uid, model, method, *args, **kw): - fct_src = super(audittrail_objects_proxy, self).execute_cr - if self.check_rules(cr,uid,model,method): - return self.log_fct(cr, uid, model, method, fct_src, *args, **kw) - return fct_src(cr, uid, model, method, *args, **kw) +# Replace the openerp.service.model functions. - def exec_workflow_cr(self, cr, uid, model, method, *args, **kw): - fct_src = super(audittrail_objects_proxy, self).exec_workflow_cr - if self.check_rules(cr,uid,model,'workflow'): - return self.log_fct(cr, uid, model, method, fct_src, *args, **kw) - return fct_src(cr, uid, model, method, *args, **kw) +original_execute_cr = openerp.service.model.execute_cr +original_exec_workflow_cr = openerp.service.model.exec_workflow_cr -audittrail_objects_proxy() +def execute_cr(cr, uid, model, method, *args, **kw): + fct_src = original_execute_cr + if check_rules(cr,uid,model,method): + return log_fct(cr, uid, model, method, fct_src, *args, **kw) + return fct_src(cr, uid, model, method, *args, **kw) + +def exec_workflow_cr(cr, uid, model, method, *args, **kw): + fct_src = original_exec_workflow_cr + if check_rules(cr,uid,model,'workflow'): + return log_fct(cr, uid, model, method, fct_src, *args, **kw) + return fct_src(cr, uid, model, method, *args, **kw) + +openerp.service.model.execute_cr = execute_cr +openerp.service.model.exec_workflow_cr = exec_workflow_cr # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: diff --git a/addons/base_calendar/base_calendar.py b/addons/base_calendar/base_calendar.py index ab0d8d324b4..ca75c9e20fc 100644 --- a/addons/base_calendar/base_calendar.py +++ b/addons/base_calendar/base_calendar.py @@ -24,12 +24,13 @@ from dateutil import parser from dateutil import rrule from dateutil.relativedelta import relativedelta from openerp.osv import fields, osv -from openerp.service import web_services from openerp.tools.translate import _ 
import pytz import re import time + from openerp import tools, SUPERUSER_ID +import openerp.service.report months = { 1: "January", 2: "February", 3: "March", 4: "April", \ @@ -1729,27 +1730,25 @@ class ir_model(osv.osv): ir_model() -class virtual_report_spool(web_services.report_spool): +original_exp_report = openerp.service.report.exp_report - def exp_report(self, db, uid, object, ids, data=None, context=None): - """ - Export Report - @param self: The object pointer - @param db: get the current database, - @param uid: the current user's ID for security checks, - @param context: A standard dictionary for contextual values - """ +def exp_report(db, uid, object, ids, data=None, context=None): + """ + Export Report + @param db: get the current database, + @param uid: the current user's ID for security checks, + @param context: A standard dictionary for contextual values + """ - if object == 'printscreen.list': - return super(virtual_report_spool, self).exp_report(db, uid, \ - object, ids, data, context) - new_ids = [] - for id in ids: - new_ids.append(base_calendar_id2real_id(id)) - if data.get('id', False): - data['id'] = base_calendar_id2real_id(data['id']) - return super(virtual_report_spool, self).exp_report(db, uid, object, new_ids, data, context) + if object == 'printscreen.list': + return original_exp_report(db, uid, object, ids, data, context) + new_ids = [] + for id in ids: + new_ids.append(base_calendar_id2real_id(id)) + if data.get('id', False): + data['id'] = base_calendar_id2real_id(data['id']) + return original_exp_report(db, uid, object, new_ids, data, context) -virtual_report_spool() +openerp.service.report.exp_report = exp_report # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: diff --git a/addons/document/document.py b/addons/document/document.py index 1671b6bc1ba..85468e00509 100644 --- a/addons/document/document.py +++ b/addons/document/document.py @@ -31,11 +31,11 @@ from StringIO import StringIO import psycopg2 import openerp -from 
openerp import netsvc from openerp import pooler from openerp import tools from openerp.osv import fields, osv from openerp.osv.orm import except_orm +import openerp.report.interface from openerp.tools.misc import ustr from openerp.tools.translate import _ from openerp.tools.safe_eval import safe_eval @@ -456,7 +456,7 @@ class document_directory_content(osv.osv): if node.extension != '.pdf': raise Exception("Invalid content: %s" % node.extension) report = self.pool.get('ir.actions.report.xml').browse(cr, uid, node.report_id, context=context) - srv = netsvc.Service._services['report.'+report.report_name] + srv = openerp.report.interface.report_int._reports['report.'+report.report_name] ctx = node.context.context.copy() ctx.update(node.dctx) pdf,pdftype = srv.create(cr, uid, [node.act_id,], {}, context=ctx) diff --git a/addons/document_ftp/ftpserver/abstracted_fs.py b/addons/document_ftp/ftpserver/abstracted_fs.py index a9efac5819c..b746f189700 100644 --- a/addons/document_ftp/ftpserver/abstracted_fs.py +++ b/addons/document_ftp/ftpserver/abstracted_fs.py @@ -10,6 +10,7 @@ import glob import fnmatch from openerp import pooler, netsvc, sql_db +import openerp.service from openerp.service import security from openerp.osv import osv @@ -60,7 +61,7 @@ class abstracted_fs(object): def db_list(self): """Get the list of available databases, with FTPd support """ - s = netsvc.ExportService.getService('db') + s = openerp.service.db result = s.exp_list(document=True) self.db_name_list = [] for db_name in result: diff --git a/addons/document_webdav/dav_fs.py b/addons/document_webdav/dav_fs.py index e5756f5df91..1781a02d76e 100644 --- a/addons/document_webdav/dav_fs.py +++ b/addons/document_webdav/dav_fs.py @@ -38,6 +38,7 @@ except ImportError: import openerp from openerp import pooler, sql_db, netsvc +import openerp.service from openerp.tools import misc from cache import memoize @@ -372,7 +373,7 @@ class openerp_dav_handler(dav_interface): @memoize(4) def _all_db_list(self): 
"""return all databases who have module document_webdav installed""" - s = netsvc.ExportService.getService('db') + s = openerp.service.db result = s.exp_list() self.db_name_list=[] for db_name in result: diff --git a/addons/edi/edi_service.py b/addons/edi/edi_service.py index fd911139170..d5dc5c09f74 100644 --- a/addons/edi/edi_service.py +++ b/addons/edi/edi_service.py @@ -21,45 +21,46 @@ import logging import openerp -import openerp.netsvc as netsvc _logger = logging.getLogger(__name__) -class edi(netsvc.ExportService): +# TODO this is not needed anymore: +# - the exposed new service just forward to the model service +# - the service is called by the web controller, which can +# now directly call into openerp as the web server is always +# embedded in openerp. - def __init__(self, name="edi"): - netsvc.ExportService.__init__(self, name) +def _edi_dispatch(db_name, method_name, *method_args): + try: + registry = openerp.modules.registry.RegistryManager.get(db_name) + assert registry, 'Unknown database %s' % db_name + edi = registry['edi.edi'] + cr = registry.db.cursor() + res = None + res = getattr(edi, method_name)(cr, *method_args) + cr.commit() + except Exception, e: + _logger.exception('Failed to execute EDI method %s with args %r.', + method_name, method_args) + raise + finally: + cr.close() + return res - def _edi_dispatch(self, db_name, method_name, *method_args): - try: - registry = openerp.modules.registry.RegistryManager.get(db_name) - assert registry, 'Unknown database %s' % db_name - edi = registry['edi.edi'] - cr = registry.db.cursor() - res = None - res = getattr(edi, method_name)(cr, *method_args) - cr.commit() - except Exception: - _logger.exception('Failed to execute EDI method %s with args %r.', method_name, method_args) - raise - finally: - cr.close() - return res +def exp_import_edi_document(db_name, uid, passwd, edi_document, context=None): + return _edi_dispatch(db_name, 'import_edi', uid, edi_document, None) - def 
exp_import_edi_document(self, db_name, uid, passwd, edi_document, context=None): - return self._edi_dispatch(db_name, 'import_edi', uid, edi_document, None) +def exp_import_edi_url(db_name, uid, passwd, edi_url, context=None): + return _edi_dispatch(db_name, 'import_edi', uid, None, edi_url) - def exp_import_edi_url(self, db_name, uid, passwd, edi_url, context=None): - return self._edi_dispatch(db_name, 'import_edi', uid, None, edi_url) +@openerp.http.rpc('edi') +def dispatch(method, params): + if method in ['import_edi_document', 'import_edi_url']: + (db, uid, passwd) = params[0:3] + openerp.service.security.check(db, uid, passwd) + else: + raise KeyError("Method not found: %s." % method) + fn = globals()['exp_' + method] + return fn(*params) - def dispatch(self, method, params): - if method in ['import_edi_document', 'import_edi_url']: - (db, uid, passwd ) = params[0:3] - openerp.service.security.check(db, uid, passwd) - else: - raise KeyError("Method not found: %s." % method) - fn = getattr(self, 'exp_'+method) - return fn(*params) - -edi() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: diff --git a/addons/report_webkit/ir_report.py b/addons/report_webkit/ir_report.py index 1385f504928..01b65987a8e 100644 --- a/addons/report_webkit/ir_report.py +++ b/addons/report_webkit/ir_report.py @@ -30,21 +30,22 @@ ############################################################################## from openerp.osv import fields, osv -from openerp import netsvc -from webkit_report import WebKitParser +import openerp.report.interface from openerp.report.report_sxw import rml_parse +from webkit_report import WebKitParser + def register_report(name, model, tmpl_path, parser=rml_parse): """Register the report into the services""" name = 'report.%s' % name - if netsvc.Service._services.get(name, False): - service = netsvc.Service._services[name] + if name in openerp.report.interface.report_int._reports: + service = openerp.report.interface.report_int._reports[name] + if 
isinstance(service, WebKitParser): #already instantiated properly, skip it return if hasattr(service, 'parser'): parser = service.parser - del netsvc.Service._services[name] + del openerp.report.interface.report_int._reports[name] WebKitParser(name, model, tmpl_path, parser=parser)