# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

#.apidoc title: Object Relational Mapping
#.apidoc module-mods: member-order: bysource

"""
  Object relational mapping to database (postgresql) module
     * Hierarchical structure
     * Constraints consistency, validations
     * Object metadata depends on its status
     * Optimised processing by complex query (multiple actions at once)
     * Default field values
     * Permissions optimisation
     * Persistent object: DB postgresql
     * Data conversions
     * Multi-level caching system
     * Two different inheritance mechanisms
     * Fields:
          - classical (varchar, integer, boolean, ...)
          - relational (one2many, many2one, many2many)
          - functions

"""

import calendar
import copy
import datetime
import itertools
import logging
import operator
import pickle
import re
import time
import types
import warnings

import psycopg2
import simplejson
from lxml import etree

import fields
import openerp
import openerp.netsvc as netsvc
import openerp.tools as tools
from openerp.tools.config import config
from openerp.tools.misc import CountingStream
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from query import Query

_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')

# List of etree._Element subclasses that we choose to ignore when parsing XML.
from openerp.tools import SKIPPED_ELEMENT_TYPES

regex_order = re.compile('^(([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')


def transfer_field_to_modifiers(field, modifiers):
    default_values = {}
    state_exceptions = {}
    for attr in ('invisible', 'readonly', 'required'):
        state_exceptions[attr] = []
        default_values[attr] = bool(field.get(attr))
    for state, modifs in (field.get("states", {})).items():
        for modif in modifs:
            if default_values[modif[0]] != modif[1]:
                state_exceptions[modif[0]].append(state)

    for attr, default_value in default_values.items():
        if state_exceptions[attr]:
            modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
        else:
            modifiers[attr] = default_value


# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
    if node.get('attrs'):
        modifiers.update(eval(node.get('attrs')))

    if node.get('states'):
        if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
            # TODO combine with AND or OR, use implicit AND for now.
            modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
        else:
            modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]

    for a in ('invisible', 'readonly', 'required'):
        if node.get(a):
            v = bool(eval(node.get(a), {'context': context or {}}))
            if in_tree_view and a == 'invisible':
                # Invisible in a tree view has a specific meaning, make it a
                # new key in the modifiers attribute.
                modifiers['tree_invisible'] = v
            elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
                # Don't set the attribute to False if a dynamic value was
                # provided (i.e. a domain from attrs or states).
                modifiers[a] = v


def simplify_modifiers(modifiers):
    for a in ('invisible', 'readonly', 'required'):
        if a in modifiers and not modifiers[a]:
            del modifiers[a]


def transfer_modifiers_to_node(modifiers, node):
    if modifiers:
        simplify_modifiers(modifiers)
        node.set('modifiers', simplejson.dumps(modifiers))


def setup_modifiers(node, field=None, context=None, in_tree_view=False):
    """ Processes node attributes and field descriptors to generate
    the ``modifiers`` node attribute and set it on the provided node.

    Alters its first argument in-place.

    :param node: ``field`` node from an OpenERP view
    :type node: lxml.etree._Element
    :param dict field: field descriptor corresponding to the provided node
    :param dict context: execution context used to evaluate node attributes
    :param bool in_tree_view: triggers the ``tree_invisible`` code
                              path (separate from ``invisible``): in
                              tree view there are two levels of
                              invisibility, cell content (a column is
                              present but the cell itself is not
                              displayed) with ``invisible`` and column
                              invisibility (the whole column is
                              hidden) with ``tree_invisible``.
    :returns: nothing
    """
    modifiers = {}
    if field is not None:
        transfer_field_to_modifiers(field, modifiers)
    transfer_node_to_modifiers(
        node, modifiers, context=context, in_tree_view=in_tree_view)
    transfer_modifiers_to_node(modifiers, node)


def test_modifiers(what, expected):
    modifiers = {}
    if isinstance(what, basestring):
        node = etree.fromstring(what)
        transfer_node_to_modifiers(node, modifiers)
        simplify_modifiers(modifiers)
        json = simplejson.dumps(modifiers)
        assert json == expected, "%s != %s" % (json, expected)
    elif isinstance(what, dict):
        transfer_field_to_modifiers(what, modifiers)
        simplify_modifiers(modifiers)
        json = simplejson.dumps(modifiers)
        assert json == expected, "%s != %s" % (json, expected)


# To use this test:
# import openerp
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
    test_modifiers('<field name="a"/>', '{}')
    test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}')
    test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}')
    test_modifiers('<field name="a" required="1"/>', '{"required": true}')
    test_modifiers('<field name="a" invisible="0"/>', '{}')
    test_modifiers('<field name="a" readonly="0"/>', '{}')
    test_modifiers('<field name="a" required="0"/>', '{}')
    test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed
    test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}')
    test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}')
    test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}')

    # The dictionary is supposed to be the result of fields_get().
    test_modifiers({}, '{}')
    test_modifiers({"invisible": True}, '{"invisible": true}')
    test_modifiers({"invisible": False}, '{}')
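# Editor's note: a minimal usage sketch of the helpers above. The node and
# field descriptor are illustrative only; `field` mimics a fields_get()
# result with a per-state override.
#
#   node = etree.fromstring('<field name="state"/>')
#   field = {'readonly': True, 'states': {'draft': [('readonly', False)]}}
#   setup_modifiers(node, field=field)
#   node.get('modifiers')
#   # -> '{"readonly": [["state", "not in", ["draft"]]]}'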
def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory objects is subject to
        some restrictions. This function returns True if the given name
        is allowed, False otherwise.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in others we don't, which leads to this
        kind of errors:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.

    """
    if regex_object_name.match(name) is None:
        return False
    return True


def raise_on_invalid_object_name(name):
    if not check_object_name(name):
        msg = "The _name attribute %s is not valid." % name
        _logger.error(msg)
        raise except_orm('ValueError', msg)


POSTGRES_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}


def intersect(la, lb):
    return filter(lambda x: x in lb, la)


def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in imports and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
    return fixed_external_id.split('/')
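# Editor's note: illustrative examples of the path fixing above.
#
#   fix_import_export_id_paths('order_line/product_id:id')
#   # -> ['order_line', 'product_id', 'id']
#   fix_import_export_id_paths('order_line.id')
#   # -> ['order_line', '.id']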
class except_orm(Exception):
    def __init__(self, name, value):
        self.name = name
        self.value = value
        self.args = (name, value)


class BrowseRecordError(Exception):
    pass


class browse_null(object):
    """ Readonly python database object browser
    """

    def __init__(self):
        self.id = False

    def __getitem__(self, name):
        return None

    def __getattr__(self, name):
        return None  # XXX: return self ?

    def __int__(self):
        return False

    def __str__(self):
        return ''

    def __nonzero__(self):
        return False

    def __unicode__(self):
        return u''


#
# TODO: execute an object method on browse_record_list
#
class browse_record_list(list):
    """ Collection of browse objects

        Such an instance will be returned when doing a ``browse([ids..])``
        and will be iterable, yielding browse() objects
    """

    def __init__(self, lst, context=None):
        if not context:
            context = {}
        super(browse_record_list, self).__init__(lst)
        self.context = context


class browse_record(object):
    """ An object that behaves like a row of an object's table.
        It has attributes named after the columns of the corresponding object.

        Examples::

            uobj = pool.get('res.users')
            user_rec = uobj.browse(cr, uid, 104)
            name = user_rec.name
    """

    def __init__(self, cr, uid, id, table, cache, context=None,
                 list_class=browse_record_list, fields_process=None):
        """
        :param table: the browsed object (inherited from orm)
        :param dict cache: a dictionary of model->field->data to be shared
                           across browse objects, thus reducing the SQL
                           read()s. It can speed up things a lot, but also be
                           disastrous if not discarded after write()/unlink()
                           operations
        :param dict context: dictionary with an optional context
        """
        if fields_process is None:
            fields_process = {}
        if context is None:
            context = {}
        self._list_class = list_class
        self._cr = cr
        self._uid = uid
        self._id = id
        self._table = table # deprecated, use _model!
        self._model = table
        self._table_name = self._table._name
        self.__logger = logging.getLogger('openerp.osv.orm.browse_record.' + self._table_name)
        self._context = context
        self._fields_process = fields_process

        cache.setdefault(table._name, {})
        self._data = cache[table._name]

#        if not (id and isinstance(id, (int, long,))):
#            raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
#        if not table.exists(cr, uid, id, context):
#            raise BrowseRecordError(_('Object %s does not exists') % (self,))

        if id not in self._data:
            self._data[id] = {'id': id}

        self._cache = cache

    def __getitem__(self, name):
        if name == 'id':
            return self._id

        if name not in self._data[self._id]:
            # build the list of fields we will fetch

            # fetch the definition of the field which was asked for
            if name in self._table._columns:
                col = self._table._columns[name]
            elif name in self._table._inherit_fields:
                col = self._table._inherit_fields[name][2]
            elif hasattr(self._table, str(name)):
                attr = getattr(self._table, name)
                if isinstance(attr, (types.MethodType, types.LambdaType, types.FunctionType)):
                    def function_proxy(*args, **kwargs):
                        if 'context' not in kwargs and self._context:
                            kwargs.update(context=self._context)
                        return attr(self._cr, self._uid, [self._id], *args, **kwargs)
                    return function_proxy
                else:
                    return attr
            else:
                error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
                self.__logger.warning(error_msg)
                raise KeyError(error_msg)

            # if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
            if col._prefetch:
                # gen the list of "local" (i.e. not inherited) fields which are classic or many2one
                fields_to_fetch = filter(lambda x: x[1]._classic_write, self._table._columns.items())
                # gen the list of inherited fields
                inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
                # complete the field list with the inherited fields which are classic or many2one
                fields_to_fetch += filter(lambda x: x[1]._classic_write, inherits)
            # otherwise we fetch only that field
            else:
                fields_to_fetch = [(name, col)]

            ids = filter(lambda id: name not in self._data[id], self._data.keys())
            # read the results
            field_names = map(lambda x: x[0], fields_to_fetch)
            field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")

            # TODO: improve this, very slow for reports
            if self._fields_process:
                lang = self._context.get('lang', 'en_US') or 'en_US'
                lang_obj_ids = self.pool.get('res.lang').search(self._cr, self._uid, [('code', '=', lang)])
                if not lang_obj_ids:
                    raise Exception(_('Language with code "%s" is not defined in your system !\nDefine it through the Administration menu.') % (lang,))
                lang_obj = self.pool.get('res.lang').browse(self._cr, self._uid, lang_obj_ids[0])

                for field_name, field_column in fields_to_fetch:
                    if field_column._type in self._fields_process:
                        for result_line in field_values:
                            result_line[field_name] = self._fields_process[field_column._type](result_line[field_name])
                            if result_line[field_name]:
                                result_line[field_name].set_value(self._cr, self._uid, result_line[field_name], self, field_column, lang_obj)

            if not field_values:
                # Where did those ids come from? Perhaps old entries in ir_model_data?
                _logger.warning("No field_values found for ids %s in %s", ids, self)
                raise KeyError('Field %s not found in %s' % (name, self))

            # create browse records for 'remote' objects
            for result_line in field_values:
                new_data = {}
                for field_name, field_column in fields_to_fetch:
                    if field_column._type == 'many2one':
                        if result_line[field_name]:
                            obj = self._table.pool.get(field_column._obj)
                            if isinstance(result_line[field_name], (list, tuple)):
                                value = result_line[field_name][0]
                            else:
                                value = result_line[field_name]
                            if value:
                                # FIXME: this happens when an _inherits object
                                #        overrides a field of its parent. Needs
                                #        testing to be sure we get the right
                                #        object and not the parent one.
                                if not isinstance(value, browse_record):
                                    if obj is None:
                                        # In some cases the target model is not available yet, so we must ignore it,
                                        # which is safe in most cases, this value will just be loaded later when needed.
                                        # This situation can be caused by custom fields that connect objects with m2o without
                                        # respecting module dependencies, causing relationships to be connected too soon when
                                        # the target is not loaded yet.
                                        continue
                                    new_data[field_name] = browse_record(self._cr,
                                        self._uid, value, obj, self._cache,
                                        context=self._context,
                                        list_class=self._list_class,
                                        fields_process=self._fields_process)
                                else:
                                    new_data[field_name] = value
                            else:
                                new_data[field_name] = browse_null()
                        else:
                            new_data[field_name] = browse_null()
                    elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
                        new_data[field_name] = self._list_class(
                            [browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj),
                                           self._cache, context=self._context, list_class=self._list_class,
                                           fields_process=self._fields_process)
                             for id in result_line[field_name]],
                            self._context)
                    elif field_column._type == 'reference':
                        if result_line[field_name]:
                            if isinstance(result_line[field_name], browse_record):
                                new_data[field_name] = result_line[field_name]
                            else:
                                ref_obj, ref_id = result_line[field_name].split(',')
                                ref_id = long(ref_id)
                                if ref_id:
                                    obj = self._table.pool.get(ref_obj)
                                    new_data[field_name] = browse_record(self._cr, self._uid, ref_id, obj, self._cache,
                                                                         context=self._context,
                                                                         list_class=self._list_class,
                                                                         fields_process=self._fields_process)
                                else:
                                    new_data[field_name] = browse_null()
                        else:
                            new_data[field_name] = browse_null()
                    else:
                        new_data[field_name] = result_line[field_name]
                self._data[result_line['id']].update(new_data)

            if not name in self._data[self._id]:
                # How did this happen? Could be a missing model due to custom fields used too soon, see above.
                self.__logger.error("Fields to fetch: %s, Field values: %s", field_names, field_values)
                self.__logger.error("Cached: %s, Table: %s", self._data[self._id], self._table)
                raise KeyError(_('Unknown attribute %s in %s ') % (name, self))

        return self._data[self._id][name]

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError, e:
            raise AttributeError(e)

    def __contains__(self, name):
        return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)

    def __iter__(self):
        raise NotImplementedError("Iteration is not allowed on %s" % self)

    def __hasattr__(self, name):
        return name in self

    def __int__(self):
        return self._id

    def __str__(self):
        return "browse_record(%s, %d)" % (self._table_name, self._id)

    def __eq__(self, other):
        if not isinstance(other, browse_record):
            return False
        return (self._table_name, self._id) == (other._table_name, other._id)

    def __ne__(self, other):
        if not isinstance(other, browse_record):
            return True
        return (self._table_name, self._id) != (other._table_name, other._id)

    # we need to define __unicode__ even though we've already defined __str__
    # because we have overridden __getattr__
    def __unicode__(self):
        return unicode(str(self))

    def __hash__(self):
        return hash((self._table_name, self._id))

    __repr__ = __str__

    def refresh(self):
        """Force refreshing this browse_record's data and all the data of the
           records that belong to the same cache, by emptying the cache completely,
           preserving only the record identifiers (for prefetching optimizations).
        """
        for model, model_cache in self._cache.iteritems():
            # only preserve the ids of the records that were in the cache
            cached_ids = dict([(i, {'id': i}) for i in model_cache.keys()])
            self._cache[model].clear()
            self._cache[model].update(cached_ids)
def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size) is provided, return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :param int size: varchar size, optional
    :rtype: str
    """
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s" % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    return 'VARCHAR'

FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.text: 'text',
    fields.html: 'text',
    fields.date: 'date',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}

def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    :rtype: (str, str)
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        if f.digits:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type


class MetaModel(type):
    """ Metaclass for the Model.

    This class is used as the metaclass for the Model class to discover
    the models defined in a module (i.e. without instantiating them).
    If the automatic discovery is not needed, it is possible to set the
    model's _register attribute to False.

    """

    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        # The (OpenERP) module name can be in the `openerp.addons` namespace
        # or not. For instance module `sale` can be imported as
        # `openerp.addons.sale` (the good way) or `sale` (for backward
        # compatibility).
        module_parts = self.__module__.split('.')
        if len(module_parts) > 2 and module_parts[0] == 'openerp' and \
                module_parts[1] == 'addons':
            module_name = self.__module__.split('.')[2]
        else:
            module_name = self.__module__.split('.')[0]

        if not hasattr(self, '_module'):
            self._module = module_name

        # Remember which models to instantiate for this module.
        self.module_to_models.setdefault(self._module, []).append(self)


# Definition of log access columns, automatically added to models if
# self._log_access is True
LOG_ACCESS_COLUMNS = {
    'create_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
    'create_date': 'TIMESTAMP',
    'write_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
    'write_date': 'TIMESTAMP'
}
# special columns automatically created by the ORM
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS.keys()
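# Editor's note: a minimal sketch of what the metaclass machinery above
# enables; the model and field names are illustrative only.
#
#   from openerp.osv import osv, fields
#
#   class library_book(osv.Model):
#       _name = 'library.book'   # picked up by MetaModel at import time
#       _columns = {
#           'name': fields.char('Title', size=128, required=True),
#       }
#
#   # _log_access defaults to _auto (True here), so the LOG_ACCESS_COLUMNS
#   # (create_uid, create_date, write_uid, write_date) are added automatically.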
class BaseModel(object):
    """ Base class for OpenERP models.

    OpenERP models are created by inheriting from this class' subclasses:

        * Model: for regular database-persisted models
        * TransientModel: for temporary data, stored in the database but
          automatically vacuumed every so often
        * AbstractModel: for abstract super classes meant to be shared by
          multiple _inheriting classes (usually Models or TransientModels)

    The system will later instantiate the class once per database (on
    which the class' module is installed).

    To create a class that should not be instantiated, the _register class
    attribute may be set to False.
    """
    __metaclass__ = MetaModel
    _auto = True # create database backend
    _register = False # Set to false if the model shouldn't be automatically discovered.
    _name = None
    _columns = {}
    _constraints = []
    _defaults = {}
    _rec_name = None
    _parent_name = 'parent_id'
    _parent_store = False
    _parent_order = False
    _date_name = 'date'
    _order = 'id'
    _sequence = None
    _description = None
    _needaction = False

    # dict of {field: method}, with method returning the (name_get of records,
    # {id: fold}) to include in the _read_group, if grouped on this field
    _group_by_full = {}

    # Transience
    _transient = False # True in a TransientModel
    _transient_max_count = None
    _transient_max_hours = None
    _transient_check_time = 20

    # structure:
    #  { 'parent_model': 'm2o_field', ... }
    _inherits = {}

    # Mapping from inherits'd field name to quadruple (m, r, f, n) where m is
    # the model from which it is inherits'd, r is the (local) field towards m,
    # f is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # Example:
    #  { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                   field_column_obj, original_parent_model), ... }
    _inherit_fields = {}

    # Mapping of field name to column_info object.
    # This is similar to _inherit_fields but:
    # 1. includes self fields,
    # 2. uses column_info instead of a quadruple.
    _all_columns = {}

    _table = None
    _invalids = set()
    _log_create = False
    _sql_constraints = []
    _protected = ['read', 'write', 'create', 'default_get', 'perm_read',
        'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get',
        'distinct_field_get', 'name_search', 'copy', 'import_data',
        'search_count', 'exists']

    CONCURRENCY_CHECK_FIELD = '__last_update'

    def log(self, cr, uid, id, message, secondary=False, context=None):
        return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")

    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened."""
        pass

    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).

        """
        if context is None:
            context = {}
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        if not cr.rowcount:
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id, model, name, info, state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
        else:
            model_id = cr.fetchone()[0]
        if 'module' in context:
            name_id = 'model_'+self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
            if not cr.rowcount:
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                    (name_id, context['module'], 'ir.model', model_id)
                )

        cr.commit()

        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        cols = {}
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse field should be created at the end, as it depends on its serialized field already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
            vals = {
                'model_id': model_id,
                'model': self._name,
                'name': k,
                'field_description': f.string,
                'ttype': f._type,
                'relation': f._obj or '',
                'view_load': (f.view_load and 1) or 0,
                'select_level': tools.ustr(f.select or 0),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
                'serialization_field_id': None,
            }
            if getattr(f, 'serialization_field', None):
                # resolve link to serialization_field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model', '=', vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When it's a custom field, it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0') # setting value to let the problem NOT occur next time
                elif k in cols:
                    vals['select_level'] = cols[k]['select_level']

            if k not in cols:
                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                vals['id'] = id
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation, view_load, state, select_level, relation_field,
                    translate, serialization_field_id
                ) VALUES (
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                )""", (
                    id, vals['model_id'], vals['model'], vals['name'],
                    vals['field_description'], vals['ttype'],
                    vals['relation'], bool(vals['view_load']), 'base',
                    vals['select_level'], vals['relation_field'],
                    bool(vals['translate']), vals['serialization_field_id']
                ))
                if 'module' in context:
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                    if cr.fetchone():
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
                    )
            else:
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.commit()
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            view_load=%s, select_level=%s, readonly=%s, required=%s,
                            selectable=%s, relation_field=%s, translate=%s,
                            serialization_field_id=%s
                        WHERE
                            model=%s AND name=%s""", (
                                vals['model_id'], vals['field_description'], vals['ttype'],
                                vals['relation'], bool(vals['view_load']),
                                vals['select_level'], bool(vals['readonly']),
                                bool(vals['required']), bool(vals['selectable']),
                                vals['relation_field'], bool(vals['translate']),
                                vals['serialization_field_id'], vals['model'], vals['name']
                            ))
                        break
        cr.commit()
""" attributes = ['_columns', '_defaults', '_inherits', '_constraints', '_sql_constraints'] parent_names = getattr(cls, '_inherit', None) if parent_names: if isinstance(parent_names, (str, unicode)): name = cls._name or parent_names parent_names = [parent_names] else: name = cls._name # for res.parnter.address compatiblity, should be remove in v7 if 'res.partner.address' in parent_names: parent_names.pop(parent_names.index('res.partner.address')) parent_names.append('res.partner') if not name: raise TypeError('_name is mandatory in case of multiple inheritance') for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]): parent_model = pool.get(parent_name) if not parent_model: raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n' 'You may need to add a dependency on the parent class\' module.' % (name, parent_name)) if not getattr(cls, '_original_module', None) and name == parent_model._name: cls._original_module = parent_model._original_module parent_class = parent_model.__class__ nattr = {} for s in attributes: new = copy.copy(getattr(parent_model, s, {})) if s == '_columns': # Don't _inherit custom fields. for c in new.keys(): if new[c].manual: del new[c] # Duplicate float fields because they have a .digits # cache (which must be per-registry, not server-wide). for c in new.keys(): if new[c]._type == 'float': new[c] = copy.copy(new[c]) if hasattr(new, 'update'): new.update(cls.__dict__.get(s, {})) elif s=='_constraints': for c in cls.__dict__.get(s, []): exist = False for c2 in range(len(new)): #For _constraints, we should check field and methods as well if new[c2][2]==c[2] and (new[c2][0] == c[0] \ or getattr(new[c2][0],'__name__', True) == \ getattr(c[0],'__name__', False)): # If new class defines a constraint with # same function name, we let it override # the old one. new[c2] = c exist = True break if not exist: new.append(c) else: new.extend(cls.__dict__.get(s, [])) nattr[s] = new cls = type(name, (cls, parent_class), dict(nattr, _register=False)) if not getattr(cls, '_original_module', None): cls._original_module = cls._module obj = object.__new__(cls) obj.__init__(pool, cr) return obj def __new__(cls): """Register this model. This doesn't create an instance but simply register the model as being part of the module where it is defined. """ # Set the module name (e.g. base, sale, accounting, ...) on the class. module = cls.__module__.split('.')[0] if not hasattr(cls, '_module'): cls._module = module # Record this class in the list of models to instantiate for this module, # managed by the metaclass. module_model_list = MetaModel.module_to_models.setdefault(cls._module, []) if cls not in module_model_list: module_model_list.append(cls) # Since we don't return an instance here, the __init__ # method won't be called. return None def __init__(self, pool, cr): """ Initialize a model and make it part of the given registry. - copy the stored fields' functions in the osv_pool, - update the _columns with the fields found in ir_model_fields, - ensure there is a many2one for each _inherits'd parent, - update the children's _columns, - give a chance to each field to initialize itself. 
""" pool.add(self._name, self) self.pool = pool if not self._name and not hasattr(self, '_inherit'): name = type(self).__name__.split('.')[0] msg = "The class %s has to have a _name attribute" % name _logger.error(msg) raise except_orm('ValueError', msg) if not self._description: self._description = self._name if not self._table: self._table = self._name.replace('.', '_') if not hasattr(self, '_log_access'): # If _log_access is not specified, it is the same value as _auto. self._log_access = getattr(self, "_auto", True) self._columns = self._columns.copy() for store_field in self._columns: f = self._columns[store_field] if hasattr(f, 'digits_change'): f.digits_change(cr) def not_this_field(stored_func): x, y, z, e, f, l = stored_func return x != self._name or y != store_field self.pool._store_function[self._name] = filter(not_this_field, self.pool._store_function.get(self._name, [])) if not isinstance(f, fields.function): continue if not f.store: continue sm = f.store if sm is True: sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, 10, None)} for object, aa in sm.items(): if len(aa) == 4: (fnct, fields2, order, length) = aa elif len(aa) == 3: (fnct, fields2, order) = aa length = None else: raise except_orm('Error', ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name))) self.pool._store_function.setdefault(object, []) self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length)) self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4])) for (key, _, msg) in self._sql_constraints: self.pool._sql_error[self._table+'_'+key] = msg # Load manual fields cr.execute("SELECT id FROM ir_model_fields WHERE name=%s AND model=%s", ('state', 'ir.model.fields')) if cr.fetchone(): cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (self._name, 'manual')) for field in cr.dictfetchall(): if field['name'] in self._columns: continue attrs = { 'string': field['field_description'], 'required': bool(field['required']), 'readonly': bool(field['readonly']), 'domain': eval(field['domain']) if field['domain'] else None, 'size': field['size'], 'ondelete': field['on_delete'], 'translate': (field['translate']), 'manual': True, #'select': int(field['select_level']) } if field['serialization_field_id']: cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],)) attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']}) if field['ttype'] in ['many2one', 'one2many', 'many2many']: attrs.update({'relation': field['relation']}) self._columns[field['name']] = fields.sparse(**attrs) elif field['ttype'] == 'selection': self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs) elif field['ttype'] == 'reference': self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs) elif field['ttype'] == 'many2one': self._columns[field['name']] = fields.many2one(field['relation'], **attrs) elif field['ttype'] == 'one2many': self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs) elif field['ttype'] == 'many2many': _rel1 = field['relation'].replace('.', '_') _rel2 = field['model'].replace('.', '_') _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name']) self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs) else: 
        for (key, _, msg) in self._sql_constraints:
            self.pool._sql_error[self._table+'_'+key] = msg

        # Load manual fields
        cr.execute("SELECT id FROM ir_model_fields WHERE name=%s AND model=%s", ('state', 'ir.model.fields'))
        if cr.fetchone():
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (self._name, 'manual'))
            for field in cr.dictfetchall():
                if field['name'] in self._columns:
                    continue
                attrs = {
                    'string': field['field_description'],
                    'required': bool(field['required']),
                    'readonly': bool(field['readonly']),
                    'domain': eval(field['domain']) if field['domain'] else None,
                    'size': field['size'],
                    'ondelete': field['on_delete'],
                    'translate': (field['translate']),
                    'manual': True,
                    #'select': int(field['select_level'])
                }

                if field['serialization_field_id']:
                    cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
                    attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
                    if field['ttype'] in ['many2one', 'one2many', 'many2many']:
                        attrs.update({'relation': field['relation']})
                    self._columns[field['name']] = fields.sparse(**attrs)
                elif field['ttype'] == 'selection':
                    self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
                elif field['ttype'] == 'reference':
                    self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
                elif field['ttype'] == 'many2one':
                    self._columns[field['name']] = fields.many2one(field['relation'], **attrs)
                elif field['ttype'] == 'one2many':
                    self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
                elif field['ttype'] == 'many2many':
                    _rel1 = field['relation'].replace('.', '_')
                    _rel2 = field['model'].replace('.', '_')
                    _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                    self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
                else:
                    self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)

        self._inherits_check()
        self._inherits_reload()

        if not self._sequence:
            self._sequence = self._table + '_id_seq'
        for k in self._defaults:
            assert (k in self._columns) or (k in self._inherit_fields), 'Default function defined in %s but field %s does not exist !' % (self._name, k,)
        for f in self._columns:
            self._columns[f].restart()

        # Transience
        if self.is_transient():
            self._transient_check_count = 0
            self._transient_max_count = config.get('osv_memory_count_limit')
            self._transient_max_hours = config.get('osv_memory_age_limit')
            assert self._log_access, "TransientModels must have log_access turned on, " \
                                     "in order to implement their access rights policy"

        # Validate rec_name
        if self._rec_name is not None:
            assert self._rec_name in self._columns.keys() + ['id'], "Invalid rec_name %s for model %s" % (self._rec_name, self._name)
        else:
            self._rec_name = 'name'

    def __export_row(self, cr, uid, row, fields, context=None):
        if context is None:
            context = {}

        def check_type(field_type):
            if field_type == 'float':
                return 0.0
            elif field_type == 'integer':
                return 0
            elif field_type == 'boolean':
                return 'False'
            return ''

        def selection_field(in_field):
            col_obj = self.pool.get(in_field.keys()[0])
            if f[i] in col_obj._columns.keys():
                return col_obj._columns[f[i]]
            elif f[i] in col_obj._inherits.keys():
                selection_field(col_obj._inherits)
            else:
                return False

        def _get_xml_id(self, cr, uid, r):
            model_data = self.pool.get('ir.model.data')
            data_ids = model_data.search(cr, uid, [('model', '=', r._table_name), ('res_id', '=', r['id'])])
            if len(data_ids):
                d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
                if d['module']:
                    r = '%s.%s' % (d['module'], d['name'])
                else:
                    r = d['name']
            else:
                postfix = 0
                while True:
                    n = self._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '')
                    if not model_data.search(cr, uid, [('name', '=', n)]):
                        break
                    postfix += 1
                model_data.create(cr, uid, {
                    'name': n,
                    'model': self._name,
                    'res_id': r['id'],
                    'module': '__export__',
                })
                r = '__export__.'+n
            return r

        lines = []
        data = map(lambda x: '', range(len(fields)))
        done = []
        for fpos in range(len(fields)):
            f = fields[fpos]
            if f:
                r = row
                i = 0
                while i < len(f):
                    cols = False
                    if f[i] == '.id':
                        r = r['id']
                    elif f[i] == 'id':
                        r = _get_xml_id(self, cr, uid, r)
                    else:
                        r = r[f[i]]
                        # To display the external name of a selection field when it is exported
                        if f[i] in self._columns.keys():
                            cols = self._columns[f[i]]
                        elif f[i] in self._inherit_fields.keys():
                            cols = selection_field(self._inherits)
                        if cols and cols._type == 'selection':
                            sel_list = cols.selection
                            if r and type(sel_list) == type([]):
                                r = [x[1] for x in sel_list if r==x[0]]
                                r = r and r[0] or False
                    if not r:
                        if f[i] in self._columns:
                            r = check_type(self._columns[f[i]]._type)
                        elif f[i] in self._inherit_fields:
                            r = check_type(self._inherit_fields[f[i]][2]._type)
                        data[fpos] = r or False
                        break
                    if isinstance(r, (browse_record_list, list)):
                        first = True
                        fields2 = map(lambda x: (x[:i+1]==f[:i+1] and x[i+1:]) or [], fields)
                        if fields2 in done:
                            if [x for x in fields2 if x]:
                                break
                        done.append(fields2)
                        if cols and cols._type=='many2many' and len(fields[fpos])>(i+1) and (fields[fpos][i+1]=='id'):
                            data[fpos] = ','.join([_get_xml_id(self, cr, uid, x) for x in r])
                            break

                        for row2 in r:
                            lines2 = row2._model.__export_row(cr, uid, row2, fields2, context)
                            if first:
                                for fpos2 in range(len(fields)):
                                    if lines2 and lines2[0][fpos2]:
                                        data[fpos2] = lines2[0][fpos2]
                                if not data[fpos]:
                                    dt = ''
                                    for rr in r:
                                        name_relation = self.pool.get(rr._table_name)._rec_name
                                        if isinstance(rr[name_relation], browse_record):
                                            rr = rr[name_relation]
                                        rr_name = self.pool.get(rr._table_name).name_get(cr, uid, [rr.id], context=context)
                                        rr_name = rr_name and rr_name[0] and rr_name[0][1] or ''
                                        dt += tools.ustr(rr_name or '') + ','
                                    data[fpos] = dt[:-1]
                                    break
                                lines += lines2[1:]
                                first = False
                            else:
                                lines += lines2
                        break
                    i += 1
                if i == len(f):
                    if isinstance(r, browse_record):
                        r = self.pool.get(r._table_name).name_get(cr, uid, [r.id], context=context)
                        r = r and r[0] and r[0][1] or ''
                    data[fpos] = tools.ustr(r or '')
        return [data] + lines

    def export_data(self, cr, uid, ids, fields_to_export, context=None):
        """
        Export fields for selected objects

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids
        :param fields_to_export: list of fields
        :param context: context arguments, like lang, time zone
        :rtype: dictionary with a *datas* matrix

        This method is used when exporting data via client menu

        """
        if context is None:
            context = {}
        cols = self._columns.copy()
        for f in self._inherit_fields:
            cols.update({f: self._inherit_fields[f][2]})
        fields_to_export = map(fix_import_export_id_paths, fields_to_export)
        datas = []
        for row in self.browse(cr, uid, ids, context):
            datas += self.__export_row(cr, uid, row, fields_to_export, context)
        return {'datas': datas}
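    # Editor's note: an illustrative export_data() call, as issued by the
    # client; the model, ids and field paths are made up.
    #
    #   result = self.pool.get('res.partner').export_data(
    #       cr, uid, [1, 2], ['id', 'name', 'child_ids/name'], context=context)
    #   # result['datas'] is a list of rows (one list of unicode values per
    #   # record, plus extra rows for one2many children).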
    def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
        """Import given data in given module

        This method is used when importing data via client menu.

        Example of fields to import for a sale.order::

            .id,                         (=database_id)
            partner_id,                  (=name_search)
            order_line/.id,              (=database_id)
            order_line/name,
            order_line/product_id/id,    (=xml id)
            order_line/price_unit,
            order_line/product_uom_qty,
            order_line/product_uom/id    (=xml_id)

        This method returns a 4-tuple with the following structure::

            (return_code, errored_resource, error_message, unused)

        * The first item is a return code, it is ``-1`` in case of
          import error, or the last imported row number in case of success
        * The second item contains the record data dict that failed to import
          in case of error, otherwise it's 0
        * The third item contains an error message string in case of error,
          otherwise it's 0
        * The last item is currently unused, with no specific semantics

        :param fields: list of fields to import
        :param datas: data to import
        :param mode: 'init' or 'update' for record creation
        :param current_module: module name
        :param noupdate: flag for record creation
        :param filename: optional file to store partial import state for recovery
        :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
        :rtype: (int, dict or 0, str or 0, str or 0)
        """
        if not context:
            context = {}
        fields = map(fix_import_export_id_paths, fields)
        ir_model_data_obj = self.pool.get('ir.model.data')

        # mode: id (XML id) or .id (database id) or False for name_get
        def _get_id(model_name, id, current_module=False, mode='id'):
            if mode=='.id':
                id = int(id)
                obj_model = self.pool.get(model_name)
                ids = obj_model.search(cr, uid, [('id', '=', int(id))])
                if not len(ids):
                    raise Exception(_("Database ID doesn't exist: %s : %s") % (model_name, id))
            elif mode=='id':
                if '.' in id:
                    module, xml_id = id.rsplit('.', 1)
                else:
                    module, xml_id = current_module, id
                record_id = ir_model_data_obj._get_id(cr, uid, module, xml_id)
                ir_model_data = ir_model_data_obj.read(cr, uid, [record_id], ['res_id'])
                if not ir_model_data:
                    raise ValueError('No references to %s.%s' % (module, xml_id))
                id = ir_model_data[0]['res_id']
            else:
                obj_model = self.pool.get(model_name)
                ids = obj_model.name_search(cr, uid, id, operator='=', context=context)
                if not ids:
                    raise ValueError('No record found for %s' % (id,))
                id = ids[0][0]
            return id

        # IN:
        #   datas: a list of records, each record is defined by a list of values
        #   prefix: a list of prefix fields ['line_ids']
        #   position: the line to process, skip is False if it's the first line of the current record
        # OUT:
        #   (res, position, warning, res_id, xml_id) with
        #     res: the record for the next line to process (including its one2many)
        #     position: the new position for the next line
        #     res_id: the ID of the record if it's a modification
        def process_liness(self, datas, prefix, current_module, model_name, fields_def, position=0, skip=0):
            line = datas[position]
            row = {}
            warning = []
            data_res_id = False
            xml_id = False
            nbrmax = position+1

            done = {}
            for i, field in enumerate(fields):
                res = False
                if i >= len(line):
                    raise Exception(_('Please check that all your lines have %d columns.'
                        'Stopped around line %d having %d columns.') % \
                            (len(fields), position+2, len(line)))
                if not line[i]:
                    continue

                if field[:len(prefix)] != prefix:
                    if line[i] and skip:
                        return False
                    continue
                field_name = field[len(prefix)]

                # set the mode for m2o, o2m, m2m : xml_id/id/name
                if len(field) == len(prefix)+1:
                    mode = False
                else:
                    mode = field[len(prefix)+1]

                # TODO: improve this by using csv.csv_reader
                def many_ids(line, relation, current_module, mode):
                    res = []
                    for db_id in line.split(config.get('csv_internal_sep')):
                        res.append(_get_id(relation, db_id, current_module, mode))
                    return [(6, 0, res)]

                # ID of the record using a XML ID
                if field_name == 'id':
                    try:
                        data_res_id = _get_id(model_name, line[i], current_module)
                    except ValueError:
                        pass
                    xml_id = line[i]
                    continue

                # ID of the record using a database ID
                elif field_name == '.id':
                    data_res_id = _get_id(model_name, line[i], current_module, '.id')
                    continue

                field_type = fields_def[field_name]['type']
                # recursive call for getting children and returning [(0,0,{})] or [(1,ID,{})]
                if field_type == 'one2many':
                    if field_name in done:
                        continue
                    done[field_name] = True
                    relation = fields_def[field_name]['relation']
                    relation_obj = self.pool.get(relation)
                    newfd = relation_obj.fields_get(cr, uid, context=context)
                    pos = position

                    res = []

                    first = 0
                    while pos < len(datas):
                        res2 = process_liness(self, datas, prefix + [field_name], current_module, relation_obj._name, newfd, pos, first)
                        if not res2:
                            break
                        (newrow, pos, w2, data_res_id2, xml_id2) = res2
                        nbrmax = max(nbrmax, pos)
                        warning += w2
                        first += 1

                        if (not newrow) or not reduce(lambda x, y: x or y, newrow.values(), 0):
                            break

                        res.append((data_res_id2 and 1 or 0, data_res_id2 or 0, newrow))

                elif field_type == 'many2one':
                    relation = fields_def[field_name]['relation']
                    res = _get_id(relation, line[i], current_module, mode)

                elif field_type == 'many2many':
                    relation = fields_def[field_name]['relation']
                    res = many_ids(line[i], relation, current_module, mode)

                elif field_type == 'integer':
                    res = line[i] and int(line[i]) or 0
                elif field_type == 'boolean':
                    res = line[i].lower() not in ('0', 'false', 'off')
                elif field_type == 'float':
                    res = line[i] and float(line[i]) or 0.0
                elif field_type == 'selection':
                    for key, val in fields_def[field_name]['selection']:
                        if tools.ustr(line[i]) in [tools.ustr(key), tools.ustr(val)]:
                            res = key
                            break
                    if line[i] and not res:
                        _logger.warning(
                            _("key '%s' not found in selection field '%s'"),
                            tools.ustr(line[i]), tools.ustr(field_name))
                        warning.append(_("Key/value '%s' not found in selection field '%s'") % (
                            tools.ustr(line[i]), tools.ustr(field_name)))

                else:
                    res = line[i]

                row[field_name] = res or False

            return row, nbrmax, warning, data_res_id, xml_id

        fields_def = self.fields_get(cr, uid, context=context)

        position = 0
        if config.get('import_partial') and filename:
            with open(config.get('import_partial'), 'rb') as partial_import_file:
                data = pickle.load(partial_import_file)
                position = data.get(filename, 0)

        while position < len(datas):