# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
2006-12-07 13:41:40 +00:00
2011-06-23 09:03:57 +00:00
#.apidoc title: Object Relational Mapping
#.apidoc module-mods: member-order: bysource
2011-06-23 09:02:28 +00:00
"""
Object relational mapping to database ( postgresql ) module
* Hierarchical structure
* Constraints consistency , validations
* Object meta Data depends on its status
* Optimised processing by complex query ( multiple actions at once )
* Default fields value
* Permissions optimisation
* Persistant object : DB postgresql
* Datas conversions
* Multi - level caching system
* 2 different inheritancies
* Fields :
- classicals ( varchar , integer , boolean , . . . )
- relations ( one2many , many2one , many2many )
- functions
2011-08-23 09:27:37 +00:00
2011-06-23 09:02:28 +00:00
"""
2009-02-02 14:07:15 +00:00
import calendar
2010-01-29 16:26:16 +00:00
import copy
import datetime
2011-09-15 12:47:41 +00:00
import itertools
2010-03-03 15:36:11 +00:00
import logging
2010-05-27 07:53:50 +00:00
import operator
2010-01-29 16:26:16 +00:00
import pickle
2006-12-07 13:41:40 +00:00
import re
2011-09-23 12:45:25 +00:00
import simplejson
2010-01-29 16:26:16 +00:00
import time
import types
2010-05-27 07:53:50 +00:00
from lxml import etree
2008-05-27 05:38:42 +00:00
2010-05-27 07:53:50 +00:00
import fields
2011-09-23 12:45:25 +00:00
import openerp
import openerp . netsvc as netsvc
2011-02-07 12:57:23 +00:00
import openerp . tools as tools
2011-09-23 12:45:25 +00:00
from openerp . tools . config import config
2011-02-07 12:57:23 +00:00
from openerp . tools . safe_eval import safe_eval as eval
2011-09-23 12:45:25 +00:00
from openerp . tools . translate import _
2011-09-26 09:01:56 +00:00
from openerp import SUPERUSER_ID
2011-09-23 12:45:25 +00:00
from query import Query
2008-05-27 05:38:42 +00:00
2012-01-24 12:42:52 +00:00
_logger = logging . getLogger ( __name__ )
2012-02-01 23:56:04 +00:00
_schema = logging . getLogger ( __name__ + ' .schema ' )
2012-01-24 12:42:52 +00:00
2010-12-29 10:50:04 +00:00
# List of etree._Element subclasses that we choose to ignore when parsing XML.
2011-02-07 12:57:23 +00:00
from openerp . tools import SKIPPED_ELEMENT_TYPES
2008-05-26 18:33:33 +00:00
2010-12-29 10:50:04 +00:00
regex_order = re . compile ( ' ^(([a-z0-9_]+| " [a-z0-9_]+ " )( *desc| *asc)?( *, *|))+$ ' , re . I )
2011-04-27 09:08:46 +00:00
regex_object_name = re . compile ( r ' ^[a-z0-9_.]+$ ' )
2011-07-05 12:22:22 +00:00
def transfer_field_to_modifiers ( field , modifiers ) :
2011-07-07 15:51:35 +00:00
default_values = { }
state_exceptions = { }
2011-07-12 14:35:32 +00:00
for attr in ( ' invisible ' , ' readonly ' , ' required ' ) :
state_exceptions [ attr ] = [ ]
default_values [ attr ] = bool ( field . get ( attr ) )
for state , modifs in ( field . get ( " states " , { } ) ) . items ( ) :
2011-07-07 15:51:35 +00:00
for modif in modifs :
2011-07-12 14:35:32 +00:00
if default_values [ modif [ 0 ] ] != modif [ 1 ] :
2011-07-07 15:51:35 +00:00
state_exceptions [ modif [ 0 ] ] . append ( state )
2011-07-12 14:35:32 +00:00
2011-07-07 15:51:35 +00:00
for attr , default_value in default_values . items ( ) :
2011-07-12 14:35:32 +00:00
if state_exceptions [ attr ] :
modifiers [ attr ] = [ ( " state " , " not in " if default_value else " in " , state_exceptions [ attr ] ) ]
else :
modifiers [ attr ] = default_value
2011-07-05 12:22:22 +00:00
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers ( node , modifiers , context = None , in_tree_view = False ) :
if node . get ( ' attrs ' ) :
modifiers . update ( eval ( node . get ( ' attrs ' ) ) )
if node . get ( ' states ' ) :
if ' invisible ' in modifiers and isinstance ( modifiers [ ' invisible ' ] , list ) :
# TODO combine with AND or OR, use implicit AND for now.
modifiers [ ' invisible ' ] . append ( ( ' state ' , ' not in ' , node . get ( ' states ' ) . split ( ' , ' ) ) )
else :
modifiers [ ' invisible ' ] = [ ( ' state ' , ' not in ' , node . get ( ' states ' ) . split ( ' , ' ) ) ]
for a in ( ' invisible ' , ' readonly ' , ' required ' ) :
if node . get ( a ) :
v = bool ( eval ( node . get ( a ) , { ' context ' : context or { } } ) )
if in_tree_view and a == ' invisible ' :
# Invisible in a tree view has a specific meaning, make it a
# new key in the modifiers attribute.
modifiers [ ' tree_invisible ' ] = v
elif v or ( a not in modifiers or not isinstance ( modifiers [ a ] , list ) ) :
# Don't set the attribute to False if a dynamic value was
# provided (i.e. a domain from attrs or states).
modifiers [ a ] = v
def simplify_modifiers ( modifiers ) :
for a in ( ' invisible ' , ' readonly ' , ' required ' ) :
if a in modifiers and not modifiers [ a ] :
del modifiers [ a ]
def transfer_modifiers_to_node ( modifiers , node ) :
if modifiers :
simplify_modifiers ( modifiers )
node . set ( ' modifiers ' , simplejson . dumps ( modifiers ) )
2011-10-06 14:27:27 +00:00
def setup_modifiers ( node , field = None , context = None , in_tree_view = False ) :
""" Processes node attributes and field descriptors to generate
the ` ` modifiers ` ` node attribute and set it on the provided node .
Alters its first argument in - place .
: param node : ` ` field ` ` node from an OpenERP view
: type node : lxml . etree . _Element
: param dict field : field descriptor corresponding to the provided node
: param dict context : execution context used to evaluate node attributes
: param bool in_tree_view : triggers the ` ` tree_invisible ` ` code
path ( separate from ` ` invisible ` ` ) : in
tree view there are two levels of
invisibility , cell content ( a column is
present but the cell itself is not
displayed ) with ` ` invisible ` ` and column
invisibility ( the whole column is
hidden ) with ` ` tree_invisible ` ` .
: returns : nothing
"""
modifiers = { }
if field is not None :
transfer_field_to_modifiers ( field , modifiers )
transfer_node_to_modifiers (
node , modifiers , context = context , in_tree_view = in_tree_view )
transfer_modifiers_to_node ( modifiers , node )
2011-07-05 12:22:22 +00:00
def test_modifiers ( what , expected ) :
modifiers = { }
if isinstance ( what , basestring ) :
node = etree . fromstring ( what )
transfer_node_to_modifiers ( node , modifiers )
simplify_modifiers ( modifiers )
json = simplejson . dumps ( modifiers )
assert json == expected , " %s != %s " % ( json , expected )
elif isinstance ( what , dict ) :
transfer_field_to_modifiers ( what , modifiers )
simplify_modifiers ( modifiers )
json = simplejson . dumps ( modifiers )
assert json == expected , " %s != %s " % ( json , expected )
# To use this test:
# import openerp
# openerp.osv.orm.modifiers_tests()
def modifiers_tests ( ) :
test_modifiers ( ' <field name= " a " /> ' , ' {} ' )
test_modifiers ( ' <field name= " a " invisible= " 1 " /> ' , ' { " invisible " : true} ' )
test_modifiers ( ' <field name= " a " readonly= " 1 " /> ' , ' { " readonly " : true} ' )
test_modifiers ( ' <field name= " a " required= " 1 " /> ' , ' { " required " : true} ' )
test_modifiers ( ' <field name= " a " invisible= " 0 " /> ' , ' {} ' )
test_modifiers ( ' <field name= " a " readonly= " 0 " /> ' , ' {} ' )
test_modifiers ( ' <field name= " a " required= " 0 " /> ' , ' {} ' )
test_modifiers ( ' <field name= " a " invisible= " 1 " required= " 1 " /> ' , ' { " invisible " : true, " required " : true} ' ) # TODO order is not guaranteed
test_modifiers ( ' <field name= " a " invisible= " 1 " required= " 0 " /> ' , ' { " invisible " : true} ' )
test_modifiers ( ' <field name= " a " invisible= " 0 " required= " 1 " /> ' , ' { " required " : true} ' )
test_modifiers ( """ <field name= " a " attrs= " { ' invisible ' : [( ' b ' , ' = ' , ' c ' )]} " /> """ , ' { " invisible " : [[ " b " , " = " , " c " ]]} ' )
# The dictionary is supposed to be the result of fields_get().
test_modifiers ( { } , ' {} ' )
test_modifiers ( { " invisible " : True } , ' { " invisible " : true} ' )
test_modifiers ( { " invisible " : False } , ' {} ' )
2011-08-23 09:27:37 +00:00
2011-07-05 12:22:22 +00:00
2011-04-27 09:08:46 +00:00
def check_object_name ( name ) :
""" Check if the given name is a valid openerp object name.
The _name attribute in osv and osv_memory object is subject to
some restrictions . This function returns True or False whether
the given name is allowed or not .
TODO : this is an approximation . The goal in this approximation
is to disallow uppercase characters ( in some places , we quote
table / column names and in other not , which leads to this kind
of errors :
psycopg2 . ProgrammingError : relation " xxx " does not exist ) .
The same restriction should apply to both osv and osv_memory
objects for consistency .
"""
if regex_object_name . match ( name ) is None :
return False
return True
def raise_on_invalid_object_name ( name ) :
if not check_object_name ( name ) :
msg = " The _name attribute %s is not valid. " % name
2012-01-24 12:42:52 +00:00
_logger . error ( msg )
2011-04-27 09:08:46 +00:00
raise except_orm ( ' ValueError ' , msg )
2010-03-29 09:25:37 +00:00
POSTGRES_CONFDELTYPES = {
' RESTRICT ' : ' r ' ,
' NO ACTION ' : ' a ' ,
' CASCADE ' : ' c ' ,
' SET NULL ' : ' n ' ,
' SET DEFAULT ' : ' d ' ,
}
2006-12-07 13:41:40 +00:00
def intersect ( la , lb ) :
2008-07-22 14:24:36 +00:00
return filter ( lambda x : x in lb , la )
2006-12-07 13:41:40 +00:00
2011-09-22 12:25:42 +00:00
def fix_import_export_id_paths ( fieldname ) :
"""
Fixes the id fields in import and exports , and splits field paths
on ' / ' .
: param str fieldname : name of the field to import / export
: return : split field name
: rtype : list of str
"""
fixed_db_id = re . sub ( r ' ([^/]) \ .id ' , r ' \ 1/.id ' , fieldname )
fixed_external_id = re . sub ( r ' ([^/]):id ' , r ' \ 1/id ' , fixed_db_id )
return fixed_external_id . split ( ' / ' )
2006-12-07 13:41:40 +00:00
class except_orm ( Exception ) :
2008-07-22 14:24:36 +00:00
def __init__ ( self , name , value ) :
self . name = name
self . value = value
self . args = ( name , value )
2006-12-07 13:41:40 +00:00
2009-06-10 11:15:35 +00:00
class BrowseRecordError ( Exception ) :
pass
2008-08-12 14:44:56 +00:00
2006-12-07 13:41:40 +00:00
class browse_null ( object ) :
2011-06-23 09:02:28 +00:00
""" Readonly python database object browser
"""
2007-07-26 08:30:50 +00:00
2008-07-22 14:24:36 +00:00
def __init__ ( self ) :
2008-08-12 14:44:56 +00:00
self . id = False
2007-07-26 08:30:50 +00:00
2008-07-22 14:24:36 +00:00
def __getitem__ ( self , name ) :
2009-01-06 09:37:43 +00:00
return None
2007-07-26 08:30:50 +00:00
2008-10-27 13:59:31 +00:00
def __getattr__ ( self , name ) :
2009-01-06 09:37:43 +00:00
return None # XXX: return self ?
2008-10-27 13:59:31 +00:00
2008-07-22 14:24:36 +00:00
def __int__ ( self ) :
return False
2007-07-26 08:30:50 +00:00
2008-07-22 14:24:36 +00:00
def __str__ ( self ) :
return ' '
2007-07-26 08:30:50 +00:00
2008-07-22 14:24:36 +00:00
def __nonzero__ ( self ) :
return False
2009-02-14 05:35:17 +00:00
2008-12-29 12:36:01 +00:00
def __unicode__ ( self ) :
return u ' '
2006-12-07 13:41:40 +00:00
2008-08-12 14:44:56 +00:00
2006-12-07 13:41:40 +00:00
#
# TODO: execute an object method on browse_record_list
#
class browse_record_list ( list ) :
2011-06-23 09:02:28 +00:00
""" Collection of browse objects
2011-08-23 09:27:37 +00:00
2011-06-23 09:02:28 +00:00
Such an instance will be returned when doing a ` ` browse ( [ ids . . ] ) ` `
and will be iterable , yielding browse ( ) objects
"""
2007-07-26 08:30:53 +00:00
2008-07-22 14:24:36 +00:00
def __init__ ( self , lst , context = None ) :
if not context :
context = { }
super ( browse_record_list , self ) . __init__ ( lst )
self . context = context
2006-12-07 13:41:40 +00:00
2007-08-09 06:06:22 +00:00
2006-12-07 13:41:40 +00:00
class browse_record ( object ) :
2011-06-23 09:02:28 +00:00
""" An object that behaves like a row of an object ' s table.
It has attributes after the columns of the corresponding object .
2011-08-23 09:27:37 +00:00
2011-06-23 09:02:28 +00:00
Examples : :
2011-08-23 09:27:37 +00:00
2011-06-23 09:02:28 +00:00
uobj = pool . get ( ' res.users ' )
user_rec = uobj . browse ( cr , uid , 104 )
name = user_rec . name
"""
2010-01-29 16:23:32 +00:00
2012-01-25 11:34:29 +00:00
def __init__ ( self , cr , uid , id , table , cache , context = None ,
list_class = browse_record_list , fields_process = None ) :
2011-06-23 09:02:28 +00:00
"""
2012-01-25 11:34:29 +00:00
: param table : the browsed object ( inherited from orm )
: param dict cache : a dictionary of model - > field - > data to be shared
across browse objects , thus reducing the SQL
read ( ) s . It can speed up things a lot , but also be
disastrous if not discarded after write ( ) / unlink ( )
operations
: param dict context : dictionary with an optional context
2011-06-23 09:02:28 +00:00
"""
2010-11-12 13:42:14 +00:00
if fields_process is None :
2010-11-12 14:21:19 +00:00
fields_process = { }
2010-11-12 13:42:14 +00:00
if context is None :
context = { }
2012-01-25 11:34:29 +00:00
self . _list_class = list_class
2008-07-22 14:24:36 +00:00
self . _cr = cr
self . _uid = uid
self . _id = id
2011-10-03 14:40:49 +00:00
self . _table = table # deprecated, use _model!
self . _model = table
2008-07-22 14:24:36 +00:00
self . _table_name = self . _table . _name
2012-01-31 21:17:44 +00:00
self . __logger = logging . getLogger ( ' openerp.osv.orm.browse_record. ' + self . _table_name )
2010-11-12 13:42:14 +00:00
self . _context = context
self . _fields_process = fields_process
2008-07-22 14:24:36 +00:00
cache . setdefault ( table . _name , { } )
self . _data = cache [ table . _name ]
2008-11-21 18:12:24 +00:00
2011-12-19 22:09:54 +00:00
# if not (id and isinstance(id, (int, long,))):
# raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
2009-06-16 06:45:56 +00:00
# if not table.exists(cr, uid, id, context):
# raise BrowseRecordError(_('Object %s does not exists') % (self,))
2009-06-10 11:15:35 +00:00
2008-12-19 19:17:59 +00:00
if id not in self . _data :
2008-08-12 14:44:56 +00:00
self . _data [ id ] = { ' id ' : id }
2008-11-21 18:12:24 +00:00
2008-07-22 14:24:36 +00:00
self . _cache = cache
def __getitem__ ( self , name ) :
if name == ' id ' :
return self . _id
2010-05-04 14:46:42 +00:00
2008-12-19 19:17:59 +00:00
if name not in self . _data [ self . _id ] :
2008-07-22 14:24:36 +00:00
# build the list of fields we will fetch
# fetch the definition of the field which was asked for
if name in self . _table . _columns :
col = self . _table . _columns [ name ]
elif name in self . _table . _inherit_fields :
col = self . _table . _inherit_fields [ name ] [ 2 ]
2008-10-10 09:18:05 +00:00
elif hasattr ( self . _table , str ( name ) ) :
2010-05-04 14:46:42 +00:00
attr = getattr ( self . _table , name )
if isinstance ( attr , ( types . MethodType , types . LambdaType , types . FunctionType ) ) :
2011-09-13 13:08:26 +00:00
def function_proxy ( * args , * * kwargs ) :
2011-09-13 14:47:02 +00:00
if ' context ' not in kwargs and self . _context :
2011-09-13 13:08:26 +00:00
kwargs . update ( context = self . _context )
return attr ( self . _cr , self . _uid , [ self . _id ] , * args , * * kwargs )
return function_proxy
2008-07-22 14:24:36 +00:00
else :
2010-05-04 14:46:42 +00:00
return attr
2008-07-22 14:24:36 +00:00
else :
2011-10-07 14:49:44 +00:00
error_msg = " Field ' %s ' does not exist in object ' %s ' " % ( name , self )
2012-02-02 09:26:34 +00:00
self . __logger . warning ( error_msg )
2011-10-07 14:49:44 +00:00
raise KeyError ( error_msg )
2008-07-22 14:24:36 +00:00
# if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
2009-08-20 15:29:21 +00:00
if col . _prefetch :
2008-07-22 14:24:36 +00:00
# gen the list of "local" (ie not inherited) fields which are classic or many2one
2010-08-18 10:09:55 +00:00
fields_to_fetch = filter ( lambda x : x [ 1 ] . _classic_write , self . _table . _columns . items ( ) )
2008-07-22 14:24:36 +00:00
# gen the list of inherited fields
inherits = map ( lambda x : ( x [ 0 ] , x [ 1 ] [ 2 ] ) , self . _table . _inherit_fields . items ( ) )
# complete the field list with the inherited fields which are classic or many2one
2010-08-18 10:09:55 +00:00
fields_to_fetch + = filter ( lambda x : x [ 1 ] . _classic_write , inherits )
2008-07-22 14:24:36 +00:00
# otherwise we fetch only that field
else :
2010-08-18 10:09:55 +00:00
fields_to_fetch = [ ( name , col ) ]
2008-12-19 19:17:59 +00:00
ids = filter ( lambda id : name not in self . _data [ id ] , self . _data . keys ( ) )
2010-08-18 10:09:55 +00:00
# read the results
field_names = map ( lambda x : x [ 0 ] , fields_to_fetch )
field_values = self . _table . read ( self . _cr , self . _uid , ids , field_names , context = self . _context , load = " _classic_write " )
2011-01-18 13:12:47 +00:00
# TODO: improve this, very slow for reports
2008-07-22 14:24:36 +00:00
if self . _fields_process :
2009-04-28 10:24:21 +00:00
lang = self . _context . get ( ' lang ' , ' en_US ' ) or ' en_US '
2010-09-03 10:59:56 +00:00
lang_obj_ids = self . pool . get ( ' res.lang ' ) . search ( self . _cr , self . _uid , [ ( ' code ' , ' = ' , lang ) ] )
2009-09-25 12:41:56 +00:00
if not lang_obj_ids :
raise Exception ( _ ( ' Language with code " %s " is not defined in your system ! \n Define it through the Administration menu. ' ) % ( lang , ) )
2010-09-03 10:59:56 +00:00
lang_obj = self . pool . get ( ' res.lang ' ) . browse ( self . _cr , self . _uid , lang_obj_ids [ 0 ] )
2009-10-27 06:23:53 +00:00
2010-08-18 10:09:55 +00:00
for field_name , field_column in fields_to_fetch :
if field_column . _type in self . _fields_process :
for result_line in field_values :
result_line [ field_name ] = self . _fields_process [ field_column . _type ] ( result_line [ field_name ] )
if result_line [ field_name ] :
result_line [ field_name ] . set_value ( self . _cr , self . _uid , result_line [ field_name ] , self , field_column , lang_obj )
2008-07-22 14:24:36 +00:00
2010-08-18 10:09:55 +00:00
if not field_values :
2010-01-29 16:23:32 +00:00
# Where did those ids come from? Perhaps old entries in ir_model_dat?
2012-01-24 12:42:52 +00:00
_logger . warning ( " No field_values found for ids %s in %s " , ids , self )
2010-09-03 10:59:56 +00:00
raise KeyError ( ' Field %s not found in %s ' % ( name , self ) )
2008-07-22 14:24:36 +00:00
# create browse records for 'remote' objects
2010-08-18 10:09:55 +00:00
for result_line in field_values :
2010-02-09 06:03:33 +00:00
new_data = { }
2010-08-18 10:09:55 +00:00
for field_name , field_column in fields_to_fetch :
if field_column . _type in ( ' many2one ' , ' one2one ' ) :
if result_line [ field_name ] :
obj = self . _table . pool . get ( field_column . _obj )
2010-09-03 10:59:56 +00:00
if isinstance ( result_line [ field_name ] , ( list , tuple ) ) :
2010-08-18 10:09:55 +00:00
value = result_line [ field_name ] [ 0 ]
2008-07-22 14:24:36 +00:00
else :
2010-08-18 10:09:55 +00:00
value = result_line [ field_name ]
if value :
2010-02-02 16:40:13 +00:00
# FIXME: this happen when a _inherits object
# overwrite a field of it parent. Need
# testing to be sure we got the right
# object and not the parent one.
2010-08-18 10:09:55 +00:00
if not isinstance ( value , browse_record ) :
2011-02-16 14:21:28 +00:00
if obj is None :
# In some cases the target model is not available yet, so we must ignore it,
# which is safe in most cases, this value will just be loaded later when needed.
# This situation can be caused by custom fields that connect objects with m2o without
# respecting module dependencies, causing relationships to be connected to soon when
# the target is not loaded yet.
continue
2010-08-18 10:09:55 +00:00
new_data [ field_name ] = browse_record ( self . _cr ,
self . _uid , value , obj , self . _cache ,
2010-02-02 16:40:13 +00:00
context = self . _context ,
list_class = self . _list_class ,
fields_process = self . _fields_process )
2010-05-04 14:46:42 +00:00
else :
2010-08-18 10:09:55 +00:00
new_data [ field_name ] = value
2008-07-22 14:24:36 +00:00
else :
2010-08-18 10:09:55 +00:00
new_data [ field_name ] = browse_null ( )
2008-07-22 14:24:36 +00:00
else :
2010-08-18 10:09:55 +00:00
new_data [ field_name ] = browse_null ( )
elif field_column . _type in ( ' one2many ' , ' many2many ' ) and len ( result_line [ field_name ] ) :
new_data [ field_name ] = self . _list_class ( [ browse_record ( self . _cr , self . _uid , id , self . _table . pool . get ( field_column . _obj ) , self . _cache , context = self . _context , list_class = self . _list_class , fields_process = self . _fields_process ) for id in result_line [ field_name ] ] , self . _context )
elif field_column . _type in ( ' reference ' ) :
if result_line [ field_name ] :
if isinstance ( result_line [ field_name ] , browse_record ) :
new_data [ field_name ] = result_line [ field_name ]
2010-03-19 13:22:00 +00:00
else :
2010-08-18 10:09:55 +00:00
ref_obj , ref_id = result_line [ field_name ] . split ( ' , ' )
2010-03-19 13:22:00 +00:00
ref_id = long ( ref_id )
2010-12-20 16:09:59 +00:00
if ref_id :
obj = self . _table . pool . get ( ref_obj )
new_data [ field_name ] = browse_record ( self . _cr , self . _uid , ref_id , obj , self . _cache , context = self . _context , list_class = self . _list_class , fields_process = self . _fields_process )
else :
new_data [ field_name ] = browse_null ( )
2010-03-16 14:14:08 +00:00
else :
2010-08-18 10:09:55 +00:00
new_data [ field_name ] = browse_null ( )
2010-02-09 06:03:33 +00:00
else :
2010-08-18 10:09:55 +00:00
new_data [ field_name ] = result_line [ field_name ]
self . _data [ result_line [ ' id ' ] ] . update ( new_data )
2010-05-11 10:55:08 +00:00
2009-11-24 14:44:05 +00:00
if not name in self . _data [ self . _id ] :
2011-02-16 14:21:28 +00:00
# How did this happen? Could be a missing model due to custom fields used too soon, see above.
2012-01-25 11:34:29 +00:00
self . __logger . error ( " Fields to fetch: %s , Field values: %s " , field_names , field_values )
self . __logger . error ( " Cached: %s , Table: %s " , self . _data [ self . _id ] , self . _table )
2010-01-29 16:23:32 +00:00
raise KeyError ( _ ( ' Unknown attribute %s in %s ' ) % ( name , self ) )
2008-07-22 14:24:36 +00:00
return self . _data [ self . _id ] [ name ]
def __getattr__ ( self , name ) :
2010-01-29 16:23:32 +00:00
try :
return self [ name ]
except KeyError , e :
raise AttributeError ( e )
2008-07-22 14:24:36 +00:00
def __contains__ ( self , name ) :
return ( name in self . _table . _columns ) or ( name in self . _table . _inherit_fields ) or hasattr ( self . _table , name )
2011-10-05 11:24:44 +00:00
def __iter__ ( self ) :
raise NotImplementedError ( " Iteration is not allowed on %s " % self )
2008-07-22 14:24:36 +00:00
def __hasattr__ ( self , name ) :
return name in self
def __int__ ( self ) :
return self . _id
def __str__ ( self ) :
return " browse_record( %s , %d ) " % ( self . _table_name , self . _id )
def __eq__ ( self , other ) :
2010-05-04 14:46:42 +00:00
if not isinstance ( other , browse_record ) :
return False
2008-07-22 14:24:36 +00:00
return ( self . _table_name , self . _id ) == ( other . _table_name , other . _id )
def __ne__ ( self , other ) :
2010-05-04 14:46:42 +00:00
if not isinstance ( other , browse_record ) :
return True
2008-07-22 14:24:36 +00:00
return ( self . _table_name , self . _id ) != ( other . _table_name , other . _id )
# we need to define __unicode__ even though we've already defined __str__
# because we have overridden __getattr__
def __unicode__ ( self ) :
return unicode ( str ( self ) )
def __hash__ ( self ) :
return hash ( ( self . _table_name , self . _id ) )
__repr__ = __str__
2006-12-07 13:41:40 +00:00
2011-07-18 17:30:44 +00:00
def refresh ( self ) :
""" Force refreshing this browse_record ' s data and all the data of the
records that belong to the same cache , by emptying the cache completely ,
preserving only the record identifiers ( for prefetching optimizations ) .
"""
for model , model_cache in self . _cache . iteritems ( ) :
# only preserve the ids of the records that were in the cache
cached_ids = dict ( [ ( i , { ' id ' : i } ) for i in model_cache . keys ( ) ] )
self . _cache [ model ] . clear ( )
self . _cache [ model ] . update ( cached_ids )
2006-12-07 13:41:40 +00:00
2011-09-09 14:33:49 +00:00
def pg_varchar ( size = 0 ) :
""" Returns the VARCHAR declaration for the provided size:
2011-10-05 10:14:01 +00:00
* If no size ( or an empty or negative size is provided ) return an
' infinite ' VARCHAR
2011-09-09 14:33:49 +00:00
* Otherwise return a VARCHAR ( n )
: type int size : varchar size , optional
: rtype : str
"""
if size :
2011-09-09 16:29:29 +00:00
if not isinstance ( size , int ) :
raise TypeError ( " VARCHAR parameter should be an int, got %s "
% type ( size ) )
2011-10-05 10:14:01 +00:00
if size > 0 :
return ' VARCHAR( %d ) ' % size
2011-09-09 14:33:49 +00:00
return ' VARCHAR '
2006-12-07 13:41:40 +00:00
2011-09-09 15:26:22 +00:00
FIELDS_TO_PGTYPES = {
fields . boolean : ' bool ' ,
fields . integer : ' int4 ' ,
fields . integer_big : ' int8 ' ,
fields . text : ' text ' ,
fields . date : ' date ' ,
fields . time : ' time ' ,
fields . datetime : ' timestamp ' ,
fields . binary : ' bytea ' ,
fields . many2one : ' int4 ' ,
2011-11-16 17:25:25 +00:00
fields . serialized : ' text ' ,
2011-09-09 15:26:22 +00:00
}
2011-09-22 13:17:44 +00:00
2011-09-09 14:52:13 +00:00
def get_pg_type ( f , type_override = None ) :
2011-06-23 09:02:28 +00:00
"""
2011-09-09 14:52:13 +00:00
: param fields . _column f : field to get a Postgres type for
: param type type_override : use the provided type for dispatching instead of the field ' s own type
: returns : ( postgres_identification_type , postgres_type_specification )
: rtype : ( str , str )
2011-06-23 09:02:28 +00:00
"""
2011-09-09 14:52:13 +00:00
field_type = type_override or type ( f )
2008-07-22 14:24:36 +00:00
2011-09-09 15:26:22 +00:00
if field_type in FIELDS_TO_PGTYPES :
2011-09-21 08:20:44 +00:00
pg_type = ( FIELDS_TO_PGTYPES [ field_type ] , FIELDS_TO_PGTYPES [ field_type ] )
2011-09-09 15:58:48 +00:00
elif issubclass ( field_type , fields . float ) :
2008-07-22 14:24:36 +00:00
if f . digits :
2011-09-21 08:20:44 +00:00
pg_type = ( ' numeric ' , ' NUMERIC ' )
2008-07-22 14:24:36 +00:00
else :
2011-09-21 08:20:44 +00:00
pg_type = ( ' float8 ' , ' DOUBLE PRECISION ' )
2011-09-09 15:58:48 +00:00
elif issubclass ( field_type , ( fields . char , fields . reference ) ) :
2011-09-21 08:20:44 +00:00
pg_type = ( ' varchar ' , pg_varchar ( f . size ) )
2011-09-09 15:58:48 +00:00
elif issubclass ( field_type , fields . selection ) :
2011-09-09 16:16:23 +00:00
if ( isinstance ( f . selection , list ) and isinstance ( f . selection [ 0 ] [ 0 ] , int ) ) \
or getattr ( f , ' size ' , None ) == - 1 :
2011-09-21 08:20:44 +00:00
pg_type = ( ' int4 ' , ' INTEGER ' )
2008-07-22 14:24:36 +00:00
else :
2011-09-21 08:20:44 +00:00
pg_type = ( ' varchar ' , pg_varchar ( getattr ( f , ' size ' , None ) ) )
2011-09-09 15:58:48 +00:00
elif issubclass ( field_type , fields . function ) :
2011-09-09 15:42:42 +00:00
if f . _type == ' selection ' :
2011-09-21 08:20:44 +00:00
pg_type = ( ' varchar ' , pg_varchar ( ) )
2009-09-18 13:55:56 +00:00
else :
2011-09-21 08:20:44 +00:00
pg_type = get_pg_type ( f , getattr ( fields , f . _type ) )
2008-07-22 14:24:36 +00:00
else :
2012-01-24 12:42:52 +00:00
_logger . warning ( ' %s type not supported! ' , field_type )
2011-09-21 08:20:44 +00:00
pg_type = None
return pg_type
2006-12-07 13:41:40 +00:00
2008-08-12 14:44:56 +00:00
2011-06-15 10:21:15 +00:00
class MetaModel ( type ) :
""" Metaclass for the Model.
This class is used as the metaclass for the Model class to discover
the models defined in a module ( i . e . without instanciating them ) .
If the automatic discovery is not needed , it is possible to set the
model ' s _register attribute to False.
"""
module_to_models = { }
def __init__ ( self , name , bases , attrs ) :
if not self . _register :
self . _register = True
super ( MetaModel , self ) . __init__ ( name , bases , attrs )
return
2012-01-09 12:41:20 +00:00
# The (OpenERP) module name can be in the `openerp.addons` namespace
2012-01-09 10:16:47 +00:00
# or not. For instance module `sale` can be imported as
2012-01-09 12:41:20 +00:00
# `openerp.addons.sale` (the good way) or `sale` (for backward
2012-01-09 10:16:47 +00:00
# compatibility).
module_parts = self . __module__ . split ( ' . ' )
if len ( module_parts ) > 2 and module_parts [ 0 ] == ' openerp ' and \
2012-01-09 12:41:20 +00:00
module_parts [ 1 ] == ' addons ' :
2012-01-09 10:16:47 +00:00
module_name = self . __module__ . split ( ' . ' ) [ 2 ]
else :
module_name = self . __module__ . split ( ' . ' ) [ 0 ]
2011-06-15 10:21:15 +00:00
if not hasattr ( self , ' _module ' ) :
self . _module = module_name
# Remember which models to instanciate for this module.
self . module_to_models . setdefault ( self . _module , [ ] ) . append ( self )
2011-09-24 02:53:46 +00:00
# Definition of log access columns, automatically added to models if
# self._log_access is True
LOG_ACCESS_COLUMNS = {
' create_uid ' : ' INTEGER REFERENCES res_users ON DELETE SET NULL ' ,
' create_date ' : ' TIMESTAMP ' ,
' write_uid ' : ' INTEGER REFERENCES res_users ON DELETE SET NULL ' ,
' write_date ' : ' TIMESTAMP '
}
# special columns automatically created by the ORM
MAGIC_COLUMNS = [ ' id ' ] + LOG_ACCESS_COLUMNS . keys ( )
2011-09-23 12:45:25 +00:00
class BaseModel(object):
    """ Base class for OpenERP models.

    OpenERP models are created by inheriting from this class' subclasses:

        * Model: for regular database-persisted models
        * TransientModel: for temporary data, stored in the database but automatically
          vaccuumed every so often
        * AbstractModel: for abstract super classes meant to be shared by multiple
          _inheriting classes (usually Models or TransientModels)

    The system will later instantiate the class once per database (on
    which the class' module is installed).

    To create a class that should not be instantiated, the _register class attribute
    may be set to False.
    """
    __metaclass__ = MetaModel
    _register = False # Set to false if the model shouldn't be automatically discovered.

    # Technical name of the model (e.g. 'res.partner'); mandatory unless
    # _inherit is set (enforced in __init__).
    _name = None
    _columns = {}
    _constraints = []
    _defaults = {}
    # Column used as the record's display name.
    _rec_name = 'name'
    # Column pointing to the parent record in hierarchical models.
    _parent_name = 'parent_id'
    _parent_store = False
    _parent_order = False
    _date_name = 'date'
    # Default ORDER BY clause for searches.
    _order = 'id'
    # PG sequence feeding ids; defaults to _table + '_id_seq' (see __init__).
    _sequence = None
    # Human-readable name; defaults to _name (see __init__).
    _description = None

    # dict of {field:method}, with method returning the name_get of records
    # to include in the _read_group, if grouped on this field
    _group_by_full = {}

    # Transience
    _transient = False # True in a TransientModel
    _transient_max_count = None
    _transient_max_hours = None
    _transient_check_time = 20

    # structure:
    #  { 'parent_model': 'm2o_field', ... }
    _inherits = {}

    # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
    # model from which it is inherits'd, r is the (local) field towards m, f
    # is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # Example:
    #  { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                   field_column_obj, origina_parent_model), ... }
    _inherit_fields = {}

    # Mapping field name/column_info object
    # This is similar to _inherit_fields but:
    # 1. includes self fields,
    # 2. uses column_info instead of a triple.
    _all_columns = {}

    # SQL table name; defaults to _name with '.' replaced by '_' (see __init__).
    _table = None
    _invalids = set()
    _log_create = False
    _sql_constraints = []
    # Method names that must not be exposed/overridden via dynamic dispatch.
    _protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']

    # Pseudo-field name used for optimistic concurrency checks.
    CONCURRENCY_CHECK_FIELD = '__last_update'
2011-09-24 02:03:03 +00:00
2010-05-19 20:02:14 +00:00
def log ( self , cr , uid , id , message , secondary = False , context = None ) :
2011-08-05 15:31:34 +00:00
if context and context . get ( ' disable_log ' ) :
return True
2010-10-21 14:45:46 +00:00
return self . pool . get ( ' res.log ' ) . create ( cr , uid ,
{
' name ' : message ,
' res_model ' : self . _name ,
' secondary ' : secondary ,
' res_id ' : id ,
} ,
2010-05-19 18:32:10 +00:00
context = context
)
2008-07-22 14:24:36 +00:00
2010-07-22 13:49:48 +00:00
def view_init ( self , cr , uid , fields_list , context = None ) :
2010-03-16 13:05:17 +00:00
""" Override this method to do specific things when a view on the object is opened. """
pass
2010-07-22 13:49:48 +00:00
    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).

        :param cr: database cursor
        :param context: optional dict; 'module' triggers ir_model_data
            creation, and 'field_state'/'field_name'/'select' tune the
            select_level stored for a manual field
        """
        if context is None:
            context = {}
        # Make sure the model itself is registered in ir_model.
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        if not cr.rowcount:
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
        else:
            model_id = cr.fetchone()[0]
        if 'module' in context:
            # Give the model an XML id ('model_<name>') owned by the loading module.
            name_id = 'model_'+self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
            if not cr.rowcount:
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, now(), now(), %s, %s, %s)", \
                    (name_id, context['module'], 'ir.model', model_id)
                )

        cr.commit()

        # Snapshot of the fields currently known in ir_model_fields,
        # keyed by field name.
        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        cols = {}
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse field should be created at the end, as it depends on its serialized field already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
            # Values mirroring the _column attributes, as stored in ir_model_fields.
            vals = {
                'model_id': model_id,
                'model': self._name,
                'name': k,
                'field_description': f.string.replace("'", " "),
                'ttype': f._type,
                'relation': f._obj or '',
                'view_load': (f.view_load and 1) or 0,
                'select_level': tools.ustr(f.select or 0),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': (f._type=='one2many' and isinstance(f, fields.one2many)) and f._fields_id or '',
                'serialization_field_id': None,
            }
            if getattr(f, 'serialization_field', None):
                # resolve link to serialization_field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, 1, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When its a custom field,it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                #setting value to let the problem NOT occur next time
                elif k in cols:
                    vals['select_level'] = cols[k]['select_level']

            if k not in cols:
                # Unknown field: insert it with a fresh id from the sequence.
                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                vals['id'] = id
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation,view_load,state,select_level,relation_field, translate, serialization_field_id
                ) VALUES (
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                )""", (
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                     vals['relation'], bool(vals['view_load']), 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                ))
                if 'module' in context:
                    # XML id for the field; suffix with the db id when the
                    # natural name ('field_<table>_<name>') is already taken.
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                    if cr.fetchone():
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, now(), now(), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
                    )
            else:
                # Known field: refresh the row as soon as any value differs,
                # then stop comparing.
                # NOTE(review): the first UPDATE rewrites only
                # field_description and looks redundant, since the second
                # UPDATE immediately refreshes every column before breaking.
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.commit()
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            view_load=%s, select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                        WHERE
                            model=%s AND name=%s""", (
                                vals['model_id'], vals['field_description'], vals['ttype'],
                                vals['relation'], bool(vals['view_load']),
                                vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                            ))
                        break
        cr.commit()
2011-05-26 18:24:45 +00:00
#
# Goal: try to apply inheritance at the instanciation level and
# put objects in the pool var
#
@classmethod
2011-08-16 07:44:31 +00:00
def create_instance ( cls , pool , cr ) :
2011-05-27 12:32:36 +00:00
""" Instanciate a given model.
This class method instanciates the class of some model ( i . e . a class
deriving from osv or osv_memory ) . The class might be the class passed
in argument or , if it inherits from another class , a class constructed
by combining the two classes .
The ` ` attributes ` ` argument specifies which parent class attributes
have to be combined .
TODO : the creation of the combined class is repeated at each call of
this method . This is probably unnecessary .
"""
2011-08-16 07:44:31 +00:00
attributes = [ ' _columns ' , ' _defaults ' , ' _inherits ' , ' _constraints ' ,
' _sql_constraints ' ]
2011-05-26 18:24:45 +00:00
parent_names = getattr ( cls , ' _inherit ' , None )
if parent_names :
if isinstance ( parent_names , ( str , unicode ) ) :
name = cls . _name or parent_names
parent_names = [ parent_names ]
else :
name = cls . _name
if not name :
raise TypeError ( ' _name is mandatory in case of multiple inheritance ' )
for parent_name in ( ( type ( parent_names ) == list ) and parent_names or [ parent_names ] ) :
2011-10-04 20:47:05 +00:00
parent_model = pool . get ( parent_name )
if not getattr ( cls , ' _original_module ' , None ) and name == parent_model . _name :
2011-10-04 22:54:39 +00:00
cls . _original_module = parent_model . _original_module
2011-10-04 20:47:05 +00:00
if not parent_model :
2011-05-27 12:32:36 +00:00
raise TypeError ( ' The model " %s " specifies an unexisting parent class " %s " \n '
' You may need to add a dependency on the parent class \' module. ' % ( name , parent_name ) )
2011-10-04 20:47:05 +00:00
parent_class = parent_model . __class__
2011-05-26 18:24:45 +00:00
nattr = { }
for s in attributes :
2011-10-04 20:47:05 +00:00
new = copy . copy ( getattr ( parent_model , s , { } ) )
2011-05-26 18:24:45 +00:00
if s == ' _columns ' :
# Don't _inherit custom fields.
for c in new . keys ( ) :
if new [ c ] . manual :
del new [ c ]
2012-02-15 10:17:14 +00:00
# Duplicate float fields because they have a .digits
# cache (which must be per-registry, not server-wide).
for c in new . keys ( ) :
if new [ c ] . _type == ' float ' :
new [ c ] = copy . copy ( new [ c ] )
2011-05-26 18:24:45 +00:00
if hasattr ( new , ' update ' ) :
new . update ( cls . __dict__ . get ( s , { } ) )
elif s == ' _constraints ' :
for c in cls . __dict__ . get ( s , [ ] ) :
exist = False
for c2 in range ( len ( new ) ) :
#For _constraints, we should check field and methods as well
if new [ c2 ] [ 2 ] == c [ 2 ] and ( new [ c2 ] [ 0 ] == c [ 0 ] \
or getattr ( new [ c2 ] [ 0 ] , ' __name__ ' , True ) == \
getattr ( c [ 0 ] , ' __name__ ' , False ) ) :
# If new class defines a constraint with
# same function name, we let it override
# the old one.
new [ c2 ] = c
exist = True
break
if not exist :
new . append ( c )
else :
new . extend ( cls . __dict__ . get ( s , [ ] ) )
nattr [ s ] = new
2011-06-15 10:21:15 +00:00
cls = type ( name , ( cls , parent_class ) , dict ( nattr , _register = False ) )
2011-10-04 20:47:05 +00:00
if not getattr ( cls , ' _original_module ' , None ) :
cls . _original_module = cls . _module
2011-05-26 18:24:45 +00:00
obj = object . __new__ ( cls )
obj . __init__ ( pool , cr )
return obj
def __new__ ( cls ) :
2011-08-26 16:17:36 +00:00
""" Register this model.
2011-05-26 18:24:45 +00:00
This doesn ' t create an instance but simply register the model
as being part of the module where it is defined .
"""
2011-09-23 12:45:25 +00:00
2011-05-26 18:24:45 +00:00
# Set the module name (e.g. base, sale, accounting, ...) on the class.
module = cls . __module__ . split ( ' . ' ) [ 0 ]
if not hasattr ( cls , ' _module ' ) :
cls . _module = module
2011-08-26 16:17:36 +00:00
# Record this class in the list of models to instantiate for this module,
# managed by the metaclass.
module_model_list = MetaModel . module_to_models . setdefault ( cls . _module , [ ] )
if cls not in module_model_list :
module_model_list . append ( cls )
2011-05-26 18:24:45 +00:00
# Since we don't return an instance here, the __init__
# method won't be called.
return None
    def __init__(self, pool, cr):
        """ Initialize a model and make it part of the given registry.

        - copy the stored fields' functions in the osv_pool,
        - update the _columns with the fields found in ir_model_fields,
        - ensure there is a many2one for each _inherits'd parent,
        - update the children's _columns,
        - give a chance to each field to initialize itself.

        :param pool: the registry (osv_pool) the model is added to
        :param cr: database cursor, used to load manual fields
        :raise except_orm: if neither _name nor _inherit is set
        """
        # NOTE(review): the model is registered before _name is validated
        # below, so a class without _name reaches pool.add() with None.
        pool.add(self._name, self)
        self.pool = pool

        if not self._name and not hasattr(self, '_inherit'):
            name = type(self).__name__.split('.')[0]
            msg = "The class %s has to have a _name attribute" % name

            _logger.error(msg)
            raise except_orm('ValueError', msg)

        # Derive sensible defaults from _name when not explicitly set.
        if not self._description:
            self._description = self._name
        if not self._table:
            self._table = self._name.replace('.', '_')

        if not hasattr(self, '_log_access'):
            # If _log_access is not specified, it is the same value as _auto.
            self._log_access = getattr(self, "_auto", True)

        # Work on a per-instance copy so the shared class attribute is not
        # mutated when manual fields are added below.
        self._columns = self._columns.copy()
        for store_field in self._columns:
            f = self._columns[store_field]
            if hasattr(f, 'digits_change'):
                f.digits_change(cr)
            # Drop any store trigger previously registered for this exact
            # (model, field) pair; it is re-added below if still relevant.
            def not_this_field(stored_func):
                x, y, z, e, f, l = stored_func
                return x != self._name or y != store_field
            self.pool._store_function[self._name] = filter(not_this_field, self.pool._store_function.get(self._name, []))
            if not isinstance(f, fields.function):
                continue
            if not f.store:
                continue
            # store=True is shorthand for "recompute on any write to self",
            # with default priority 10.
            sm = f.store
            if sm is True:
                sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, 10, None)}
            for object, aa in sm.items():
                if len(aa) == 4:
                    (fnct, fields2, order, length) = aa
                elif len(aa) == 3:
                    (fnct, fields2, order) = aa
                    length = None
                else:
                    raise except_orm('Error',
                        ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name)))
                self.pool._store_function.setdefault(object, [])
                # Triggers are kept sorted by priority (tuple index 4).
                self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
                self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))

        # Map each SQL constraint name to its user-facing error message.
        for (key, _, msg) in self._sql_constraints:
            self.pool._sql_error[self._table+'_'+key] = msg

        # Load manual fields
        cr.execute("SELECT id FROM ir_model_fields WHERE name=%s AND model=%s", ('state', 'ir.model.fields'))
        if cr.fetchone():
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (self._name, 'manual'))
            for field in cr.dictfetchall():
                if field['name'] in self._columns:
                    continue
                # NOTE(review): eval() of DB-stored 'domain'/'selection'
                # strings assumes they were written by trusted administrators.
                attrs = {
                    'string': field['field_description'],
                    'required': bool(field['required']),
                    'readonly': bool(field['readonly']),
                    'domain': eval(field['domain']) if field['domain'] else None,
                    'size': field['size'],
                    'ondelete': field['on_delete'],
                    'translate': (field['translate']),
                    'manual': True,
                    #'select': int(field['select_level'])
                }
                if field['serialization_field_id']:
                    # A sparse field: resolve its serialization field by id.
                    cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
                    attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
                    if field['ttype'] in ['many2one', 'one2many', 'many2many']:
                        attrs.update({'relation': field['relation']})
                    self._columns[field['name']] = fields.sparse(**attrs)
                elif field['ttype'] == 'selection':
                    self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
                elif field['ttype'] == 'reference':
                    self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
                elif field['ttype'] == 'many2one':
                    self._columns[field['name']] = fields.many2one(field['relation'], **attrs)
                elif field['ttype'] == 'one2many':
                    self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
                elif field['ttype'] == 'many2many':
                    _rel1 = field['relation'].replace('.', '_')
                    _rel2 = field['model'].replace('.', '_')
                    _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                    self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
                else:
                    self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
        self._inherits_check()
        self._inherits_reload()
        if not self._sequence:
            self._sequence = self._table+'_id_seq'
        for k in self._defaults:
            assert (k in self._columns) or (k in self._inherit_fields), 'Default function defined in %s but field %s does not exist !' % (self._name, k,)
        for f in self._columns:
            self._columns[f].restart()

        # Transience
        if self.is_transient():
            self._transient_check_count = 0
            self._transient_max_count = config.get('osv_memory_count_limit')
            self._transient_max_hours = config.get('osv_memory_age_limit')
            assert self._log_access, "TransientModels must have log_access turned on, " \
                                     "in order to implement their access rights policy"
2008-07-22 14:24:36 +00:00
    def __export_row(self, cr, uid, row, fields, context=None):
        """ Export one browse_record as a list of rows.

        :param row: browse_record to export
        :param fields: list of field paths, each already split into path
            components (e.g. ['order_line', 'product_id', 'id'])
        :return: list of rows; the first holds the record itself, the
            following ones hold extra lines produced by x2many sub-records
        """
        if context is None:
            context = {}

        def check_type(field_type):
            # Type-appropriate placeholder for an empty value.
            if field_type == 'float':
                return 0.0
            elif field_type == 'integer':
                return 0
            elif field_type == 'boolean':
                return 'False'
            return ''

        def selection_field(in_field):
            # Look up the _column of f[i] on an _inherits parent, recursing
            # up the inheritance chain if needed.
            # NOTE(review): the recursive branch drops its result (missing
            # `return`), so columns found on deeper parents yield None.
            col_obj = self.pool.get(in_field.keys()[0])
            if f[i] in col_obj._columns.keys():
                return col_obj._columns[f[i]]
            elif f[i] in col_obj._inherits.keys():
                selection_field(col_obj._inherits)
            else:
                return False

        def _get_xml_id(self, cr, uid, r):
            # Return the record's XML id, creating one under the
            # '__export__' pseudo-module if it has none yet.
            model_data = self.pool.get('ir.model.data')
            data_ids = model_data.search(cr, uid, [('model', '=', r._table_name), ('res_id', '=', r['id'])])
            if len(data_ids):
                d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
                if d['module']:
                    r = '%s.%s' % (d['module'], d['name'])
                else:
                    r = d['name']
            else:
                # Build a unique name '<table>_<id>[_<n>]'.
                postfix = 0
                while True:
                    n = self._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '')
                    if not model_data.search(cr, uid, [('name', '=', n)]):
                        break
                    postfix += 1
                model_data.create(cr, uid, {
                    'name': n,
                    'model': self._name,
                    'res_id': r['id'],
                    'module': '__export__',
                })
                r = '__export__.'+n
            return r

        lines = []
        data = map(lambda x: '', range(len(fields)))
        done = []
        for fpos in range(len(fields)):
            f = fields[fpos]
            if f:
                r = row
                i = 0
                # Walk the field path, dereferencing one component at a time.
                while i < len(f):
                    cols = False
                    if f[i] == '.id':
                        r = r['id']
                    elif f[i] == 'id':
                        r = _get_xml_id(self, cr, uid, r)
                    else:
                        r = r[f[i]]
                        # To display external name of selection field when its exported
                        if f[i] in self._columns.keys():
                            cols = self._columns[f[i]]
                        elif f[i] in self._inherit_fields.keys():
                            cols = selection_field(self._inherits)
                        if cols and cols._type == 'selection':
                            sel_list = cols.selection
                            if r and type(sel_list) == type([]):
                                r = [x[1] for x in sel_list if r==x[0]]
                                r = r and r[0] or False
                    if not r:
                        # Empty value: emit a type-appropriate placeholder.
                        if f[i] in self._columns:
                            r = check_type(self._columns[f[i]]._type)
                        elif f[i] in self._inherit_fields:
                            r = check_type(self._inherit_fields[f[i]][2]._type)
                        data[fpos] = r or False
                        break
                    if isinstance(r, (browse_record_list, list)):
                        # x2many value: recurse into the sub-records with the
                        # remainder of every path sharing this prefix.
                        first = True
                        fields2 = map(lambda x: (x[:i+1]==f[:i+1] and x[i+1:]) \
                                or [], fields)
                        if fields2 in done:
                            if [x for x in fields2 if x]:
                                break
                        done.append(fields2)
                        if cols and cols._type=='many2many' and len(fields[fpos])>(i+1) and (fields[fpos][i+1]=='id'):
                            # many2many exported by XML id: comma-separated list.
                            data[fpos] = ','.join([_get_xml_id(self, cr, uid, x) for x in r])
                            break
                        for row2 in r:
                            lines2 = row2._model.__export_row(cr, uid, row2, fields2,
                                    context)
                            if first:
                                # First sub-record: merge its first line into
                                # the current record's own row.
                                for fpos2 in range(len(fields)):
                                    if lines2 and lines2[0][fpos2]:
                                        data[fpos2] = lines2[0][fpos2]
                                if not data[fpos]:
                                    # No sub-field requested: fall back on a
                                    # comma-separated list of display names.
                                    dt = ''
                                    for rr in r:
                                        name_relation = self.pool.get(rr._table_name)._rec_name
                                        if isinstance(rr[name_relation], browse_record):
                                            rr = rr[name_relation]
                                        rr_name = self.pool.get(rr._table_name).name_get(cr, uid, [rr.id], context=context)
                                        rr_name = rr_name and rr_name[0] and rr_name[0][1] or ''
                                        dt += tools.ustr(rr_name or '') + ','
                                    data[fpos] = dt[:-1]
                                    break
                                lines += lines2[1:]
                                first = False
                            else:
                                lines += lines2
                        break
                    i += 1
                if i == len(f):
                    # Scalar leaf: render many2one values through name_get.
                    if isinstance(r, browse_record):
                        r = self.pool.get(r._table_name).name_get(cr, uid, [r.id], context=context)
                        r = r and r[0] and r[0][1] or ''
                    data[fpos] = tools.ustr(r or '')
        return [data] + lines
2009-07-14 05:42:37 +00:00
def export_data ( self , cr , uid , ids , fields_to_export , context = None ) :
2010-04-02 13:54:12 +00:00
"""
Export fields for selected objects
: param cr : database cursor
: param uid : current user id
2010-04-06 12:30:28 +00:00
: param ids : list of ids
: param fields_to_export : list of fields
2010-12-20 23:17:43 +00:00
: param context : context arguments , like lang , time zone
2010-04-06 12:30:28 +00:00
: rtype : dictionary with a * datas * matrix
2010-04-02 13:54:12 +00:00
This method is used when exporting data via client menu
"""
2010-08-12 17:48:18 +00:00
if context is None :
2008-08-12 14:44:56 +00:00
context = { }
2009-07-14 04:52:22 +00:00
cols = self . _columns . copy ( )
for f in self . _inherit_fields :
2009-10-27 06:23:53 +00:00
cols . update ( { f : self . _inherit_fields [ f ] [ 2 ] } )
2011-09-22 12:25:42 +00:00
fields_to_export = map ( fix_import_export_id_paths , fields_to_export )
2008-07-22 14:24:36 +00:00
datas = [ ]
for row in self . browse ( cr , uid , ids , context ) :
2009-07-14 04:52:22 +00:00
datas + = self . __export_row ( cr , uid , row , fields_to_export , context )
2010-09-03 10:59:56 +00:00
return { ' datas ' : datas }
2008-07-22 14:24:36 +00:00
2009-02-27 16:37:20 +00:00
def import_data ( self , cr , uid , fields , datas , mode = ' init ' , current_module = ' ' , noupdate = False , context = None , filename = None ) :
2011-10-11 17:11:48 +00:00
""" Import given data in given module
2010-04-02 13:54:12 +00:00
2010-12-29 12:09:26 +00:00
This method is used when importing data via client menu .
Example of fields to import for a sale . order : :
2010-12-27 16:46:21 +00:00
. id , ( = database_id )
partner_id , ( = name_search )
order_line / . id , ( = database_id )
order_line / name ,
order_line / product_id / id , ( = xml id )
order_line / price_unit ,
order_line / product_uom_qty ,
order_line / product_uom / id ( = xml_id )
2011-09-26 07:51:15 +00:00
2011-10-11 17:11:48 +00:00
This method returns a 4 - tuple with the following structure : :
2011-09-26 07:51:15 +00:00
2011-10-11 17:11:48 +00:00
( return_code , errored_resource , error_message , unused )
2011-09-26 07:51:15 +00:00
2011-10-11 17:11:48 +00:00
* The first item is a return code , it is ` ` - 1 ` ` in case of
import error , or the last imported row number in case of success
* The second item contains the record data dict that failed to import
in case of error , otherwise it ' s 0
* The third item contains an error message string in case of error ,
otherwise it ' s 0
* The last item is currently unused , with no specific semantics
: param fields : list of fields to import
2011-09-26 07:51:15 +00:00
: param data : data to import
: param mode : ' init ' or ' update ' for record creation
: param current_module : module name
: param noupdate : flag for record creation
: param filename : optional file to store partial import state for recovery
2011-10-11 17:11:48 +00:00
: returns : 4 - tuple in the form ( return_code , errored_resource , error_message , unused )
: rtype : ( int , dict or 0 , str or 0 , str or 0 )
2010-04-02 13:54:12 +00:00
"""
2008-07-22 14:24:36 +00:00
if not context :
2008-08-12 14:44:56 +00:00
context = { }
2011-09-22 12:25:42 +00:00
fields = map ( fix_import_export_id_paths , fields )
2009-07-14 04:52:22 +00:00
ir_model_data_obj = self . pool . get ( ' ir.model.data ' )
2009-10-27 06:23:53 +00:00
2010-12-27 16:46:21 +00:00
# mode: id (XML id) or .id (database id) or False for name_get
def _get_id ( model_name , id , current_module = False , mode = ' id ' ) :
if mode == ' .id ' :
id = int ( id )
obj_model = self . pool . get ( model_name )
ids = obj_model . search ( cr , uid , [ ( ' id ' , ' = ' , int ( id ) ) ] )
if not len ( ids ) :
raise Exception ( _ ( " Database ID doesn ' t exist: %s : %s " ) % ( model_name , id ) )
elif mode == ' id ' :
if ' . ' in id :
module , xml_id = id . rsplit ( ' . ' , 1 )
else :
module , xml_id = current_module , id
record_id = ir_model_data_obj . _get_id ( cr , uid , module , xml_id )
ir_model_data = ir_model_data_obj . read ( cr , uid , [ record_id ] , [ ' res_id ' ] )
if not ir_model_data :
raise ValueError ( ' No references to %s . %s ' % ( module , xml_id ) )
id = ir_model_data [ 0 ] [ ' res_id ' ]
else :
obj_model = self . pool . get ( model_name )
2011-01-20 05:40:34 +00:00
ids = obj_model . name_search ( cr , uid , id , operator = ' = ' , context = context )
2010-12-27 16:46:21 +00:00
if not ids :
raise ValueError ( ' No record found for %s ' % ( id , ) )
id = ids [ 0 ] [ 0 ]
return id
# IN:
# datas: a list of records, each record is defined by a list of values
# prefix: a list of prefix fields ['line_ids']
# position: the line to process, skip is False if it's the first line of the current record
# OUT:
# (res, position, warning, res_id) with
# res: the record for the next line to process (including it's one2many)
# position: the new position for the next line
# res_id: the ID of the record if it's a modification
        def process_liness(self, datas, prefix, current_module, model_name, fields_def, position=0, skip=0):
            """Convert one imported record -- possibly spanning several input
            lines because of one2many sub-records -- into a ``row`` dict
            suitable for ir.model.data._update().

            See the IN/OUT comment block above for the parameters and the
            meaning of each element of the returned 5-tuple
            (row, position, warning, res_id, xml_id).
            """
            line = datas[position]
            row = {}
            warning = []
            data_res_id = False
            xml_id = False
            # highest line index consumed so far (one2many fields may eat
            # several of the following lines)
            nbrmax = position + 1
            done = {}
            for i, field in enumerate(fields):
                res = False
                if i >= len(line):
                    raise Exception(_('Please check that all your lines have %d columns.'
                        'Stopped around line %d having %d columns.') % \
                        (len(fields), position + 2, len(line)))
                if not line[i]:
                    continue

                if field[:len(prefix)] <> prefix:
                    # the column belongs to another (parent) record; when we
                    # already consumed at least one line for this sub-record
                    # (skip is truthy), a value here means the sub-record ends
                    if line[i] and skip:
                        return False
                    continue
                field_name = field[len(prefix)]

                #set the mode for m2o, o2m, m2m : xml_id/id/name
                if len(field) == len(prefix) + 1:
                    mode = False
                else:
                    mode = field[len(prefix) + 1]

                # TODO: improve this by using csv.csv_reader
                # NOTE(review): re-defined on every loop iteration; could be
                # hoisted out of the loop
                def many_ids(line, relation, current_module, mode):
                    # resolve a csv_internal_sep-joined list of ids into a
                    # single (6, 0, ids) many2many command
                    res = []
                    for db_id in line.split(config.get('csv_internal_sep')):
                        res.append(_get_id(relation, db_id, current_module, mode))
                    return [(6, 0, res)]

                # ID of the record using a XML ID
                if field_name == 'id':
                    try:
                        data_res_id = _get_id(model_name, line[i], current_module)
                    except ValueError:
                        # unknown XML id: keep it so a new entry gets created
                        pass
                    xml_id = line[i]
                    continue

                # ID of the record using a database ID
                elif field_name == '.id':
                    data_res_id = _get_id(model_name, line[i], current_module, '.id')
                    continue

                field_type = fields_def[field_name]['type']
                # recursive call for getting children and returning [(0,0,{})] or [(1,ID,{})]
                if field_type == 'one2many':
                    if field_name in done:
                        continue
                    done[field_name] = True
                    relation = fields_def[field_name]['relation']
                    relation_obj = self.pool.get(relation)
                    newfd = relation_obj.fields_get(cr, uid, context=context)
                    pos = position

                    res = []

                    first = 0
                    # consume the following lines as long as they contribute
                    # sub-records to this one2many field
                    while pos < len(datas):
                        res2 = process_liness(self, datas, prefix + [field_name], current_module, relation_obj._name, newfd, pos, first)
                        if not res2:
                            break
                        (newrow, pos, w2, data_res_id2, xml_id2) = res2
                        nbrmax = max(nbrmax, pos)
                        warning += w2
                        first += 1

                        # stop on an empty sub-record
                        if (not newrow) or not reduce(lambda x, y: x or y, newrow.values(), 0):
                            break

                        # (1, id, vals) updates an existing record, (0, 0, vals) creates one
                        res.append((data_res_id2 and 1 or 0, data_res_id2 or 0, newrow))

                elif field_type == 'many2one':
                    relation = fields_def[field_name]['relation']
                    res = _get_id(relation, line[i], current_module, mode)

                elif field_type == 'many2many':
                    relation = fields_def[field_name]['relation']
                    res = many_ids(line[i], relation, current_module, mode)

                elif field_type == 'integer':
                    res = line[i] and int(line[i]) or 0
                elif field_type == 'boolean':
                    res = line[i].lower() not in ('0', 'false', 'off')
                elif field_type == 'float':
                    res = line[i] and float(line[i]) or 0.0
                elif field_type == 'selection':
                    # accept either the internal key or the displayed label
                    for key, val in fields_def[field_name]['selection']:
                        if tools.ustr(line[i]) in [tools.ustr(key), tools.ustr(val)]:
                            res = key
                            break
                    if line[i] and not res:
                        _logger.warning(
                            _("key '%s' not found in selection field '%s'"),
                            tools.ustr(line[i]), tools.ustr(field_name))
                        warning.append(_("Key/value '%s' not found in selection field '%s'") % (
                            tools.ustr(line[i]), tools.ustr(field_name)))

                else:
                    # char/text and other scalar types: keep the raw value
                    res = line[i]

                row[field_name] = res or False

            return row, nbrmax, warning, data_res_id, xml_id
2008-07-22 14:24:36 +00:00
fields_def = self . fields_get ( cr , uid , context = context )
2010-12-27 16:46:21 +00:00
position = 0
2011-12-01 12:10:40 +00:00
if config . get ( ' import_partial ' ) and filename :
with open ( config . get ( ' import_partial ' ) , ' rb ' ) as partial_import_file :
data = pickle . load ( partial_import_file )
position = data . get ( filename , 0 )
2011-08-23 09:27:37 +00:00
2010-12-27 16:46:21 +00:00
while position < len ( datas ) :
2010-12-27 17:00:54 +00:00
( res , position , warning , res_id , xml_id ) = \
2010-12-27 16:46:21 +00:00
process_liness ( self , datas , [ ] , current_module , self . _name , fields_def , position = position )
2009-10-27 06:23:53 +00:00
if len ( warning ) :
2008-07-22 14:24:36 +00:00
cr . rollback ( )
2011-12-01 12:12:57 +00:00
return - 1 , res , ' Line ' + str ( position ) + ' : ' + ' ! \n ' . join ( warning ) , ' '
2009-10-27 06:23:53 +00:00
2009-06-26 14:27:34 +00:00
try :
2011-06-15 07:22:31 +00:00
ir_model_data_obj . _update ( cr , uid , self . _name ,
2010-12-27 17:00:54 +00:00
current_module , res , mode = mode , xml_id = xml_id ,
2009-10-05 12:10:29 +00:00
noupdate = noupdate , res_id = res_id , context = context )
2009-06-26 14:27:34 +00:00
except Exception , e :
2011-12-01 12:12:57 +00:00
return - 1 , res , ' Line ' + str ( position ) + ' : ' + tools . ustr ( e ) , ' '
2010-12-27 16:46:21 +00:00
2011-12-01 12:10:40 +00:00
if config . get ( ' import_partial ' ) and filename and ( not ( position % 100 ) ) :
with open ( config . get ( ' import_partial ' ) , ' rb ' ) as partial_import :
data = pickle . load ( partial_import )
2010-12-27 16:46:21 +00:00
data [ filename ] = position
2011-12-01 12:10:40 +00:00
with open ( config . get ( ' import_partial ' ) , ' wb ' ) as partial_import :
pickle . dump ( data , partial_import )
2010-05-14 09:11:43 +00:00
if context . get ( ' defer_parent_store_computation ' ) :
self . _parent_store_compute ( cr )
2008-08-19 13:03:11 +00:00
cr . commit ( )
2008-08-19 13:26:12 +00:00
2010-05-14 09:11:43 +00:00
if context . get ( ' defer_parent_store_computation ' ) :
self . _parent_store_compute ( cr )
2011-12-01 12:12:57 +00:00
return position , 0 , 0 , 0
2008-07-22 14:24:36 +00:00
2010-07-22 13:49:48 +00:00
def get_invalid_fields ( self , cr , uid ) :
2008-11-24 15:42:29 +00:00
return list ( self . _invalids )
2008-11-24 12:48:39 +00:00
2008-07-22 14:24:36 +00:00
def _validate ( self , cr , uid , ids , context = None ) :
context = context or { }
lng = context . get ( ' lang ' , False ) or ' en_US '
trans = self . pool . get ( ' ir.translation ' )
2008-08-26 14:26:42 +00:00
error_msgs = [ ]
2008-07-22 14:24:36 +00:00
for constraint in self . _constraints :
fun , msg , fields = constraint
if not fun ( self , cr , uid , ids ) :
2010-05-18 17:00:23 +00:00
# Check presence of __call__ directly instead of using
# callable() because it will be deprecated as of Python 3.0
2010-05-11 12:31:49 +00:00
if hasattr ( msg , ' __call__ ' ) :
2010-11-18 16:47:21 +00:00
tmp_msg = msg ( self , cr , uid , ids , context = context )
if isinstance ( tmp_msg , tuple ) :
tmp_msg , params = tmp_msg
translated_msg = tmp_msg % params
else :
translated_msg = tmp_msg
2010-05-18 17:00:23 +00:00
else :
2011-08-24 23:11:31 +00:00
translated_msg = trans . _get_source ( cr , uid , self . _name , ' constraint ' , lng , msg ) or msg
2008-08-26 14:26:42 +00:00
error_msgs . append (
2009-01-28 12:52:32 +00:00
_ ( " Error occurred while validating the field(s) %s : %s " ) % ( ' , ' . join ( fields ) , translated_msg )
2008-09-03 11:14:29 +00:00
)
2008-11-24 15:42:29 +00:00
self . _invalids . update ( fields )
2008-08-26 14:26:42 +00:00
if error_msgs :
2008-07-22 14:24:36 +00:00
cr . rollback ( )
2008-08-26 14:26:42 +00:00
raise except_orm ( ' ValidateError ' , ' \n ' . join ( error_msgs ) )
2008-11-24 13:27:43 +00:00
else :
2008-11-24 15:42:29 +00:00
self . _invalids . clear ( )
2008-07-22 14:24:36 +00:00
def default_get ( self , cr , uid , fields_list , context = None ) :
2010-04-02 13:54:12 +00:00
"""
2010-08-13 15:09:55 +00:00
Returns default values for the fields in fields_list .
2010-08-17 10:02:33 +00:00
2010-08-13 15:09:55 +00:00
: param fields_list : list of fields to get the default values for ( example [ ' field1 ' , ' field2 ' , ] )
: type fields_list : list
2010-11-04 17:05:23 +00:00
: param context : optional context dictionary - it may contains keys for specifying certain options
like ` ` context_lang ` ` ( language ) or ` ` context_tz ` ` ( timezone ) to alter the results of the call .
It may contain keys in the form ` ` default_XXX ` ` ( where XXX is a field name ) , to set
or override a default value for a field .
A special ` ` bin_size ` ` boolean flag may also be passed in the context to request the
value of all fields . binary columns to be returned as the size of the binary instead of its
contents . This can also be selectively overriden by passing a field - specific flag
in the form ` ` bin_size_XXX : True / False ` ` where ` ` XXX ` ` is the name of the field .
Note : The ` ` bin_size_XXX ` ` form is new in OpenERP v6 .0 .
2010-08-13 15:09:55 +00:00
: return : dictionary of the default values ( set on the object model class , through user preferences , or in the context )
"""
# trigger view init hook
self . view_init ( cr , uid , fields_list , context )
2010-01-21 13:21:18 +00:00
2010-08-13 15:09:55 +00:00
if not context :
context = { }
defaults = { }
2010-08-13 15:38:39 +00:00
# get the default values for the inherited fields
2010-08-13 15:09:55 +00:00
for t in self . _inherits . keys ( ) :
defaults . update ( self . pool . get ( t ) . default_get ( cr , uid , fields_list ,
context ) )
2010-08-13 15:38:39 +00:00
# get the default values defined in the object
2010-08-13 15:09:55 +00:00
for f in fields_list :
if f in self . _defaults :
if callable ( self . _defaults [ f ] ) :
defaults [ f ] = self . _defaults [ f ] ( self , cr , uid , context )
else :
defaults [ f ] = self . _defaults [ f ]
fld_def = ( ( f in self . _columns ) and self . _columns [ f ] ) \
or ( ( f in self . _inherit_fields ) and self . _inherit_fields [ f ] [ 2 ] ) \
or False
if isinstance ( fld_def , fields . property ) :
property_obj = self . pool . get ( ' ir.property ' )
prop_value = property_obj . get ( cr , uid , f , self . _name , context = context )
if prop_value :
if isinstance ( prop_value , ( browse_record , browse_null ) ) :
defaults [ f ] = prop_value . id
else :
defaults [ f ] = prop_value
else :
if f not in defaults :
defaults [ f ] = False
2010-08-13 15:38:39 +00:00
# get the default values set by the user and override the default
# values defined in the object
ir_values_obj = self . pool . get ( ' ir.values ' )
2010-08-13 15:09:55 +00:00
res = ir_values_obj . get ( cr , uid , ' default ' , False , [ self . _name ] )
for id , field , field_value in res :
if field in fields_list :
fld_def = ( field in self . _columns ) and self . _columns [ field ] or self . _inherit_fields [ field ] [ 2 ]
if fld_def . _type in ( ' many2one ' , ' one2one ' ) :
obj = self . pool . get ( fld_def . _obj )
if not obj . search ( cr , uid , [ ( ' id ' , ' = ' , field_value or False ) ] ) :
continue
if fld_def . _type in ( ' many2many ' ) :
obj = self . pool . get ( fld_def . _obj )
field_value2 = [ ]
for i in range ( len ( field_value ) ) :
if not obj . search ( cr , uid , [ ( ' id ' , ' = ' ,
field_value [ i ] ) ] ) :
continue
field_value2 . append ( field_value [ i ] )
field_value = field_value2
if fld_def . _type in ( ' one2many ' ) :
obj = self . pool . get ( fld_def . _obj )
field_value2 = [ ]
for i in range ( len ( field_value ) ) :
field_value2 . append ( { } )
for field2 in field_value [ i ] :
if field2 in obj . _columns . keys ( ) and obj . _columns [ field2 ] . _type in ( ' many2one ' , ' one2one ' ) :
obj2 = self . pool . get ( obj . _columns [ field2 ] . _obj )
if not obj2 . search ( cr , uid ,
[ ( ' id ' , ' = ' , field_value [ i ] [ field2 ] ) ] ) :
continue
elif field2 in obj . _inherit_fields . keys ( ) and obj . _inherit_fields [ field2 ] [ 2 ] . _type in ( ' many2one ' , ' one2one ' ) :
obj2 = self . pool . get ( obj . _inherit_fields [ field2 ] [ 2 ] . _obj )
if not obj2 . search ( cr , uid ,
[ ( ' id ' , ' = ' , field_value [ i ] [ field2 ] ) ] ) :
continue
# TODO add test for many2many and one2many
field_value2 [ i ] [ field2 ] = field_value [ i ] [ field2 ]
field_value = field_value2
defaults [ field ] = field_value
2010-08-13 15:38:39 +00:00
# get the default values from the context
2010-08-13 15:09:55 +00:00
for key in context or { } :
if key . startswith ( ' default_ ' ) and ( key [ 8 : ] in fields_list ) :
defaults [ key [ 8 : ] ] = context [ key ]
return defaults
2010-01-21 13:21:18 +00:00
2010-07-12 14:15:13 +00:00
def fields_get_keys ( self , cr , user , context = None ) :
2008-12-02 08:16:29 +00:00
res = self . _columns . keys ( )
2011-06-14 10:13:11 +00:00
# TODO I believe this loop can be replace by
# res.extend(self._inherit_fields.key())
2008-12-02 08:16:29 +00:00
for parent in self . _inherits :
2010-07-12 14:15:13 +00:00
res . extend ( self . pool . get ( parent ) . fields_get_keys ( cr , user , context ) )
2008-12-02 08:16:29 +00:00
return res
2008-07-22 14:24:36 +00:00
    #
    # Override this method if you need a window title that depends on the context
    #
    def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
        """Return the window title to use for the given view, or False to keep
        the default one. This base implementation never overrides the title;
        models may redefine it (typically using ``context``)."""
        return False
2011-07-05 12:22:22 +00:00
    def __view_look_dom(self, cr, user, node, view_id, in_tree_view, model_fields, context=None):
        """Return the description of the fields in the node.

        In a normal call to this method, node is a complete view architecture
        but it is actually possible to give some sub-node (this is used so
        that the method can call itself recursively).

        Originally, the field descriptions are drawn from the node itself.
        But there is now some code calling fields_get() in order to merge some
        of those information in the architecture.

        The node is modified in place: group-based visibility, modifiers and
        view translations are applied to it while the field descriptions are
        collected.

        :param node: etree node (or sub-node) of the view architecture
        :param bool in_tree_view: whether the node lives inside a tree view
        :param dict model_fields: fields_get() result used to transfer field
                                  properties into the node modifiers
        :return: dict mapping field names to their attrs (views, selection...)
        """
        if context is None:
            context = {}
        result = False
        fields = {}
        children = True

        modifiers = {}

        def encode(s):
            # etree.fromstring below expects byte strings
            if isinstance(s, unicode):
                return s.encode('utf8')
            return s

        def check_group(node):
            """Set invisible to true if the user is not in the specified groups."""
            if node.get('groups'):
                groups = node.get('groups').split(',')
                ir_model_access = self.pool.get('ir.model.access')
                can_see = any(ir_model_access.check_groups(cr, user, group) for group in groups)
                if not can_see:
                    node.set('invisible', '1')
                    modifiers['invisible'] = True
                    if 'attrs' in node.attrib:
                        del(node.attrib['attrs']) #avoid making field visible later
                # the groups attribute is consumed either way
                del(node.attrib['groups'])

        if node.tag in ('field', 'node', 'arrow'):
            if node.get('object'):
                # diagram node/arrow: wrap its <field> children in a synthetic
                # form view of the referenced object and describe that
                attrs = {}
                views = {}
                xml = "<form>"
                for f in node:
                    if f.tag in ('field'):
                        xml += etree.tostring(f, encoding="utf-8")
                xml += "</form>"
                new_xml = etree.fromstring(encode(xml))
                ctx = context.copy()
                ctx['base_model_name'] = self._name
                xarch, xfields = self.pool.get(node.get('object')).__view_look_dom_arch(cr, user, new_xml, view_id, ctx)
                views['form'] = {
                    'arch': xarch,
                    'fields': xfields
                }
                attrs = {'views': views}
                fields = xfields
            if node.get('name'):
                attrs = {}
                try:
                    # the field may be a direct column or an inherited one
                    if node.get('name') in self._columns:
                        column = self._columns[node.get('name')]
                    else:
                        column = self._inherit_fields[node.get('name')][2]
                except Exception:
                    column = False

                if column:
                    relation = self.pool.get(column._obj)

                    children = False
                    views = {}
                    for f in node:
                        if f.tag in ('form', 'tree', 'graph'):
                            # embedded sub-views are extracted from the arch
                            # and described separately under attrs['views']
                            node.remove(f)
                            ctx = context.copy()
                            ctx['base_model_name'] = self._name
                            xarch, xfields = relation.__view_look_dom_arch(cr, user, f, view_id, ctx)
                            views[str(f.tag)] = {
                                'arch': xarch,
                                'fields': xfields
                            }
                    attrs = {'views': views}
                    if node.get('widget') and node.get('widget') == 'selection':
                        # Prepare the cached selection list for the client. This needs to be
                        # done even when the field is invisible to the current user, because
                        # other events could need to change its value to any of the selectable ones
                        # (such as on_change events, refreshes, etc.)

                        # If domain and context are strings, we keep them for client-side, otherwise
                        # we evaluate them server-side to consider them when generating the list of
                        # possible values
                        # TODO: find a way to remove this hack, by allow dynamic domains
                        dom = []
                        if column._domain and not isinstance(column._domain, basestring):
                            dom = column._domain
                        # NOTE(review): eval of the view's domain attribute;
                        # views come from installed modules, i.e. trusted input
                        dom += eval(node.get('domain', '[]'), {'uid': user, 'time': time})
                        search_context = dict(context)
                        if column._context and not isinstance(column._context, basestring):
                            search_context.update(column._context)
                        attrs['selection'] = relation._name_search(cr, user, '', dom, context=search_context, limit=None, name_get_uid=1)
                        if (node.get('required') and not int(node.get('required'))) or not column.required:
                            # optional field: offer an empty choice
                            attrs['selection'].append((False, ''))
                fields[node.get('name')] = attrs

            field = model_fields.get(node.get('name'))
            if field:
                transfer_field_to_modifiers(field, modifiers)

        elif node.tag in ('form', 'tree'):
            result = self.view_header_get(cr, user, False, node.tag, context)
            if result:
                node.set('string', result)
            in_tree_view = node.tag == 'tree'

        elif node.tag == 'calendar':
            # calendar attributes reference extra fields the client must load
            for additional_field in ('date_start', 'date_delay', 'date_stop', 'color'):
                if node.get(additional_field):
                    fields[node.get(additional_field)] = {}

        check_group(node)

        # The view architeture overrides the python model.
        # Get the attrs before they are (possibly) deleted by check_group below
        transfer_node_to_modifiers(node, modifiers, context, in_tree_view)
        # TODO remove attrs couterpart in modifiers when invisible is true ?

        # translate view
        if 'lang' in context:
            if node.get('string') and not result:
                trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('string'))
                if trans == node.get('string') and ('base_model_name' in context):
                    # If translation is same as source, perhaps we'd have more luck with the alternative model name
                    # (in case we are in a mixed situation, such as an inherited view where parent_view.model != model
                    trans = self.pool.get('ir.translation')._get_source(cr, user, context['base_model_name'], 'view', context['lang'], node.get('string'))
                if trans:
                    node.set('string', trans)
            if node.get('confirm'):
                trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('confirm'))
                if trans:
                    node.set('confirm', trans)
            if node.get('sum'):
                trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('sum'))
                if trans:
                    node.set('sum', trans)
            if node.get('help'):
                trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('help'))
                if trans:
                    node.set('help', trans)

        # recurse into the children, unless an embedded sub-view already
        # consumed them above (children == False)
        for f in node:
            if children or (node.tag == 'field' and f.tag in ('filter', 'separator')):
                fields.update(self.__view_look_dom(cr, user, f, view_id, in_tree_view, model_fields, context))

        transfer_modifiers_to_node(modifiers, node)
        return fields
2010-10-13 21:53:40 +00:00
def _disable_workflow_buttons ( self , cr , user , node ) :
2011-06-17 15:44:40 +00:00
""" Set the buttons in node to readonly if the user can ' t activate them. """
2010-10-13 21:53:40 +00:00
if user == 1 :
# admin user can always activate workflow buttons
return node
2008-07-22 14:24:36 +00:00
2010-10-13 21:53:40 +00:00
# TODO handle the case of more than one workflow for a model or multiple
# transitions with different groups and same signal
2009-01-02 23:18:51 +00:00
usersobj = self . pool . get ( ' res.users ' )
2009-09-17 07:27:12 +00:00
buttons = ( n for n in node . getiterator ( ' button ' ) if n . get ( ' type ' ) != ' object ' )
2009-01-19 23:24:58 +00:00
for button in buttons :
2010-10-13 21:53:40 +00:00
user_groups = usersobj . read ( cr , user , [ user ] , [ ' groups_id ' ] ) [ 0 ] [ ' groups_id ' ]
cr . execute ( """ SELECT DISTINCT t.group_id
FROM wkf
INNER JOIN wkf_activity a ON a . wkf_id = wkf . id
INNER JOIN wkf_transition t ON ( t . act_to = a . id )
WHERE wkf . osv = % s
AND t . signal = % s
2010-10-15 20:06:55 +00:00
AND t . group_id is NOT NULL
2010-10-13 21:53:40 +00:00
""" , (self._name, button.get( ' name ' )))
2010-10-15 20:06:55 +00:00
group_ids = [ x [ 0 ] for x in cr . fetchall ( ) if x [ 0 ] ]
2010-10-13 21:53:40 +00:00
can_click = not group_ids or bool ( set ( user_groups ) . intersection ( group_ids ) )
2009-10-09 09:59:54 +00:00
button . set ( ' readonly ' , str ( int ( not can_click ) ) )
2010-10-13 21:53:40 +00:00
return node
2008-07-22 14:24:36 +00:00
2010-10-13 21:53:40 +00:00
    def __view_look_dom_arch(self, cr, user, node, view_id, context=None):
        """Return an architecture and a description of all the fields.

        The field description combines the result of fields_get() and
        __view_look_dom().

        :param node: the architecture as as an etree
        :return: a tuple (arch, fields) where arch is the given node as a
                 string and fields is the description of all the fields.
        :raises except_orm: when the view references a field unknown to the
                 model (a 'View error')
        """
        fields = {}
        if node.tag == 'diagram':
            # diagram views take their fields from the models referenced by
            # their first (node) and second (arrow) children
            if node.getchildren()[0].tag == 'node':
                node_fields = self.pool.get(node.getchildren()[0].get('object')).fields_get(cr, user, None, context)
                fields.update(node_fields)
            if node.getchildren()[1].tag == 'arrow':
                arrow_fields = self.pool.get(node.getchildren()[1].get('object')).fields_get(cr, user, None, context)
                fields.update(arrow_fields)
        else:
            fields = self.fields_get(cr, user, None, context)
        fields_def = self.__view_look_dom(cr, user, node, view_id, False, fields, context=context)
        node = self._disable_workflow_buttons(cr, user, node)
        arch = etree.tostring(node, encoding="utf-8").replace('\t', '')
        # keep only the fields that actually appear in the view
        # (keys() returns a list under Python 2, so deleting while
        # iterating it is safe here)
        for k in fields.keys():
            if k not in fields_def:
                del fields[k]

        for field in fields_def:
            if field == 'id':
                # sometime, the view may contain the (invisible) field 'id' needed for a domain (when 2 objects have cross references)
                fields['id'] = {'readonly': True, 'type': 'integer', 'string': 'ID'}
            elif field in fields:
                # the view-level description overrides the model one
                fields[field].update(fields_def[field])
            else:
                # the view references a field unknown to the model: build an
                # error message listing the view parts that mention it
                cr.execute('select name, model from ir_ui_view where (id=%s or inherit_id=%s) and arch like %s', (view_id, view_id, '%%%s%%' % field))
                res = cr.fetchall()[:]
                model = res[0][1]
                res.insert(0, ("Can't find field '%s' in the following view parts composing the view of object model '%s':" % (field, model), None))
                msg = "\n * ".join([r[0] for r in res])
                msg += "\n\nEither you wrongly customized this view, or some modules bringing those views are not compatible with your current data model"
                _logger.error(msg)
                raise except_orm('View error', msg)
        return arch, fields
2011-09-15 11:12:55 +00:00
def _get_default_form_view ( self , cr , user , context = None ) :
2011-09-15 12:04:29 +00:00
""" Generates a default single-line form view using all fields
of the current model except the m2m and o2m ones .
: param cr : database cursor
: param int user : user id
: param dict context : connection context
: returns : a form view as an lxml document
: rtype : etree . _Element
2008-07-22 14:24:36 +00:00
"""
2011-09-15 12:04:29 +00:00
view = etree . Element ( ' form ' , string = self . _description )
2011-09-15 11:12:21 +00:00
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
2011-09-15 12:04:29 +00:00
for field , descriptor in self . fields_get ( cr , user , context = context ) . iteritems ( ) :
if descriptor [ ' type ' ] in ( ' one2many ' , ' many2many ' ) :
continue
etree . SubElement ( view , ' field ' , name = field )
if descriptor [ ' type ' ] == ' text ' :
etree . SubElement ( view , ' newline ' )
return view
2008-07-22 14:24:36 +00:00
2011-09-15 11:12:55 +00:00
def _get_default_tree_view ( self , cr , user , context = None ) :
2011-09-15 12:04:29 +00:00
""" Generates a single-field tree view, using _rec_name if
it ' s one of the columns or the first column it finds otherwise
: param cr : database cursor
: param int user : user id
: param dict context : connection context
: returns : a tree view as an lxml document
: rtype : etree . _Element
"""
2011-09-15 11:12:21 +00:00
_rec_name = self . _rec_name
if _rec_name not in self . _columns :
2011-11-10 13:51:06 +00:00
_rec_name = self . _columns . keys ( ) [ 0 ] if len ( self . _columns . keys ( ) ) > 0 else " id "
2011-09-15 12:04:29 +00:00
view = etree . Element ( ' tree ' , string = self . _description )
etree . SubElement ( view , ' field ' , name = _rec_name )
return view
2011-09-15 11:12:21 +00:00
2011-09-15 11:12:55 +00:00
def _get_default_calendar_view ( self , cr , user , context = None ) :
2011-09-15 12:04:29 +00:00
""" Generates a default calendar view by trying to infer
calendar fields from a number of pre - set attribute names
: param cr : database cursor
: param int user : user id
: param dict context : connection context
: returns : a calendar view
: rtype : etree . _Element
2008-07-22 14:24:36 +00:00
"""
2011-09-15 12:24:43 +00:00
def set_first_of ( seq , in_ , to ) :
2011-10-04 09:28:28 +00:00
""" Sets the first value of ``seq`` also found in ``in_`` to
2011-09-15 12:24:43 +00:00
the ` ` to ` ` attribute of the view being closed over .
Returns whether it ' s found a suitable value (and set it on
the attribute ) or not
"""
for item in seq :
if item in in_ :
2011-10-04 09:28:28 +00:00
view . set ( to , item )
2011-09-15 12:24:43 +00:00
return True
return False
2011-09-15 12:11:29 +00:00
view = etree . Element ( ' calendar ' , string = self . _description )
etree . SubElement ( view , ' field ' , name = self . _rec_name )
2009-09-17 07:27:12 +00:00
if ( self . _date_name not in self . _columns ) :
2010-09-03 07:03:30 +00:00
date_found = False
2010-09-03 10:59:56 +00:00
for dt in [ ' date ' , ' date_start ' , ' x_date ' , ' x_date_start ' ] :
2010-09-03 07:03:30 +00:00
if dt in self . _columns :
self . _date_name = dt
date_found = True
break
2008-07-22 14:24:36 +00:00
2010-09-03 07:03:30 +00:00
if not date_found :
2010-09-03 10:59:56 +00:00
raise except_orm ( _ ( ' Invalid Object Architecture! ' ) , _ ( " Insufficient fields for Calendar View! " ) )
2011-09-15 12:11:29 +00:00
view . set ( ' date_start ' , self . _date_name )
2008-07-22 14:24:36 +00:00
2011-09-15 12:24:43 +00:00
set_first_of ( [ " user_id " , " partner_id " , " x_user_id " , " x_partner_id " ] ,
self . _columns , ' color ' )
2008-07-22 14:24:36 +00:00
2011-09-15 12:24:43 +00:00
if not set_first_of ( [ " date_stop " , " date_end " , " x_date_stop " , " x_date_end " ] ,
self . _columns , ' date_stop ' ) :
if not set_first_of ( [ " date_delay " , " planned_hours " , " x_date_delay " , " x_planned_hours " ] ,
self . _columns , ' date_delay ' ) :
2011-09-15 12:15:44 +00:00
raise except_orm (
_ ( ' Invalid Object Architecture! ' ) ,
_ ( " Insufficient fields to generate a Calendar View for %s , missing a date_stop or a date_delay " % ( self . _name ) ) )
2008-07-22 14:24:36 +00:00
2011-09-15 12:11:29 +00:00
return view
2008-07-22 14:24:36 +00:00
2011-09-15 11:12:55 +00:00
def _get_default_search_view(self, cr, uid, context=None):
    """Build a default search view for this model.

    The view groups together every field marked searchable: columns with a
    truthy ``select`` attribute plus any ``select=1`` field found in the
    default form and tree views.

    :param cr: database cursor
    :param int uid: user id
    :param dict context: connection context
    :returns: an lxml document of the view
    :rtype: etree._Element
    """
    form_view = self.fields_view_get(cr, uid, False, 'form', context=context)
    tree_view = self.fields_view_get(cr, uid, False, 'tree', context=context)

    # TODO it seems _all_columns could be used instead of fields_get (no need for translated fields info)
    descriptors = self.fields_get(cr, uid, context=context)
    searchable_fields = set()
    for field_name, descriptor in descriptors.iteritems():
        if descriptor.get('select'):
            searchable_fields.add(field_name)

    # Only care about select=1 in the xpath below, because select=2 is covered
    # by the custom advanced search in clients
    tree_view_root = None
    for view_descr in (form_view, tree_view):
        root = etree.fromstring(view_descr['arch'])
        searchable_fields.update(root.xpath("//field[@select=1]/@name"))
        tree_view_root = root  # after the loop: root of the tree view

    search_view = etree.Element("search", string=tree_view_root.get("string", ""))
    field_group = etree.SubElement(search_view, "group")
    for field_name in searchable_fields:
        etree.SubElement(field_group, "field", name=field_name)
    return search_view
2009-12-17 07:06:47 +00:00
2008-07-22 14:24:36 +00:00
#
# if view_id, view_type is not required
#
2009-09-17 07:27:12 +00:00
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
    """
    Get the detailed composition of the requested view like fields, model, view architecture.

    :param cr: database cursor
    :param user: current user id
    :param view_id: id of the view or None
    :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
    :param context: context arguments, like lang, time zone
    :param toolbar: true to include contextual actions
    :param submenu: deprecated
    :return: dictionary describing the composition of the requested view (including inherited views and extensions)
    :raise AttributeError:
        * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
        * if some tag other than 'position' is found in parent view
    :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
    """
    if context is None:
        context = {}

    def encode(s):
        # lxml expects byte strings when parsing an arch stored as unicode.
        if isinstance(s, unicode):
            return s.encode('utf8')
        return s

    def raise_view_error(error_msg, child_view_id):
        view, child_view = self.pool.get('ir.ui.view').browse(cr, user, [view_id, child_view_id], context)
        raise AttributeError("View definition error for inherited view '%s' on model '%s': %s"
                             % (child_view.xml_id, self._name, error_msg))

    def locate(source, spec):
        """ Locate a node in a source (parent) architecture.

        Given a complete source (parent) architecture (i.e. the field
        `arch` in a view), and a 'spec' node (a node in an inheriting
        view that specifies the location in the source view of what
        should be changed), return (if it exists) the node in the
        source view matching the specification.

        :param source: a parent architecture to modify
        :param spec: a modifying node in an inheriting view
        :return: a node in the source matching the spec
        """
        if spec.tag == 'xpath':
            nodes = source.xpath(spec.get('expr'))
            return nodes[0] if nodes else None
        elif spec.tag == 'field':
            # Only compare the field name: a field can be only once in a given view
            # at a given level (and for multilevel expressions, we should use xpath
            # inheritance spec anyway).
            for node in source.getiterator('field'):
                if node.get('name') == spec.get('name'):
                    return node
            return None
        else:
            # generic node: match on tag and on every attribute but 'position'
            for node in source.getiterator(spec.tag):
                good = True
                for attr in spec.attrib:
                    if attr != 'position' and (not node.get(attr) or node.get(attr) != spec.get(attr)):
                        good = False
                        break
                if good:
                    return node
            return None

    def apply_inheritance_specs(source, specs_arch, inherit_id=None):
        """ Apply an inheriting view.

        Apply to a source architecture all the spec nodes (i.e. nodes
        describing where and what changes to apply to some parent
        architecture) given by an inheriting view.

        :param source: a parent architecture to modify
        :param specs_arch: a modifying architecture in an inheriting view
        :param inherit_id: the database id of the inheriting view
        :return: a modified source where the specs are applied
        """
        specs_tree = etree.fromstring(encode(specs_arch))
        # Queue of specification nodes (i.e. nodes describing where and
        # what changes to apply to some parent architecture).
        specs = [specs_tree]
        while len(specs):
            spec = specs.pop(0)
            if isinstance(spec, SKIPPED_ELEMENT_TYPES):
                continue
            if spec.tag == 'data':
                # Enqueue the children of the *current* <data> wrapper.
                # (Previously this iterated over ``specs_tree`` — the root —
                # so a nested <data> element would re-queue the root's
                # children and apply the same specs again.)
                specs += [c for c in spec]
                continue
            node = locate(source, spec)
            if node is not None:
                pos = spec.get('position', 'inside')
                if pos == 'replace':
                    if node.getparent() is None:
                        # replacing the root: the spec's first child becomes the new arch
                        source = copy.deepcopy(spec[0])
                    else:
                        for child in spec:
                            node.addprevious(child)
                        node.getparent().remove(node)
                elif pos == 'attributes':
                    for child in spec.getiterator('attribute'):
                        attribute = (child.get('name'), child.text and child.text.encode('utf8') or None)
                        if attribute[1]:
                            node.set(attribute[0], attribute[1])
                        else:
                            # empty <attribute> removes the attribute altogether
                            del(node.attrib[attribute[0]])
                else:
                    sib = node.getnext()
                    for child in spec:
                        if pos == 'inside':
                            node.append(child)
                        elif pos == 'after':
                            if sib is None:
                                node.addnext(child)
                                # keep appending after the last inserted child
                                node = child
                            else:
                                sib.addprevious(child)
                        elif pos == 'before':
                            node.addprevious(child)
                        else:
                            raise_view_error("Invalid position value: '%s'" % pos, inherit_id)
            else:
                attrs = ''.join([
                    ' %s="%s"' % (attr, spec.get(attr))
                    for attr in spec.attrib
                    if attr != 'position'
                ])
                tag = "<%s%s>" % (spec.tag, attrs)
                raise_view_error("Element '%s' not found in parent view '%%(parent_xml_id)s'" % tag, inherit_id)
        return source

    def apply_view_inheritance(cr, user, source, inherit_id):
        """ Apply all the (directly and indirectly) inheriting views.

        :param source: a parent architecture to modify (with parent
            modifications already applied)
        :param inherit_id: the database view_id of the parent view
        :return: a modified source where all the modifying architecture
            are applied
        """
        sql_inherit = self.pool.get('ir.ui.view').get_inheriting_views_arch(cr, user, inherit_id, self._name)
        for (view_arch, view_id) in sql_inherit:
            source = apply_inheritance_specs(source, view_arch, view_id)
            # recurse: children of this inheriting view apply on top of it
            source = apply_view_inheritance(cr, user, source, view_id)
        return source

    result = {'type': view_type, 'model': self._name}

    sql_res = False
    parent_view_model = None
    view_ref = context.get(view_type + '_view_ref')
    # Search for a root (i.e. without any parent) view.
    while True:
        if view_ref and not view_id:
            if '.' in view_ref:
                module, view_ref = view_ref.split('.', 1)
                cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
                view_ref_res = cr.fetchone()
                if view_ref_res:
                    view_id = view_ref_res[0]

        if view_id:
            cr.execute("""SELECT arch,name,field_parent,id,type,inherit_id,model
                          FROM ir_ui_view
                          WHERE id=%s""", (view_id,))
        else:
            cr.execute("""SELECT arch,name,field_parent,id,type,inherit_id,model
                          FROM ir_ui_view
                          WHERE model=%s AND type=%s AND inherit_id IS NULL
                          ORDER BY priority""", (self._name, view_type))
        sql_res = cr.dictfetchone()

        if not sql_res:
            break

        # walk up to the root of the inheritance chain
        view_id = sql_res['inherit_id'] or sql_res['id']
        parent_view_model = sql_res['model']
        if not sql_res['inherit_id']:
            break

    # if a view was found
    if sql_res:
        source = etree.fromstring(encode(sql_res['arch']))
        result.update(
            arch=apply_view_inheritance(cr, user, source, sql_res['id']),
            type=sql_res['type'],
            view_id=sql_res['id'],
            name=sql_res['name'],
            field_parent=sql_res['field_parent'] or False)
    else:
        # otherwise, build some kind of default view
        try:
            view = getattr(self, '_get_default_%s_view' % view_type)(
                cr, user, context)
        except AttributeError:
            # what happens here, graph case?
            raise except_orm(_('Invalid Architecture!'), _("There is no view of type '%s' defined for the structure!") % view_type)
        result.update(
            arch=view,
            name='default',
            field_parent=False,
            view_id=0)

    if parent_view_model != self._name:
        # fields_view_get was delegated through _inherits: expose the
        # original model to the post-processing step
        ctx = context.copy()
        ctx['base_model_name'] = parent_view_model
    else:
        ctx = context
    xarch, xfields = self.__view_look_dom_arch(cr, user, result['arch'], view_id, context=ctx)
    result['arch'] = xarch
    result['fields'] = xfields

    if toolbar:
        def clean(x):
            # strip the heavy report payloads from an ir.values triplet
            x = x[2]
            for key in ('report_sxw_content', 'report_rml_content',
                        'report_sxw', 'report_rml',
                        'report_sxw_content_data', 'report_rml_content_data'):
                if key in x:
                    del x[key]
            return x
        ir_values_obj = self.pool.get('ir.values')
        resprint = ir_values_obj.get(cr, user, 'action',
                                     'client_print_multi', [(self._name, False)], False,
                                     context)
        resaction = ir_values_obj.get(cr, user, 'action',
                                      'client_action_multi', [(self._name, False)], False,
                                      context)
        resrelate = ir_values_obj.get(cr, user, 'action',
                                      'client_action_relate', [(self._name, False)], False,
                                      context)
        # multi-record actions only make sense on list (tree) views
        resaction = [clean(action) for action in resaction
                     if view_type == 'tree' or not action[2].get('multi')]
        resprint = [clean(print_) for print_ in resprint
                    if view_type == 'tree' or not print_[2].get('multi')]
        resrelate = map(lambda x: x[2], resrelate)

        for x in itertools.chain(resprint, resaction, resrelate):
            x['string'] = x['name']

        result['toolbar'] = {
            'print': resprint,
            'action': resaction,
            'relate': resrelate
        }
    return result
_view_look_dom_arch = __view_look_dom_arch
2008-07-22 14:24:36 +00:00
def search_count(self, cr, user, args, context=None):
    """Return the number of records matching the search domain ``args``.

    Delegates to :meth:`search` with ``count=True``; tolerates
    implementations that return a list of ids instead of a count.
    """
    if not context:
        context = {}
    matched = self.search(cr, user, args, context=context, count=True)
    return len(matched) if isinstance(matched, list) else matched
2010-09-01 15:08:33 +00:00
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
    """
    Search for records based on a search domain.

    :param cr: database cursor
    :param user: current user id
    :param args: list of tuples specifying the search domain
        ``[('field_name', 'operator', value), ...]``. Pass an empty
        list to match all records.
    :param offset: optional number of results to skip in the returned values (default: 0)
    :param limit: optional max number of records to return (default: **None**)
    :param order: optional columns to sort by (default: self._order=id)
    :param context: optional context arguments, like lang, time zone
    :type context: dictionary
    :param count: optional (default: **False**), if **True**, returns only the
        number of records matching the criteria, not their ids
    :return: id or list of ids of records matching the criteria
    :rtype: integer or list of integers
    :raise AccessError: * if user tries to bypass access rules for read on the requested object.

    **Expressing a search domain (args)**

    Each tuple in the search domain needs to have 3 elements, in the form
    ``('field_name', 'operator', value)``, where:

    * **field_name** must be a valid name of field of the object model,
      possibly following many-to-one relationships using dot-notation,
      e.g. ``'street'`` or ``'partner_id.country'`` are valid values.
    * **operator** must be a string with a valid comparison operator from this
      list: ``=, !=, >, >=, <, <=, like, ilike, in, not in, child_of,
      parent_left, parent_right``.
      The semantics of most of these operators are obvious. The ``child_of``
      operator will look for records who are children or grand-children of a
      given record, according to the semantics of this model (i.e. following
      the relationship field named by ``self._parent_name``, by default
      ``parent_id``).
    * **value** must be a valid value to compare with the values of
      **field_name**, depending on its type.

    Domain criteria can be combined using 3 logical operators than can be
    added between tuples: ``'&'`` (logical AND, default), ``'|'`` (logical OR),
    ``'!'`` (logical NOT). These are **prefix** operators and the arity of the
    ``'&'`` and ``'|'`` operator is 2, while the arity of the ``'!'`` is just 1.
    Be very careful about this when you combine them the first time.

    Here is an example of searching for Partners named *ABC* from Belgium and
    Germany whose language is not english::

        [('name', '=', 'ABC'), '!', ('language.code', '=', 'en_US'),
         '|', ('country_id.code', '=', 'be'), ('country_id.code', '=', 'de')]

    The '&' is omitted as it is the default, and of course we could have used
    '!=' for the language, but what this domain really represents is::

        (name is 'ABC' AND (language is NOT english) AND (country is Belgium OR Germany))
    """
    return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
2008-07-22 14:24:36 +00:00
def name_get(self, cr, user, ids, context=None):
    """Return the preferred display value (text representation) for the
    records with the given ``ids``. By default this is the value of the
    ``_rec_name`` column, unless the model implements a custom behavior.

    Can sometimes be seen as the inverse function of :meth:`~.name_search`,
    but it is not guaranteed to be.

    :rtype: list(tuple)
    :return: list of pairs ``(id, text_repr)`` for all records with the given ``ids``.
    """
    if not ids:
        return []
    if isinstance(ids, (int, long)):
        # accept a single id for convenience
        ids = [ids]
    rows = self.read(cr, user, ids, [self._rec_name], context, load='_classic_write')
    return [(row['id'], tools.ustr(row[self._rec_name])) for row in rows]
2008-07-22 14:24:36 +00:00
2009-12-09 11:42:41 +00:00
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
    """Search for records that have a display name matching the given
    ``name`` pattern when compared with the given ``operator``, while also
    matching the optional search domain (``args``).

    This is used for example to provide suggestions based on a partial value
    for a relational field. Sometimes seen as the inverse function of
    :meth:`~.name_get`, but it is not guaranteed to be.

    This method is equivalent to calling :meth:`~.search` with a search
    domain based on ``name`` and then :meth:`~.name_get` on the result of
    the search.

    :param list args: optional search domain (see :meth:`~.search` for syntax),
        specifying further restrictions
    :param str operator: domain operator for matching the ``name`` pattern,
        such as ``'like'`` or ``'='``.
    :param int limit: optional max number of records to return
    :rtype: list
    :return: list of pairs ``(id, text_repr)`` for all matching records.
    """
    return self._name_search(cr, user, name, args, operator, context, limit)
2011-06-10 17:31:30 +00:00
def name_create(self, cr, uid, name, context=None):
    """Create a new record by calling :meth:`~.create` with only one value
    provided: the name of the new record (``_rec_name`` field).

    The new record will also be initialized with any default values
    applicable to this model, or provided through the context. The usual
    behavior of :meth:`~.create` applies. Similarly, this method may raise
    an exception if the model has multiple required fields and some do not
    have default values.

    :param name: name of the record to create
    :rtype: tuple
    :return: the :meth:`~.name_get` pair value for the newly-created record.
    """
    record_id = self.create(cr, uid, {self._rec_name: name}, context)
    return self.name_get(cr, uid, [record_id], context)[0]
2010-09-01 16:44:37 +00:00
# private implementation of name_search, allows passing a dedicated user for the name_get part to
# solve some access rights issues
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
    if args is None:
        args = []
    if context is None:
        context = {}
    # work on a copy so the caller's domain is left untouched
    domain = list(args)
    # optimize out the default criterion of ``ilike ''`` that matches everything
    if name != '' or operator != 'ilike':
        domain.append((self._rec_name, operator, name))
    # name_get may run as a dedicated user to work around access rights
    access_rights_uid = name_get_uid or user
    ids = self._search(cr, user, domain, limit=limit, context=context, access_rights_uid=access_rights_uid)
    return self.name_get(cr, access_rights_uid, ids, context)
2008-07-22 14:24:36 +00:00
2008-11-27 08:24:44 +00:00
def read_string(self, cr, uid, id, langs, fields=None, context=None):
    """Read the (possibly translated) labels of ``fields`` for every
    language in ``langs``.

    :return: dict mapping each language code to a dict of field labels
        (plus a ``'code'`` key holding the language itself). Labels come
        from ir.translation when a translation exists, otherwise from the
        field's ``string`` attribute; inherited (_inherits) fields are
        delegated to the parent models.
    """
    translation_obj = self.pool.get('ir.translation')
    translation_obj.check_read(cr, uid)
    if not fields:
        fields = self._columns.keys() + self._inherit_fields.keys()
    #FIXME: collect all calls to _get_source into one SQL call.
    result = {}
    inherited = {}
    for lang in langs:
        lang_labels = {'code': lang}
        for field_name in fields:
            if field_name not in self._columns:
                continue
            translated = translation_obj._get_source(cr, uid, self._name + ',' + field_name, 'field', lang)
            lang_labels[field_name] = translated or self._columns[field_name].string
        result[lang] = lang_labels
    for table in self._inherits:
        cols = intersect(self._inherit_fields.keys(), fields)
        inherited = self.pool.get(table).read_string(cr, uid, id, langs, cols, context)
    for lang in inherited:
        if lang in result:
            result[lang]['code'] = lang
        for field_name in inherited[lang]:
            result[lang][field_name] = inherited[lang][field_name]
    return result
def write_string(self, cr, uid, id, langs, vals, context=None):
    """Store translated field labels (``vals``: field name -> label) for
    every language in ``langs``; inherited (_inherits) fields are delegated
    to the parent models.
    """
    translation_obj = self.pool.get('ir.translation')
    translation_obj.check_write(cr, uid)
    #FIXME: try to only call the translation in one SQL
    for lang in langs:
        for field_name in vals:
            if field_name not in self._columns:
                continue
            source = self._columns[field_name].string
            translation_obj._set_ids(cr, uid, self._name + ',' + field_name, 'field', lang, [0], vals[field_name], source)
    for table in self._inherits:
        cols = intersect(self._inherit_fields.keys(), vals)
        if cols:
            self.pool.get(table).write_string(cr, uid, id, langs, vals, context)
    return True
2010-08-27 09:25:54 +00:00
def _add_missing_default_values(self, cr, uid, values, context=None):
    """Return ``values`` completed with the default value of every field
    absent from it, converting m2m/o2m defaults to the write() command
    format. Provided values always win over defaults.
    """
    # avoid overriding inherited values when parent is set
    skip_tables = [table for table, parent_field in self._inherits.items()
                   if parent_field in values]
    missing = [f for f in self._columns.keys() if f not in values]
    missing += [f for f in self._inherit_fields.keys()
                if f not in values and self._inherit_fields[f][0] not in skip_tables]
    if not missing:
        return values
    # override defaults with the provided values, never allow the other way around
    defaults = self.default_get(cr, uid, missing, context)
    for fname in defaults:
        is_m2m = ((fname in self._columns and self._columns[fname]._type == 'many2many')
                  or (fname in self._inherit_fields and self._inherit_fields[fname][2]._type == 'many2many'))
        if is_m2m and defaults[fname] and isinstance(defaults[fname][0], (int, long)):
            # list of ids -> single (6, 0, ids) replace command
            defaults[fname] = [(6, 0, defaults[fname])]
        is_o2m = ((fname in self._columns and self._columns[fname]._type == 'one2many')
                  or (fname in self._inherit_fields and self._inherit_fields[fname][2]._type == 'one2many'))
        if is_o2m and isinstance(defaults[fname], (list, tuple)) and defaults[fname] and isinstance(defaults[fname][0], dict):
            # list of value dicts -> (0, 0, values) create commands
            defaults[fname] = [(0, 0, x) for x in defaults[fname]]
    defaults.update(values)
    return defaults
2008-06-14 15:14:19 +00:00
2011-08-25 12:47:11 +00:00
def clear_caches(self):
    """ Clear the caches

    This clears the caches associated to methods decorated with
    ``tools.ormcache`` or ``tools.ormcache_multi``.
    """
    try:
        self._ormcache
    except AttributeError:
        # no cache was ever populated on this model: nothing to clear
        pass
    else:
        self._ormcache = {}
2011-09-24 02:03:03 +00:00
2011-11-18 13:36:42 +00:00
2011-11-21 16:43:26 +00:00
def _read_group_fill_results(self, cr, uid, domain, groupby, groupby_list, aggregated_fields,
                             read_group_result, read_group_order=None, context=None):
    """Helper method for filling in empty groups for all possible values of
       the field being grouped by"""
    # self._group_by_full should map groupable fields to a method that returns
    # a list of all aggregated values that we want to display for this field,
    # in the form of a m2o-like pair (key,label).
    # This is useful to implement kanban views for instance, where all columns
    # should be displayed even if they don't contain any record.

    # All groups that must be displayed, including the ones already present
    # in the current results.
    present_group_ids = [row[groupby][0] for row in read_group_result if row[groupby]]
    all_groups = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
                                              read_group_order=read_group_order,
                                              access_rights_uid=openerp.SUPERUSER_ID,
                                              context=context)

    # Template for the rows synthesized for groups without any record.
    empty_row = dict.fromkeys(aggregated_fields, False)
    empty_row[groupby + '_count'] = 0
    if groupby_list and len(groupby_list) > 1:
        empty_row['__context'] = {'group_by': groupby_list[1:]}

    # Merge the left side (current results, as dicts) with the right side
    # (all possible values, as m2o pairs). Both lists are assumed to share
    # the same ordering, so they can be merged in a single pass.
    merged = []
    seen = {}

    def take_left(row):
        # a real result row: keep it, or refresh the count if already merged
        key = row[groupby] and row[groupby][0]
        if key not in seen:
            merged.append(row)
            seen[key] = row
        else:
            count_attr = groupby + '_count'
            seen[key].update({count_attr: row[count_attr]})

    def take_right(pair):
        # a group with no result row yet: synthesize an empty row for it
        key = pair[0]
        if key not in seen:
            row = dict(empty_row)
            row.update({
                groupby: pair,
                '__domain': [(groupby, '=', key)] + domain,
            })
            merged.append(row)
            seen[key] = row

    while read_group_result or all_groups:
        left = read_group_result[0] if read_group_result else None
        right = all_groups[0] if all_groups else None
        assert left is None or left[groupby] is False \
            or isinstance(left[groupby], (tuple, list)), \
            'M2O-like pair expected, got %r' % left[groupby]
        assert right is None or isinstance(right, (tuple, list)), \
            'M2O-like pair expected, got %r' % right
        if left is None:
            take_right(all_groups.pop(0))
        elif right is None:
            take_left(read_group_result.pop(0))
        elif left[groupby] == right:
            take_left(read_group_result.pop(0))
            all_groups.pop(0)  # discard right side: same group
        elif not left[groupby] or not left[groupby][0]:
            # left side == "Undefined" entry, not present on right side
            take_left(read_group_result.pop(0))
        else:
            take_right(all_groups.pop(0))
    return merged
2010-11-19 13:15:50 +00:00
def read_group ( self , cr , uid , domain , fields , groupby , offset = 0 , limit = None , context = None , orderby = False ) :
2010-04-02 13:54:12 +00:00
"""
2010-04-06 12:30:28 +00:00
Get the list of records in list view grouped by the given ` ` groupby ` ` fields
2010-04-02 13:54:12 +00:00
: param cr : database cursor
: param uid : current user id
2010-04-06 12:30:28 +00:00
: param domain : list specifying search criteria [ [ ' field_name ' , ' operator ' , ' value ' ] , . . . ]
2011-05-09 09:21:27 +00:00
: param list fields : list of fields present in the list view specified on the object
: param list groupby : fields by which the records will be grouped
: param int offset : optional number of records to skip
: param int limit : optional max number of records to return
: param dict context : context arguments , like lang , time zone
2011-07-13 09:20:03 +00:00
: param list orderby : optional ` ` order by ` ` specification , for
overriding the natural sort ordering of the
groups , see also : py : meth : ` ~ osv . osv . osv . search `
( supported only for many2one fields currently )
2010-04-06 12:30:28 +00:00
: return : list of dictionaries ( one dictionary for each record ) containing :
2010-04-02 13:54:12 +00:00
2010-04-06 12:30:28 +00:00
* the values of fields grouped by the fields in ` ` groupby ` ` argument
* __domain : list of tuples specifying the search criteria
* __context : dictionary with argument like ` ` groupby ` `
2010-04-02 13:54:12 +00:00
: rtype : [ { ' field_name_1 ' : value , . . . ]
: raise AccessError : * if user has no read rights on the requested object
* if user tries to bypass access rules for read on the requested object
"""
2010-01-20 14:31:32 +00:00
context = context or { }
2011-09-28 13:01:07 +00:00
self . check_read ( cr , uid )
2010-01-20 14:31:32 +00:00
if not fields :
2010-01-22 11:00:17 +00:00
fields = self . _columns . keys ( )
2010-02-28 19:50:16 +00:00
2010-09-30 13:24:03 +00:00
query = self . _where_calc ( cr , uid , domain , context = context )
self . _apply_ir_rules ( cr , uid , query , ' read ' , context = context )
2010-02-28 19:50:16 +00:00
2010-02-26 01:03:45 +00:00
# Take care of adding join(s) if groupby is an '_inherits'ed field
2010-04-16 20:11:24 +00:00
groupby_list = groupby
2011-01-15 01:31:09 +00:00
qualified_groupby_field = groupby
2010-04-16 20:11:24 +00:00
if groupby :
2010-09-17 14:51:03 +00:00
if isinstance ( groupby , list ) :
2010-04-16 20:11:24 +00:00
groupby = groupby [ 0 ]
2011-01-15 01:31:09 +00:00
qualified_groupby_field = self . _inherits_join_calc ( groupby , query )
2010-02-28 19:50:16 +00:00
2010-12-16 18:37:08 +00:00
if groupby :
assert not groupby or groupby in fields , " Fields in ' groupby ' must appear in the list of fields to read (perhaps it ' s missing in the list view?) "
groupby_def = self . _columns . get ( groupby ) or ( self . _inherit_fields . get ( groupby ) and self . _inherit_fields . get ( groupby ) [ 2 ] )
assert groupby_def and groupby_def . _classic_write , " Fields in ' groupby ' must be regular database-persisted fields (no function or related fields), or function fields with store=True "
2010-01-20 14:31:32 +00:00
2011-06-27 09:50:59 +00:00
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
2010-01-22 11:00:17 +00:00
fget = self . fields_get ( cr , uid , fields )
2010-04-16 20:11:24 +00:00
flist = ' '
2010-11-11 11:04:20 +00:00
group_count = group_by = groupby
2010-04-16 20:11:24 +00:00
if groupby :
2010-09-17 14:51:03 +00:00
if fget . get ( groupby ) :
2011-12-20 15:11:31 +00:00
groupby_type = fget [ groupby ] [ ' type ' ]
if groupby_type in ( ' date ' , ' datetime ' ) :
qualified_groupby_field = " to_char( %s , ' yyyy-mm ' ) " % qualified_groupby_field
flist = " %s as %s " % ( qualified_groupby_field , groupby )
elif groupby_type == ' boolean ' :
qualified_groupby_field = " coalesce( %s ,false) " % qualified_groupby_field
flist = " %s as %s " % ( qualified_groupby_field , groupby )
2010-09-13 00:48:40 +00:00
else :
2011-01-15 01:31:09 +00:00
flist = qualified_groupby_field
2010-04-16 20:11:24 +00:00
else :
2010-09-20 06:12:25 +00:00
# Don't allow arbitrary values, as this would be a SQL injection vector!
2010-09-13 00:48:40 +00:00
raise except_orm ( _ ( ' Invalid group_by ' ) ,
_ ( ' Invalid group_by specification: " %s " . \n A group_by specification must be a list of valid fields. ' ) % ( groupby , ) )
2010-04-16 20:11:24 +00:00
2011-11-15 12:00:28 +00:00
aggregated_fields = [
f for f in fields
if f not in ( ' id ' , ' sequence ' )
if fget [ f ] [ ' type ' ] in ( ' integer ' , ' float ' )
2011-11-18 15:26:29 +00:00
if ( f in self . _columns and getattr ( self . _columns [ f ] , ' _classic_write ' ) ) ]
2011-11-15 12:00:28 +00:00
for f in aggregated_fields :
group_operator = fget [ f ] . get ( ' group_operator ' , ' sum ' )
if flist :
flist + = ' , '
qualified_field = ' " %s " . " %s " ' % ( self . _table , f )
flist + = " %s ( %s ) AS %s " % ( group_operator , qualified_field , f )
2010-02-28 19:50:16 +00:00
2011-01-15 01:31:09 +00:00
gb = groupby and ( ' GROUP BY ' + qualified_groupby_field ) or ' '
2010-09-30 13:24:03 +00:00
from_clause , where_clause , where_clause_params = query . get_sql ( )
where_clause = where_clause and ' WHERE ' + where_clause
limit_str = limit and ' limit %d ' % limit or ' '
offset_str = offset and ' offset %d ' % offset or ' '
2010-11-11 11:04:20 +00:00
if len ( groupby_list ) < 2 and context . get ( ' group_by_no_leaf ' ) :
group_count = ' _ '
2010-12-21 14:37:14 +00:00
cr . execute ( ' SELECT min( %s .id) AS id, count( %s .id) AS %s _count ' % ( self . _table , self . _table , group_count ) + ( flist and ' , ' ) + flist + ' FROM ' + from_clause + where_clause + gb + limit_str + offset_str , where_clause_params )
2010-01-22 11:00:17 +00:00
alldata = { }
2010-02-10 12:34:26 +00:00
groupby = group_by
2010-01-22 11:00:17 +00:00
for r in cr . dictfetchall ( ) :
2010-09-03 10:59:56 +00:00
for fld , val in r . items ( ) :
if val == None : r [ fld ] = False
2010-01-22 11:00:17 +00:00
alldata [ r [ ' id ' ] ] = r
del r [ ' id ' ]
2010-12-06 13:18:18 +00:00
2011-11-21 16:43:26 +00:00
order = orderby or groupby
data_ids = self . search ( cr , uid , [ ( ' id ' , ' in ' , alldata . keys ( ) ) ] , order = order , context = context )
2010-12-06 13:18:18 +00:00
# the IDS of records that have groupby field value = False or '' should be sorted too
data_ids + = filter ( lambda x : x not in data_ids , alldata . keys ( ) )
data = self . read ( cr , uid , data_ids , groupby and [ groupby ] or [ ' id ' ] , context = context )
# restore order of the search as read() uses the default _order (this is only for groups, so the size of data_read shoud be small):
data . sort ( lambda x , y : cmp ( data_ids . index ( x [ ' id ' ] ) , data_ids . index ( y [ ' id ' ] ) ) )
2010-01-22 11:00:17 +00:00
for d in data :
2010-04-16 20:11:24 +00:00
if groupby :
2010-09-03 10:59:56 +00:00
d [ ' __domain ' ] = [ ( groupby , ' = ' , alldata [ d [ ' id ' ] ] [ groupby ] or False ) ] + domain
if not isinstance ( groupby_list , ( str , unicode ) ) :
2010-04-20 05:58:05 +00:00
if groupby or not context . get ( ' group_by_no_leaf ' , False ) :
2010-09-03 10:59:56 +00:00
d [ ' __context ' ] = { ' group_by ' : groupby_list [ 1 : ] }
2010-09-03 07:03:30 +00:00
if groupby and groupby in fget :
2010-09-03 10:59:56 +00:00
if d [ groupby ] and fget [ groupby ] [ ' type ' ] in ( ' date ' , ' datetime ' ) :
dt = datetime . datetime . strptime ( alldata [ d [ ' id ' ] ] [ groupby ] [ : 7 ] , ' % Y- % m ' )
2010-08-17 18:34:59 +00:00
days = calendar . monthrange ( dt . year , dt . month ) [ 1 ]
2010-02-10 12:34:26 +00:00
2010-09-03 10:59:56 +00:00
d [ groupby ] = datetime . datetime . strptime ( d [ groupby ] [ : 10 ] , ' % Y- % m- %d ' ) . strftime ( ' % B % Y ' )
d [ ' __domain ' ] = [ ( groupby , ' >= ' , alldata [ d [ ' id ' ] ] [ groupby ] and datetime . datetime . strptime ( alldata [ d [ ' id ' ] ] [ groupby ] [ : 7 ] + ' -01 ' , ' % Y- % m- %d ' ) . strftime ( ' % Y- % m- %d ' ) or False ) , \
( groupby , ' <= ' , alldata [ d [ ' id ' ] ] [ groupby ] and datetime . datetime . strptime ( alldata [ d [ ' id ' ] ] [ groupby ] [ : 7 ] + ' - ' + str ( days ) , ' % Y- % m- %d ' ) . strftime ( ' % Y- % m- %d ' ) or False ) ] + domain
2010-04-16 20:11:24 +00:00
del alldata [ d [ ' id ' ] ] [ groupby ]
2010-01-22 11:00:17 +00:00
d . update ( alldata [ d [ ' id ' ] ] )
del d [ ' id ' ]
2011-11-13 12:14:05 +00:00
if groupby and groupby in self . _group_by_full :
2011-11-18 13:36:42 +00:00
data = self . _read_group_fill_results ( cr , uid , domain , groupby , groupby_list ,
2011-11-21 16:43:26 +00:00
aggregated_fields , data , read_group_order = order ,
context = context )
2011-11-18 15:26:29 +00:00
2010-01-22 11:00:17 +00:00
return data
2010-01-20 14:31:32 +00:00
2011-08-26 10:35:16 +00:00
def _inherits_join_add ( self , current_table , parent_model_name , query ) :
2010-04-02 13:54:12 +00:00
"""
2010-09-30 13:24:03 +00:00
Add missing table SELECT and JOIN clause to ` ` query ` ` for reaching the parent table ( no duplicates )
2011-08-26 10:35:16 +00:00
: param current_table : current model object
2010-08-17 18:34:59 +00:00
: param parent_model_name : name of the parent model for which the clauses should be added
2010-10-01 16:54:11 +00:00
: param query : query object on which the JOIN should be added
2010-08-17 18:34:59 +00:00
"""
2011-08-26 10:35:16 +00:00
inherits_field = current_table . _inherits [ parent_model_name ]
2010-08-17 18:34:59 +00:00
parent_model = self . pool . get ( parent_model_name )
parent_table_name = parent_model . _table
quoted_parent_table_name = ' " %s " ' % parent_table_name
2010-09-30 13:24:03 +00:00
if quoted_parent_table_name not in query . tables :
query . tables . append ( quoted_parent_table_name )
2011-08-26 10:35:16 +00:00
query . where_clause . append ( ' ( %s . %s = %s .id) ' % ( current_table . _table , inherits_field , parent_table_name ) )
2010-04-02 13:54:12 +00:00
2010-09-30 13:24:03 +00:00
def _inherits_join_calc ( self , field , query ) :
2010-08-17 18:34:59 +00:00
"""
2010-09-30 13:24:03 +00:00
Adds missing table select and join clause ( s ) to ` ` query ` ` for reaching
2010-08-17 18:34:59 +00:00
the field coming from an ' _inherits ' parent table ( no duplicates ) .
2010-04-02 13:54:12 +00:00
2010-09-30 13:24:03 +00:00
: param field : name of inherited field to reach
: param query : query object on which the JOIN should be added
: return : qualified name of field , to be used in SELECT clause
2010-02-26 01:03:45 +00:00
"""
current_table = self
while field in current_table . _inherit_fields and not field in current_table . _columns :
2010-08-17 18:34:59 +00:00
parent_model_name = current_table . _inherit_fields [ field ] [ 0 ]
parent_table = self . pool . get ( parent_model_name )
2011-08-26 10:35:16 +00:00
self . _inherits_join_add ( current_table , parent_model_name , query )
2010-02-26 01:03:45 +00:00
current_table = parent_table
2010-09-30 13:24:03 +00:00
return ' " %s " . %s ' % ( current_table . _table , field )
2010-02-26 01:03:45 +00:00
2008-08-17 18:28:29 +00:00
def _parent_store_compute ( self , cr ) :
2010-05-14 09:11:43 +00:00
if not self . _parent_store :
return
2012-01-24 12:42:52 +00:00
_logger . info ( ' Computing parent left and right for table %s ... ' , self . _table )
2008-08-13 10:47:38 +00:00
def browse_rec ( root , pos = 0 ) :
# TODO: set order
where = self . _parent_name + ' = ' + str ( root )
if not root :
where = self . _parent_name + ' IS NULL '
2009-01-17 19:22:14 +00:00
if self . _parent_order :
where + = ' order by ' + self . _parent_order
2008-08-13 10:47:38 +00:00
cr . execute ( ' SELECT id FROM ' + self . _table + ' WHERE ' + where )
pos2 = pos + 1
2010-11-12 10:39:52 +00:00
for id in cr . fetchall ( ) :
2008-08-13 10:47:38 +00:00
pos2 = browse_rec ( id [ 0 ] , pos2 )
2010-09-03 10:59:56 +00:00
cr . execute ( ' update ' + self . _table + ' set parent_left= %s , parent_right= %s where id= %s ' , ( pos , pos2 , root ) )
return pos2 + 1
2009-01-27 10:25:15 +00:00
query = ' SELECT id FROM ' + self . _table + ' WHERE ' + self . _parent_name + ' IS NULL '
if self . _parent_order :
2010-09-03 10:59:56 +00:00
query + = ' order by ' + self . _parent_order
2009-01-27 10:25:15 +00:00
pos = 0
cr . execute ( query )
for ( root , ) in cr . fetchall ( ) :
pos = browse_rec ( root , pos )
2008-08-13 10:47:38 +00:00
return True
2008-12-14 16:46:47 +00:00
def _update_store ( self , cr , f , k ) :
2012-01-24 12:42:52 +00:00
_logger . info ( " storing computed values of fields.function ' %s ' " , k )
2008-12-14 16:46:47 +00:00
ss = self . _columns [ k ] . _symbol_set
update_query = ' UPDATE " %s " SET " %s " = %s WHERE id= %% s ' % ( self . _table , k , ss [ 0 ] )
cr . execute ( ' select id from ' + self . _table )
ids_lst = map ( lambda x : x [ 0 ] , cr . fetchall ( ) )
while ids_lst :
iids = ids_lst [ : 40 ]
ids_lst = ids_lst [ 40 : ]
2011-09-26 09:01:56 +00:00
res = f . get ( cr , self , iids , k , SUPERUSER_ID , { } )
2010-09-03 10:59:56 +00:00
for key , val in res . items ( ) :
2008-12-14 16:46:47 +00:00
if f . _multi :
val = val [ k ]
# if val is a many2one, just write the ID
2010-09-03 10:59:56 +00:00
if type ( val ) == tuple :
2008-12-14 16:46:47 +00:00
val = val [ 0 ]
if ( val < > False ) or ( type ( val ) < > bool ) :
cr . execute ( update_query , ( ss [ 1 ] ( val ) , key ) )
2010-12-20 16:09:59 +00:00
def _check_selection_field_value ( self , cr , uid , field , value , context = None ) :
""" Raise except_orm if value is not among the valid values for the selection field """
if self . _columns [ field ] . _type == ' reference ' :
val_model , val_id_str = value . split ( ' , ' , 1 )
val_id = False
try :
val_id = long ( val_id_str )
except ValueError :
pass
if not val_id :
raise except_orm ( _ ( ' ValidateError ' ) ,
2011-09-20 12:24:11 +00:00
_ ( ' Invalid value for reference field " %s . %s " (last part must be a non-zero integer): " %s " ' ) % ( self . _table , field , value ) )
2010-12-20 16:09:59 +00:00
val = val_model
else :
val = value
if isinstance ( self . _columns [ field ] . selection , ( tuple , list ) ) :
if val in dict ( self . _columns [ field ] . selection ) :
return
elif val in dict ( self . _columns [ field ] . selection ( self , cr , uid , context = context ) ) :
return
raise except_orm ( _ ( ' ValidateError ' ) ,
2011-09-20 12:24:11 +00:00
_ ( ' The value " %s " for the field " %s . %s " is not in the selection ' ) % ( value , self . _table , field ) )
2010-12-20 16:09:59 +00:00
2009-01-29 09:59:37 +00:00
def _check_removed_columns ( self , cr , log = False ) :
# iterate on the database columns to drop the NOT NULL constraints
# of fields which were required but have been removed (or will be added by another module)
columns = [ c for c in self . _columns if not ( isinstance ( self . _columns [ c ] , fields . function ) and not self . _columns [ c ] . store ) ]
2011-09-24 02:03:03 +00:00
columns + = MAGIC_COLUMNS
2009-01-29 09:59:37 +00:00
cr . execute ( " SELECT a.attname, a.attnotnull "
" FROM pg_class c, pg_attribute a "
2010-06-15 13:27:22 +00:00
" WHERE c.relname= %s "
2009-01-29 09:59:37 +00:00
" AND c.oid=a.attrelid "
2010-06-15 13:27:22 +00:00
" AND a.attisdropped= %s "
2009-01-29 09:59:37 +00:00
" AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ( ' cid ' , ' tid ' , ' oid ' , ' xid ' ) "
2010-09-03 10:59:56 +00:00
" AND a.attname NOT IN %s " , ( self . _table , False , tuple ( columns ) ) ) ,
2010-06-15 13:27:22 +00:00
2009-01-29 09:59:37 +00:00
for column in cr . dictfetchall ( ) :
if log :
2012-01-24 12:42:52 +00:00
_logger . debug ( " column %s is in the table %s but not in the corresponding object %s " ,
column [ ' attname ' ] , self . _table , self . _name )
2009-01-29 09:59:37 +00:00
if column [ ' attnotnull ' ] :
cr . execute ( ' ALTER TABLE " %s " ALTER COLUMN " %s " DROP NOT NULL ' % ( self . _table , column [ ' attname ' ] ) )
2012-01-24 12:42:52 +00:00
_schema . debug ( " Table ' %s ' : column ' %s ' : dropped NOT NULL constraint " ,
self . _table , column [ ' attname ' ] )
2009-01-29 09:59:37 +00:00
2011-09-26 11:12:26 +00:00
# checked version: for direct m2o starting from `self`
def _m2o_add_foreign_key_checked ( self , source_field , dest_model , ondelete ) :
assert self . is_transient ( ) or not dest_model . is_transient ( ) , \
' Many2One relationships from non-transient Model to TransientModel are forbidden '
if self . is_transient ( ) and not dest_model . is_transient ( ) :
# TransientModel relationships to regular Models are annoying
# usually because they could block deletion due to the FKs.
# So unless stated otherwise we default them to ondelete=cascade.
ondelete = ondelete or ' cascade '
self . _foreign_keys . append ( ( self . _table , source_field , dest_model . _table , ondelete or ' set null ' ) )
2012-01-24 12:42:52 +00:00
_schema . debug ( " Table ' %s ' : added foreign key ' %s ' with definition=REFERENCES \" %s \" ON DELETE %s " ,
self . _table , source_field , dest_model . _table , ondelete )
2011-09-26 11:12:26 +00:00
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked ( self , source_table , source_field , dest_model , ondelete ) :
self . _foreign_keys . append ( ( source_table , source_field , dest_model . _table , ondelete or ' set null ' ) )
2012-01-24 12:42:52 +00:00
_schema . debug ( " Table ' %s ' : added foreign key ' %s ' with definition=REFERENCES \" %s \" ON DELETE %s " ,
source_table , source_field , dest_model . _table , ondelete )
2011-09-26 11:12:26 +00:00
2010-07-22 13:49:48 +00:00
def _auto_init ( self , cr , context = None ) :
2011-05-27 12:32:36 +00:00
"""
Call _field_create and , unless _auto is False :
- create the corresponding table in database for the model ,
- possibly add the parent columns in database ,
- possibly add the columns ' create_uid ' , ' create_date ' , ' write_uid ' ,
' write_date ' in database if _log_access is True ( the default ) ,
- report on database columns no more existing in _columns ,
- remove no more existing not null constraints ,
- alter existing database columns to match _columns ,
- create database tables to match _columns ,
- add database indices to match _columns ,
2011-06-14 08:44:15 +00:00
- save in self . _foreign_keys a list a foreign keys to create ( see
_auto_end ) .
2011-05-27 12:32:36 +00:00
"""
2011-06-10 14:05:21 +00:00
self . _foreign_keys = [ ]
2011-04-27 09:08:46 +00:00
raise_on_invalid_object_name ( self . _name )
2010-07-22 13:49:48 +00:00
if context is None :
context = { }
2010-09-03 10:59:56 +00:00
store_compute = False
2008-12-14 16:46:47 +00:00
todo_end = [ ]
2011-06-14 09:37:15 +00:00
update_custom_fields = context . get ( ' update_custom_fields ' , False )
2008-07-22 14:24:36 +00:00
self . _field_create ( cr , context = context )
2011-06-14 09:37:15 +00:00
create = not self . _table_exist ( cr )
2010-01-03 14:35:19 +00:00
if getattr ( self , ' _auto ' , True ) :
2011-06-14 09:37:15 +00:00
if create :
self . _create_table ( cr )
2010-09-03 10:59:56 +00:00
2008-07-22 14:24:36 +00:00
cr . commit ( )
2008-08-13 10:47:38 +00:00
if self . _parent_store :
2011-06-14 09:37:15 +00:00
if not self . _parent_columns_exist ( cr ) :
self . _create_parent_columns ( cr )
2008-08-13 10:47:38 +00:00
store_compute = True
2011-06-14 09:37:15 +00:00
# Create the create_uid, create_date, write_uid, write_date, columns if desired.
2008-07-22 14:24:36 +00:00
if self . _log_access :
2011-06-14 09:37:15 +00:00
self . _add_log_columns ( cr )
2009-02-14 05:35:17 +00:00
2009-01-29 09:59:37 +00:00
self . _check_removed_columns ( cr , log = False )
2008-07-22 14:24:36 +00:00
# iterate on the "object columns"
2011-06-14 09:37:15 +00:00
column_data = self . _select_column_data ( cr )
2010-11-01 11:33:20 +00:00
2011-06-14 09:37:15 +00:00
for k , f in self . _columns . iteritems ( ) :
2011-09-24 02:03:03 +00:00
if k in MAGIC_COLUMNS :
2008-07-22 14:24:36 +00:00
continue
2011-06-14 09:37:15 +00:00
# Don't update custom (also called manual) fields
if f . manual and not update_custom_fields :
2010-02-09 09:13:28 +00:00
continue
2010-11-01 11:33:20 +00:00
2008-07-22 14:24:36 +00:00
if isinstance ( f , fields . one2many ) :
2011-06-14 09:37:15 +00:00
self . _o2m_raise_on_missing_reference ( cr , f )
2010-02-28 19:50:16 +00:00
2008-07-22 14:24:36 +00:00
elif isinstance ( f , fields . many2many ) :
2011-06-14 09:37:15 +00:00
self . _m2m_raise_or_create_relation ( cr , f )
2008-07-22 14:24:36 +00:00
else :
2011-06-14 09:37:15 +00:00
res = column_data . get ( k )
# The field is not found as-is in database, try if it
# exists with an old name.
2010-09-03 10:59:56 +00:00
if not res and hasattr ( f , ' oldname ' ) :
2011-06-14 09:37:15 +00:00
res = column_data . get ( f . oldname )
if res :
2010-09-03 10:59:56 +00:00
cr . execute ( ' ALTER TABLE " %s " RENAME " %s " TO " %s " ' % ( self . _table , f . oldname , k ) )
2011-06-14 09:37:15 +00:00
res [ ' attname ' ] = k
column_data [ k ] = res
2012-01-24 12:42:52 +00:00
_schema . debug ( " Table ' %s ' : renamed column ' %s ' to ' %s ' " ,
self . _table , f . oldname , k )
2009-12-28 06:07:29 +00:00
2011-06-14 09:37:15 +00:00
# The field already exists in database. Possibly
# change its type, rename it, drop it or change its
# constraints.
if res :
f_pg_type = res [ ' typname ' ]
f_pg_size = res [ ' size ' ]
f_pg_notnull = res [ ' attnotnull ' ]
2010-01-03 14:35:19 +00:00
if isinstance ( f , fields . function ) and not f . store and \
not getattr ( f , ' nodrop ' , False ) :
2012-01-24 12:42:52 +00:00
_logger . info ( ' column %s ( %s ) in table %s removed: converted to a function ! \n ' ,
k , f . string , self . _table )
2010-09-03 10:59:56 +00:00
cr . execute ( ' ALTER TABLE " %s " DROP COLUMN " %s " CASCADE ' % ( self . _table , k ) )
2008-12-15 04:34:26 +00:00
cr . commit ( )
2012-01-24 12:42:52 +00:00
_schema . debug ( " Table ' %s ' : dropped column ' %s ' with cascade " ,
self . _table , k )
2008-07-22 14:24:36 +00:00
f_obj_type = None
else :
f_obj_type = get_pg_type ( f ) and get_pg_type ( f ) [ 0 ]
if f_obj_type :
2008-12-14 16:46:47 +00:00
ok = False
casts = [
2011-09-09 15:42:42 +00:00
( ' text ' , ' char ' , pg_varchar ( f . size ) , ' :: %s ' % pg_varchar ( f . size ) ) ,
2008-12-14 16:46:47 +00:00
( ' varchar ' , ' text ' , ' TEXT ' , ' ' ) ,
( ' int4 ' , ' float ' , get_pg_type ( f ) [ 1 ] , ' :: ' + get_pg_type ( f ) [ 1 ] ) ,
( ' date ' , ' datetime ' , ' TIMESTAMP ' , ' ::TIMESTAMP ' ) ,
2010-06-28 17:56:37 +00:00
( ' timestamp ' , ' date ' , ' date ' , ' ::date ' ) ,
2009-09-24 14:22:27 +00:00
( ' numeric ' , ' float ' , get_pg_type ( f ) [ 1 ] , ' :: ' + get_pg_type ( f ) [ 1 ] ) ,
( ' float8 ' , ' float ' , get_pg_type ( f ) [ 1 ] , ' :: ' + get_pg_type ( f ) [ 1 ] ) ,
2008-12-14 16:46:47 +00:00
]
2009-06-30 07:33:44 +00:00
if f_pg_type == ' varchar ' and f . _type == ' char ' and f_pg_size < f . size :
2008-12-14 16:46:47 +00:00
cr . execute ( ' ALTER TABLE " %s " RENAME COLUMN " %s " TO temp_change_size ' % ( self . _table , k ) )
2011-09-09 14:33:49 +00:00
cr . execute ( ' ALTER TABLE " %s " ADD COLUMN " %s " %s ' % ( self . _table , k , pg_varchar ( f . size ) ) )
cr . execute ( ' UPDATE " %s " SET " %s " =temp_change_size:: %s ' % ( self . _table , k , pg_varchar ( f . size ) ) )
2009-11-24 10:32:48 +00:00
cr . execute ( ' ALTER TABLE " %s " DROP COLUMN temp_change_size CASCADE ' % ( self . _table , ) )
2008-12-14 16:46:47 +00:00
cr . commit ( )
2012-01-24 12:42:52 +00:00
_schema . debug ( " Table ' %s ' : column ' %s ' (type varchar) changed size from %s to %s " ,
2010-09-08 11:02:50 +00:00
self . _table , k , f_pg_size , f . size )
2008-12-14 16:46:47 +00:00
for c in casts :
if ( f_pg_type == c [ 0 ] ) and ( f . _type == c [ 1 ] ) :
2010-03-06 19:59:55 +00:00
if f_pg_type != f_obj_type :
2009-09-24 14:22:27 +00:00
ok = True
cr . execute ( ' ALTER TABLE " %s " RENAME COLUMN " %s " TO temp_change_size ' % ( self . _table , k ) )
cr . execute ( ' ALTER TABLE " %s " ADD COLUMN " %s " %s ' % ( self . _table , k , c [ 2 ] ) )
cr . execute ( ( ' UPDATE " %s " SET " %s " =temp_change_size ' + c [ 3 ] ) % ( self . _table , k ) )
cr . execute ( ' ALTER TABLE " %s " DROP COLUMN temp_change_size CASCADE ' % ( self . _table , ) )
cr . commit ( )
2012-01-24 12:42:52 +00:00
_schema . debug ( " Table ' %s ' : column ' %s ' changed type from %s to %s " ,
2010-09-08 11:02:50 +00:00
self . _table , k , c [ 0 ] , c [ 1 ] )
2009-09-24 14:22:27 +00:00
break
2008-12-14 16:46:47 +00:00
if f_pg_type != f_obj_type :
if not ok :
2010-06-28 17:56:37 +00:00
i = 0
while True :
2010-11-23 13:57:48 +00:00
newname = k + ' _moved ' + str ( i )
2010-06-28 17:56:37 +00:00
cr . execute ( " SELECT count(1) FROM pg_class c,pg_attribute a " \
" WHERE c.relname= %s " \
" AND a.attname= %s " \
" AND c.oid=a.attrelid " , ( self . _table , newname ) )
if not cr . fetchone ( ) [ 0 ] :
break
2010-09-03 10:59:56 +00:00
i + = 1
2010-07-04 12:57:27 +00:00
if f_pg_notnull :
cr . execute ( ' ALTER TABLE " %s " ALTER COLUMN " %s " DROP NOT NULL ' % ( self . _table , k ) )
2010-06-28 17:56:37 +00:00
cr . execute ( ' ALTER TABLE " %s " RENAME COLUMN " %s " TO " %s " ' % ( self . _table , k , newname ) )
cr . execute ( ' ALTER TABLE " %s " ADD COLUMN " %s " %s ' % ( self . _table , k , get_pg_type ( f ) [ 1 ] ) )
2011-09-19 20:18:45 +00:00
cr . execute ( " COMMENT ON COLUMN %s . \" %s \" IS %% s " % ( self . _table , k ) , ( f . string , ) )
2012-01-24 12:42:52 +00:00
_schema . debug ( " Table ' %s ' : column ' %s ' has changed type (DB= %s , def= %s ), data moved to column %s ! " ,
2010-09-03 10:59:56 +00:00
self . _table , k , f_pg_type , f . _type , newname )
2008-12-14 16:46:47 +00:00
2008-07-22 14:24:36 +00:00
# if the field is required and hasn't got a NOT NULL constraint
if f . required and f_pg_notnull == 0 :
# set the field to the default value if any
2008-08-12 14:44:56 +00:00
if k in self . _defaults :
2009-11-25 14:30:58 +00:00
if callable ( self . _defaults [ k ] ) :
2011-09-26 09:01:56 +00:00
default = self . _defaults [ k ] ( self , cr , SUPERUSER_ID , context )
2009-11-25 14:30:58 +00:00
else :
default = self . _defaults [ k ]
2008-12-09 13:35:40 +00:00
if ( default is not None ) :
ss = self . _columns [ k ] . _symbol_set
2009-10-01 11:06:41 +00:00
query = ' UPDATE " %s " SET " %s " = %s WHERE " %s " is NULL ' % ( self . _table , k , ss [ 0 ] , k )
2008-12-09 13:35:40 +00:00
cr . execute ( query , ( ss [ 1 ] ( default ) , ) )
2008-07-22 14:24:36 +00:00
# add the NOT NULL constraint
2008-12-14 16:46:47 +00:00
cr . commit ( )
2008-07-22 14:24:36 +00:00
try :
2011-01-15 17:04:10 +00:00
cr . execute ( ' ALTER TABLE " %s " ALTER COLUMN " %s " SET NOT NULL ' % ( self . _table , k ) , log_exceptions = False )
2008-07-22 14:24:36 +00:00
cr . commit ( )
2012-01-24 12:42:52 +00:00
_schema . debug ( " Table ' %s ' : column ' %s ' : added NOT NULL constraint " ,
self . _table , k )
2010-06-15 13:27:22 +00:00
except Exception :
2010-09-03 10:59:56 +00:00
msg = " Table ' %s ' : unable to set a NOT NULL constraint on column ' %s ' ! \n " \
" If you want to have it, you should update the records and execute manually: \n " \
2010-09-08 11:02:50 +00:00
" ALTER TABLE %s ALTER COLUMN %s SET NOT NULL "
2012-01-24 12:42:52 +00:00
_schema . warning ( msg , self . _table , k , self . _table , k )
2008-07-22 14:24:36 +00:00
cr . commit ( )
elif not f . required and f_pg_notnull == 1 :
2008-12-09 13:35:40 +00:00
cr . execute ( ' ALTER TABLE " %s " ALTER COLUMN " %s " DROP NOT NULL ' % ( self . _table , k ) )
2008-07-22 14:24:36 +00:00
cr . commit ( )
2012-01-24 12:42:52 +00:00
_schema . debug ( " Table ' %s ' : column ' %s ' : dropped NOT NULL constraint " ,
self . _table , k )
2010-08-26 15:00:06 +00:00
# Verify index
2008-12-09 13:35:40 +00:00
indexname = ' %s _ %s _index ' % ( self . _table , k )
cr . execute ( " SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s " , ( indexname , self . _table ) )
2010-06-28 17:56:37 +00:00
res2 = cr . dictfetchall ( )
if not res2 and f . select :
2008-12-09 13:35:40 +00:00
cr . execute ( ' CREATE INDEX " %s _ %s _index " ON " %s " ( " %s " ) ' % ( self . _table , k , self . _table , k ) )
2008-07-22 14:24:36 +00:00
cr . commit ( )
2010-08-26 15:00:06 +00:00
if f . _type == ' text ' :
# FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2010-09-03 10:59:56 +00:00
msg = " Table ' %s ' : Adding (b-tree) index for text column ' %s ' . " \
" This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts " \
" because there is a length limit for indexable btree values! \n " \
2010-09-08 11:02:50 +00:00
" Use a search view instead if you simply want to make the field searchable. "
2012-01-24 12:42:52 +00:00
_schema . warning ( msg , self . _table , k , f . _type )
2010-06-28 17:56:37 +00:00
if res2 and not f . select :
2008-12-09 13:35:40 +00:00
cr . execute ( ' DROP INDEX " %s _ %s _index " ' % ( self . _table , k ) )
2008-07-22 14:24:36 +00:00
cr . commit ( )
2010-09-08 11:02:50 +00:00
msg = " Table ' %s ' : dropping index for column ' %s ' of type ' %s ' as it is not required anymore "
2012-01-24 12:42:52 +00:00
_schema . debug ( msg , self . _table , k , f . _type )
2010-08-26 15:00:06 +00:00
2008-07-22 14:24:36 +00:00
if isinstance ( f , fields . many2one ) :
2011-09-26 11:12:26 +00:00
dest_model = self . pool . get ( f . _obj )
ref = dest_model . _table
2008-07-22 14:24:36 +00:00
if ref != ' ir_actions ' :
2009-02-14 05:35:17 +00:00
cr . execute ( ' SELECT confdeltype, conname FROM pg_constraint as con, pg_class as cl1, pg_class as cl2, '
' pg_attribute as att1, pg_attribute as att2 '
' WHERE con.conrelid = cl1.oid '
' AND cl1.relname = %s '
' AND con.confrelid = cl2.oid '
' AND cl2.relname = %s '
' AND array_lower(con.conkey, 1) = 1 '
' AND con.conkey[1] = att1.attnum '
' AND att1.attrelid = cl1.oid '
' AND att1.attname = %s '
' AND array_lower(con.confkey, 1) = 1 '
' AND con.confkey[1] = att2.attnum '
' AND att2.attrelid = cl2.oid '
' AND att2.attname = %s '
2008-12-09 13:35:40 +00:00
" AND con.contype = ' f ' " , ( self . _table , ref , k , ' id ' ) )
2010-06-28 17:56:37 +00:00
res2 = cr . dictfetchall ( )
if res2 :
2011-09-26 11:12:26 +00:00
if res2 [ 0 ] [ ' confdeltype ' ] != POSTGRES_CONFDELTYPES . get ( ( f . ondelete or ' set null ' ) . upper ( ) , ' a ' ) :
2010-06-28 17:56:37 +00:00
cr . execute ( ' ALTER TABLE " ' + self . _table + ' " DROP CONSTRAINT " ' + res2 [ 0 ] [ ' conname ' ] + ' " ' )
2011-09-26 11:12:26 +00:00
self . _m2o_add_foreign_key_checked ( k , dest_model , f . ondelete )
2008-07-22 14:24:36 +00:00
cr . commit ( )
2012-01-24 12:42:52 +00:00
_schema . debug ( " Table ' %s ' : column ' %s ' : XXX " ,
2010-09-08 11:02:50 +00:00
self . _table , k )
2011-06-14 10:13:11 +00:00
# The field doesn't exist in database. Create it if necessary.
2011-06-14 09:37:15 +00:00
else :
2010-06-28 17:56:37 +00:00
if not isinstance ( f , fields . function ) or f . store :
# add the missing field
cr . execute ( ' ALTER TABLE " %s " ADD COLUMN " %s " %s ' % ( self . _table , k , get_pg_type ( f ) [ 1 ] ) )
2011-09-19 20:18:45 +00:00
cr . execute ( " COMMENT ON COLUMN %s . \" %s \" IS %% s " % ( self . _table , k ) , ( f . string , ) )
2012-01-24 12:42:52 +00:00
_schema . debug ( " Table ' %s ' : added column ' %s ' with definition= %s " ,
2010-09-08 11:02:50 +00:00
self . _table , k , get_pg_type ( f ) [ 1 ] )
2010-06-28 17:56:37 +00:00
# initialize it
if not create and k in self . _defaults :
if callable ( self . _defaults [ k ] ) :
2011-09-26 09:01:56 +00:00
default = self . _defaults [ k ] ( self , cr , SUPERUSER_ID , context )
2010-06-28 17:56:37 +00:00
else :
default = self . _defaults [ k ]
ss = self . _columns [ k ] . _symbol_set
query = ' UPDATE " %s " SET " %s " = %s ' % ( self . _table , k , ss [ 0 ] )
cr . execute ( query , ( ss [ 1 ] ( default ) , ) )
cr . commit ( )
2012-01-24 17:30:17 +00:00
_logger . debug ( " Table ' %s ' : setting default value of new column %s " , self . _table , k )
2010-06-28 17:56:37 +00:00
2011-06-14 10:13:11 +00:00
# remember the functions to call for the stored fields
2010-06-28 17:56:37 +00:00
if isinstance ( f , fields . function ) :
order = 10
2011-06-14 10:13:11 +00:00
if f . store is not True : # i.e. if f.store is a dict
2010-06-28 17:56:37 +00:00
order = f . store [ f . store . keys ( ) [ 0 ] ] [ 2 ]
2011-06-14 09:37:15 +00:00
todo_end . append ( ( order , self . _update_store , ( f , k ) ) )
2010-06-28 17:56:37 +00:00
# and add constraints if needed
if isinstance ( f , fields . many2one ) :
if not self . pool . get ( f . _obj ) :
raise except_orm ( ' Programming Error ' , ( ' There is no reference available for %s ' ) % ( f . _obj , ) )
2011-09-26 11:12:26 +00:00
dest_model = self . pool . get ( f . _obj )
ref = dest_model . _table
2010-06-28 17:56:37 +00:00
# ir_actions is inherited so foreign key doesn't work on it
if ref != ' ir_actions ' :
2011-09-26 11:12:26 +00:00
self . _m2o_add_foreign_key_checked ( k , dest_model , f . ondelete )
2010-06-28 17:56:37 +00:00
if f . select :
cr . execute ( ' CREATE INDEX " %s _ %s _index " ON " %s " ( " %s " ) ' % ( self . _table , k , self . _table , k ) )
if f . required :
try :
cr . commit ( )
2011-01-15 17:04:10 +00:00
cr . execute ( ' ALTER TABLE " %s " ALTER COLUMN " %s " SET NOT NULL ' % ( self . _table , k ) , log_exceptions = False )
2012-01-24 12:42:52 +00:00
_schema . debug ( " Table ' %s ' : column ' %s ' : added a NOT NULL constraint " ,
2010-09-08 11:02:50 +00:00
self . _table , k )
2010-06-28 17:56:37 +00:00
except Exception :
2010-09-03 10:59:56 +00:00
msg = " WARNING: unable to set column %s of table %s not null ! \n " \
2011-03-24 09:50:12 +00:00
" Try to re-run: openerp-server --update=module \n " \
2010-09-03 10:59:56 +00:00
" If it doesn ' t work, update records and execute manually: \n " \
2010-09-08 11:02:50 +00:00
" ALTER TABLE %s ALTER COLUMN %s SET NOT NULL "
2012-01-24 12:42:52 +00:00
_logger . warning ( msg , k , self . _table , self . _table , k )
2010-06-28 17:56:37 +00:00
cr . commit ( )
2008-12-07 12:45:02 +00:00
2008-07-22 14:24:36 +00:00
else :
2010-06-18 10:16:41 +00:00
cr . execute ( " SELECT relname FROM pg_class WHERE relkind IN ( ' r ' , ' v ' ) AND relname= %s " , ( self . _table , ) )
2008-07-22 14:24:36 +00:00
create = not bool ( cr . fetchone ( ) )
2010-05-12 10:46:15 +00:00
cr . commit ( ) # start a new transaction
2011-06-14 09:37:15 +00:00
self . _add_sql_constraints ( cr )
if create :
self . _execute_sql ( cr )
if store_compute :
self . _parent_store_compute ( cr )
cr . commit ( )
return todo_end
def _auto_end(self, cr, context=None):
    """Create the foreign keys recorded by _auto_init.

    Each entry of ``self._foreign_keys`` is a (table, column, referenced
    table, on-delete action) tuple queued during schema initialization.
    """
    for table, column, ref_table, on_delete in self._foreign_keys:
        cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (table, column, ref_table, on_delete))
    cr.commit()
    # the queue is only meaningful during _auto_init/_auto_end, so drop it
    del self._foreign_keys
def _table_exist(self, cr):
    """Return a truthy row count if this model's table (or view) exists."""
    cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
    return cr.rowcount
def _create_table(self, cr):
    """Create this model's table with only the mandatory ``id`` column."""
    cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS' % (self._table,))
    # table comment is passed as a query parameter, hence the escaped %%s
    cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
    _schema.debug("Table '%s': created", self._table)
def _parent_columns_exist(self, cr):
    """Return a truthy row count if the ``parent_left`` column already
    exists on this model's table (used by the parent_store machinery).
    """
    cr.execute("""SELECT c.relname
        FROM pg_class c, pg_attribute a
        WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
        """, (self._table, 'parent_left'))
    return cr.rowcount
def _create_parent_columns(self, cr):
    """Add the ``parent_left``/``parent_right`` columns used by the
    parent_store nested-set structure, and warn about field definitions
    that would make the structure inefficient or unsafe.
    """
    cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
    cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
    if 'parent_left' not in self._columns:
        _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
                      self._table)
        _schema.debug("Table '%s': added column '%s' with definition=%s",
                      self._table, 'parent_left', 'INTEGER')
    elif not self._columns['parent_left'].select:
        # nested-set lookups always filter on parent_left, so it must be indexed
        _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
                      self._table)
    if 'parent_right' not in self._columns:
        _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
                      self._table)
        _schema.debug("Table '%s': added column '%s' with definition=%s",
                      self._table, 'parent_right', 'INTEGER')
    elif not self._columns['parent_right'].select:
        _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
                      self._table)
    if self._columns[self._parent_name].ondelete != 'cascade':
        # without cascade deletes, orphaned children would corrupt the tree
        _logger.error("The column %s on object %s must be set as ondelete='cascade'",
                      self._parent_name, self._name)
    cr.commit()
def _add_log_columns(self, cr):
    """Ensure the audit-trail columns declared in LOG_ACCESS_COLUMNS
    (create_uid/create_date/write_uid/write_date) exist on the table,
    adding any that are missing.
    """
    for field, field_def in LOG_ACCESS_COLUMNS.items():
        cr.execute("""
            SELECT c.relname
              FROM pg_class c, pg_attribute a
             WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
            """, (self._table, field))
        if not cr.rowcount:
            cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, field, field_def))
            cr.commit()
            _schema.debug("Table '%s': added column '%s' with definition=%s",
                          self._table, field, field_def)
def _select_column_data(self, cr):
    """Return {column name: catalog row} for every column of this
    model's table, including a computed ``size``.

    attlen is the number of bytes necessary to represent the type when
    the type has a fixed size. If the type has a varying size attlen is
    -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
    Thus the query can return a negative size for an unlimited varchar.
    """
    cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN a.atttypmod-4 ELSE a.attlen END as size "
               "FROM pg_class c,pg_attribute a,pg_type t "
               "WHERE c.relname=%s "
               "AND c.oid=a.attrelid "
               "AND a.atttypid=t.oid", (self._table,))
    return dict((row['attname'], row) for row in cr.dictfetchall())
def _o2m_raise_on_missing_reference(self, cr, f):
    """Raise if the one2many field ``f`` points to a model that does not
    declare the expected inverse many2one field (``f._fields_id``).
    """
    # TODO this check should be a method on fields.one2many.
    other = self.pool.get(f._obj)
    if other:
        # membership test on the mapping itself instead of .keys() lists
        if f._fields_id not in other._columns:
            if f._fields_id not in other._inherit_fields:
                raise except_orm('Programming Error', ("There is no reference field '%s' found for '%s'") % (f._fields_id, f._obj,))
def _m2m_raise_or_create_relation(self, cr, f):
    """Create the relation table for many2many field ``f`` if it does not
    exist yet; raise if the destination model is unknown.
    """
    m2m_tbl, col1, col2 = f._sql_names(self)
    cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
    if not cr.dictfetchall():
        if not self.pool.get(f._obj):
            raise except_orm('Programming Error', ('Many2Many destination model does not exist: `%s`') % (f._obj,))
        dest_model = self.pool.get(f._obj)
        ref = dest_model._table
    cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s")) WITH OIDS' % (m2m_tbl, col1, col2, col1, col2)) if False else None
def _add_sql_constraints(self, cr):
    """
    Modify this model's database table constraints so they match the ones
    declared in _sql_constraints.

    For each (key, definition, message) entry, the existing constraint is
    compared with the declared definition; a changed constraint is dropped
    and re-added, a missing one is added.
    """
    # NOTE: third tuple element (the user-facing message) is unused here;
    # named _desc instead of _ so the gettext alias is not shadowed.
    for (key, con, _desc) in self._sql_constraints:
        conname = '%s_%s' % (self._table, key)
        cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
        existing_constraints = cr.dictfetchall()
        sql_actions = {
            'drop': {
                'execute': False,
                'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname,),
                'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
                    self._table, conname, con),
                'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
                'order': 1,
            },
            'add': {
                'execute': False,
                'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
                'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
                'msg_err': "Table '%s': unable to add \'%s\' constraint !\nIf you want to have it, you should update the records and execute manually:\n%%s" % (
                    self._table, con),
                'order': 2,
            },
        }
        if not existing_constraints:
            # constraint does not exist yet: add it
            sql_actions['add']['execute'] = True
            sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'],)
        elif con.lower() not in [item['condef'].lower() for item in existing_constraints]:
            # constraint exists but its definition has changed: drop + re-add
            sql_actions['drop']['execute'] = True
            sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(),)
            sql_actions['add']['execute'] = True
            sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'],)
        # run the selected actions in order (drop before add)
        sql_actions = list(sql_actions.values())
        sql_actions.sort(key=lambda x: x['order'])
        for sql_action in [action for action in sql_actions if action['execute']]:
            try:
                cr.execute(sql_action['query'])
                cr.commit()
                _schema.debug(sql_action['msg_ok'])
            except Exception:
                # best-effort: log and roll back so the next constraint
                # is attempted on a clean transaction
                _schema.warning(sql_action['msg_err'])
                cr.rollback()
def _execute_sql(self, cr):
    """Execute the SQL code from the _sql attribute (if any).

    Statements are separated by ';'; embedded newlines are flattened so
    each statement is sent as a single line, and empty fragments skipped.
    """
    if hasattr(self, "_sql"):
        for line in self._sql.split(';'):
            line2 = line.replace('\n', ' ').strip()
            if line2:
                cr.execute(line2)
                cr.commit()
#
# Update objects that use this one to update their _inherits fields
#
2010-03-19 10:58:38 +00:00
2008-07-22 14:24:36 +00:00
def _inherits_reload_src(self):
    """Recompute the _inherit_fields mapping on each _inherits'd child model."""
    for obj in self.pool.models.values():
        if self._name in obj._inherits:
            obj._inherits_reload()
def _inherits_reload(self):
    """Recompute the _inherit_fields mapping.

    Each entry maps a field name to a tuple
    (parent model, link field, column object, original parent model).
    This will also call itself on each _inherits'd child model.
    """
    res = {}
    for table in self._inherits:
        other = self.pool.get(table)
        for col in other._columns:
            res[col] = (table, self._inherits[table], other._columns[col], table)
        for col in other._inherit_fields:
            # keep the original declaring model for fields inherited transitively
            res[col] = (table, self._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
    self._inherit_fields = res
    self._all_columns = self._get_column_infos()
    self._inherits_reload_src()
def _get_column_infos(self):
    """Return a dict mapping all field names (direct fields and
    fields inherited via _inherits) to a ``column_info`` struct
    giving detailed column information."""
    result = {}
    for k, (parent, m2o, col, original_parent) in self._inherit_fields.items():
        result[k] = fields.column_info(k, col, parent, m2o, original_parent)
    # direct columns take precedence over inherited ones with the same name
    for k, col in self._columns.items():
        result[k] = fields.column_info(k, col)
    return result
def _inherits_check(self):
    """Verify every _inherits link field exists and is a required,
    ondelete='cascade' many2one; create or fix it otherwise.
    """
    for table, field_name in self._inherits.items():
        if field_name not in self._columns:
            _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
            self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
                                                        required=True, ondelete="cascade")
        elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() != "cascade":
            # a non-cascading or optional link would leave orphaned delegation rows
            _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade", forcing it.', field_name, self._name)
            self._columns[field_name].required = True
            self._columns[field_name].ondelete = "cascade"
#def __getattr__(self, name):
# """
# Proxies attribute accesses to the `inherits` parent so we can call methods defined on the inherited parent
# (though inherits doesn't use Python inheritance).
# Handles translating between local ids and remote ids.
# Known issue: doesn't work correctly when using python's own super(), don't involve inherit-based inheritance
# when you have inherits.
# """
# for model, field in self._inherits.iteritems():
# proxy = self.pool.get(model)
# if hasattr(proxy, name):
# attribute = getattr(proxy, name)
# if not hasattr(attribute, '__call__'):
# return attribute
# break
# else:
# return super(orm, self).__getattr__(name)
# def _proxy(cr, uid, ids, *args, **kwargs):
# objects = self.browse(cr, uid, ids, kwargs.get('context', None))
# lst = [obj[field].id for obj in objects if obj[field]]
# return getattr(proxy, name)(cr, uid, lst, *args, **kwargs)
# return _proxy
2010-06-15 13:27:22 +00:00
2010-06-09 14:08:02 +00:00
2011-08-12 14:28:14 +00:00
def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
    """ Return the definition of each field.

    The returned value is a dictionary (indexed by field name) of
    dictionaries. The _inherits'd fields are included. The string, help,
    and selection (if present) attributes are translated.

    :param cr: database cursor
    :param user: current user id
    :param allfields: list of field names to restrict the result to (all fields if falsy)
    :param context: context arguments, like lang, time zone
    :return: dictionary of field dictionaries, each one describing a field of the business object
    :raise AccessError: * if user has no create/write rights on the requested object
    """
    if context is None:
        context = {}
    # NOTE(review): the write_access parameter is ignored — it is always
    # recomputed from the user's actual rights; confirm before relying on it.
    write_access = self.check_write(cr, user, False) or \
        self.check_create(cr, user, False)
    res = {}
    translation_obj = self.pool.get('ir.translation')
    for parent in self._inherits:
        res.update(self.pool.get(parent).fields_get(cr, user, allfields, context))
    for f, field in self._columns.items():
        if allfields and f not in allfields:
            continue
        res[f] = fields.field_to_dict(self, cr, user, field, context=context)
        if not write_access:
            # read-only users see every field frozen
            res[f]['readonly'] = True
            res[f]['states'] = {}
        if 'string' in res[f]:
            res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context.get('lang', False) or 'en_US')
            if res_trans:
                res[f]['string'] = res_trans
        if 'help' in res[f]:
            help_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'help', context.get('lang', False) or 'en_US')
            if help_trans:
                res[f]['help'] = help_trans
        if 'selection' in res[f]:
            if isinstance(field.selection, (tuple, list)):
                # translate each selection label, keeping the stored key
                sel2 = []
                for key, val in field.selection:
                    val2 = None
                    if val:
                        val2 = translation_obj._get_source(cr, user, self._name + ',' + f, 'selection', context.get('lang', False) or 'en_US', val)
                    sel2.append((key, val2 or val))
                res[f]['selection'] = sel2
    return res
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
    """ Read records with given ids with the given fields

    :param cr: database cursor
    :param user: current user id
    :param ids: id or list of the ids of the records to read
    :param fields: optional list of field names to return (default: all fields would be returned)
    :type fields: list (example ['field_name_1', ...])
    :param context: optional context dictionary - it may contain keys for specifying certain options
                    like ``context_lang``, ``context_tz`` to alter the results of the call.
                    A special ``bin_size`` boolean flag may also be passed in the context to request the
                    value of all fields.binary columns to be returned as the size of the binary instead of its
                    contents. This can also be selectively overriden by passing a field-specific flag
                    in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
                    Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
    :return: list of dictionaries ((dictionary per record asked)) with requested field values
    :rtype: [{'name_of_the_field': value, ...}, ...]
    :raise AccessError: * if user has no read rights on the requested object
                        * if user tries to bypass access rules for read on the requested object
    """
    if not context:
        context = {}
    self.check_read(cr, user)
    if not fields:
        # default to every direct and inherited field
        fields = list(set(self._columns.keys()) | set(self._inherit_fields.keys()))
    if isinstance(ids, (int, long)):
        select = [ids]
    else:
        select = ids
    # callers may pass browse-style dicts; reduce them to plain ids
    select = [x['id'] if isinstance(x, dict) else x for x in select]
    result = self._read_flat(cr, user, select, fields, context, load)

    # normalize SQL NULLs to the ORM convention of False
    for r in result:
        for key, v in r.items():
            if v is None:
                r[key] = False

    if isinstance(ids, (int, long, dict)):
        return result and result[0] or False
    return result
def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
    """Low-level read: fetch the requested columns for ``ids``.

    Classic (stored) columns are read with a single SQL query per id
    batch (filtered by ir.rule access rules); _inherits'd fields are
    delegated to the parent model's read(); function fields and other
    non-classic columns are computed afterwards; finally field-level
    group restrictions blank out fields the user may not read.
    """
    if not context:
        context = {}
    if not ids:
        return []
    if fields_to_read is None:
        fields_to_read = list(self._columns.keys())

    # Construct a clause for the security rules.
    # 'tables' holds the list of tables necessary for the SELECT including the ir.rule clauses,
    # or will at least contain self._table.
    rule_clause, rule_params, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)

    # all inherited fields + all non inherited fields for which the attribute whose name is in load is True
    fields_pre = [f for f in fields_to_read if
                  f == self.CONCURRENCY_CHECK_FIELD
                  or (f in self._columns and getattr(self._columns[f], '_classic_write'))
                  ] + list(self._inherits.values())
    res = []
    if len(fields_pre):
        def convert_field(f):
            f_qual = '%s."%s"' % (self._table, f)  # need fully-qualified references in case len(tables) > 1
            if f in ('create_date', 'write_date'):
                return "date_trunc('second', %s) as %s" % (f_qual, f)
            if f == self.CONCURRENCY_CHECK_FIELD:
                if self._log_access:
                    return "COALESCE(%s.write_date, %s.create_date, now())::timestamp AS %s" % (self._table, self._table, f,)
                return "now()::timestamp AS %s" % (f,)
            if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
                # bin_size: return the stored size instead of the payload
                return 'length(%s) as "%s"' % (f_qual, f)
            return f_qual
        fields_pre2 = [convert_field(f) for f in fields_pre]
        order_by = self._parent_order or self._order
        select_fields = ','.join(fields_pre2 + [self._table + '.id'])
        query = 'SELECT %s FROM %s WHERE %s.id IN %%s' % (select_fields, ','.join(tables), self._table)
        if rule_clause:
            query += " AND " + (' OR '.join(rule_clause))
        query += " ORDER BY " + order_by
        for sub_ids in cr.split_for_in_conditions(ids):
            if rule_clause:
                cr.execute(query, [tuple(sub_ids)] + rule_params)
                if cr.rowcount != len(sub_ids):
                    # missing rows mean the rules filtered some ids out (or they are deleted)
                    raise except_orm(_('AccessError'),
                                     _('Operation prohibited by access rules, or performed on an already deleted document (Operation: read, Document type: %s).')
                                     % (self._description,))
            else:
                cr.execute(query, (tuple(sub_ids),))
            res.extend(cr.dictfetchall())
    else:
        res = [{'id': x} for x in ids]

    for f in fields_pre:
        if f == self.CONCURRENCY_CHECK_FIELD:
            continue
        if self._columns[f].translate:
            ids = [x['id'] for x in res]
            #TODO: optimize out of this loop
            res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name + ',' + f, 'model', context.get('lang', False) or 'en_US', ids)
            for r in res:
                r[f] = res_trans.get(r['id'], False) or r[f]

    for table in self._inherits:
        col = self._inherits[table]
        cols = [x for x in intersect(self._inherit_fields.keys(), fields_to_read) if x not in self._columns]
        if not cols:
            continue
        res2 = self.pool.get(table).read(cr, user, [x[col] for x in res], cols, context, load)
        res3 = {}
        for r in res2:
            res3[r['id']] = r
            del r['id']
        for record in res:
            if not record[col]:  # if the record is deleted from the _inherits table?
                continue
            record.update(res3[record[col]])
            if col not in fields_to_read:
                del record[col]

    # all fields which need to be post-processed by a simple function (symbol_get)
    fields_post = [x for x in fields_to_read if x in self._columns and self._columns[x]._symbol_get]
    if fields_post:
        for r in res:
            for f in fields_post:
                r[f] = self._columns[f]._symbol_get(r[f])
    ids = [x['id'] for x in res]

    # all non inherited fields for which the attribute whose name is in load is False
    fields_post = [x for x in fields_to_read if x in self._columns and not getattr(self._columns[x], load)]

    # Compute POST fields, grouping function fields that share a 'multi' key
    # so each group is computed with a single call
    todo = {}
    for f in fields_post:
        todo.setdefault(self._columns[f]._multi, [])
        todo[self._columns[f]._multi].append(f)
    for key, val in todo.items():
        if key:
            res2 = self._columns[val[0]].get(cr, self, ids, val, user, context=context, values=res)
            assert res2 is not None, \
                'The function field "%s" on the "%s" model returned None\n' \
                '(a dictionary was expected).' % (val[0], self._name)
            for pos in val:
                for record in res:
                    # SECURITY NOTE: eval of a function-field result; kept for
                    # compatibility (#TOCHECK: why a string instead of a dict in python2.6)
                    if isinstance(res2[record['id']], str): res2[record['id']] = eval(res2[record['id']])
                    multi_fields = res2.get(record['id'], {})
                    if multi_fields:
                        record[pos] = multi_fields.get(pos, [])
        else:
            for f in val:
                res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
                for record in res:
                    if res2:
                        record[f] = res2[record['id']]
                    else:
                        record[f] = []

    # blank out fields restricted to groups the user does not belong to
    readonly = None
    for vals in res:
        for field in vals.copy():
            fobj = None
            if field in self._columns:
                fobj = self._columns[field]
            if not fobj:
                continue
            groups = fobj.read
            if groups:
                edit = False
                for group in groups:
                    module = group.split(".")[0]
                    grp = group.split(".")[1]
                    cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s",
                               (grp, module, 'res.groups', user))
                    readonly = cr.fetchall()
                    if readonly[0][0] >= 1:
                        edit = True
                        break
                    else:
                        edit = False
                if not edit:
                    # replace the value with a type-matching placeholder
                    if type(vals[field]) == list:
                        vals[field] = []
                    elif type(vals[field]) == float:
                        vals[field] = 0
                    elif type(vals[field]) == str:
                        vals[field] = '=No Permission='
                    else:
                        vals[field] = False
    return res
# TODO check READ access
def perm_read(self, cr, user, ids, context=None, details=True):
    """
    Returns some metadata about the given records.

    :param details: if True, \*_uid fields are replaced with the name of the user
    :return: list of ownership dictionaries for each requested record
    :rtype: list of dictionaries with the following keys:

        * id: object id
        * create_uid: user who created the record
        * create_date: date when the record was created
        * write_uid: last user who changed the record
        * write_date: date of the last change to the record
        * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
    """
    if not context:
        context = {}
    if not ids:
        return []
    uniq = isinstance(ids, (int, long))
    if uniq:
        ids = [ids]
    fields = ['id']
    if self._log_access:
        fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
    quoted_table = '"%s"' % self._table
    fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
    query = '''SELECT %s, __imd.module, __imd.name
               FROM %s LEFT JOIN ir_model_data __imd
               ON (__imd.model = %%s and __imd.res_id = %s.id)
               WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
    cr.execute(query, (self._name, tuple(ids)))
    res = cr.dictfetchall()
    for r in res:
        for key in r:
            r[key] = r[key] or False
            if details and key in ('write_uid', 'create_uid') and r[key]:
                try:
                    r[key] = self.pool.get('res.users').name_get(cr, user, [r[key]])[0]
                except Exception:
                    pass  # Leave the numeric uid there
        r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
        del r['name'], r['module']
    if uniq:
        # Bug fix: the result list holds exactly one row for a scalar id;
        # the previous code indexed it with the record id (res[ids[0]]),
        # raising IndexError for any id > 0.
        return res[0]
    return res
def _check_concurrency(self, cr, ids, context):
    """Raise a ConcurrencyException if any of ``ids`` was modified in the
    database after the timestamp the client supplied in
    ``context[CONCURRENCY_CHECK_FIELD]`` (an optimistic locking check).
    """
    if not context:
        return
    if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
        return
    # one (id, client timestamp) pair per record to check
    check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, now())::timestamp)"
    for sub_ids in cr.split_for_in_conditions(ids):
        ids_to_check = []
        for rec_id in sub_ids:
            # keys in the context map are of the form "model,id"
            id_ref = "%s,%s" % (self._name, rec_id)
            update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
            if update_date:
                ids_to_check.extend([rec_id, update_date])
        if not ids_to_check:
            continue
        cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause] * (len(ids_to_check) // 2))), tuple(ids_to_check))
        res = cr.fetchone()
        if res:
            # mention the first one only to keep the error message readable
            raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
def check_access_rights(self, cr, uid, operation, raise_exception=True):  # no context on purpose.
    """Verifies that the operation given by ``operation`` is allowed for the user
       according to the access rights."""
    return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
def check_create(self, cr, uid, raise_exception=True):
    """Shortcut for check_access_rights(..., 'create', ...)."""
    return self.check_access_rights(cr, uid, 'create', raise_exception)

def check_read(self, cr, uid, raise_exception=True):
    """Shortcut for check_access_rights(..., 'read', ...)."""
    return self.check_access_rights(cr, uid, 'read', raise_exception)

def check_unlink(self, cr, uid, raise_exception=True):
    """Shortcut for check_access_rights(..., 'unlink', ...)."""
    return self.check_access_rights(cr, uid, 'unlink', raise_exception)

def check_write(self, cr, uid, raise_exception=True):
    """Shortcut for check_access_rights(..., 'write', ...)."""
    return self.check_access_rights(cr, uid, 'write', raise_exception)
2010-03-30 17:28:06 +00:00
def check_access_rule(self, cr, uid, ids, operation, context=None):
    """Verifies that the operation given by ``operation`` is allowed for the user
    according to ir.rules.

    :param operation: one of ``write``, ``unlink``
    :raise except_orm: * if current ir.rules do not permit this operation.
    :return: None if the operation is allowed
    """
    # The super-user bypasses all record rules.
    if uid == SUPERUSER_ID:
        return

    if self.is_transient():
        # Only one single implicit access rule for transient models: owner only!
        # This is ok to hardcode because we assert that TransientModels always
        # have log_access enabled and this the create_uid column is always there.
        # And even with _inherits, these fields are always present in the local
        # table too, so no need for JOINs.
        # FIX: the placeholder/interpolation here was garbled in the previous
        # revision (broken "% s elf._table"); only the table name is interpolated
        # into the SQL text, the ids go through query parameters.
        cr.execute("""SELECT distinct create_uid
                      FROM %s
                      WHERE id IN %%s""" % self._table, (tuple(ids),))
        uids = [x[0] for x in cr.fetchall()]
        if len(uids) != 1 or uids[0] != uid:
            raise except_orm(_('AccessError'), '%s access is '
                'restricted to your own records for transient models '
                '(except for the super-user).' % operation.capitalize())
    else:
        # Regular models: ask ir.rule for the applicable domain and verify
        # that every requested id survives the rule-filtered query.
        where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
        if where_clause:
            where_clause = ' and ' + ' and '.join(where_clause)
            for sub_ids in cr.split_for_in_conditions(ids):
                cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
                           ' WHERE ' + self._table + '.id IN %s' + where_clause,
                           [sub_ids] + where_params)
                if cr.rowcount != len(sub_ids):
                    raise except_orm(_('AccessError'),
                                     _('Operation prohibited by access rules, or performed on an already deleted document (Operation: %s, Document type: %s).')
                                     % (operation, self._description))
2010-03-19 10:58:38 +00:00
2009-01-15 11:57:18 +00:00
def unlink(self, cr, uid, ids, context=None):
    """
    Delete records with given ids

    :param cr: database cursor
    :param uid: current user id
    :param ids: id or list of ids
    :param context: (optional) context arguments, like lang, time zone
    :return: True
    :raise AccessError: * if user has no unlink rights on the requested object
                        * if user tries to bypass access rules for unlink on the requested object
    :raise UserError: if the record is default property for other records
    """
    if not ids:
        return True
    # Accept a single id for convenience.
    if isinstance(ids, (int, long)):
        ids = [ids]

    # Collect the stored function fields that will need recomputation,
    # BEFORE the rows disappear (fields=None means "all triggers").
    result_store = self._store_get_values(cr, uid, ids, None, context)

    self._check_concurrency(cr, ids, context)

    # Model-level ACL check; record-rule check happens further down.
    self.check_unlink(cr, uid)

    ir_property = self.pool.get('ir.property')

    # Check if the records are used as default properties.
    domain = [('res_id', '=', False),
              ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
             ]
    if ir_property.search(cr, uid, domain, context=context):
        raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))

    # Delete the records' properties.
    property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
    ir_property.unlink(cr, uid, property_ids, context=context)

    # Drop workflow instances attached to each record.
    wf_service = netsvc.LocalService("workflow")
    for oid in ids:
        wf_service.trg_delete(uid, self._name, oid, cr)

    # Record-rule (ir.rule) check for the 'unlink' operation.
    self.check_access_rule(cr, uid, ids, 'unlink', context=context)
    pool_model_data = self.pool.get('ir.model.data')
    ir_values_obj = self.pool.get('ir.values')
    for sub_ids in cr.split_for_in_conditions(ids):
        # Raw SQL delete; ORM-level cleanups are handled explicitly below.
        cr.execute('delete from ' + self._table + ' ' \
                   'where id IN %s', (sub_ids,))

        # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
        # as these are not connected with real database foreign keys, and would be dangling references.
        # Note: following steps performed as admin to avoid access rights restrictions, and with no context
        # to avoid possible side-effects during admin calls.
        # Step 1. Calling unlink of ir_model_data only for the affected IDS
        reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id', 'in', list(sub_ids)), ('model', '=', self._name)])
        # Step 2. Marching towards the real deletion of referenced records
        if reference_ids:
            pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)

        # For the same reason, removing the record relevant to ir_values:
        # both entries pointing at the record via 'value' and via res_id/model.
        ir_value_ids = ir_values_obj.search(cr, uid,
                ['|', ('value', 'in', ['%s,%s' % (self._name, sid) for sid in sub_ids]), '&', ('res_id', 'in', list(sub_ids)), ('model', '=', self._name)],
                context=context)
        if ir_value_ids:
            ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)

    # Recompute stored function fields on OTHER models that were triggered
    # by these records; ids are re-filtered since some may be gone now.
    for order, object, store_ids, fields in result_store:
        if object != self._name:
            obj = self.pool.get(object)
            cr.execute('select id from ' + obj._table + ' where id IN %s', (tuple(store_ids),))
            rids = map(lambda x: x[0], cr.fetchall())
            if rids:
                obj._store_set_values(cr, uid, rids, fields, context)

    return True
#
# TODO: Validate
#
def write(self, cr, user, ids, vals, context=None):
    """
    Update records with given ids with the given field values

    :param cr: database cursor
    :param user: current user id
    :type user: integer
    :param ids: object id or list of object ids to update according to **vals**
    :param vals: field values to update, e.g {'field_name': new_field_value, ...}
    :type vals: dictionary
    :param context: (optional) context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
    :type context: dictionary
    :return: True
    :raise AccessError: * if user has no write rights on the requested object
                        * if user tries to bypass access rules for write on the requested object
    :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
    :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)

    **Note**: The type of field values to pass in ``vals`` for relationship fields is specific:

        + For a many2many field, a list of tuples is expected.
          Here is the list of tuple that are accepted, with the corresponding semantics ::

             (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
             (1, ID, { values })    update the linked record with id = ID (write *values* on it)
             (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
             (3, ID)                cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
             (4, ID)                link to existing record with id = ID (adds a relationship)
             (5)                    unlink all (like using (3,ID) for all linked records)
             (6, 0, [IDs])          replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)

             Example:
                [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]

        + For a one2many field, a lits of tuples is expected.
          Here is the list of tuple that are accepted, with the corresponding semantics ::

             (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
             (1, ID, { values })    update the linked record with id = ID (write *values* on it)
             (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)

             Example:
                [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]

        + For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link.
        + For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``)

    """
    # Silently drop fields the user is not allowed to write (per-field
    # 'write' groups declared on the column).
    readonly = None
    for field in vals.copy():
        fobj = None
        if field in self._columns:
            fobj = self._columns[field]
        elif field in self._inherit_fields:
            fobj = self._inherit_fields[field][2]
        if not fobj:
            continue
        groups = fobj.write
        if groups:
            edit = False
            for group in groups:
                module = group.split(".")[0]
                grp = group.split(".")[1]
                cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
                           (grp, module, 'res.groups', user))
                readonly = cr.fetchall()
                if readonly[0][0] >= 1:
                    edit = True
                    break
            if not edit:
                vals.pop(field)

    if not context:
        context = {}
    if not ids:
        return True
    # Accept a single id for convenience.
    if isinstance(ids, (int, long)):
        ids = [ids]

    self._check_concurrency(cr, ids, context)
    # Model-level ACL; the record-rule check is done later, only if an SQL
    # UPDATE is actually issued.
    self.check_write(cr, user)

    result = self._store_get_values(cr, user, ids, vals.keys(), context) or []

    # No direct update of parent_left/right
    vals.pop('parent_left', None)
    vals.pop('parent_right', None)

    parents_changed = []
    parent_order = self._parent_order or self._order
    if self._parent_store and (self._parent_name in vals):
        # The parent_left/right computation may take up to
        # 5 seconds. No need to recompute the values if the
        # parent is the same.
        # Note: to respect parent_order, nodes must be processed in
        # order, so ``parents_changed`` must be ordered properly.
        parent_val = vals[self._parent_name]
        if parent_val:
            query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
                            (self._table, self._parent_name, self._parent_name, parent_order)
            cr.execute(query, (tuple(ids), parent_val))
        else:
            query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
                            (self._table, self._parent_name, parent_order)
            cr.execute(query, (tuple(ids),))
        parents_changed = map(operator.itemgetter(0), cr.fetchall())

    # Partition the written fields:
    #  - upd0/upd1: SQL fragments + params for direct (classic_write) columns
    #  - upd_todo:  function/relational fields handled via their set() method
    #  - updend:    fields living on _inherits parent tables
    upd0 = []
    upd1 = []
    upd_todo = []
    updend = []
    direct = []
    totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
    for field in vals:
        if field in self._columns:
            if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
                if (not totranslate) or not self._columns[field].translate:
                    upd0.append('"' + field + '"=' + self._columns[field]._symbol_set[0])
                    upd1.append(self._columns[field]._symbol_set[1](vals[field]))
                    direct.append(field)
                else:
                    # translatable field written in a non-source language:
                    # handled in the translation branch below
                    upd_todo.append(field)
            else:
                upd_todo.append(field)
        else:
            updend.append(field)
        if field in self._columns \
                and hasattr(self._columns[field], 'selection') \
                and vals[field]:
            self._check_selection_field_value(cr, user, field, vals[field], context=context)

    if self._log_access:
        upd0.append('write_uid=%s')
        upd0.append('write_date=now()')
        upd1.append(user)

    if len(upd0):
        self.check_access_rule(cr, user, ids, 'write', context=context)
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
                       'where id IN %s', upd1 + [sub_ids])
            # rowcount mismatch means some ids were filtered out by rules
            # or already deleted.
            if cr.rowcount != len(sub_ids):
                raise except_orm(_('AccessError'),
                                 _('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)

        if totranslate:
            # TODO: optimize
            for f in direct:
                if self._columns[f].translate:
                    src_trans = self.pool.get(self._name).read(cr, user, ids, [f])[0][f]
                    if not src_trans:
                        src_trans = vals[f]
                        # Inserting value to DB
                        self.write(cr, user, ids, {f: vals[f]})
                    self.pool.get('ir.translation')._set_ids(cr, user, self._name + ',' + f, 'model', context['lang'], ids, vals[f], src_trans)

    # call the 'set' method of fields which are not classic_write
    upd_todo.sort(lambda x, y: self._columns[x].priority - self._columns[y].priority)

    # default element in context must be removed when call a one2many or many2many
    rel_context = context.copy()
    for c in context.items():
        if c[0].startswith('default_'):
            del rel_context[c[0]]

    for field in upd_todo:
        for id in ids:
            result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []

    # Dispatch leftover fields to the _inherits parent models; anything
    # still unmatched afterwards is an unknown field.
    unknown_fields = updend[:]
    for table in self._inherits:
        col = self._inherits[table]
        nids = []
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('select distinct "' + col + '" from "' + self._table + '" ' \
                       'where id IN %s', (sub_ids,))
            nids.extend([x[0] for x in cr.fetchall()])

        v = {}
        for val in updend:
            if self._inherit_fields[val][0] == table:
                v[val] = vals[val]
                unknown_fields.remove(val)
        if v:
            self.pool.get(table).write(cr, user, nids, v, context)

    if unknown_fields:
        _logger.warning(
            'No such field(s) in model %s: %s.',
            self._name, ', '.join(unknown_fields))
    self._validate(cr, user, ids, context)

    # TODO: use _order to set dest at the right position and not first node of parent
    # We can't defer parent_store computation because the stored function
    # fields that are computer may refer (directly or indirectly) to
    # parent_left/right (via a child_of domain)
    if parents_changed:
        if self.pool._init:
            # registry still loading: defer recomputation to the end of init
            self.pool._init_parent[self._name] = True
        else:
            order = self._parent_order or self._order
            parent_val = vals[self._parent_name]
            if parent_val:
                clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
            else:
                clause, params = '%s IS NULL' % (self._parent_name,), ()

            for id in parents_changed:
                cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
                pleft, pright = cr.fetchone()
                # width of the subtree being moved (nested-set interval)
                distance = pright - pleft + 1

                # Positions of current siblings, to locate proper insertion point;
                # this can _not_ be fetched outside the loop, as it needs to be refreshed
                # after each update, in case several nodes are sequentially inserted one
                # next to the other (i.e computed incrementally)
                cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
                parents = cr.fetchall()

                # Find Position of the element
                position = None
                for (parent_pright, parent_id) in parents:
                    if parent_id == id:
                        break
                    position = parent_pright + 1

                # It's the first node of the parent
                if not position:
                    if not parent_val:
                        position = 1
                    else:
                        cr.execute('select parent_left from ' + self._table + ' where id=%s', (parent_val,))
                        position = cr.fetchone()[0] + 1

                if pleft < position <= pright:
                    # moving a node under its own subtree would corrupt the tree
                    raise except_orm(_('UserError'), _('Recursivity Detected.'))

                # Shift intervals, then relocate the moved subtree.
                if pleft < position:
                    cr.execute('update ' + self._table + ' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                    cr.execute('update ' + self._table + ' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                    cr.execute('update ' + self._table + ' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position - pleft, position - pleft, pleft, pright))
                else:
                    cr.execute('update ' + self._table + ' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                    cr.execute('update ' + self._table + ' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                    cr.execute('update ' + self._table + ' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft - position + distance, pleft - position + distance, pleft + distance, pright + distance))

    # Recompute stored function fields triggered by this write, de-duplicating
    # (model, fields, id) combinations so each is computed only once.
    result += self._store_get_values(cr, user, ids, vals.keys(), context)
    result.sort()

    done = {}
    for order, object, ids_to_update, fields_to_recompute in result:
        key = (object, tuple(fields_to_recompute))
        done.setdefault(key, {})
        # avoid to do several times the same computation
        todo = []
        for id in ids_to_update:
            if id not in done[key]:
                done[key][id] = True
                todo.append(id)
        self.pool.get(object)._store_set_values(cr, user, todo, fields_to_recompute, context)

    # Notify workflows of the write on each record.
    wf_service = netsvc.LocalService("workflow")
    for id in ids:
        wf_service.trg_write(user, self._name, id, cr)
    return True
#
# TODO: Should set perm to user.xxx
#
def create(self, cr, user, vals, context=None):
    """
    Create a new record for the model.

    The values for the new record are initialized using the ``vals``
    argument, and if necessary the result of ``default_get()``.

    :param cr: database cursor
    :param user: current user id
    :type user: integer
    :param vals: field values for new record, e.g {'field_name': field_value, ...}
    :type vals: dictionary
    :param context: optional context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
    :type context: dictionary
    :return: id of new record created
    :raise AccessError: * if user has no create rights on the requested object
                        * if user tries to bypass access rules for create on the requested object
    :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
    :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)

    **Note**: The type of field values to pass in ``vals`` for relationship fields is specific.
    Please see the description of the :py:meth:`~osv.osv.osv.write` method for details about the possible values and how
    to specify them.

    """
    if not context:
        context = {}

    # Transient models get garbage-collected on each create.
    if self.is_transient():
        self._transient_vacuum(cr, user)

    self.check_create(cr, user)

    vals = self._add_missing_default_values(cr, user, vals, context)

    # For each _inherits parent: either create a fresh parent record, or
    # reuse the one whose id was passed through the inherits link field.
    tocreate = {}
    for v in self._inherits:
        if self._inherits[v] not in vals:
            tocreate[v] = {}
        else:
            tocreate[v] = {'id': vals[self._inherits[v]]}
    (upd0, upd1, upd2) = ('', '', [])
    upd_todo = []
    unknown_fields = []
    for v in vals.keys():
        if v in self._inherit_fields and v not in self._columns:
            (table, col, col_detail, original_parent) = self._inherit_fields[v]
            tocreate[table][v] = vals[v]
            del vals[v]
        else:
            if (v not in self._inherit_fields) and (v not in self._columns):
                del vals[v]
                unknown_fields.append(v)
    if unknown_fields:
        _logger.warning(
            'No such field(s) in model %s: %s.',
            self._name, ', '.join(unknown_fields))

    # Try-except added to filter the creation of those records whose fields are readonly.
    # Example : any dashboard which has all the fields readonly.(due to Views(database views))
    try:
        cr.execute("SELECT nextval('" + self._sequence + "')")
    except:
        raise except_orm(_('UserError'),
            _('You cannot perform this operation. New Record Creation is not allowed for this object as this object is for reporting purpose.'))

    id_new = cr.fetchone()[0]
    for table in tocreate:
        if self._inherits[table] in vals:
            del vals[self._inherits[table]]

        record_id = tocreate[table].pop('id', None)

        if record_id is None or not record_id:
            record_id = self.pool.get(table).create(cr, user, tocreate[table], context=context)
        else:
            self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=context)

        # accumulate the inherits link column in the INSERT fragments
        upd0 += ',' + self._inherits[table]
        upd1 += ',%s'
        upd2.append(record_id)

    #Start : Set bool fields to be False if they are not touched(to make search more powerful)
    bool_fields = [x for x in self._columns.keys() if self._columns[x]._type == 'boolean']

    for bool_field in bool_fields:
        if bool_field not in vals:
            vals[bool_field] = False
    #End

    # Drop fields the user may not write (per-field 'write' groups).
    for field in vals.copy():
        fobj = None
        if field in self._columns:
            fobj = self._columns[field]
        else:
            fobj = self._inherit_fields[field][2]
        if not fobj:
            continue
        groups = fobj.write
        if groups:
            edit = False
            for group in groups:
                module = group.split(".")[0]
                grp = group.split(".")[1]
                # NOTE(review): string-interpolated SQL here, unlike the
                # parameterized equivalent in write(); group/module names come
                # from field definitions, but the parameterized form would be
                # safer — confirm before changing.
                cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
                           (grp, module, 'res.groups', user))
                readonly = cr.fetchall()
                if readonly[0][0] >= 1:
                    edit = True
                    break
                elif readonly[0][0] == 0:
                    edit = False
                else:
                    edit = False

            if not edit:
                vals.pop(field)
    # Build the INSERT: classic columns go straight to SQL, the rest are
    # deferred to the fields' set() methods after the row exists.
    for field in vals:
        if self._columns[field]._classic_write:
            upd0 = upd0 + ',"' + field + '"'
            upd1 = upd1 + ',' + self._columns[field]._symbol_set[0]
            upd2.append(self._columns[field]._symbol_set[1](vals[field]))
        else:
            if not isinstance(self._columns[field], fields.related):
                upd_todo.append(field)
        if field in self._columns \
                and hasattr(self._columns[field], 'selection') \
                and vals[field]:
            self._check_selection_field_value(cr, user, field, vals[field], context=context)
    if self._log_access:
        upd0 += ',create_uid,create_date'
        upd1 += ',%s,now()'
        upd2.append(user)
    cr.execute('insert into "' + self._table + '" (id' + upd0 + ") values (" + str(id_new) + upd1 + ')', tuple(upd2))
    self.check_access_rule(cr, user, [id_new], 'create', context=context)
    upd_todo.sort(lambda x, y: self._columns[x].priority - self._columns[y].priority)

    # Insert the new node into the nested-set (parent_left/parent_right)
    # structure, unless deferred by context or by registry initialization.
    if self._parent_store and not context.get('defer_parent_store_computation'):
        if self.pool._init:
            self.pool._init_parent[self._name] = True
        else:
            parent = vals.get(self._parent_name, False)
            if parent:
                cr.execute('select parent_right from ' + self._table + ' where ' + self._parent_name + '=%s order by ' + (self._parent_order or self._order), (parent,))
                pleft_old = None
                result_p = cr.fetchall()
                for (pleft,) in result_p:
                    if not pleft:
                        break
                    pleft_old = pleft
                if not pleft_old:
                    cr.execute('select parent_left from ' + self._table + ' where id=%s', (parent,))
                    pleft_old = cr.fetchone()[0]
                pleft = pleft_old
            else:
                cr.execute('select max(parent_right) from ' + self._table)
                pleft = cr.fetchone()[0] or 0
            # open a gap of width 2 and place the new leaf inside it
            cr.execute('update ' + self._table + ' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
            cr.execute('update ' + self._table + ' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
            cr.execute('update ' + self._table + ' set parent_left=%s,parent_right=%s where id=%s', (pleft + 1, pleft + 2, id_new))

    # default element in context must be remove when call a one2many or many2many
    rel_context = context.copy()
    for c in context.items():
        if c[0].startswith('default_'):
            del rel_context[c[0]]

    result = []
    for field in upd_todo:
        result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
    self._validate(cr, user, [id_new], context)

    # Recompute stored function fields triggered by the creation.
    if not context.get('no_store_function', False):
        result += self._store_get_values(cr, user, [id_new], vals.keys(), context)
        result.sort()
        done = []
        for order, object, ids, fields2 in result:
            if not (object, ids, fields2) in done:
                self.pool.get(object)._store_set_values(cr, user, ids, fields2, context)
                done.append((object, ids, fields2))

    # Optional audit-log message ("<description> '<name>' created.").
    if self._log_create and not (context and context.get('no_store_function', False)):
        message = self._description + \
            " '" + \
            self.name_get(cr, user, [id_new], context=context)[0][1] + \
            "' " + _("created.")
        self.log(cr, user, id_new, message, True, context=context)
    # Notify workflows of the new record.
    wf_service = netsvc.LocalService("workflow")
    wf_service.trg_create(user, self._name, id_new, cr)
    return id_new
2011-08-12 14:28:14 +00:00
def browse(self, cr, uid, select, context=None, list_class=None, fields_process=None):
    """Fetch records as objects, allowing dot-notation traversal of fields and relations.

    :param cr: database cursor
    :param uid: current user id
    :param select: a single id or a list of ids
    :param context: context arguments, like lang, time zone
    :rtype: a single browse object, a list of browse objects, or a null object
    """
    self._list_class = list_class or browse_record_list
    cache = {}

    def _make_record(res_id):
        # the shared ``cache`` lets all records created by this call reuse
        # already-fetched field values
        return browse_record(cr, uid, res_id, self, cache, context=context,
                             list_class=self._list_class,
                             fields_process=fields_process)

    # ids coming from a button press in the client may arrive as longs,
    # hence the (int, long) pair
    if isinstance(select, (int, long)):
        return _make_record(select)
    if isinstance(select, list):
        return self._list_class([_make_record(res_id) for res_id in select],
                                context=context)
    return browse_null()
2008-12-13 06:01:18 +00:00
def _store_get_values(self, cr, uid, ids, fields, context):
    """Returns an ordered list of fields.functions to call due to
       an update operation on ``fields`` of records with ``ids``,
       obtained by calling the 'store' functions of these fields,
       as setup by their 'store' attribute.

    :param fields: list of field names that were written to (may be None)
    :return: [(priority, model_name, [record_ids,], [function_fields,])]
    """
    if fields is None: fields = []
    # registry of stored function fields triggered by writes on this model
    stored_functions = self.pool._store_function.get(self._name, [])

    # use indexed names for the details of the stored_functions:
    model_name_, func_field_to_compute_, id_mapping_fnct_, trigger_fields_, priority_ = range(5)

    # only keep functions that should be triggered for the ``fields``
    # being written to (an empty trigger_fields set means "always trigger").
    to_compute = [f for f in stored_functions \
            if ((not f[trigger_fields_]) or set(fields).intersection(f[trigger_fields_]))]

    mapping = {}
    for function in to_compute:
        # use admin user for accessing objects having rules defined on store fields
        # (falsy ids returned by the mapping function are discarded)
        target_ids = [id for id in function[id_mapping_fnct_](self, cr, SUPERUSER_ID, ids, context) if id]

        # the compound key must consider the priority and model name
        key = (function[priority_], function[model_name_])
        for target_id in target_ids:
            mapping.setdefault(key, {}).setdefault(target_id, set()).add(tuple(function))

    # Here mapping looks like:
    # { (10, 'model_a') : { target_id1: [ (function_1_tuple, function_2_tuple) ], ... }
    #   (20, 'model_a') : { target_id2: [ (function_3_tuple, function_4_tuple) ], ... }
    #   (99, 'model_a') : { target_id1: [ (function_5_tuple, function_6_tuple) ], ... }
    # }
    # Now we need to generate the batch function calls list
    # call_map =
    #   { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
    call_map = {}
    for ((priority, model), id_map) in mapping.iteritems():
        functions_ids_maps = {}
        # function_ids_maps =
        #   { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
        for id, functions in id_map.iteritems():
            functions_ids_maps.setdefault(tuple(functions), []).append(id)
        for functions, ids in functions_ids_maps.iteritems():
            call_map.setdefault((priority, model), []).append((priority, model, ids,
                                                              [f[func_field_to_compute_] for f in functions]))
    # flatten the per-key batches into one list, sorted by (priority, model)
    ordered_keys = call_map.keys()
    ordered_keys.sort()
    result = []
    if ordered_keys:
        result = reduce(operator.add, (call_map[k] for k in ordered_keys))
    return result
2008-12-13 06:01:18 +00:00
def _store_set_values(self, cr, uid, ids, fields, context):
    """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
       respecting ``multi`` attributes), and stores the resulting values in the database directly.

    :param fields: list of function-field names to recompute
    :return: True
    """
    if not ids:
        return True
    # field_dict maps record id -> fields whose recomputation should be
    # skipped because the record was written to too recently (see the
    # per-function "hours" delay stored in slot 5 of _store_function entries)
    field_flag = False
    field_dict = {}
    if self._log_access:
        cr.execute('select id,write_date from ' + self._table + ' where id IN %s', (tuple(ids),))
        res = cr.fetchall()
        for r in res:
            if r[1]:
                field_dict.setdefault(r[0], [])
                res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
                write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
                for i in self.pool._store_function.get(self._name, []):
                    if i[5]:
                        up_write_date = write_date + datetime.timedelta(hours=i[5])
                        if datetime.datetime.now() < up_write_date:
                            if i[1] in fields:
                                field_dict[r[0]].append(i[1])
                                if not field_flag:
                                    field_flag = True
    # group the fields by their ``multi`` attribute, so fields computed by a
    # single function call are fetched together; ``keys`` preserves order
    todo = {}
    keys = []
    for f in fields:
        if self._columns[f]._multi not in keys:
            keys.append(self._columns[f]._multi)
        todo.setdefault(self._columns[f]._multi, [])
        todo[self._columns[f]._multi].append(f)
    for key in keys:
        val = todo[key]
        if key:
            # multi-field case: one .get() call returns a dict of values for all fields of the group
            # use admin user for accessing objects having rules defined on store fields
            result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
            for id, value in result.items():
                if field_flag:
                    # drop values for fields whose recomputation is delayed
                    for f in value.keys():
                        if f in field_dict[id]:
                            value.pop(f)
                upd0 = []
                upd1 = []
                for v in value:
                    if v not in val:
                        continue
                    if self._columns[v]._type in ('many2one', 'one2one'):
                        try:
                            # m2o values may come back as (id, name) pairs; keep the id
                            value[v] = value[v][0]
                        except:
                            pass
                    upd0.append('"' + v + '"=' + self._columns[v]._symbol_set[0])
                    upd1.append(self._columns[v]._symbol_set[1](value[v]))
                upd1.append(id)
                if upd0 and upd1:
                    cr.execute('update "' + self._table + '" set ' + \
                        ','.join(upd0) + ' where id = %s', upd1)
        else:
            # single-field case: call .get() separately for each field
            for f in val:
                # use admin user for accessing objects having rules defined on store fields
                result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
                for r in result.keys():
                    if field_flag:
                        if r in field_dict.keys():
                            if f in field_dict[r]:
                                # recomputation delayed for this record/field
                                result.pop(r)
                for id, value in result.items():
                    if self._columns[f]._type in ('many2one', 'one2one'):
                        try:
                            # m2o values may come back as (id, name) pairs; keep the id
                            value = value[0]
                        except:
                            pass
                    cr.execute('update "' + self._table + '" set ' + \
                        '"' + f + '"=' + self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
    return True
#
# TODO: Validate
#
def perm_write(self, cr, user, ids, fields, context=None):
    """Removed operation: always raises ``NotImplementedError``.

    Kept as a stub so that legacy callers fail with an explicit message
    rather than an ``AttributeError``.
    """
    raise NotImplementedError(_('This method does not exist anymore'))
2008-07-22 14:24:36 +00:00
# TODO: improve handling of NULL values
def _where_calc(self, cr, user, domain, active_test=True, context=None):
    """Computes the WHERE clause needed to implement an OpenERP domain.

    :param domain: the domain to compute
    :type domain: list
    :param active_test: whether the default filtering of records with ``active``
                        field set to ``False`` should be applied.
    :return: the query expressing the given domain as provided in domain
    :rtype: osv.query.Query
    """
    context = context or {}
    # work on a copy: the domain may be extended below
    domain = list(domain)

    # if the object has a field named 'active', filter out all inactive
    # records unless they were explicitely asked for
    if 'active' in self._columns and active_test and context.get('active_test', True):
        if domain:
            if not any(term[0] == 'active' for term in domain):
                domain.insert(0, ('active', '=', 1))
        else:
            domain = [('active', '=', 1)]

    if not domain:
        # no constraints at all: query over the bare table
        return Query(['"%s"' % self._table], [], [])

    e = expression.expression(cr, user, domain, self, context)
    tables = e.get_tables()
    where_clause, where_params = e.to_sql()
    clauses = [where_clause] if where_clause else []
    return Query(tables, clauses, where_params)
2008-07-22 14:24:36 +00:00
def _check_qorder(self, word):
    """Validate ``word`` against the accepted ORDER BY grammar.

    :raise except_orm: if ``word`` is not a comma-separated list of field
                       names with optional asc/desc direction
    :return: True when the specification is valid
    """
    if regex_order.match(word):
        return True
    raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
2010-09-30 13:24:03 +00:00
def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
    """Add what's missing in ``query`` to implement all appropriate ir.rules
      (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)

    :param query: the current query object (mutated in place)
    :param mode: the access mode the rules are fetched for ('read' by default)
    """
    def apply_rule(added_clause, added_params, added_tables, parent_model=None, child_object=None):
        # merge one rule's (clause, params, tables) triple into ``query``;
        # returns True when something was actually added
        if added_clause:
            if parent_model and child_object:
                # as inherited rules are being applied, we need to add the missing JOIN
                # to reach the parent table (if it was not JOINed yet in the query)
                child_object._inherits_join_add(child_object, parent_model, query)
            query.where_clause += added_clause
            query.where_clause_params += added_params
            for table in added_tables:
                if table not in query.tables:
                    query.tables.append(table)
            return True
        return False

    # apply main rules on the object
    rule_obj = self.pool.get('ir.rule')
    apply_rule(*rule_obj.domain_get(cr, uid, self._name, mode, context=context))

    # apply ir.rules from the parents (through _inherits)
    for inherited_model in self._inherits:
        kwargs = dict(parent_model=inherited_model, child_object=self) #workaround for python2.5
        apply_rule(*rule_obj.domain_get(cr, uid, inherited_model, mode, context=context), **kwargs)
2010-04-02 13:54:12 +00:00
2010-09-30 13:24:03 +00:00
def _generate_m2o_order_by(self, order_field, query):
    """
    Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
    either native m2o fields or function/related fields that are stored, including
    intermediate JOINs for inheritance if required.

    :param order_field: name of the many2one field to sort by
    :param query: query object, extended in place with the required JOINs
    :return: the qualified field name(s) to use in an ORDER BY clause to sort by ``order_field``
             (a list when the destination model's _order has several parts),
             or None when the field cannot be used for sorting
    """
    if order_field not in self._columns and order_field in self._inherit_fields:
        # also add missing joins for reaching the table containing the m2o field
        qualified_field = self._inherits_join_calc(order_field, query)
        order_field_column = self._inherit_fields[order_field][2]
    else:
        qualified_field = '"%s"."%s"' % (self._table, order_field)
        order_field_column = self._columns[order_field]

    assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
    if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
        # non-stored function/related m2o: no DB column to sort on
        _logger.debug("Many2one function/related fields must be stored " \
            "to be used as ordering fields! Ignoring sorting for %s.%s",
            self._name, order_field)
        return

    # figure out the applicable order_by for the m2o
    dest_model = self.pool.get(order_field_column._obj)
    m2o_order = dest_model._order
    if not regex_order.match(m2o_order):
        # _order is complex, can't use it here, so we default to _rec_name
        m2o_order = dest_model._rec_name
    else:
        # extract the field names, to be able to qualify them and add desc/asc
        m2o_order_list = []
        for order_part in m2o_order.split(","):
            m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
        m2o_order = m2o_order_list

    # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
    # as we don't want to exclude results that have NULL values for the m2o
    src_table, src_field = qualified_field.replace('"', '').split('.', 1)
    query.join((src_table, dest_model._table, src_field, 'id'), outer=True)
    qualify = lambda field: '"%s"."%s"' % (dest_model._table, field)
    return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
2010-09-17 14:48:07 +00:00
2010-09-30 13:24:03 +00:00
def _generate_order_by(self, order_spec, query):
    """
    Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
    a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.

    :param query: query object, extended in place with any JOIN needed by m2o/inherited sort fields
    :raise: except_orm in case order_spec is malformed
    :return: a ' ORDER BY ...' SQL fragment (leading space included), or '' when nothing to sort on
    """
    # fall back on the model's default _order when no spec is given
    order_by_clause = self._order
    if order_spec:
        order_by_elements = []
        self._check_qorder(order_spec)
        for order_part in order_spec.split(','):
            order_split = order_part.strip().split(' ')
            order_field = order_split[0].strip()
            order_direction = order_split[1].strip() if len(order_split) == 2 else ''
            inner_clause = None
            if order_field == 'id':
                order_by_clause = '"%s"."%s"' % (self._table, order_field)
            elif order_field in self._columns:
                order_column = self._columns[order_field]
                if order_column._classic_read:
                    # plain stored column: qualify with our own table
                    inner_clause = '"%s"."%s"' % (self._table, order_field)
                elif order_column._type == 'many2one':
                    # may return a list of qualified fields (dest model _order)
                    inner_clause = self._generate_m2o_order_by(order_field, query)
                else:
                    continue # ignore non-readable or "non-joinable" fields
            elif order_field in self._inherit_fields:
                parent_obj = self.pool.get(self._inherit_fields[order_field][3])
                order_column = parent_obj._columns[order_field]
                if order_column._classic_read:
                    # stored column on an _inherits parent: add the JOIN to reach it
                    inner_clause = self._inherits_join_calc(order_field, query)
                elif order_column._type == 'many2one':
                    inner_clause = self._generate_m2o_order_by(order_field, query)
                else:
                    continue # ignore non-readable or "non-joinable" fields
            if inner_clause:
                if isinstance(inner_clause, list):
                    for clause in inner_clause:
                        order_by_elements.append("%s %s" % (clause, order_direction))
                else:
                    order_by_elements.append("%s %s" % (inner_clause, order_direction))
        if order_by_elements:
            order_by_clause = ",".join(order_by_elements)

    return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
2009-12-22 21:20:36 +00:00
2010-09-01 15:08:33 +00:00
def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
    """
    Private implementation of search() method, allowing specifying the uid to use for the access right check.
    This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
    by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
    This is ok at the security level because this method is private and not callable through XML-RPC.

    :param access_rights_uid: optional user ID to use when checking access rights
                              (not for ir.rules, this is only for ir.model.access)
    :return: count of matching records when ``count`` is True, else list of matching ids
    """
    if context is None:
        context = {}
    # model-level access check (ir.model.access); ir.rules are applied below
    self.check_read(cr, access_rights_uid or user)

    # For transient models, restrict access to the current user, except for the super-user
    if self.is_transient() and self._log_access and user != SUPERUSER_ID:
        args = expression.AND(([('create_uid', '=', user)], args or []))

    query = self._where_calc(cr, user, args, context=context)
    self._apply_ir_rules(cr, user, query, 'read', context=context)
    order_by = self._generate_order_by(order, query)
    from_clause, where_clause, where_clause_params = query.get_sql()

    limit_str = limit and ' limit %d' % limit or ''
    offset_str = offset and ' offset %d' % offset or ''
    where_str = where_clause and (" WHERE %s" % where_clause) or ''

    if count:
        # the count query skips ordering; limit/offset are still honored if given
        cr.execute('SELECT count("%s".id) FROM ' % self._table + from_clause + where_str + limit_str + offset_str, where_clause_params)
        res = cr.fetchall()
        return res[0][0]
    cr.execute('SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str, where_clause_params)
    res = cr.fetchall()
    return [x[0] for x in res]
def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
    """Return the distinct values ever entered for one field.

    Used, for example, by the client when the user hits enter on a char field.
    Delegates to the model actually owning ``field`` when it comes from an
    ``_inherits`` parent.
    """
    args = args or []
    if field in self._inherit_fields:
        # field lives on an _inherits parent: let the owning model answer
        parent_model = self.pool.get(self._inherit_fields[field][0])
        return parent_model.distinct_field_get(cr, uid, field, value, args, offset, limit)
    return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
2009-02-04 13:05:37 +00:00
def copy_data(self, cr, uid, id, default=None, context=None):
    """
    Copy given record's data with all its fields values

    :param cr: database cursor
    :param user: current user id
    :param id: id of the record to copy
    :param default: field values to override in the original values of the copied record
    :type default: dictionary
    :param context: context arguments, like lang, time zone
    :type context: dictionary
    :return: dictionary containing all the field values, or None when this
             record was already copied in the current call chain
    """

    if context is None:
        context = {}

    # avoid recursion through already copied records in case of circular relationship
    seen_map = context.setdefault('__copy_data_seen', {})
    if id in seen_map.setdefault(self._name, []):
        return
    seen_map[self._name].append(id)

    if default is None:
        default = {}
    if 'state' not in default:
        if 'state' in self._defaults:
            # 'state' gets its default value rather than the source record's,
            # so the copy starts its own workflow from scratch
            if callable(self._defaults['state']):
                default['state'] = self._defaults['state'](self, cr, uid, context)
            else:
                default['state'] = self._defaults['state']

    # read source values without lang so translatable fields come back raw;
    # translations are handled separately by copy_translations()
    context_wo_lang = context.copy()
    if 'lang' in context:
        del context_wo_lang['lang']
    data = self.read(cr, uid, [id,], context=context_wo_lang)
    if data:
        data = data[0]
    else:
        raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))

    # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
    fields = self.fields_get(cr, uid, context=context)
    for f in fields:
        ftype = fields[f]['type']

        if self._log_access and f in LOG_ACCESS_COLUMNS:
            # create/write audit columns are regenerated, never copied
            del data[f]

        if f in default:
            data[f] = default[f]
        elif 'function' in fields[f]:
            # function fields are recomputed, not copied
            del data[f]
        elif ftype == 'many2one':
            try:
                # read() returns (id, name) pairs for m2o; keep only the id
                data[f] = data[f] and data[f][0]
            except:
                pass
        elif ftype in ('one2many', 'one2one'):
            res = []
            rel = self.pool.get(fields[f]['relation'])
            if data[f]:
                # duplicate following the order of the ids
                # because we'll rely on it later for copying
                # translations in copy_translation()!
                data[f].sort()
                for rel_id in data[f]:
                    # the lines are first duplicated using the wrong (old)
                    # parent but then are reassigned to the correct one thanks
                    # to the (0, 0, ...)
                    d = rel.copy_data(cr, uid, rel_id, context=context)
                    if d:
                        res.append((0, 0, d))
            data[f] = res
        elif ftype == 'many2many':
            # link the copy to the same set of related records
            data[f] = [(6, 0, data[f])]

    del data['id']

    # make sure we don't break the current parent_store structure and
    # force a clean recompute!
    for parent_column in ['parent_left', 'parent_right']:
        data.pop(parent_column, None)

    # Remove _inherits field's from data recursively, missing parents will
    # be created by create() (so that copy() copy everything).
    def remove_ids(inherits_dict):
        for parent_table in inherits_dict:
            del data[inherits_dict[parent_table]]
            remove_ids(self.pool.get(parent_table)._inherits)
    remove_ids(self._inherits)
    return data
def copy_translations(self, cr, uid, old_id, new_id, context=None):
    """Duplicate the ir.translation entries of record ``old_id`` onto ``new_id``,
    recursing into one2one/one2many children (relying on the child ordering
    established by copy_data()).
    """
    if context is None:
        context = {}

    # avoid recursion through already copied records in case of circular relationship
    seen_map = context.setdefault('__copy_translations_seen', {})
    if old_id in seen_map.setdefault(self._name, []):
        return
    seen_map[self._name].append(old_id)

    trans_obj = self.pool.get('ir.translation')
    # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
    fields = self.fields_get(cr, uid, context=context)

    translation_records = []
    for field_name, field_def in fields.items():
        # we must recursively copy the translations for o2o and o2m
        if field_def['type'] in ('one2one', 'one2many'):
            target_obj = self.pool.get(field_def['relation'])
            old_record, new_record = self.read(cr, uid, [old_id, new_id], [field_name], context=context)
            # here we rely on the order of the ids to match the translations
            # as foreseen in copy_data()
            old_children = sorted(old_record[field_name])
            new_children = sorted(new_record[field_name])
            for (old_child, new_child) in zip(old_children, new_children):
                target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
        # and for translatable fields we keep them for copy
        elif field_def.get('translate'):
            trans_name = ''
            if field_name in self._columns:
                trans_name = self._name + "," + field_name
            elif field_name in self._inherit_fields:
                # translation entries of inherited fields are keyed on the parent model
                trans_name = self._inherit_fields[field_name][0] + "," + field_name
            if trans_name:
                trans_ids = trans_obj.search(cr, uid, [
                        ('name', '=', trans_name),
                        ('res_id', '=', old_id)
                ])
                translation_records.extend(trans_obj.read(cr, uid, trans_ids, context=context))

    # re-create the collected translations, now pointing at the new record
    for record in translation_records:
        del record['id']
        record['res_id'] = new_id
        trans_obj.create(cr, uid, record, context=context)
2008-10-16 11:54:41 +00:00
2009-02-04 13:05:37 +00:00
def copy(self, cr, uid, id, default=None, context=None):
    """
    Duplicate record with given id updating it with default values

    :param cr: database cursor
    :param uid: current user id
    :param id: id of the record to copy
    :param default: dictionary of field values to override in the original values of the copied record, e.g: ``{'field_name': overriden_value, ...}``
    :type default: dictionary
    :param context: context arguments, like lang, time zone
    :type context: dictionary
    :return: id of the newly created record
    """
    # private copy: copy_data/copy_translations track visited records in it
    context = dict(context or {})
    values = self.copy_data(cr, uid, id, default, context)
    new_id = self.create(cr, uid, values, context)
    self.copy_translations(cr, uid, id, new_id, context)
    return new_id
2008-07-22 14:24:36 +00:00
2010-05-04 14:46:42 +00:00
def exists(self, cr, uid, ids, context=None):
    """Checks whether the given id or ids exist in this model,
    and return the list of ids that do. This is simple to use for
    a truth test on a browse_record::

        if record.exists():
            pass

    :param ids: id or list of ids to check for existence
    :type ids: int or [int]
    :return: the list of ids that currently exist, out of
             the given `ids`
    """
    # normalize a scalar id into a one-element list (longs come from XML-RPC)
    if type(ids) in (int, long):
        ids = [ids]
    query = 'SELECT id FROM "%s"' % (self._table)
    cr.execute(query + " WHERE ID IN %s", (tuple(ids),))
    return [row[0] for row in cr.fetchall()]
2009-06-10 11:15:35 +00:00
2010-12-09 10:57:33 +00:00
def check_recursion(self, cr, uid, ids, context=None, parent=None):
    """Deprecated public alias: warns, then delegates to :meth:`_check_recursion`."""
    # use lazy %-style logging arguments instead of eager string interpolation,
    # so the message is only formatted when the record is actually emitted
    _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!",
                    self._name)
    assert parent is None or parent in self._columns or parent in self._inherit_fields, \
        "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
    return self._check_recursion(cr, uid, ids, context, parent)
2010-11-16 15:04:35 +00:00
2010-12-09 10:57:33 +00:00
def _check_recursion ( self , cr , uid , ids , context = None , parent = None ) :
2010-04-02 13:54:12 +00:00
"""
2010-08-03 17:38:25 +00:00
Verifies that there is no loop in a hierarchical structure of records ,
by following the parent relationship using the * * parent * * field until a loop
is detected or until a top - level record is found .
2010-04-02 13:54:12 +00:00
: param cr : database cursor
: param uid : current user id
2010-08-03 17:38:25 +00:00
: param ids : list of ids of records to check
: param parent : optional parent field name ( default : ` ` self . _parent_name = parent_id ` ` )
: return : * * True * * if the operation can proceed safely , or * * False * * if an infinite loop is detected .
2010-04-02 13:54:12 +00:00
"""
2008-07-22 14:24:36 +00:00
if not parent :
parent = self . _parent_name
ids_parent = ids [ : ]
2010-06-15 13:27:22 +00:00
query = ' SELECT distinct " %s " FROM " %s " WHERE id IN %% s ' % ( parent , self . _table )
while ids_parent :
2008-07-22 14:24:36 +00:00
ids_parent2 = [ ]
2008-08-05 08:39:45 +00:00
for i in range ( 0 , len ( ids ) , cr . IN_MAX ) :
sub_ids_parent = ids_parent [ i : i + cr . IN_MAX ]
2010-06-15 13:27:22 +00:00
cr . execute ( query , ( tuple ( sub_ids_parent ) , ) )
2008-07-22 14:24:36 +00:00
ids_parent2 . extend ( filter ( None , map ( lambda x : x [ 0 ] , cr . fetchall ( ) ) ) )
ids_parent = ids_parent2
for i in ids_parent :
if i in ids :
return False
return True
2007-07-30 13:35:15 +00:00
2011-09-30 22:23:55 +00:00
def _get_external_ids ( self , cr , uid , ids , * args , * * kwargs ) :
""" Retrieve the External ID(s) of any database record.
2011-01-04 15:07:55 +00:00
* * Synopsis * * : ` ` _get_xml_ids ( cr , uid , ids ) - > { ' id ' : [ ' module.xml_id ' ] } ` `
2011-09-30 22:23:55 +00:00
: return : map of ids to the list of their fully qualified External IDs
in the form ` ` module . key ` ` , or an empty list when there ' s no External
ID for a record , e . g . : :
{ ' id ' : [ ' module.ext_id ' , ' module.ext_id_bis ' ] ,
' id2 ' : [ ] }
2011-01-04 15:07:55 +00:00
"""
2011-09-30 22:23:55 +00:00
ir_model_data = self . pool . get ( ' ir.model.data ' )
data_ids = ir_model_data . search ( cr , uid , [ ( ' model ' , ' = ' , self . _name ) , ( ' res_id ' , ' in ' , ids ) ] )
data_results = ir_model_data . read ( cr , uid , data_ids , [ ' module ' , ' name ' , ' res_id ' ] )
2011-01-04 15:07:55 +00:00
result = { }
for id in ids :
# can't use dict.fromkeys() as the list would be shared!
result [ id ] = [ ]
for record in data_results :
result [ record [ ' res_id ' ] ] . append ( ' %(module)s . %(name)s ' % record )
return result
2011-09-30 22:23:55 +00:00
def get_external_id(self, cr, uid, ids, *args, **kwargs):
    """Retrieve the External ID of any database record, if there
    is one. This method works as a possible implementation
    for a function field, to be able to add it to any
    model object easily, referencing it as ``Model.get_external_id``.

    When multiple External IDs exist for a record, only one
    of them is returned (randomly).

    :return: map of ids to their fully qualified XML ID,
             defaulting to an empty string when there's none
             (to be usable as a function field),
             e.g.::

                 { 'id': 'module.ext_id',
                   'id2': '' }
    """
    # Call the canonical implementation directly rather than going
    # through the backwards-compatibility alias _get_xml_ids, which
    # only exists for old callers of the pre-rename API.
    results = self._get_external_ids(cr, uid, ids)
    for k, v in results.items():
        if v:
            # Keep a single (arbitrary) External ID per record.
            results[k] = v[0]
        else:
            # No External ID: empty string, so the value is usable
            # as a function-field result.
            results[k] = ''
    return results
2010-05-26 23:04:09 +00:00
2011-09-30 22:23:55 +00:00
# Backwards compatibility: keep the pre-rename "xml id" method names
# as aliases so existing callers of the old API keep working.
get_xml_id = get_external_id
_get_xml_ids = _get_external_ids
2011-09-23 17:40:18 +00:00
# Transience
2011-08-16 09:57:55 +00:00
def is_transient(self):
    """Tell whether this model's records are transient
    (i.e. the model is a TransientModel -- see that class).
    """
    return self._transient
2011-09-23 17:40:18 +00:00
def _transient_clean_rows_older_than(self, cr, seconds):
    """Unlink all records of this transient model whose last
    modification (falling back to creation date) is older than
    ``seconds`` seconds.
    """
    assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
    query = ("SELECT id FROM " + self._table + " WHERE"
             " COALESCE(write_date, create_date, now())::timestamp <"
             " (now() - interval %s)")
    cr.execute(query, ("%s seconds" % seconds,))
    stale_ids = [row[0] for row in cr.fetchall()]
    # Unlink as super-user: vacuuming must remove every user's rows.
    self.unlink(cr, SUPERUSER_ID, stale_ids)
2011-09-23 12:45:25 +00:00
2011-09-23 17:40:18 +00:00
def _transient_clean_old_rows(self, cr, count):
    """Unlink the ``count`` oldest records of this transient model.

    NOTE(review): this deletes the *oldest* ``count`` rows, it does not
    trim the table down to ``count`` rows -- confirm this is the
    intended expiration policy.
    """
    assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
    query = ("SELECT id, COALESCE(write_date, create_date, now())::timestamp"
             " AS t FROM " + self._table +
             " ORDER BY t LIMIT %s")
    cr.execute(query, (count,))
    oldest_ids = [row[0] for row in cr.fetchall()]
    # Unlink as super-user: vacuuming must remove every user's rows.
    self.unlink(cr, SUPERUSER_ID, oldest_ids)
2011-09-23 12:45:25 +00:00
2011-09-23 17:40:18 +00:00
def _transient_vacuum ( self , cr , uid , force = False ) :
""" Clean the transient records.
2011-09-23 12:45:25 +00:00
This unlinks old records from the transient model tables whenever the
2011-09-23 17:40:18 +00:00
" _transient_max_count " or " _max_age " conditions ( if any ) are reached .
Actual cleaning will happen only once every " _transient_check_time " calls .
2011-09-23 12:45:25 +00:00
This means this method can be called frequently called ( e . g . whenever
a new record is created ) .
"""
2011-09-23 17:40:18 +00:00
assert self . _transient , " Model %s is not transient, it cannot be vacuumed! " % self . _name
self . _transient_check_count + = 1
if ( not force ) and ( self . _transient_check_count % self . _transient_check_time ) :
self . _transient_check_count = 0
2011-09-23 12:45:25 +00:00
return True
# Age-based expiration
2011-09-23 17:40:18 +00:00
if self . _transient_max_hours :
self . _transient_clean_rows_older_than ( cr , self . _transient_max_hours * 60 * 60 )
2011-09-23 12:45:25 +00:00
# Count-based expiration
2011-09-23 17:40:18 +00:00
if self . _transient_max_count :
self . _transient_clean_old_rows ( cr , self . _transient_max_count )
2011-09-23 12:45:25 +00:00
return True
2011-10-11 08:03:38 +00:00
def resolve_o2m_commands_to_record_dicts(self, cr, uid, field_name, o2m_commands, fields=None, context=None):
    """Serializes o2m commands into record dictionaries (as if
    all the o2m records came from the database via a read()), and
    returns an iterable over these dictionaries.

    Because o2m commands might be creation commands, not all
    record dicts will contain an ``id`` field. Commands matching an
    existing record (``UPDATE`` and ``LINK_TO``) will have an id.

    .. note:: ``CREATE``, ``UPDATE`` and ``LINK_TO`` stand for the
              o2m command codes ``0``, ``1`` and ``4`` respectively

    :param field_name: name of the o2m field matching the commands
    :type field_name: str
    :param o2m_commands: one2many commands to execute on ``field_name``
    :type o2m_commands: list((int|False, int|False, dict|False))
    :param fields: list of fields to read from the database, when applicable
    :type fields: list(str)
    :raises AssertionError: if a command is not ``CREATE``, ``UPDATE`` or ``LINK_TO``
    :returns: o2m records in a shape similar to that returned by
              ``read()`` (except records may be missing the ``id``
              field if they don't exist in db)
    :rtype: ``list(dict)``
    """
    o2m_model = self._all_columns[field_name].column._obj

    # Normalize bare ids, 1-tuples and pairs into full
    # (command, id, values) triples before processing.
    normalized = []
    for entry in o2m_commands:
        if not isinstance(entry, (list, tuple)):
            # A bare id is an implicit LINK_TO of that record.
            code = 4
            triple = (code, entry, False)
        elif len(entry) == 1:
            code = entry[0]
            triple = (code, False, False)
        elif len(entry) == 2:
            code = entry[0]
            triple = (code, entry[1], False)
        else:
            code = entry[0]
            triple = entry
        assert code in (0, 1, 4), \
            "Only CREATE, UPDATE and LINK_TO commands are supported in resolver"
        normalized.append(triple)

    # Fetch, in one read(), every db record referenced by an
    # UPDATE or LINK_TO command, keyed by id.
    ids_to_read = [rec_id for (code, rec_id, _) in normalized if code in (1, 4)]
    db_records = self.pool.get(o2m_model).read(
        cr, uid, ids_to_read, fields=fields, context=context)
    records_by_id = dict((rec['id'], rec) for rec in db_records)

    # Merge db values (if any) with the values carried by each command.
    result = []
    for code, rec_id, values in normalized:
        merged = {}
        if code in (1, 4):
            merged.update(records_by_id[rec_id])
        if code in (0, 1):
            merged.update(values)
        result.append(merged)
    return result
2011-10-10 13:51:57 +00:00
2011-09-24 02:53:46 +00:00
# keep this import here, at top it will cause dependency cycle errors
import expression
2011-09-23 17:40:18 +00:00
class Model(BaseModel):
    """Main super-class for regular database-persisted OpenERP models.

    OpenERP models are created by inheriting from this class::

        class user(Model):
            ...

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = False # True in a TransientModel
class TransientModel(BaseModel):
    """Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.

    A TransientModel has a simplified access rights management,
    all users can create new records, and may only access the
    records they created. The super-user has unrestricted access
    to all TransientModel records.
    """
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = True # enables the periodic vacuuming of old records
2011-09-23 12:45:25 +00:00
class AbstractModel(BaseModel):
    """Abstract Model super-class for creating an abstract class meant to be
    inherited by regular models (Models or TransientModels) but not meant to
    be usable on its own, or persisted.

    Technical note: we don't want to make AbstractModel the super-class of
    Model or BaseModel because it would not make sense to put the main
    definition of persistence methods such as create() in it, and still we
    should be able to override them within an AbstractModel.
    """
    _auto = False # don't create any database backend for AbstractModels
    _register = False # not visible in ORM registry, meant to be python-inherited only
2010-04-02 13:54:12 +00:00
2011-09-28 13:01:07 +00:00
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: