[MERGE] Merged with main server

bzr revid: tde@openerp.com-20120326091751-gl03qyd1lkuo0jq7
Thibault Delavallée 2012-03-26 11:17:51 +02:00
commit 9f041f4990
16 changed files with 679 additions and 364 deletions

View File

@ -1373,7 +1373,7 @@
<record id="INR" model="res.currency">
<field name="name">INR</field>
<field name="symbol">Rs</field>
<field name="symbol"></field>
<field name="rounding">0.01</field>
<field name="accuracy">4</field>
<field name="company_id" ref="main_company"/>

View File

@ -27,8 +27,10 @@
parent="base.menu_custom" name="Reporting" sequence="30"
/>
<menuitem id="base.menu_reporting" name="Reporting" parent="base.menu_administration" sequence="11"
groups="base.group_extended"/>
<menuitem id="base.menu_reporting" name="Reporting" sequence="45" groups="base.group_extended"/>
<menuitem id="base.menu_dasboard" name="Dashboards" sequence="0" parent="base.menu_reporting" groups="base.group_extended"/>
<menuitem id="menu_audit" name="Audit" parent="base.menu_reporting" sequence="50"/>
<menuitem id="base.menu_reporting_config" name="Configuration" parent="base.menu_reporting" sequence="100"/>
</data>
</openerp>

File diff suppressed because it is too large

View File

@ -22,6 +22,7 @@
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from email.Charset import Charset
from email.Header import Header
from email.Utils import formatdate, make_msgid, COMMASPACE
from email import Encoders
@ -97,6 +98,26 @@ def encode_header(header_text):
return header_text_ascii if header_text_ascii\
else Header(header_text_utf8, 'utf-8')
def encode_header_param(param_text):
"""Returns an appropriate RFC2047 encoded representation of the given
header parameter value, suitable for direct assignation as the
param value (e.g. via Message.set_param() or Message.add_header())
RFC2822 assumes that headers contain only 7-bit characters,
so we ensure it is the case, using RFC2047 encoding when needed.
:param param_text: unicode or utf-8 encoded string with header value
:rtype: string
:return: if ``param_text`` represents a plain ASCII string,
return the same 7-bit string, otherwise returns an
ASCII string containing the RFC2047 encoded text.
"""
# For details see the encode_header() method that uses the same logic
if not param_text: return ""
param_text_utf8 = tools.ustr(param_text).encode('utf-8')
param_text_ascii = try_coerce_ascii(param_text_utf8)
return param_text_ascii if param_text_ascii\
else Charset('utf8').header_encode(param_text_utf8)
name_with_email_pattern = re.compile(r'("[^<@>]+")\s*<([^ ,<@]+@[^> ,]+)>')
address_pattern = re.compile(r'([^ ,<@]+@[^> ,]+)')
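
A minimal standalone sketch of the RFC 2047 fallback that the new encode_header_param() relies on, assuming Python 2's email package; try_ascii() below is a hypothetical stand-in for the module's try_coerce_ascii() helper.

# Sketch only: plain ASCII values pass through unchanged, anything else
# becomes an RFC 2047 encoded-word such as =?utf-8?b?...?=
from email.Charset import Charset

def try_ascii(text):
    # return the value unchanged if it is pure ASCII, None otherwise
    try:
        return text.encode('ascii')
    except (UnicodeDecodeError, UnicodeEncodeError):
        return None

def encode_param(param_text):
    if not param_text:
        return ""
    utf8 = param_text.encode('utf-8') if isinstance(param_text, unicode) else param_text
    ascii_value = try_ascii(utf8)
    return ascii_value if ascii_value else Charset('utf-8').header_encode(utf8)

# encode_param(u'report.pdf')        -> 'report.pdf'
# encode_param(u'r\xe9sum\xe9.pdf')  -> an encoded-word like '=?utf-8?...?='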
@ -309,7 +330,7 @@ class ir_mail_server(osv.osv):
msg['Cc'] = encode_rfc2822_address_header(COMMASPACE.join(email_cc))
if email_bcc:
msg['Bcc'] = encode_rfc2822_address_header(COMMASPACE.join(email_bcc))
msg['Date'] = formatdate(localtime=True)
msg['Date'] = formatdate()
# Custom headers may override normal headers or provide additional ones
for key, value in headers.iteritems():
msg[ustr(key).encode('utf-8')] = encode_header(value)
@ -334,14 +355,16 @@ class ir_mail_server(osv.osv):
if attachments:
for (fname, fcontent) in attachments:
filename_utf8 = ustr(fname).encode('utf-8')
filename_rfc2047 = encode_header_param(fname)
part = MIMEBase('application', "octet-stream")
# The default RFC2231 encoding of Message.add_header() works in Thunderbird but not in Gmail,
# so we fix it by using RFC2047 encoding for the filename instead.
part.set_param('name', filename_rfc2047)
part.add_header('Content-Disposition', 'attachment', filename=filename_rfc2047)
part.set_payload(fcontent)
Encoders.encode_base64(part)
# Force RFC2231 encoding for attachment filename
# See email.message.Message.add_header doc
part.add_header('Content-Disposition', 'attachment',
filename=('utf-8',None,filename_utf8))
msg.attach(part)
return msg
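
For reference, a hedged standalone sketch (plain Python 2 email package; build_attachment() is a made-up helper name) of the RFC 2231 tuple form of add_header() that the new code switches to for non-ASCII attachment filenames:

# Sketch: attach a file whose name may contain non-ASCII characters,
# letting add_header() emit an RFC 2231 parameter such as
# filename*=utf-8''r%C3%A9sum%C3%A9.pdf
from email.MIMEBase import MIMEBase
from email import Encoders

def build_attachment(fname_unicode, fcontent):
    filename_utf8 = fname_unicode.encode('utf-8')
    part = MIMEBase('application', 'octet-stream')
    part.set_payload(fcontent)
    Encoders.encode_base64(part)
    # a (charset, language, value) tuple triggers RFC 2231 parameter encoding
    part.add_header('Content-Disposition', 'attachment',
                    filename=('utf-8', None, filename_utf8))
    return part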

View File

@ -404,7 +404,7 @@ class ir_values(osv.osv):
results[action['name']] = (action['id'], action['name'], action_def)
except except_orm, e:
continue
return results.values()
return sorted(results.values())
def _map_legacy_model_list(self, model_list, map_fn, merge_results=False):
"""Apply map_fn to the various models passed, according to

View File

@ -452,6 +452,7 @@ class module(osv.osv):
'sequence': terp.get('sequence', 100),
'application': terp.get('application', False),
'auto_install': terp.get('auto_install', False),
'icon': terp.get('icon', False),
}
# update the list of available packages

View File

@ -55,7 +55,7 @@ class ir_property(osv.osv):
'fields_id': fields.many2one('ir.model.fields', 'Field', ondelete='cascade', required=True, select=1),
'value_float' : fields.float('Value'),
'value_integer' : fields.integer_big('Value'), # will contain (int, bigint)
'value_integer' : fields.integer('Value'),
'value_text' : fields.text('Value'), # will contain (char, text)
'value_binary' : fields.binary('Value'),
'value_reference': fields.reference('Value', selection=_models_get2, size=128),
@ -65,7 +65,6 @@ class ir_property(osv.osv):
('float', 'Float'),
('boolean', 'Boolean'),
('integer', 'Integer'),
('integer_big', 'Integer Big'),
('text', 'Text'),
('binary', 'Binary'),
('many2one', 'Many2One'),
@ -100,7 +99,6 @@ class ir_property(osv.osv):
'float': 'value_float',
'boolean' : 'value_integer',
'integer': 'value_integer',
'integer_big': 'value_integer',
'text': 'value_text',
'binary': 'value_binary',
'many2one': 'value_reference',
@ -142,7 +140,7 @@ class ir_property(osv.osv):
return record.value_float
elif record.type == 'boolean':
return bool(record.value_integer)
elif record.type in ('integer', 'integer_big'):
elif record.type == 'integer':
return record.value_integer
elif record.type == 'binary':
return record.value_binary

View File

@ -31,7 +31,7 @@
<separator colspan="4" string="Field Information"/>
<field colspan="4" name="fields_id" select="1"/>
<field colspan="4" name="type"/>
<group colspan="4" attrs="{'invisible' : [('type', 'not in', ('integer', 'integer_big', 'boolean'))]}">
<group colspan="4" attrs="{'invisible' : [('type', 'not in', ('integer', 'boolean'))]}">
<field colspan="4" name="value_integer" widget="integer"/>
</group>
<group colspan="4" attrs="{'invisible' : [('type', '!=', 'float')]}">

View File

@ -346,21 +346,35 @@ class res_partner_address(osv.osv):
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args=[]
if not context:
context={}
if context.get('contact_display', 'contact')=='partner ' or context.get('contact_display', 'contact')=='partner_address ' :
ids = self.search(cr, user, [('partner_id',operator,name)], limit=limit, context=context)
args = []
if context is None:
context = {}
if not name:
ids = self.search(cr, user, args, limit=limit, context=context)
elif context.get('contact_display', 'contact') == 'partner':
ids = self.search(cr, user, [('partner_id', operator, name)] + args, limit=limit, context=context)
else:
if not name:
ids = self.search(cr, user, args, limit=limit, context=context)
# first look up the zip code, as it is a common and efficient way to search on this data
ids = self.search(cr, user, [('zip', '=', name)] + args, limit=limit, context=context)
# then search on other fields:
if context.get('contact_display', 'contact') == 'partner_address':
fields = ['partner_id', 'name', 'country_id', 'city', 'street']
else:
ids = self.search(cr, user, [('zip','=',name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('city',operator,name)] + args, limit=limit, context=context)
if name:
ids += self.search(cr, user, [('name',operator,name)] + args, limit=limit, context=context)
ids += self.search(cr, user, [('partner_id',operator,name)] + args, limit=limit, context=context)
fields = ['name', 'country_id', 'city', 'street']
# Here we have to search the records that satisfy the domain:
# OR([[(f, operator, name)] for f in fields]) + args
# Searching on such a domain can be dramatically inefficient, due to the expansion made
# for field translations, and the handling of the disjunction by the DB engine itself.
# So instead, we search field by field until the search limit is reached.
while len(ids) < limit and fields:
f = fields.pop(0)
new_ids = self.search(cr, user, [(f, operator, name)] + args, limit=limit, context=context)
# extend ids with the ones in new_ids that are not in ids yet (and keep order)
old_ids = set(ids)
ids.extend([id for id in new_ids if id not in old_ids])
ids = ids[:limit]
return self.name_get(cr, user, ids, context=context)
def get_city(self, cr, uid, id):
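
The comment above describes searching field by field instead of one big OR domain; here is a rough ORM-independent sketch of that pattern (search_one_field() is a hypothetical stand-in for self.search() with a single-field domain):

def incremental_search(search_one_field, fields, name, limit):
    # Query one field at a time and stop as soon as the limit is reached,
    # instead of handing the DB a large disjunction over translated fields.
    ids = []
    while len(ids) < limit and fields:
        f = fields.pop(0)
        new_ids = search_one_field(f, name, limit)
        seen = set(ids)
        # keep ordering, skip ids already collected
        ids.extend(i for i in new_ids if i not in seen)
    return ids[:limit]

# rows = {1: {'zip': '1000', 'city': 'Brussels'}, 2: {'zip': '2000', 'city': 'Brussels'}}
# lookup = lambda f, value, limit: sorted(i for i, r in rows.items() if r[f] == value)[:limit]
# incremental_search(lookup, ['zip', 'city'], 'Brussels', 10)  -> [1, 2]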

View File

@ -6,7 +6,11 @@
web_icon_hover="data/sales-hover.png"
groups="base.group_sale_salesman"/>
<menuitem id="menu_address_book" name="Address Book" parent="menu_base_partner" sequence="2"/>
<menuitem id="base.menu_sales" name="Sales"
parent="base.menu_base_partner" sequence="1"
/>
<!-- <menuitem id="menu_address_book" name="Address Book" parent="menu_base_partner" sequence="2"/> -->
<menuitem id="menu_base_config" name="Configuration" parent="menu_base_partner" sequence="30"
groups="group_system"/>
@ -175,7 +179,7 @@
</kanban>
</field>
</record>
<record id="action_partner_address_form" model="ir.actions.act_window">
<field name="name">Addresses</field>
<field name="type">ir.actions.act_window</field>
@ -198,9 +202,6 @@
<field name="view_id" ref="view_partner_address_form1"/>
<field name="act_window_id" ref="action_partner_address_form"/>
</record>
<menuitem action="action_partner_address_form" id="menu_partner_address_form"
groups="base.group_extended" name="Contacts"
parent="base.menu_address_book" sequence="30"/>
<!--
=========================================
@ -524,8 +525,8 @@
<menuitem
action="action_partner_form"
id="menu_partner_form"
parent="base.menu_address_book"
sequence="2"/>
parent="base.menu_sales"
sequence="8"/>
<record id="action_partner_customer_form" model="ir.actions.act_window">
<field name="name">Customers</field>

View File

@ -35,8 +35,8 @@
<field name="name">user rule</field>
<field model="ir.model" name="model_id" ref="model_res_users"/>
<field eval="True" name="global"/>
<field name="domain_force">['|',('company_id.child_ids','child_of',[user.company_id.id]),('company_id','child_of',[user.company_id.id])]</field>
<field name="domain_force">[('company_ids','child_of',[user.company_id.id])]</field>
</record>
</data>
</data>
</openerp>

View File

@ -34,6 +34,9 @@ from service import security
from tools.translate import _
import openerp
import openerp.exceptions
from lxml import etree
from lxml.builder import E
# for avatar resizing
import io, StringIO
@ -783,29 +786,27 @@ class groups_view(osv.osv):
# and introduces the reified group fields
view = self.get_user_groups_view(cr, uid, context)
if view:
xml = u"""<?xml version="1.0" encoding="utf-8"?>
<!-- GENERATED AUTOMATICALLY BY GROUPS -->
<field name="groups_id" position="replace">
%s
%s
</field>
"""
xml1, xml2 = [], []
xml1.append('<separator string="%s" colspan="4"/>' % _('Applications'))
xml1.append(E.separator(string=_('Application'), colspan="4"))
for app, kind, gs in self.get_groups_by_application(cr, uid, context):
if kind == 'selection':
# application name with a selection field
field_name = name_selection_groups(map(int, gs))
xml1.append('<field name="%s"/>' % field_name)
xml1.append('<newline/>')
xml1.append(E.field(name=field_name))
xml1.append(E.newline())
else:
# application separator with boolean fields
app_name = app and app.name or _('Other')
xml2.append('<separator string="%s" colspan="4"/>' % app_name)
xml2.append(E.separator(string=app_name, colspan="4"))
for g in gs:
field_name = name_boolean_group(g.id)
xml2.append('<field name="%s"/>' % field_name)
view.write({'arch': xml % ('\n'.join(xml1), '\n'.join(xml2))})
xml2.append(E.field(name=field_name))
xml = E.field(*(xml1 + xml2), name="groups_id", position="replace")
xml.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS"))
xml_content = etree.tostring(xml, pretty_print=True, xml_declaration=True, encoding="utf-8")
view.write({'arch': xml_content})
return True
def get_user_groups_view(self, cr, uid, context=None):
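
A hedged standalone illustration of the lxml.builder approach the rewritten groups_view code uses (the arch element names mirror the generated view; the field names below are made up for the example):

from lxml import etree
from lxml.builder import E

# build the <field position="replace"> arch as a tree instead of string formatting
children = [E.separator(string="Applications", colspan="4"),
            E.field(name="sel_groups_1_2"),
            E.newline(),
            E.field(name="in_group_3")]
arch = E.field(*children, name="groups_id", position="replace")
arch.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS"))
print etree.tostring(arch, pretty_print=True, xml_declaration=True, encoding="utf-8")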

View File

@ -159,32 +159,6 @@ class integer(_column):
" `required` has no effect, as NULL values are "
"automatically turned into 0.")
class integer_big(_column):
"""Experimental 64 bit integer column type, currently unused.
TODO: this field should work fine for values up
to 32 bits, but greater values will not fit
in the XML-RPC int type, so a specific
get() method is needed to pass them as floats,
like what we do for integer functional fields.
"""
_type = 'integer_big'
# do not reference the _symbol_* of integer class, as that would possibly
# unbind the lambda functions
_symbol_c = '%s'
_symbol_f = lambda x: int(x or 0)
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self,x: x or 0
_deprecated = True
def __init__(self, string='unknown', required=False, **args):
super(integer_big, self).__init__(string=string, required=required, **args)
if required:
_logger.debug(
"required=True is deprecated: making an integer_big field"
" `required` has no effect, as NULL values are "
"automatically turned into 0.")
class reference(_column):
_type = 'reference'
_classic_read = False # post-process to handle missing target
@ -347,20 +321,6 @@ class datetime(_column):
exc_info=True)
return timestamp
class time(_column):
_type = 'time'
_deprecated = True
@staticmethod
def now( *args):
""" Returns the current time in a format fit for being a
default value to a ``time`` field.
This method should be provided as-is to the _defaults dict;
it should not be called.
"""
return DT.datetime.now().strftime(
tools.DEFAULT_SERVER_TIME_FORMAT)
class binary(_column):
_type = 'binary'
_symbol_c = '%s'
@ -426,34 +386,6 @@ class selection(_column):
# (4, ID) link
# (5) unlink all (only valid for one2many)
#
#CHECKME: in practice, what is the syntax used for 5? (5) or (5, 0)?
class one2one(_column):
_classic_read = False
_classic_write = True
_type = 'one2one'
_deprecated = True
def __init__(self, obj, string='unknown', **args):
_logger.warning("The one2one field is deprecated and doesn't work anymore.")
_column.__init__(self, string=string, **args)
self._obj = obj
def set(self, cr, obj_src, id, field, act, user=None, context=None):
if not context:
context = {}
obj = obj_src.pool.get(self._obj)
self._table = obj_src.pool.get(self._obj)._table
if act[0] == 0:
id_new = obj.create(cr, user, act[1])
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (id_new, id))
else:
cr.execute('select '+field+' from '+obj_src._table+' where id=%s', (act[0],))
id = cr.fetchone()[0]
obj.write(cr, user, [id], act[1], context=context)
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
return obj.pool.get(self._obj).search(cr, uid, args+self._domain+[('name', 'like', value)], offset, limit, context=context)
class many2one(_column):
_classic_read = False
@ -1079,7 +1011,7 @@ class function(_column):
self._symbol_f = boolean._symbol_f
self._symbol_set = boolean._symbol_set
if type in ['integer','integer_big']:
if type == 'integer':
self._symbol_c = integer._symbol_c
self._symbol_f = integer._symbol_f
self._symbol_set = integer._symbol_set
@ -1119,7 +1051,7 @@ class function(_column):
elif not context.get('bin_raw'):
result = sanitize_binary_value(value)
if field_type in ("integer","integer_big") and value > xmlrpclib.MAXINT:
if field_type == "integer" and value > xmlrpclib.MAXINT:
# integer/long values greater than 2^31-1 are not supported
# in pure XMLRPC, so we have to pass them as floats :-(
# This is not needed for stored fields and non-functional integer
@ -1588,7 +1520,7 @@ def field_to_dict(model, cr, user, field, context=None):
else:
# call the 'dynamic selection' function
res['selection'] = field.selection(model, cr, user, context)
if res['type'] in ('one2many', 'many2many', 'many2one', 'one2one'):
if res['type'] in ('one2many', 'many2many', 'many2one'):
res['relation'] = field._obj
res['domain'] = field._domain
res['context'] = field._context
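
The comment about xmlrpclib.MAXINT boils down to the following check; a minimal sketch assuming Python 2's xmlrpclib (xmlrpc_safe_int() is an illustrative name, not part of the codebase):

import xmlrpclib

def xmlrpc_safe_int(value):
    # xmlrpclib.MAXINT == 2**31 - 1; plain XML-RPC has no 64-bit integer type,
    # so larger values have to be degraded to floats before marshalling
    if value > xmlrpclib.MAXINT:
        return float(value)
    return value

# xmlrpclib.dumps((xmlrpc_safe_int(2**40),))  -> marshalled as <double>
# xmlrpclib.dumps((2**40,))                   -> raises OverflowError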

View File

@ -413,7 +413,7 @@ class browse_record(object):
for result_line in field_values:
new_data = {}
for field_name, field_column in fields_to_fetch:
if field_column._type in ('many2one', 'one2one'):
if field_column._type == 'many2one':
if result_line[field_name]:
obj = self._table.pool.get(field_column._obj)
if isinstance(result_line[field_name], (list, tuple)):
@ -544,10 +544,8 @@ def pg_varchar(size=0):
FIELDS_TO_PGTYPES = {
fields.boolean: 'bool',
fields.integer: 'int4',
fields.integer_big: 'int8',
fields.text: 'text',
fields.date: 'date',
fields.time: 'time',
fields.datetime: 'timestamp',
fields.binary: 'bytea',
fields.many2one: 'int4',
@ -1527,11 +1525,11 @@ class BaseModel(object):
for id, field, field_value in res:
if field in fields_list:
fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
if fld_def._type in ('many2one', 'one2one'):
if fld_def._type == 'many2one':
obj = self.pool.get(fld_def._obj)
if not obj.search(cr, uid, [('id', '=', field_value or False)]):
continue
if fld_def._type in ('many2many'):
if fld_def._type == 'many2many':
obj = self.pool.get(fld_def._obj)
field_value2 = []
for i in range(len(field_value)):
@ -1540,18 +1538,18 @@ class BaseModel(object):
continue
field_value2.append(field_value[i])
field_value = field_value2
if fld_def._type in ('one2many'):
if fld_def._type == 'one2many':
obj = self.pool.get(fld_def._obj)
field_value2 = []
for i in range(len(field_value)):
field_value2.append({})
for field2 in field_value[i]:
if field2 in obj._columns.keys() and obj._columns[field2]._type in ('many2one', 'one2one'):
if field2 in obj._columns.keys() and obj._columns[field2]._type == 'many2one':
obj2 = self.pool.get(obj._columns[field2]._obj)
if not obj2.search(cr, uid,
[('id', '=', field_value[i][field2])]):
continue
elif field2 in obj._inherit_fields.keys() and obj._inherit_fields[field2][2]._type in ('many2one', 'one2one'):
elif field2 in obj._inherit_fields.keys() and obj._inherit_fields[field2][2]._type == 'many2one':
obj2 = self.pool.get(obj._inherit_fields[field2][2]._obj)
if not obj2.search(cr, uid,
[('id', '=', field_value[i][field2])]):
@ -4354,7 +4352,7 @@ class BaseModel(object):
for v in value:
if v not in val:
continue
if self._columns[v]._type in ('many2one', 'one2one'):
if self._columns[v]._type == 'many2one':
try:
value[v] = value[v][0]
except:
@ -4376,7 +4374,7 @@ class BaseModel(object):
if f in field_dict[r]:
result.pop(r)
for id, value in result.items():
if self._columns[f]._type in ('many2one', 'one2one'):
if self._columns[f]._type == 'many2one':
try:
value = value[0]
except:
@ -4653,7 +4651,7 @@ class BaseModel(object):
data[f] = data[f] and data[f][0]
except:
pass
elif ftype in ('one2many', 'one2one'):
elif ftype == 'one2many':
res = []
rel = self.pool.get(fields[f]['relation'])
if data[f]:
@ -4704,7 +4702,7 @@ class BaseModel(object):
translation_records = []
for field_name, field_def in fields.items():
# we must recursively copy the translations for o2o and o2m
if field_def['type'] in ('one2one', 'one2many'):
if field_def['type'] == 'one2many':
target_obj = self.pool.get(field_def['relation'])
old_record, new_record = self.read(cr, uid, [old_id, new_id], [field_name], context=context)
# here we rely on the order of the ids to match the translations

View File

@ -173,6 +173,8 @@ class db(netsvc.ExportService):
raise Exception, e
def exp_drop(self, db_name):
if not self.exp_db_exist(db_name):
return False
openerp.modules.registry.RegistryManager.delete(db_name)
sql_db.close_db(db_name)
@ -180,6 +182,17 @@ class db(netsvc.ExportService):
cr = db.cursor()
cr.autocommit(True) # avoid transaction block
try:
# Try to terminate all other connections that might prevent
# dropping the database
try:
cr.execute("""SELECT pg_terminate_backend(procpid)
FROM pg_stat_activity
WHERE datname = %s AND
procpid != pg_backend_pid()""",
(db_name,))
except Exception:
pass
try:
cr.execute('DROP DATABASE "%s"' % db_name)
except Exception, e:
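
A rough standalone equivalent of the terminate-then-drop sequence added above, assuming psycopg2 and a reachable 'postgres' maintenance database; note that pg_stat_activity exposes procpid on PostgreSQL < 9.2 and pid on 9.2+:

import psycopg2

def force_drop_database(db_name):
    conn = psycopg2.connect("dbname=postgres")
    conn.autocommit = True            # DROP DATABASE cannot run inside a transaction
    cur = conn.cursor()
    try:
        # best effort, as in the patch: kick out sessions still using the database
        cur.execute("""SELECT pg_terminate_backend(procpid)
                         FROM pg_stat_activity
                        WHERE datname = %s
                          AND procpid != pg_backend_pid()""", (db_name,))
    except Exception:
        pass
    cur.execute('DROP DATABASE "%s"' % db_name)
    conn.close()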

View File

@ -313,7 +313,8 @@ class graph(object):
self.order[level] = self.order[level]+1
for sec_end in self.transitions.get(node, []):
self.init_order(sec_end, self.result[sec_end]['x'])
if node!=sec_end:
self.init_order(sec_end, self.result[sec_end]['x'])
def order_heuristic(self):
@ -438,33 +439,27 @@ class graph(object):
l.reverse()
no = len(l)
if no%2==0:
first_half = l[no/2:]
factor = 1
else:
first_half = l[no/2+1:]
factor = 0
rest = no%2
first_half = l[no/2+rest:]
last_half = l[:no/2]
i=1
for child in first_half:
self.result[child]['y'] = mid_pos - (i - (factor * 0.5))
i += 1
for i, child in enumerate(first_half):
self.result[child]['y'] = mid_pos - (i+1 - (0 if rest else 0.5))
if self.transitions.get(child, False):
if last:
self.result[child]['y'] = last + len(self.transitions[child])/2 + 1
last = self.tree_order(child, last)
if no%2:
if rest:
mid_node = l[no/2]
self.result[mid_node]['y'] = mid_pos
if self.transitions.get((mid_node), False):
if last:
self.result[mid_node]['y'] = last + len(self.transitions[mid_node])/2 + 1
last = self.tree_order(mid_node)
if node!=mid_node:
last = self.tree_order(mid_node)
else:
if last:
self.result[mid_node]['y'] = last + 1
@ -474,13 +469,14 @@ class graph(object):
i=1
last_child = None
for child in last_half:
self.result[child]['y'] = mid_pos + (i - (factor * 0.5))
self.result[child]['y'] = mid_pos + (i - (0 if rest else 0.5))
last_child = child
i += 1
if self.transitions.get(child, False):
if last:
self.result[child]['y'] = last + len(self.transitions[child])/2 + 1
last = self.tree_order(child, last)
if node!=child:
last = self.tree_order(child, last)
if last_child:
last = self.result[last_child]['y']
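
The even/odd handling above can be read as the following self-contained sketch (spread_positions() is an illustrative helper, not part of the module): children are spread symmetrically around mid_pos, with a half-step offset when their count is even.

def spread_positions(children, mid_pos):
    no = len(children)
    rest = no % 2                          # 1 if odd, 0 if even
    first_half = children[no // 2 + rest:]
    last_half = children[:no // 2]
    pos = {}
    for i, child in enumerate(first_half):
        pos[child] = mid_pos - (i + 1 - (0 if rest else 0.5))
    if rest:
        pos[children[no // 2]] = mid_pos   # odd count: the middle child sits on mid_pos
    for i, child in enumerate(last_half):
        pos[child] = mid_pos + (i + 1 - (0 if rest else 0.5))
    return pos

# spread_positions(['a', 'b', 'c'], 0)  -> {'c': -1, 'b': 0, 'a': 1}
# spread_positions(['a', 'b'], 0)       -> {'b': -0.5, 'a': 0.5}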