[MERGE] trunk

bzr revid: al@openerp.com-20140319235117-7trt1y3927mml13u
Antony Lesuisse 2014-03-20 00:51:17 +01:00
commit 666f4d0801
10 changed files with 237 additions and 111 deletions

View File

@@ -33,7 +33,7 @@ from openerp.osv.orm import Model, browse_null
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools import config
from openerp.tools.translate import _
from openerp.osv.orm import except_orm, browse_record
from openerp.osv.orm import except_orm, browse_record, MAGIC_COLUMNS
_logger = logging.getLogger(__name__)
@@ -302,6 +302,8 @@ class ir_model_fields(osv.osv):
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool[field.model]
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
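
The new guard skips the ORM's bookkeeping columns: MAGIC_COLUMNS covers id plus the log-access columns, which must never be dropped from the physical table. A minimal sketch of the check (the column list is reproduced from orm.py of this era and stated here as an assumption):

# Bookkeeping columns the ORM creates on every table; physically dropping
# them would break record tracking, so _drop_column skips them.
MAGIC_COLUMNS = ['id', 'create_uid', 'create_date', 'write_uid', 'write_date']

def may_drop_column(name):
    # Only user-defined columns are candidates for DROP COLUMN.
    return name not in MAGIC_COLUMNS

assert not may_drop_column('create_uid')
assert may_drop_column('partner_latitude')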

View File

@@ -3,7 +3,7 @@
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP s.a. (<http://openerp.com>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@@ -169,8 +169,9 @@ class res_users(osv.osv):
}
def on_change_login(self, cr, uid, ids, login, context=None):
v = {'email': login} if tools.single_email_re.match(login) else {}
return {'value': v}
if login and tools.single_email_re.match(login):
return {'value': {'email': login}}
return {}
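
The rewritten onchange guards against a falsy login before matching: re pattern objects require a string, so an empty field value (False or None) used to raise a TypeError. A standalone sketch of the same guard, with a rough stand-in for tools.single_email_re (the real pattern differs):

import re

single_email_re = re.compile(r'^[^@ ]+@[^@ ]+\.[^@ ]+$')  # simplified stand-in

def on_change_login(login):
    # Without the "login and" short-circuit, match(False) raises TypeError.
    if login and single_email_re.match(login):
        return {'value': {'email': login}}
    return {}

assert on_change_login('a@b.com') == {'value': {'email': 'a@b.com'}}
assert on_change_login(False) == {}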
def onchange_state(self, cr, uid, ids, state_id, context=None):
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
@@ -899,18 +900,22 @@ class change_password_wizard(osv.TransientModel):
}))
return {'user_ids': res}
def change_password_button(self, cr, uid, id, context=None):
wizard = self.browse(cr, uid, id, context=context)[0]
user_ids = []
for user in wizard.user_ids:
user_ids.append(user.id)
self.pool.get('change.password.user').change_password_button(cr, uid, user_ids, context=context)
need_reload = any(uid == user.user_id.id for user in wizard.user_ids)
line_ids = [user.id for user in wizard.user_ids]
self.pool.get('change.password.user').change_password_button(cr, uid, line_ids, context=context)
# don't keep temporary password copies in the database longer than necessary
self.pool.get('change.password.user').unlink(cr, uid, user_ids)
return {
'type': 'ir.actions.act_window_close',
}
self.pool.get('change.password.user').write(cr, uid, line_ids, {'new_passwd': False}, context=context)
if need_reload:
return {
'type': 'ir.actions.client',
'tag': 'reload'
}
return {'type': 'ir.actions.act_window_close'}
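
Two behaviour changes meet here: temporary passwords are blanked with write() instead of unlinking the wizard lines, and changing your own password now triggers a client reload so the session picks up the new credentials. A sketch of just the action-selection logic, outside the ORM (the pair shape below is hypothetical):

def password_change_action(uid, wizard_lines):
    # wizard_lines: list of (line_id, target_user_id) pairs
    need_reload = any(uid == user_id for _line_id, user_id in wizard_lines)
    if need_reload:
        # the current user's own password changed: force a client reload
        return {'type': 'ir.actions.client', 'tag': 'reload'}
    return {'type': 'ir.actions.act_window_close'}

assert password_change_action(1, [(10, 1), (11, 5)])['tag'] == 'reload'
assert password_change_action(1, [(11, 5)])['type'] == 'ir.actions.act_window_close'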
class change_password_user(osv.TransientModel):
"""

View File

@@ -9,6 +9,7 @@ class test_base(common.TransactionCase):
super(test_base,self).setUp()
self.res_partner = self.registry('res.partner')
self.res_users = self.registry('res.users')
self.res_partner_title = self.registry('res.partner.title')
# samples use effective TLDs from the Mozilla public suffix
# list at http://publicsuffix.org
@@ -285,27 +286,99 @@ class test_base(common.TransactionCase):
def test_60_read_group(self):
cr, uid = self.cr, self.uid
for user_data in [
{'name': 'Alice', 'login': 'alice', 'color': 1, 'function': 'Friend'},
{'name': 'Bob', 'login': 'bob', 'color': 2, 'function': 'Friend'},
{'name': 'Eve', 'login': 'eve', 'color': 3, 'function': 'Eavesdropper'},
{'name': 'Nab', 'login': 'nab', 'color': 2, 'function': '5$ Wrench'},
]:
self.res_users.create(cr, uid, user_data)
title_sir = self.res_partner_title.create(cr, uid, {'name': 'Sir', 'domain': 'contact'})
title_lady = self.res_partner_title.create(cr, uid, {'name': 'Lady', 'domain': 'contact'})
test_users = [
{'name': 'Alice', 'login': 'alice', 'color': 1, 'function': 'Friend', 'date': '2015-03-28', 'title': title_lady},
{'name': 'Alice', 'login': 'alice2', 'color': 0, 'function': 'Friend', 'date': '2015-01-28', 'title': title_lady},
{'name': 'Bob', 'login': 'bob', 'color': 2, 'function': 'Friend', 'date': '2015-03-02', 'title': title_sir},
{'name': 'Eve', 'login': 'eve', 'color': 3, 'function': 'Eavesdropper', 'date': '2015-03-20', 'title': title_lady},
{'name': 'Nab', 'login': 'nab', 'color': -3, 'function': '5$ Wrench', 'date': '2014-09-10', 'title': title_sir},
{'name': 'Nab', 'login': 'nab-she', 'color': 6, 'function': '5$ Wrench', 'date': '2014-01-02', 'title': title_lady},
]
ids = [self.res_users.create(cr, uid, u) for u in test_users]
domain = [('id', 'in', ids)]
groups_data = self.res_users.read_group(cr, uid, domain=[('login', 'in', ('alice', 'bob', 'eve'))], fields=['name', 'color', 'function'], groupby='function')
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# group on local char field without domain and without active_test (-> empty WHERE clause)
groups_data = self.res_users.read_group(cr, uid, [], fields=['login'], groupby=['login'], orderby='login DESC', context={'active_test': False})
self.assertGreater(len(groups_data), 6, "Incorrect number of results when grouping on a field")
# group on local char field with limit
groups_data = self.res_users.read_group(cr, uid, domain, fields=['login'], groupby=['login'], orderby='login DESC', limit=3, offset=3)
self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field with limit")
self.assertEqual(['bob', 'alice2', 'alice'], [g['login'] for g in groups_data], 'Result mismatch')
# group on inherited char field, aggregate on int field (second groupby ignored on purpose)
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color', 'function'], groupby=['function', 'login'])
self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field")
self.assertEqual(['5$ Wrench', 'Eavesdropper', 'Friend'], [g['function'] for g in groups_data], 'incorrect read_group order')
for group_data in groups_data:
self.assertIn('color', group_data, "Aggregated data for the column 'color' is not present in read_group return values")
self.assertEqual(group_data['color'], 3, "Incorrect sum for aggregated data for the column 'color'")
self.assertIn('color', group_data, "Aggregated data for the column 'color' is not present in read_group return values")
self.assertEqual(group_data['color'], 3, "Incorrect sum for aggregated data for the column 'color'")
groups_data = self.res_users.read_group(cr, uid, domain=[('login', 'in', ('alice', 'bob', 'eve'))], fields=['name', 'color'], groupby='name', orderby='name DESC, color asc')
self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field")
self.assertEqual([user['name'] for user in groups_data], ['Eve', 'Bob', 'Alice'], 'Incorrect ordering of the list')
# group on inherited char field, reverse order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby='name', orderby='name DESC')
self.assertEqual(['Nab', 'Eve', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
# group on int field, default ordering
groups_data = self.res_users.read_group(cr, uid, domain, fields=['color'], groupby='color')
self.assertEqual([-3, 0, 1, 2, 3, 6], [g['color'] for g in groups_data], 'Incorrect ordering of the list')
# multi group, second level is int field, should still be summed in first level grouping
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby=['name', 'color'], orderby='name DESC')
self.assertEqual(['Nab', 'Eve', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([3, 3, 2, 1], [g['color'] for g in groups_data], 'Incorrect ordering of the list')
# group on inherited char field, multiple orders with directions
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby='name', orderby='color DESC, name')
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['Eve', 'Nab', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([1, 2, 1, 2], [g['name_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited date column (res_partner.date) -> Year-Month, default ordering
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'date'], groupby=['date'])
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['January 2014', 'September 2014', 'January 2015', 'March 2015'], [g['date'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([1, 1, 1, 3], [g['date_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited date column (res_partner.date) -> Year-Month, custom order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'date'], groupby=['date'], orderby='date DESC')
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['March 2015', 'January 2015', 'September 2014', 'January 2014'], [g['date'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([3, 1, 1, 1], [g['date_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited many2one (res_partner.title), default order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'])
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_lady, 'Lady'), (title_sir, 'Sir')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([4, 2], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([10, -1], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), reversed natural order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby="title desc")
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_sir, 'Sir'), (title_lady, 'Lady')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([2, 4], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([-1, 10], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), multiple orders with m2o in second position
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby="color desc, title desc")
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_lady, 'Lady'), (title_sir, 'Sir')], [g['title'] for g in groups_data], 'Incorrect ordering of the result')
self.assertEqual([4, 2], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([10, -1], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), ordered by other inherited field (color)
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby='color')
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_sir, 'Sir'), (title_lady, 'Lady')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([2, 4], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([-1, 10], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
groups_data = self.res_users.read_group(cr, uid, domain=[('login', 'in', ('alice', 'bob', 'eve', 'nab'))], fields=['function', 'color'], groupby='function', orderby='color ASC')
self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field")
self.assertEqual(groups_data, sorted(groups_data, key=lambda x: x['color']), 'Incorrect ordering of the list')
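
For reference, each group returned by read_group is a dict carrying the grouped value, a <field>_count, the aggregated columns, and a __domain usable to fetch the group's records. Grouping the six users above on function should yield something shaped like the following (illustrative values; the __domain key is simplified and the shape is an assumption):

ids = [1, 2, 3, 4, 5, 6]  # placeholder for the created user ids
groups_data = [
    {'function': '5$ Wrench', 'function_count': 2, 'color': 3,
     '__domain': [('function', '=', '5$ Wrench'), ('id', 'in', ids)]},
    {'function': 'Eavesdropper', 'function_count': 1, 'color': 3,
     '__domain': [('function', '=', 'Eavesdropper'), ('id', 'in', ids)]},
    {'function': 'Friend', 'function_count': 3, 'color': 3,
     '__domain': [('function', '=', 'Friend'), ('id', 'in', ids)]},
]

Note that each group's color sums to 3, which is exactly what the assertion loop above checks.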
class test_partner_recursion(common.TransactionCase):

View File

@@ -8,6 +8,7 @@
# OPENERP_DATABASE=yy PYTHONPATH=../:. unit2 test_ir_sequence
# This assumes an existing database.
import psycopg2
import psycopg2.errorcodes
import unittest2
import openerp
@@ -111,11 +112,11 @@ class test_ir_sequence_no_gap(unittest2.TestCase):
cr0 = cursor()
cr1 = cursor()
cr1._default_log_exceptions = False # Prevent logging a traceback
msg_re = '^could not obtain lock on row in relation "ir_sequence"$'
with self.assertRaisesRegexp(psycopg2.OperationalError, msg_re):
with self.assertRaises(psycopg2.OperationalError) as e:
n0 = registry('ir.sequence').next_by_code(cr0, ADMIN_USER_ID, 'test_sequence_type_2', {})
assert n0
n1 = registry('ir.sequence').next_by_code(cr1, ADMIN_USER_ID, 'test_sequence_type_2', {})
self.assertEqual(e.exception.pgcode, psycopg2.errorcodes.LOCK_NOT_AVAILABLE, msg="postgresql returned an incorrect errcode")
cr0.close()
cr1.close()
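
Asserting on the SQLSTATE code instead of the message text makes the test locale-independent: the old regexp only matched the English error message, while pgcode is stable. The constant it checks:

import psycopg2
import psycopg2.errorcodes

# LOCK_NOT_AVAILABLE is SQLSTATE 55P03, raised by SELECT ... FOR UPDATE NOWAIT
# when another transaction already holds the row lock (the no-gap sequence case).
assert psycopg2.errorcodes.LOCK_NOT_AVAILABLE == '55P03'

def is_lock_not_available(exc):
    # pgcode survives server-side message localization; the text does not.
    return getattr(exc, 'pgcode', None) == psycopg2.errorcodes.LOCK_NOT_AVAILABLE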

View File

@@ -309,8 +309,9 @@ class test_m2o(CreatorCase):
def test_external_id(self):
integer_id = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
# __export__.$class.$id
external_id = u'__export__.export_many2one_%d' % integer_id
# Expecting the m2o target model name in the external id,
# not this model's name
external_id = u'__export__.export_integer_%d' % integer_id
self.assertEqual(
self.export(integer_id, fields=['value/id']),
[[external_id]])
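
The corrected expectation follows how export generates missing external ids: __export__.<table>_<id>, built from the record's own model, so exporting a many2one column yields an id based on the target model's table. A sketch of the naming scheme (helper name hypothetical):

def export_xml_id(table_name, record_id, postfix=0):
    # __export__.<table>_<id>[_<postfix>]; the postfix disambiguates collisions
    suffix = ('_%d' % postfix) if postfix else ''
    return '__export__.%s_%d%s' % (table_name, record_id, suffix)

assert export_xml_id('export_integer', 42) == '__export__.export_integer_42'
assert export_xml_id('export_integer', 42, 1) == '__export__.export_integer_42_1'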

View File

@@ -197,6 +197,10 @@ class WebRequest(object):
self.auth_method = None
self._cr_cm = None
self._cr = None
# prevents the transaction from being committed; set it when an exception is caught during handling
self._failed = None
# set db/uid trackers - they're cleaned up at the WSGI
# dispatching phase in openerp.service.wsgi_server.application
if self.db:
@@ -244,10 +248,13 @@
_request_stack.pop()
if self._cr:
# Don't commit test cursors
# Don't close test cursors
if not openerp.tests.common.release_test_cursor(self._cr):
if exc_type is None:
if exc_type is None and not self._failed:
self._cr.commit()
else:
# just to be explicit - happens at close() anyway
self._cr.rollback()
self._cr.close()
# just to be sure no one tries to re-use the request
self.disable_db = True
@@ -432,6 +439,7 @@ class JsonRequest(WebRequest):
'message': "OpenERP Session Invalid",
'data': se
}
self._failed = e # prevent tx commit
except Exception, e:
# Mute test cursor error for runbot
if not (openerp.tools.config['test_enable'] and isinstance(e, psycopg2.OperationalError)):
@@ -442,6 +450,7 @@ class JsonRequest(WebRequest):
'message': "OpenERP Server Error",
'data': se
}
self._failed = e # prevent tx commit
if error:
response["error"] = error

View File

@@ -2,7 +2,7 @@
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP SA (<http://www.openerp.com>)
# Copyright (C) 2004-2014 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@@ -125,7 +125,13 @@ class ColoredFormatter(DBFormatter):
record.levelname = COLOR_PATTERN % (30 + fg_color, 40 + bg_color, record.levelname)
return DBFormatter.format(self, record)
_logger_init = False
def init_logger():
global _logger_init
if _logger_init:
return
_logger_init = True
from tools.translate import resetlocale
resetlocale()
@@ -175,26 +181,24 @@ def init_logger():
formatter = DBFormatter(format)
handler.setFormatter(formatter)
# Configure handlers
logging.getLogger().addHandler(handler)
if tools.config['log_db']:
postgresqlHandler = PostgreSQLHandler()
postgresqlHandler.setLevel(logging.WARNING)
logging.getLogger().addHandler(postgresqlHandler)
# Configure loggers levels
pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])
logconfig = tools.config['log_handler']
postgresqlHandler = PostgreSQLHandler()
postgresqlHandler.setLevel(logging.WARNING)
logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
for logconfig_item in logging_configurations:
loggername, level = logconfig_item.split(':')
level = getattr(logging, level, logging.INFO)
logger = logging.getLogger(loggername)
logger.handlers = []
logger.setLevel(level)
logger.addHandler(handler)
if tools.config['log_db']:
logger.addHandler(postgresqlHandler)
if loggername != '':
logger.propagate = False
for logconfig_item in logging_configurations:
_logger.debug('logger level set: "%s"', logconfig_item)
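
The refactor makes init_logger idempotent and moves handler ownership to the root logger: child loggers only receive levels and keep propagating, instead of each carrying its own copy of every handler. A standalone sketch of the guard-and-configure pattern:

import logging

_logger_init = False

def init_logger():
    # Module-level guard: repeated calls must not stack duplicate handlers.
    global _logger_init
    if _logger_init:
        return
    _logger_init = True
    handler = logging.StreamHandler()
    logging.getLogger().addHandler(handler)  # handlers live on the root only
    # child loggers get levels and propagate upward to the root handler
    logging.getLogger('openerp').setLevel(logging.INFO)

before = len(logging.getLogger().handlers)
init_logger()
init_logger()  # second call is a no-op
assert len(logging.getLogger().handlers) == before + 1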

View File

@@ -75,7 +75,7 @@ _schema = logging.getLogger(__name__ + '.schema')
# List of etree._Element subclasses that we choose to ignore when parsing XML.
from openerp.tools import SKIPPED_ELEMENT_TYPES
regex_order = re.compile('^(([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_order = re.compile('^( *([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
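
The only change to regex_order is the leading " *" in each term, presumably to tolerate the whitespace that survives when an order spec is split on commas. A quick comparison of the old and new patterns:

import re

old = re.compile('^(([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)
new = re.compile('^( *([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)

spec = ' color desc'  # leading space, e.g. a piece of 'name, color desc'
assert old.match(spec) is None       # rejected before this commit
assert new.match(spec) is not None   # accepted now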
@@ -1124,7 +1124,7 @@ class BaseModel(object):
def _get_xml_id(self, cr, uid, r):
model_data = self.pool.get('ir.model.data')
data_ids = model_data.search(cr, uid, [('model', '=', r._table_name), ('res_id', '=', r['id'])])
data_ids = model_data.search(cr, uid, [('model', '=', r._model._name), ('res_id', '=', r['id'])])
if len(data_ids):
d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
if d['module']:
@@ -1134,13 +1134,13 @@ class BaseModel(object):
else:
postfix = 0
while True:
n = self._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
n = r._model._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
if not model_data.search(cr, uid, [('name', '=', n)]):
break
postfix += 1
model_data.create(cr, SUPERUSER_ID, {
'name': n,
'model': self._name,
'model': r._model._name,
'res_id': r['id'],
'module': '__export__',
})
@@ -2185,36 +2185,42 @@ class BaseModel(object):
r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
return result
def _read_group_generate_order_by(self, orderby, aggregated_fields, groupby, query):
def _read_group_prepare(self, orderby, aggregated_fields, groupby, qualified_groupby_field, query, groupby_type=None):
"""
Generates the ORDER BY sql clause for the read group method. Adds the missing JOIN clause
Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
to the query if order should be computed against m2o field.
:param orderby: the orderby definition in the form "%(field)s %(order)s"
:param aggregated_fields: list of aggregated fields in the query
:param groupby: the current groupby field name
:param query: the query object used to construct the query afterwards
:param qualified_groupby_field: the fully qualified SQL name for the grouped field
:param osv.Query query: the query under construction
:param groupby_type: the type of the grouped field
:return: (groupby_terms, orderby_terms)
"""
orderby_list = []
ob = []
for order_splits in orderby.split(','):
order_split = order_splits.split()
orderby_field = order_split[0]
fields = openerp.osv.fields
if isinstance(self._all_columns[orderby_field].column, (fields.date, fields.datetime)):
continue
orderby_dir = len(order_split) == 2 and order_split[1].upper() == 'ASC' and 'ASC' or 'DESC'
if orderby_field == groupby:
orderby_item = self._generate_order_by(order_splits, query).replace('ORDER BY ', '')
if orderby_item:
orderby_list.append(orderby_item)
ob += [obi.split()[0] for obi in orderby_item.split(',')]
elif orderby_field in aggregated_fields:
orderby_list.append('%s %s' % (orderby_field,orderby_dir))
orderby_terms = []
groupby_terms = [qualified_groupby_field] if groupby else []
if not orderby:
return groupby_terms, orderby_terms
if orderby_list:
return ' ORDER BY %s' % (','.join(orderby_list)), ob and ','.join(ob) or ''
else:
return '', ''
self._check_qorder(orderby)
for order_part in orderby.split(','):
order_split = order_part.split()
order_field = order_split[0]
if order_field == groupby:
if groupby_type == 'many2one':
order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
if order_clause:
orderby_terms.append(order_clause)
groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
else:
orderby_terms.append(order_part)
elif order_field in aggregated_fields:
orderby_terms.append(order_part)
else:
# Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
_logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
self._name, order_part)
return groupby_terms, orderby_terms
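
In short, _read_group_prepare keeps an ORDER BY term only if it targets the grouped field or an aggregate (anything else cannot appear in the result and is dropped with a warning), and for a many2one groupby it reuses _generate_order_by so the JOINed ordering columns also land in GROUP BY. A simplified, pure-Python sketch of that filtering (no query/join handling):

def read_group_prepare_sketch(orderby, aggregated_fields, groupby,
                              qualified_groupby_field):
    groupby_terms = [qualified_groupby_field] if groupby else []
    orderby_terms = []
    for order_part in (orderby or '').split(','):
        if not order_part.strip():
            continue
        order_field = order_part.split()[0]
        if order_field == groupby or order_field in aggregated_fields:
            orderby_terms.append(order_part.strip())
        # else: dropped; the real method emits the warning shown above
    return groupby_terms, orderby_terms

gb, ob = read_group_prepare_sketch('name DESC, color asc', ['color'],
                                   'name', '"res_partner"."name"')
assert gb == ['"res_partner"."name"']
assert ob == ['name DESC', 'color asc']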
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
"""
@@ -2275,9 +2281,9 @@ class BaseModel(object):
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
fget = self.fields_get(cr, uid, fields)
flist = ''
group_count = group_by = groupby
group_by_params = {}
select_terms = []
groupby_type = None
if groupby:
if fget.get(groupby):
groupby_type = fget[groupby]['type']
@@ -2305,12 +2311,9 @@ class BaseModel(object):
timezone = context.get('tz', 'UTC')
qualified_groupby_field = "timezone('%s', timezone('UTC',%s))" % (timezone, qualified_groupby_field)
qualified_groupby_field = "date_trunc('%s', %s)" % (interval, qualified_groupby_field)
flist = "%s as %s " % (qualified_groupby_field, groupby)
elif groupby_type == 'boolean':
qualified_groupby_field = "coalesce(%s,false)" % qualified_groupby_field
flist = "%s as %s " % (qualified_groupby_field, groupby)
else:
flist = qualified_groupby_field
select_terms.append("%s as %s " % (qualified_groupby_field, groupby))
else:
# Don't allow arbitrary values, as this would be a SQL injection vector!
raise except_orm(_('Invalid group_by'),
@@ -2318,34 +2321,48 @@ class BaseModel(object):
aggregated_fields = [
f for f in fields
if f not in ('id', 'sequence')
if f not in ('id', 'sequence', groupby)
if fget[f]['type'] in ('integer', 'float')
if (f in self._all_columns and getattr(self._all_columns[f].column, '_classic_write'))]
for f in aggregated_fields:
group_operator = fget[f].get('group_operator', 'sum')
if flist:
flist += ', '
qualified_field = self._inherits_join_calc(f, query)
flist += "%s(%s) AS %s" % (group_operator, qualified_field, f)
select_terms.append("%s(%s) AS %s" % (group_operator, qualified_field, f))
order = orderby or groupby
orderby_clause = ''
ob = ''
if order:
orderby_clause, ob = self._read_group_generate_order_by(order, aggregated_fields, groupby, query)
gb = groupby and (' GROUP BY ' + qualified_groupby_field) or ''
order = orderby or groupby or ''
groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, groupby, qualified_groupby_field, query, groupby_type)
from_clause, where_clause, where_clause_params = query.get_sql()
where_clause = where_clause and ' WHERE ' + where_clause
limit_str = limit and ' limit %d' % limit or ''
offset_str = offset and ' offset %d' % offset or ''
if len(groupby_list) < 2 and context.get('group_by_no_leaf'):
group_count = '_'
cr.execute('SELECT min(%s.id) AS id, count(%s.id) AS %s_count' % (self._table, self._table, group_count) + (flist and ',') + flist + ' FROM ' + from_clause + where_clause + gb + (ob and ',') + ob + orderby_clause + limit_str + offset_str, where_clause_params)
alldata = {}
groupby = group_by
count_field = '_'
else:
count_field = groupby
prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
query = """
SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count
%(extra_fields)s
FROM %(from)s
%(where)s
%(groupby)s
%(orderby)s
%(limit)s
%(offset)s
""" % {
'table': self._table,
'count_field': count_field,
'extra_fields': prefix_terms(',', select_terms),
'from': from_clause,
'where': prefix_term('WHERE', where_clause),
'groupby': prefix_terms('GROUP BY', groupby_terms),
'orderby': prefix_terms('ORDER BY', orderby_terms),
'limit': prefix_term('LIMIT', int(limit) if limit else None),
'offset': prefix_term('OFFSET', int(offset) if limit else None),
}
cr.execute(query, where_clause_params)
alldata = {}
fetched_data = cr.dictfetchall()
data_ids = []
@@ -2356,8 +2373,6 @@ class BaseModel(object):
data_ids.append(r['id'])
del r['id']
if groupby:
data = self.read(cr, uid, data_ids, [groupby], context=context)
# restore order of the search as read() uses the default _order (this is only for groups, so the footprint of data should be small):
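
The prefix_terms/prefix_term helpers are what let the single SQL template above serve grouped and ungrouped calls: an empty term list makes its whole clause disappear. The two lambdas, checked standalone:

prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''

assert prefix_terms('GROUP BY', ['"res_users"."login"']) == 'GROUP BY "res_users"."login"'
assert prefix_terms('ORDER BY', []) == ''   # empty list: the clause vanishes
assert prefix_term('LIMIT', 80) == 'LIMIT 80'
assert prefix_term('OFFSET', None) == ''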
@@ -2847,7 +2862,7 @@ class BaseModel(object):
msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
_schema.debug(msg, self._table, k, f._type)
if isinstance(f, fields.many2one):
if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
dest_model = self.pool[f._obj]
if dest_model._table != 'ir_actions':
self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
@@ -2882,7 +2897,7 @@ class BaseModel(object):
todo_end.append((order, self._update_store, (f, k)))
# and add constraints if needed
if isinstance(f, fields.many2one):
if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
if f._obj not in self.pool:
raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
dest_model = self.pool[f._obj]
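
Both hunks extend the plain many2one handling to stored function fields whose computed type is many2one, so their database columns now get the same foreign key and ondelete treatment. A hedged sketch of such a field in the old osv API (the model and its compute method are hypothetical):

from openerp.osv import osv, fields

class x_report_line(osv.osv):
    _name = 'x.report.line'

    def _compute_main_partner(self, cr, uid, ids, name, arg, context=None):
        return dict.fromkeys(ids, False)  # placeholder compute

    _columns = {
        # stored -> a real column exists -> the FK constraint is now enforced
        'main_partner_id': fields.function(
            _compute_main_partner, type='many2one', obj='res.partner',
            store=True, string='Main Partner'),
    }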

View File

@@ -32,7 +32,7 @@ from random import randint
# Image resizing
# ----------------------------------------
def image_resize_image(base64_source, size=(1024, 1024), encoding='base64', filetype='PNG', avoid_if_small=False):
def image_resize_image(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=False):
""" Function to resize an image. The image will be resized to the given
size, while keeping the aspect ratios, and holes in the image will be
filled with transparent background. The image will not be stretched if
@@ -58,7 +58,8 @@ def image_resize_image(base64_source, size=(1024, 1024), encoding='base64', file
height mean an automatically computed value based respectively
on height or width of the source image.
:param encoding: the output encoding
:param filetype: the output filetype
:param filetype: the output filetype, by default the source image's
:type filetype: str, any PIL image format (supported for creation)
:param avoid_if_small: do not resize if image height and width
are smaller than the expected size.
"""
@@ -68,6 +69,8 @@ def image_resize_image(base64_source, size=(1024, 1024), encoding='base64', file
return base64_source
image_stream = StringIO.StringIO(base64_source.decode(encoding))
image = Image.open(image_stream)
# store filetype here, as Image.new below will lose image.format
filetype = filetype or image.format
asked_width, asked_height = size
if asked_width is None:
@@ -95,21 +98,21 @@ def image_resize_image(base64_source, size=(1024, 1024), encoding='base64', file
image.save(background_stream, filetype)
return background_stream.getvalue().encode(encoding)
def image_resize_image_big(base64_source, size=(1204, 1204), encoding='base64', filetype='PNG', avoid_if_small=True):
def image_resize_image_big(base64_source, size=(1204, 1024), encoding='base64', filetype=None, avoid_if_small=True):
""" Wrapper on image_resize_image, to resize images larger than the standard
'big' image size: 1024x1024px.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_medium(base64_source, size=(128, 128), encoding='base64', filetype='PNG', avoid_if_small=False):
def image_resize_image_medium(base64_source, size=(128, 128), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'medium'
image size: 180x180.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_small(base64_source, size=(64, 64), encoding='base64', filetype='PNG', avoid_if_small=False):
def image_resize_image_small(base64_source, size=(64, 64), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'small' image
size: 50x50.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
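
Defaulting filetype to None and recovering it from the source keeps a JPEG a JPEG instead of silently re-encoding everything as PNG; the format has to be captured before compositing because images created by Image.new carry format=None. A minimal PIL demonstration of the pitfall (Python 2, like the surrounding code):

import StringIO
from PIL import Image

buf = StringIO.StringIO()
Image.new('RGB', (4, 4)).save(buf, 'JPEG')
buf.seek(0)

image = Image.open(buf)
filetype = image.format                # 'JPEG' -- must be read now
background = Image.new('RGBA', (8, 8), (255, 255, 255, 0))
assert background.format is None       # the compositing target has no format
assert filetype == 'JPEG'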

View File

@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (C) 2004-2012 OpenERP s.a. (<http://www.openerp.com>).
# Copyright (C) 2004-2014 OpenERP s.a. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@@ -225,9 +225,14 @@ def safe_eval(expr, globals_dict=None, locals_dict=None, mode="eval", nocopy=Fal
'False': False,
'None': None,
'str': str,
'unicode': unicode,
'globals': locals,
'locals': locals,
'bool': bool,
'int': int,
'float': float,
'long': long,
'enumerate': enumerate,
'dict': dict,
'list': list,
'tuple': tuple,
@@ -235,15 +240,23 @@ def safe_eval(expr, globals_dict=None, locals_dict=None, mode="eval", nocopy=Fal
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'reduce': reduce,
'filter': filter,
'round': round,
'len': len,
'set': set,
'repr': repr,
'int': int,
'float': float,
'set': set,
'all': all,
'any': any,
'ord': ord,
'chr': chr,
'cmp': cmp,
'divmod': divmod,
'isinstance': isinstance,
'range': range,
'xrange': xrange,
'zip': zip,
}
)
if locals_builtins:
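
With the widened whitelist, sandboxed expressions gain the common pure builtins (sum, any, zip, divmod, isinstance, and so on) while imports and attribute access stay blocked. A few illustrative calls, assuming safe_eval from this module:

from openerp.tools.safe_eval import safe_eval

assert safe_eval("sum([1, 2, 3])") == 6          # sum is now whitelisted
assert safe_eval("max(divmod(7, 2))") == 3       # divmod and max, too
assert safe_eval("isinstance(2, int)") is True
# non-whitelisted names such as open() still fail inside the sandbox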