[MERGE] From trunk (second time)

bzr revid: cto@openerp.com-20121217144753-1rboqdw01h8yrehj
Cecile Tonglet 2012-12-17 15:47:53 +01:00
commit ad8b76de53
57 changed files with 297 additions and 370 deletions

debian/control vendored
View File

@@ -19,6 +19,7 @@ Depends:
python-docutils,
python-feedparser,
python-gdata,
+ python-jinja2,
python-ldap,
python-libxslt1,
python-lxml,

View File

@@ -670,7 +670,7 @@ class actions_server(osv.osv):
context['object'] = obj
for i in expr:
context['active_id'] = i.id
- result = self.run(cr, uid, [action.loop_action.id], context)
+ self.run(cr, uid, [action.loop_action.id], context)
if action.state == 'object_write':
res = {}
@@ -715,8 +715,6 @@ class actions_server(osv.osv):
expr = exp.value
res[exp.col1.name] = expr
- obj_pool = None
- res_id = False
obj_pool = self.pool.get(action.srcmodel_id.model)
res_id = obj_pool.create(cr, uid, res)
if action.record_id:
@@ -735,7 +733,7 @@ class actions_server(osv.osv):
model = action.copy_object.split(',')[0]
cid = action.copy_object.split(',')[1]
obj_pool = self.pool.get(model)
- res_id = obj_pool.copy(cr, uid, int(cid), res)
+ obj_pool.copy(cr, uid, int(cid), res)
return False

View File

@@ -226,7 +226,7 @@ class ir_cron(osv.osv):
_logger.warning('Tried to poll an undefined table on database %s.', db_name)
else:
raise
- except Exception, ex:
+ except Exception:
_logger.warning('Exception in cron:', exc_info=True)
finally:

View File

@@ -24,9 +24,6 @@ from openerp.osv import osv, fields
from openerp.tools.translate import _
class ir_filters(osv.osv):
- '''
- Filters
- '''
_name = 'ir.filters'
_description = 'Filters'

View File

@@ -227,7 +227,7 @@ class ir_mail_server(osv.osv):
:param int port: SMTP port to connect to
:param user: optional username to authenticate with
:param password: optional password to authenticate with
- :param string encryption: optional: ``'ssl'`` | ``'starttls'``
+ :param string encryption: optional, ``'ssl'`` | ``'starttls'``
:param bool smtp_debug: toggle debugging of SMTP sessions (all i/o
will be output in logs)
"""

View File

@@ -158,9 +158,10 @@ class ir_model(osv.osv):
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
- if not context.get(MODULE_UNINSTALL_FLAG) and \
- any(model.state != 'manual' for model in self.browse(cr, user, ids, context)):
- raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
+ if not context.get(MODULE_UNINSTALL_FLAG):
+ for model in self.browse(cr, user, ids, context):
+ if model.state != 'manual':
+ raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
self._drop_table(cr, user, ids, context)
res = super(ir_model, self).unlink(cr, user, ids, context)
@@ -256,7 +257,7 @@ class ir_model_fields(osv.osv):
'selection': "",
'domain': "[]",
'name': 'x_',
- 'state': lambda self,cr,uid,ctx={}: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
+ 'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
'on_delete': 'set null',
'select_level': '0',
'size': 64,
@@ -271,7 +272,7 @@ class ir_model_fields(osv.osv):
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
- _("The Selection Options expression is not a valid Pythonic expression." \
+ _("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
@@ -514,7 +515,7 @@ class ir_model_constraint(Model):
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
- if (set(external_ids)-ids_set):
+ if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
@@ -567,13 +568,12 @@ class ir_model_relation(Model):
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
- model_obj = self.pool.get(model)
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
- if (set(external_ids)-ids_set):
+ if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
@@ -585,7 +585,7 @@ class ir_model_relation(Model):
# drop m2m relation tables
for table in to_drop_table:
- cr.execute('DROP TABLE %s CASCADE'% (table),)
+ cr.execute('DROP TABLE %s CASCADE'% table,)
_logger.info('Dropped table %s', table)
cr.commit()
@@ -862,7 +862,7 @@ class ir_model_data(osv.osv):
res = self.read(cr, uid, data_id, ['model', 'res_id'])
if not res['res_id']:
raise ValueError('No such external ID currently defined in the system: %s.%s' % (module, xml_id))
- return (res['model'], res['res_id'])
+ return res['model'], res['res_id']
def get_object(self, cr, uid, module, xml_id, context=None):
"""Returns a browsable record for the given module name and xml_id or raise ValueError if not found"""
@@ -903,7 +903,7 @@ class ir_model_data(osv.osv):
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
- assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % (xml_id)
+ assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
if (not xml_id) and (not self.doinit):
return False
@@ -1073,7 +1073,6 @@ class ir_model_data(osv.osv):
if model == 'ir.model.fields')
ir_model_relation = self.pool.get('ir.model.relation')
- relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove)])
ir_module_module = self.pool.get('ir.module.module')
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)])
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])

View File

@@ -23,7 +23,7 @@ from openerp.osv import osv
class ir_needaction_mixin(osv.AbstractModel):
- '''Mixin class for objects using the need action feature.
+ """Mixin class for objects using the need action feature.
Need action feature can be used by models that have to be able to
signal that an action is required on a particular record. If in
@@ -37,7 +37,7 @@ class ir_needaction_mixin(osv.AbstractModel):
This class also offers several global services:
- ``_needaction_count``: returns the number of actions uid has to perform
- '''
+ """
_name = 'ir.needaction_mixin'
_needaction = True
@@ -56,7 +56,7 @@ class ir_needaction_mixin(osv.AbstractModel):
# "Need action" API
#------------------------------------------------------
- def _needaction_count(self, cr, uid, domain=[], context=None):
+ def _needaction_count(self, cr, uid, domain=None, context=None):
""" Get the number of actions uid has to perform. """
dom = self._needaction_domain_get(cr, uid, context=context)
if not dom:

View File

@@ -139,7 +139,7 @@ class ir_sequence(openerp.osv.osv.osv):
values = self._add_missing_default_values(cr, uid, values, context)
values['id'] = super(ir_sequence, self).create(cr, uid, values, context)
if values['implementation'] == 'standard':
- f = self._create_sequence(cr, values['id'], values['number_increment'], values['number_next'])
+ self._create_sequence(cr, values['id'], values['number_increment'], values['number_next'])
return values['id']
def unlink(self, cr, uid, ids, context=None):

View File

@@ -134,7 +134,7 @@ class ir_translation_import_cursor(object):
""" % (self._parent_table, self._table_name, self._parent_table, find_expr))
if self._debug:
- cr.execute('SELECT COUNT(*) FROM ONLY %s' % (self._parent_table))
+ cr.execute('SELECT COUNT(*) FROM ONLY %s' % self._parent_table)
c1 = cr.fetchone()[0]
cr.execute('SELECT COUNT(*) FROM ONLY %s AS irt, %s AS ti WHERE %s' % \
(self._parent_table, self._table_name, find_expr))
@@ -217,11 +217,11 @@ class ir_translation(osv.osv):
def _get_ids(self, cr, uid, name, tt, lang, ids):
translations = dict.fromkeys(ids, False)
if ids:
- cr.execute('select res_id,value ' \
- 'from ir_translation ' \
- 'where lang=%s ' \
- 'and type=%s ' \
- 'and name=%s ' \
+ cr.execute('select res_id,value '
+ 'from ir_translation '
+ 'where lang=%s '
+ 'and type=%s '
+ 'and name=%s '
'and res_id IN %s',
(lang,tt,name,tuple(ids)))
for res_id, value in cr.fetchall():
@@ -237,10 +237,10 @@ class ir_translation(osv.osv):
self._get_ids.clear_cache(self, uid, name, tt, lang, res_id)
self._get_source.clear_cache(self, uid, name, tt, lang)
- cr.execute('delete from ir_translation ' \
- 'where lang=%s ' \
- 'and type=%s ' \
- 'and name=%s ' \
+ cr.execute('delete from ir_translation '
+ 'where lang=%s '
+ 'and type=%s '
+ 'and name=%s '
'and res_id IN %s',
(lang,tt,name,tuple(ids),))
for id in ids:

View File

@@ -44,9 +44,8 @@ class ir_ui_menu(osv.osv):
def __init__(self, *args, **kwargs):
self.cache_lock = threading.RLock()
self._cache = {}
- r = super(ir_ui_menu, self).__init__(*args, **kwargs)
+ super(ir_ui_menu, self).__init__(*args, **kwargs)
self.pool.get('ir.model.access').register_cache_clearing_method(self._name, 'clear_cache')
- return r
def clear_cache(self):
with self.cache_lock:
@@ -144,7 +143,7 @@ class ir_ui_menu(osv.osv):
return res
def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
- if context == None:
+ if context is None:
context = {}
res = {}
for elmt in self.browse(cr, uid, ids, context=context):
@@ -195,7 +194,7 @@ class ir_ui_menu(osv.osv):
next_num=int(concat[0])+1
datas['name']=rex.sub(('(%d)'%next_num),datas['name'])
else:
- datas['name']=datas['name']+'(1)'
+ datas['name'] += '(1)'
self.write(cr,uid,[res],{'name':datas['name']})
ids = ir_values_obj.search(cr, uid, [
('model', '=', 'ir.ui.menu'),

View File

@@ -255,7 +255,7 @@ class view(osv.osv):
if label:
for lbl in eval(label):
if t.has_key(tools.ustr(lbl)) and tools.ustr(t[lbl])=='False':
- label_string = label_string + ' '
+ label_string += ' '
else:
label_string = label_string + " " + tools.ustr(t[lbl])
labels[str(t['id'])] = (a['id'],label_string)

View File

@@ -306,10 +306,10 @@ class ir_values(osv.osv):
ORDER BY v.user_id, u.company_id"""
params = ('default', model, uid, uid)
if condition:
- query = query % 'AND v.key2 = %s'
+ query %= 'AND v.key2 = %s'
params += (condition[:200],)
else:
- query = query % 'AND v.key2 is NULL'
+ query %= 'AND v.key2 is NULL'
cr.execute(query, params)
# keep only the highest priority default for each field
@@ -416,7 +416,7 @@ class ir_values(osv.osv):
continue
# keep only the first action registered for each action name
results[action['name']] = (action['id'], action['name'], action_def)
- except except_orm, e:
+ except except_orm:
continue
return sorted(results.values())

View File

@@ -75,8 +75,10 @@ def graph_get(cr, graph, wkf_ids, nested, workitem, processed_subflows):
for t in transitions:
if not t['act_to'] in activities:
continue
- args = {}
- args['label'] = str(t['condition']).replace(' or ', '\\nor ').replace(' and ', '\\nand ')
+ args = {
+ 'label': str(t['condition']).replace(' or ', '\\nor ')
+ .replace(' and ','\\nand ')
+ }
if t['signal']:
args['label'] += '\\n'+str(t['signal'])
args['style'] = 'bold'
@@ -92,20 +94,19 @@ def graph_get(cr, graph, wkf_ids, nested, workitem, processed_subflows):
activity_from = actfrom[t['act_from']][1].get(t['signal'], actfrom[t['act_from']][0])
activity_to = actto[t['act_to']][1].get(t['signal'], actto[t['act_to']][0])
graph.add_edge(pydot.Edge( str(activity_from) ,str(activity_to), fontsize='10', **args))
- nodes = cr.dictfetchall()
cr.execute('select * from wkf_activity where flow_start=True and wkf_id in ('+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
start = cr.fetchone()[0]
cr.execute("select 'subflow.'||name,id from wkf_activity where flow_stop=True and wkf_id in ("+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
stop = cr.fetchall()
- if (stop):
+ if stop:
stop = (stop[0][1], dict(stop))
else:
stop = ("stop",{})
- return ((start,{}),stop)
+ return (start, {}), stop
def graph_instance_get(cr, graph, inst_id, nested=False):
- workitems = {}
cr.execute('select wkf_id from wkf_instance where id=%s', (inst_id,))
inst = cr.fetchall()
@@ -167,7 +168,7 @@ showpage'''
inst_id = inst_id[0]
graph_instance_get(cr, graph, inst_id, data.get('nested', False))
ps_string = graph.create(prog='dot', format='ps')
- except Exception, e:
+ except Exception:
_logger.exception('Exception in call:')
# string is in PS, like the success message would have been
ps_string = '''%PS-Adobe-3.0
@@ -204,13 +205,13 @@ class report_graph(report.interface.report_int):
def result(self):
if self.obj.is_done():
- return (True, self.obj.get(), 'pdf')
+ return True, self.obj.get(), 'pdf'
else:
- return (False, False, False)
+ return False, False, False
def create(self, cr, uid, ids, data, context=None):
self.obj = report_graph_instance(cr, uid, ids, data)
- return (self.obj.get(), 'pdf')
+ return self.obj.get(), 'pdf'
report_graph('report.workflow.instance.graph', 'ir.workflow')

View File

@@ -3,16 +3,16 @@
<template pageSize="(595.0,842.0)" title="Test" author="Martin Simon" allowSplitting="20">
<pageTemplate id="first">
<frame id="first" x1="42.0" y1="42.0" width="511" height="758"/>
<header>
<pageGraphics>
<setFont name="Helvetica-Bold" size="9"/>
<drawString x="1.0cm" y="28.1cm">[[ company.name ]]</drawString>
<drawRightString x="20cm" y="28.1cm"> Reference Guide </drawRightString>
<lineMode width="0.7"/>
<stroke color="black"/>
<lines>1cm 28cm 20cm 28cm</lines>
</pageGraphics>
</header>
</pageTemplate>
</template>
@@ -236,7 +236,7 @@
<tr>
<td>
<para style="terp_default_9">[[ repeatIn(objdoc2(object.model) or [], 'sline') ]]</para>
<para style="terp_default_9"> [[ sline ]] </para>
</td>
</tr>
</blockTable>

View File

@@ -1,31 +0,0 @@
- <?xml version="1.0" encoding="utf-8"?>
- <openerp>
- <data>
- <record id="view_base_module_scan" model="ir.ui.view">
- <field name="name">Module Scan</field>
- <field name="model">base.module.scan</field>
- <field name="arch" type="xml">
- <form string="Scan for new modules" version="7.0">
- <label string="This function will check if you installed new modules in the 'addons' path of your server installation."/>
- <footer>
- <button name="watch_dir" string="Check new modules" type="object" class="oe_highlight"/>
- or
- <button string="Cancel" class="oe_link" special="cancel" />
- </footer>
- </form>
- </field>
- </record>
- <record id="action_view_base_module_scan" model="ir.actions.act_window">
- <field name="name">Module Scan</field>
- <field name="type">ir.actions.act_window</field>
- <field name="res_model">base.module.scan</field>
- <field name="view_type">form</field>
- <field name="view_mode">form</field>
- <field name="target">new</field>
- </record>
- </data>
- </openerp>

View File

@@ -308,7 +308,7 @@ class res_config_installer(osv.osv_memory):
hooks_results = set()
for module in base:
- hook = getattr(self, '_if_%s'%(module), None)
+ hook = getattr(self, '_if_%s'% module, None)
if hook:
hooks_results.update(hook(cr, uid, ids, context=None) or set())

View File

@@ -1186,7 +1186,7 @@
<record id="us" model="res.country">
<field name="name">United States</field>
<field name="code">us</field>
- <field name="address_format" eval="'%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s'" />
+ <field name="address_format" eval="'%(street)s\n%(street2)s\n%(city)s, %(state_code)s %(zip)s\n%(country_name)s'" />
<field name="currency_id" ref="USD"/>
</record>
<record id="uy" model="res.country">

View File

@@ -99,7 +99,7 @@ class res_currency(osv.osv):
res = super(res_currency, self).read(cr, user, ids, fields, context, load)
currency_rate_obj = self.pool.get('res.currency.rate')
values = res
- if not isinstance(values, (list)):
+ if not isinstance(values, list):
values = [values]
for r in values:
if r.__contains__('rate_ids'):
@@ -217,7 +217,7 @@ class res_currency(osv.osv):
if round:
return self.round(cr, uid, to_currency, from_amount * rate)
else:
- return (from_amount * rate)
+ return from_amount * rate
res_currency()

View File

@@ -168,7 +168,7 @@ class lang(osv.osv):
thousands_sep = lang_obj.thousands_sep or conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
decimal_point = lang_obj.decimal_point
grouping = lang_obj.grouping
- return (grouping, thousands_sep, decimal_point)
+ return grouping, thousands_sep, decimal_point
def write(self, cr, uid, ids, vals, context=None):
for lang_id in ids :
@@ -231,7 +231,7 @@ lang()
def original_group(s, grouping, thousands_sep=''):
if not grouping:
- return (s, 0)
+ return s, 0
result = ""
seps = 0

View File

@@ -304,7 +304,7 @@ class res_partner(osv.osv, format_address):
if default is None:
default = {}
name = self.read(cr, uid, [id], ['name'], context)[0]['name']
- default.update({'name': _('%s (copy)') % (name)})
+ default.update({'name': _('%s (copy)') % name})
return super(res_partner, self).copy(cr, uid, id, default, context)
def onchange_type(self, cr, uid, ids, is_company, context=None):
@@ -514,7 +514,7 @@ class res_partner(osv.osv, format_address):
def view_header_get(self, cr, uid, view_id, view_type, context):
res = super(res_partner, self).view_header_get(cr, uid, view_id, view_type, context)
if res: return res
- if (not context.get('category_id', False)):
+ if not context.get('category_id', False):
return False
return _('Partners: ')+self.pool.get('res.partner.category').browse(cr, uid, context['category_id'], context).name

View File

@@ -59,7 +59,7 @@ class res_request(osv.osv):
ids = map(lambda x:x[0], cr.fetchall())
cr.execute('select id from res_request where act_from=%s and (act_to<>%s) and (trigger_date<=%s or trigger_date is null) and active=True and state != %s', (uid,uid,time.strftime('%Y-%m-%d'), 'closed'))
ids2 = map(lambda x:x[0], cr.fetchall())
- return (ids, ids2)
+ return ids, ids2
_columns = {
'create_date': fields.datetime('Created Date', readonly=True),

View File

@@ -27,6 +27,7 @@ import os
import time
import base64
import socket
+ import string
admin_passwd = 'admin'
waittime = 10
@@ -64,10 +65,10 @@ def execute(connector, method, *args):
except socket.error,e:
if e.args[0] == 111:
if wait_count > wait_limit:
- print "Server is taking too long to start, it has exceeded the maximum limit of %d seconds."%(wait_limit)
+ print "Server is taking too long to start, it has exceeded the maximum limit of %d seconds." % wait_limit
clean()
sys.exit(1)
- print 'Please wait %d sec to start server....'%(waittime)
+ print 'Please wait %d sec to start server....' % waittime
wait_count += 1
time.sleep(waittime)
res = execute(connector, method, *args)
@@ -133,7 +134,7 @@ def check_quality(uri, user, pwd, dbname, modules, quality_logs):
detail_html +='''<div id=\"%s\"><h3>%s (Score : %s)</h3><font color=red><h5>%s</h5></font>%s</div>'''%(test.replace(' ', '-'), test, score, msg, detail.get('detail', ''))
test_detail[test] = (score,msg,detail.get('detail',''))
html += "</ul>"
- html += "%s"%(detail_html)
+ html += "%s"% detail_html
html += "</div></body></html>"
if not os.path.isdir(quality_logs):
os.mkdir(quality_logs)
@@ -302,13 +303,11 @@ options = {
'port' : opt.port or 8069,
'netport':opt.netport or 8070,
'database': opt.db_name or 'terp',
- 'modules' : opt.modules or [],
+ 'modules' : map(string.strip, opt.modules.split(',')) if opt.modules else [],
'login' : opt.login or 'admin',
'pwd' : opt.pwd or '',
'extra-addons':opt.extra_addons or []
}
- options['modules'] = opt.modules and map(lambda m: m.strip(), opt.modules.split(',')) or []
# Hint:i18n-import=purchase:ar_AR.po+sale:fr_FR.po,nl_BE.po
if opt.translate_in:
translate = opt.translate_in

View File

@@ -110,7 +110,7 @@ def get_encodings(hint_encoding='utf-8'):
# some defaults (also taking care of pure ASCII)
for charset in ['utf8','latin1']:
- if not (hint_encoding) or (charset.lower() != hint_encoding.lower()):
+ if not hint_encoding or (charset.lower() != hint_encoding.lower()):
yield charset
from locale import getpreferredencoding
@@ -129,7 +129,7 @@ def ustr(value, hint_encoding='utf-8', errors='strict'):
:param: value: the value to convert
:param: hint_encoding: an optional encoding that was detecte
upstream and should be tried first to decode ``value``.
- :param str error: optional `errors` flag to pass to the unicode
+ :param str errors: optional `errors` flag to pass to the unicode
built-in to indicate how illegal character values should be
treated when converting a string: 'strict', 'ignore' or 'replace'
(see ``unicode()`` constructor).

View File

@@ -332,33 +332,31 @@ def generate_table_alias(src_table_alias, joined_tables=[]):
- src_model='res_users', join_tables=[(res.partner, 'parent_id')]
alias = ('res_users__parent_id', '"res_partner" as "res_users__parent_id"')
- :param model src_model: model source of the alias
- :param list join_tables: list of tuples
+ :param model src_table_alias: model source of the alias
+ :param list joined_tables: list of tuples
(dst_model, link_field)
:return tuple: (table_alias, alias statement for from clause with quotes added)
"""
alias = src_table_alias
if not joined_tables:
- return ('%s' % alias, '%s' % _quote(alias))
+ return '%s' % alias, '%s' % _quote(alias)
for link in joined_tables:
alias += '__' + link[1]
- assert len(alias) < 64, 'Table alias name %s is longer than the 64 characters size accepted by default in postgresql.' % (alias)
- return ('%s' % alias, '%s as %s' % (_quote(joined_tables[-1][0]), _quote(alias)))
+ assert len(alias) < 64, 'Table alias name %s is longer than the 64 characters size accepted by default in postgresql.' % alias
+ return '%s' % alias, '%s as %s' % (_quote(joined_tables[-1][0]), _quote(alias))
def get_alias_from_query(from_query):
""" :param string from_query: is something like :
- '"res_partner"' OR
- '"res_partner" as "res_users__partner_id"''
- :param tuple result: (unquoted table name, unquoted alias)
- i.e. (res_partners, res_partner) OR (res_partner, res_users__partner_id)
"""
from_splitted = from_query.split(' as ')
if len(from_splitted) > 1:
- return (from_splitted[0].replace('"', ''), from_splitted[1].replace('"', ''))
+ return from_splitted[0].replace('"', ''), from_splitted[1].replace('"', '')
else:
- return (from_splitted[0].replace('"', ''), from_splitted[0].replace('"', ''))
+ return from_splitted[0].replace('"', ''), from_splitted[0].replace('"', '')
def normalize_leaf(element):
@@ -377,7 +375,7 @@ def normalize_leaf(element):
if isinstance(right, (list, tuple)) and operator in ('=', '!='):
_logger.warning("The domain term '%s' should use the 'in' or 'not in' operator." % ((left, original, right),))
operator = 'in' if operator == '=' else 'not in'
- return (left, operator, right)
+ return left, operator, right
def is_operator(element):
@@ -497,11 +495,19 @@ class ExtendedLeaf(object):
adding joins
:attr list join_context: list of join contexts. This is a list of
tuples like ``(lhs, table, lhs_col, col, link)``
- :param obj lhs: source (left hand) model
- :param obj model: destination (right hand) model
- :param string lhs_col: source model column for join condition
- :param string col: destination model column for join condition
- :param link: link column between source and destination model
+ where
+ lhs
+ source (left hand) model
+ model
+ destination (right hand) model
+ lhs_col
+ source model column for join condition
+ col
+ destination model column for join condition
+ link
+ link column between source and destination model
that is not necessarily (but generally) a real column used
in the condition (i.e. in many2one); this link is used to
compute aliases
@@ -829,7 +835,7 @@ class expression(object):
push(create_substitution_leaf(leaf, AND_OPERATOR, relational_model))
elif len(field_path) > 1 and field._auto_join:
- raise NotImplementedError('_auto_join attribute not supported on many2many field %s' % (left))
+ raise NotImplementedError('_auto_join attribute not supported on many2many field %s' % left)
elif len(field_path) > 1 and field._type == 'many2one':
right_ids = relational_model.search(cr, uid, [(field_path[1], operator, right)], context=context)
@@ -989,7 +995,7 @@ class expression(object):
res_ids = [x[0] for x in relational_model.name_search(cr, uid, right, [], operator, limit=None, context=c)]
if operator in NEGATIVE_TERM_OPERATORS:
res_ids.append(False) # TODO this should not be appended if False was in 'right'
- return (left, 'in', res_ids)
+ return left, 'in', res_ids
# resolve string-based m2o criterion into IDs
if isinstance(right, basestring) or \
right and isinstance(right, (tuple, list)) and all(isinstance(item, basestring) for item in right):
@@ -1098,7 +1104,7 @@ class expression(object):
query = '(%s."%s" IS %s)' % (table_alias, left, r)
params = []
elif isinstance(right, (list, tuple)):
- params = right[:]
+ params = list(right)
check_nulls = False
for i in range(len(params))[::-1]:
if params[i] == False:
@@ -1140,8 +1146,8 @@ class expression(object):
query = '%s."%s" IS NOT NULL' % (table_alias, left)
params = []
- elif (operator == '=?'):
- if (right is False or right is None):
+ elif operator == '=?':
+ if right is False or right is None:
# '=?' is a short-circuit that makes the term TRUE if right is None or False
query = 'TRUE'
params = []
@@ -1187,7 +1193,7 @@ class expression(object):
if isinstance(params, basestring):
params = [params]
- return (query, params)
+ return query, params
def to_sql(self):
stack = []
@@ -1213,6 +1219,6 @@ class expression(object):
if joins:
query = '(%s) AND %s' % (joins, query)
- return (query, tools.flatten(params))
+ return query, tools.flatten(params)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

View File

@@ -668,7 +668,7 @@ class many2many(_column):
col1 = '%s_id' % source_model._table
if not col2:
col2 = '%s_id' % dest_model._table
- return (tbl, col1, col2)
+ return tbl, col1, col2
def _get_query_and_where_params(self, cr, model, ids, values, where_params):
""" Extracted from ``get`` to facilitate fine-tuning of the generated
@@ -1304,7 +1304,7 @@ class sparse(function):
def __init__(self, serialization_field, **kwargs):
self.serialization_field = serialization_field
- return super(sparse, self).__init__(self._fnct_read, fnct_inv=self._fnct_write, multi='__sparse_multi', **kwargs)
+ super(sparse, self).__init__(self._fnct_read, fnct_inv=self._fnct_write, multi='__sparse_multi', **kwargs)

View File

@@ -454,7 +454,7 @@ class browse_record(object):
new_data[field_name] = browse_null()
elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
new_data[field_name] = self._list_class([browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj), self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process) for id in result_line[field_name]], self._context)
- elif field_column._type in ('reference'):
+ elif field_column._type == 'reference':
if result_line[field_name]:
if isinstance(result_line[field_name], browse_record):
new_data[field_name] = result_line[field_name]
@@ -1742,7 +1742,7 @@ class BaseModel(object):
views = {}
xml = "<form>"
for f in node:
- if f.tag in ('field'):
+ if f.tag == 'field':
xml += etree.tostring(f, encoding="utf-8")
xml += "</form>"
new_xml = etree.fromstring(encode(xml))
@@ -2011,7 +2011,7 @@ class BaseModel(object):
view = etree.Element('calendar', string=self._description)
etree.SubElement(view, 'field', self._rec_name_fallback(cr, user, context))
- if (self._date_name not in self._columns):
+ if self._date_name not in self._columns:
date_found = False
for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
if dt in self._columns:
@@ -2032,7 +2032,7 @@ class BaseModel(object):
self._columns, 'date_delay'):
raise except_orm(
_('Invalid Object Architecture!'),
- _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % (self._name)))
+ _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
return view
@@ -2412,7 +2412,7 @@ class BaseModel(object):
:rtype: tuple
:return: the :meth:`~.name_get` pair value for the newly-created record.
"""
- rec_id = self.create(cr, uid, {self._rec_name: name}, context);
+ rec_id = self.create(cr, uid, {self._rec_name: name}, context)
return self.name_get(cr, uid, [rec_id], context)[0]
# private implementation of name_search, allows passing a dedicated user for the name_get part to
@@ -2676,7 +2676,7 @@ class BaseModel(object):
groupby = group_by
for r in cr.dictfetchall():
for fld, val in r.items():
- if val == None: r[fld] = False
+ if val is None: r[fld] = False
alldata[r['id']] = r
del r['id']
@@ -3098,7 +3098,7 @@ class BaseModel(object):
else:
default = self._defaults[k]
- if (default is not None):
+ if default is not None:
ss = self._columns[k]._symbol_set
query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (self._table, k, ss[0], k)
cr.execute(query, (ss[1](default),))
@@ -3177,7 +3177,7 @@ class BaseModel(object):
# and add constraints if needed
if isinstance(f, fields.many2one):
if not self.pool.get(f._obj):
- raise except_orm('Programming Error', ('There is no reference available for %s') % (f._obj,))
+ raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
dest_model = self.pool.get(f._obj)
ref = dest_model._table
# ir_actions is inherited so foreign key doesn't work on it
@@ -3304,7 +3304,7 @@ class BaseModel(object):
# TODO the condition could use fields_get_keys().
if f._fields_id not in other._columns.keys():
if f._fields_id not in other._inherit_fields.keys():
- raise except_orm('Programming Error', ("There is no reference field '%s' found for '%s'") % (f._fields_id, f._obj,))
+ raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
def _m2m_raise_or_create_relation(self, cr, f):
m2m_tbl, col1, col2 = f._sql_names(self)
@@ -3312,7 +3312,7 @@ class BaseModel(object):
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
if not cr.dictfetchall():
if not self.pool.get(f._obj):
- raise except_orm('Programming Error', ('Many2Many destination model does not exist: `%s`') % (f._obj,))
+ raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
dest_model = self.pool.get(f._obj)
ref = dest_model._table
cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
@@ -3486,7 +3486,7 @@ class BaseModel(object):
:param cr: database cursor
:param user: current user id
- :param fields: list of fields
+ :param allfields: list of fields
:param context: context arguments, like lang, time zone
:return: dictionary of field dictionaries, each one describing a field of the business object
:raise AccessError: * if user has no create/write rights on the requested object
@@ -3615,7 +3615,7 @@ class BaseModel(object):
context = {}
if not ids:
return []
- if fields_to_read == None:
+ if fields_to_read is None:
fields_to_read = self._columns.keys()
# Construct a clause for the security rules.
@@ -4705,7 +4705,7 @@ class BaseModel(object):
new_tables = []
for table in added_tables:
# table is just a table name -> switch to the full alias
- if table == '"%s"' % (parent_table):
+ if table == '"%s"' % parent_table:
new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
# table is already a full statement -> replace reference to the table to its alias, is correct with the way aliases are generated
else:
@@ -4871,7 +4871,7 @@ class BaseModel(object):
Copy given record's data with all its fields values
:param cr: database cursor
- :param user: current user id
+ :param uid: current user id
:param id: id of the record to copy
:param default: field values to override in the original values of the copied record
:type default: dictionary
@@ -5030,7 +5030,7 @@ class BaseModel(object):
"""
if type(ids) in (int, long):
ids = [ids]
- query = 'SELECT id FROM "%s"' % (self._table)
+ query = 'SELECT id FROM "%s"' % self._table
cr.execute(query + "WHERE ID IN %s", (tuple(ids),))
return [x[0] for x in cr.fetchall()]

View File

@@ -151,7 +151,7 @@ class Query(object):
query_from = add_joins_for_table(table_alias, query_from)
query_from += ','
query_from = query_from[:-1] # drop last comma
- return (query_from, " AND ".join(self.where_clause), self.where_clause_params)
+ return query_from, " AND ".join(self.where_clause), self.where_clause_params
def __str__(self):
return '<osv.Query: "SELECT ... FROM %s WHERE %s" with params: %r>' % self.get_sql()

View File

@@ -96,7 +96,7 @@ class report_custom(report_int):
else:
# Process group_by data first
key = []
- if group_by != None and fields[group_by] != None:
+ if group_by is not None and fields[group_by] is not None:
if fields[group_by][0] in levels.keys():
key.append(fields[group_by][0])
for l in levels.keys():
@@ -144,10 +144,11 @@ class report_custom(report_int):
parent_field = self.pool.get('ir.model.fields').read(cr, uid, [report['field_parent'][0]], ['model'])
model_name = self.pool.get('ir.model').read(cr, uid, [report['model_id'][0]], ['model'], context=context)[0]['model']
- fct = {}
- fct['id'] = lambda x : x
- fct['gety'] = lambda x: x.split('-')[0]
- fct['in'] = lambda x: x.split(',')
+ fct = {
+ 'id': lambda x: x,
+ 'gety': lambda x: x.split('-')[0],
+ 'in': lambda x: x.split(',')
+ }
new_fields = []
new_cond = []
for f in fields:
@@ -212,7 +213,7 @@ class report_custom(report_int):
new_res = []
prev = None
- if groupby != None:
+ if groupby is not None:
res_dic = {}
for line in results:
if not line[groupby] and prev in res_dic:
@@ -272,7 +273,7 @@ class report_custom(report_int):
res = self._create_bars(cr,uid, ids, report, fields, results2, context)
elif report['type']=='line':
res = self._create_lines(cr,uid, ids, report, fields, results2, context)
- return (self.obj.get(), 'pdf')
+ return self.obj.get(), 'pdf'
def _create_tree(self, uid, ids, report, fields, level, results, context):
pageSize=common.pageSize.get(report['print_format'], [210.0,297.0])
@@ -322,7 +323,7 @@ class report_custom(report_int):
col.attrib.update(para='yes',
tree='yes',
space=str(3*shift)+'mm')
- if line[f] != None:
+ if line[f] is not None:
col.text = prefix+str(line[f]) or ''
else:
col.text = '/'
@@ -350,15 +351,17 @@ class report_custom(report_int):
x_axis = axis.X(label = fields[0]['name'], format="/a-30{}%s"),
y_axis = axis.Y(label = ', '.join(map(lambda x : x['name'], fields[1:]))))
- process_date = {}
- process_date['D'] = lambda x : reduce(lambda xx,yy : xx+'-'+yy,x.split('-')[1:3])
- process_date['M'] = lambda x : x.split('-')[1]
- process_date['Y'] = lambda x : x.split('-')[0]
+ process_date = {
+ 'D': lambda x: reduce(lambda xx, yy: xx + '-' + yy, x.split('-')[1:3]),
+ 'M': lambda x: x.split('-')[1],
+ 'Y': lambda x: x.split('-')[0]
+ }
- order_date = {}
- order_date['D'] = lambda x : time.mktime((2005,int(x.split('-')[0]), int(x.split('-')[1]),0,0,0,0,0,0))
- order_date['M'] = lambda x : x
- order_date['Y'] = lambda x : x
+ order_date = {
+ 'D': lambda x: time.mktime((2005, int(x.split('-')[0]), int(x.split('-')[1]), 0, 0, 0, 0, 0, 0)),
+ 'M': lambda x: x,
+ 'Y': lambda x: x
+ }
abscissa = []
@@ -381,7 +384,7 @@ class report_custom(report_int):
# plots are usually displayed year by year
# so we do so if the first field is a date
data_by_year = {}
- if date_idx != None:
+ if date_idx is not None:
for r in results:
key = process_date['Y'](r[date_idx])
if key not in data_by_year:
@@ -447,15 +450,17 @@ class report_custom(report_int):
can.show(80,380,'/16/H'+report['title'])
- process_date = {}
- process_date['D'] = lambda x : reduce(lambda xx,yy : xx+'-'+yy,x.split('-')[1:3])
- process_date['M'] = lambda x : x.split('-')[1]
- process_date['Y'] = lambda x : x.split('-')[0]
+ process_date = {
+ 'D': lambda x: reduce(lambda xx, yy: xx + '-' + yy, x.split('-')[1:3]),
+ 'M': lambda x: x.split('-')[1],
+ 'Y': lambda x: x.split('-')[0]
+ }
- order_date = {}
- order_date['D'] = lambda x : time.mktime((2005,int(x.split('-')[0]), int(x.split('-')[1]),0,0,0,0,0,0))
- order_date['M'] = lambda x : x
- order_date['Y'] = lambda x : x
+ order_date = {
+ 'D': lambda x: time.mktime((2005, int(x.split('-')[0]), int(x.split('-')[1]), 0, 0, 0, 0, 0, 0)),
+ 'M': lambda x: x,
+ 'Y': lambda x: x
+ }
ar = area.T(size=(350,350),
x_axis = axis.X(label = fields[0]['name'], format="/a-30{}%s"),
y_axis = axis.Y(label = ', '.join(map(lambda x : x['name'], fields[1:]))))
@@ -480,7 +485,7 @@ class report_custom(report_int):
# plot are usually displayed year by year
# so we do so if the first field is a date
data_by_year = {}
- if date_idx != None:
+ if date_idx is not None:
for r in results:
key = process_date['Y'](r[date_idx])
if key not in data_by_year:
@@ -602,7 +607,7 @@ class report_custom(report_int):
node_line = etree.SubElement(lines, 'row')
for f in range(len(fields)):
col = etree.SubElement(node_line, 'col', tree='no')
- if line[f] != None:
+ if line[f] is not None:
col.text = line[f] or ''
else:
col.text = '/'

View File

@@ -52,7 +52,7 @@ def _1000_to_text(chiffre):
d2 = chiffre/100
if d2>0 and d:
return centaine[d2]+' '+d
- elif d2>1 and not(d):
+ elif d2>1 and not d:
return centaine[d2]+'s'
else:
return centaine[d2] or d

View File

@ -55,13 +55,12 @@ class report_int(netsvc.Service):
def create(self, cr, uid, ids, datas, context=None): def create(self, cr, uid, ids, datas, context=None):
return False return False
"""
Class to automatically build a document using the transformation process:
XML -> DATAS -> RML -> PDF
-> HTML
using a XSL:RML transformation
"""
class report_rml(report_int): class report_rml(report_int):
"""
Automatically builds a document using the transformation process:
XML -> DATAS -> RML -> PDF -> HTML
using a XSL:RML transformation
"""
def __init__(self, name, table, tmpl, xsl): def __init__(self, name, table, tmpl, xsl):
super(report_rml, self).__init__(name) super(report_rml, self).__init__(name)
self.table = table self.table = table
@ -85,7 +84,7 @@ class report_rml(report_int):
xml = tools.ustr(xml).encode('utf8') xml = tools.ustr(xml).encode('utf8')
report_type = datas.get('report_type', 'pdf') report_type = datas.get('report_type', 'pdf')
if report_type == 'raw': if report_type == 'raw':
return (xml,report_type) return xml, report_type
rml = self.create_rml(cr, xml, uid, context) rml = self.create_rml(cr, xml, uid, context)
pool = pooler.get_pool(cr.dbname) pool = pooler.get_pool(cr.dbname)
ir_actions_report_xml_obj = pool.get('ir.actions.report.xml') ir_actions_report_xml_obj = pool.get('ir.actions.report.xml')
@ -93,7 +92,7 @@ class report_rml(report_int):
self.title = report_xml_ids and ir_actions_report_xml_obj.browse(cr,uid,report_xml_ids)[0].name or 'OpenERP Report' self.title = report_xml_ids and ir_actions_report_xml_obj.browse(cr,uid,report_xml_ids)[0].name or 'OpenERP Report'
create_doc = self.generators[report_type] create_doc = self.generators[report_type]
pdf = create_doc(rml, title=self.title) pdf = create_doc(rml, title=self.title)
return (pdf, report_type) return pdf, report_type
def create_xml(self, cr, uid, ids, datas, context=None): def create_xml(self, cr, uid, ids, datas, context=None):
if not context: if not context:
@ -244,10 +243,10 @@ class report_rml(report_int):
return obj.get() return obj.get()
def _get_path(self): def _get_path(self):
ret = [] return [
ret.append(self.tmpl.replace(os.path.sep, '/').rsplit('/',1)[0]) # Same dir as the report rml self.tmpl.replace(os.path.sep, '/').rsplit('/', 1)[0],
ret.append('addons') 'addons',
ret.append(tools.config['root_path']) tools.config['root_path']
return ret ]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
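For context, a report_rml instance is registered under a service name and later rendered through create(). A hypothetical registration sketch follows; only the constructor signature comes from the hunk above, while the import path, service name and template paths are assumptions for illustration:

from openerp.report.interface import report_rml  # assumed import path

report_rml(
    'report.res.partner.demo',              # service name (hypothetical)
    'res.partner',                          # model the ids refer to (hypothetical)
    'addons/demo/report/partner_data.xml',  # XML data template (hypothetical)
    'addons/demo/report/partner.xsl')       # XSL:RML stylesheet (hypothetical)

# create() returns the raw XML when report_type == 'raw'; otherwise it returns
# a (document, report_type) pair, e.g. a rendered PDF.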

View File

@ -65,7 +65,7 @@ class report_printscreen_list(report_int):
fields_order = self._parse_string(result['arch']) fields_order = self._parse_string(result['arch'])
rows = model.read(cr, uid, datas['ids'], result['fields'].keys() ) rows = model.read(cr, uid, datas['ids'], result['fields'].keys() )
self._create_table(uid, datas['ids'], result['fields'], fields_order, rows, context, model._description) self._create_table(uid, datas['ids'], result['fields'], fields_order, rows, context, model._description)
return (self.obj.get(), 'pdf') return self.obj.get(), 'pdf'
def _create_table(self, uid, ids, fields, fields_order, results, context, title=''): def _create_table(self, uid, ids, fields, fields_order, results, context, title=''):
@ -119,7 +119,7 @@ class report_printscreen_list(report_int):
precision=(('digits' in fields[f]) and fields[f]['digits'][1]) or 2 precision=(('digits' in fields[f]) and fields[f]['digits'][1]) or 2
line[f]=round(line[f],precision) line[f]=round(line[f],precision)
col = etree.SubElement(node_line, 'col', tree='no') col = etree.SubElement(node_line, 'col', tree='no')
if line[f] != None: if line[f] is not None:
col.text = tools.ustr(line[f] or '') col.text = tools.ustr(line[f] or '')
else: else:
col.text = '/' col.text = '/'

View File

@ -115,7 +115,7 @@ class report_printscreen_list(report_int):
rows_new += [elem for elem in rows if elem['id'] == id] rows_new += [elem for elem in rows if elem['id'] == id]
rows = rows_new rows = rows_new
res = self._create_table(uid, datas['ids'], result['fields'], fields_order, rows, context, model_desc) res = self._create_table(uid, datas['ids'], result['fields'], fields_order, rows, context, model_desc)
return (self.obj.get(), 'pdf') return self.obj.get(), 'pdf'
def _create_table(self, uid, ids, fields, fields_order, results, context, title=''): def _create_table(self, uid, ids, fields, fields_order, results, context, title=''):
@ -147,7 +147,7 @@ class report_printscreen_list(report_int):
for i in range(0, len(fields_order)): for i in range(0, len(fields_order)):
temp.append(0) temp.append(0)
tsum.append(0) tsum.append(0)
ince = -1; ince = -1
for f in fields_order: for f in fields_order:
s = 0 s = 0
ince += 1 ince += 1
@ -230,14 +230,14 @@ class report_printscreen_list(report_int):
col.text = line[f] = 'Undefined' col.text = line[f] = 'Undefined'
col.set('tree', 'undefined') col.set('tree', 'undefined')
if line[f] != None: if line[f] is not None:
col.text = tools.ustr(line[f] or '') col.text = tools.ustr(line[f] or '')
if float_flag: if float_flag:
col.set('tree','float') col.set('tree','float')
if line.get('__no_leaf') and temp[count] == 1 and f != 'id' and not line['__context']['group_by']: if line.get('__no_leaf') and temp[count] == 1 and f != 'id' and not line['__context']['group_by']:
tsum[count] = float(tsum[count]) + float(line[f]) tsum[count] = float(tsum[count]) + float(line[f])
if not line.get('__group') and f != 'id' and temp[count] == 1: if not line.get('__group') and f != 'id' and temp[count] == 1:
tsum[count] = float(tsum[count]) + float(line[f]); tsum[count] = float(tsum[count]) + float(line[f])
else: else:
col.text = '/' col.text = '/'
@ -245,7 +245,7 @@ class report_printscreen_list(report_int):
for f in range(0, len(fields_order)): for f in range(0, len(fields_order)):
col = etree.SubElement(node_line, 'col', para='group', tree='no') col = etree.SubElement(node_line, 'col', para='group', tree='no')
col.set('tree', 'float') col.set('tree', 'float')
if tsum[f] != None: if tsum[f] is not None:
if tsum[f] != 0.0: if tsum[f] != 0.0:
digits = fields[fields_order[f]].get('digits', (16, 2)) digits = fields[fields_order[f]].get('digits', (16, 2))
prec = '%%.%sf' % (digits[1], ) prec = '%%.%sf' % (digits[1], )

View File

@ -106,7 +106,7 @@ class FlateDecode(object):
if predictor != 1: if predictor != 1:
columns = decodeParms["/Columns"] columns = decodeParms["/Columns"]
# PNG prediction: # PNG prediction:
if predictor >= 10 and predictor <= 15: if 10 <= predictor <= 15:
output = StringIO() output = StringIO()
# PNG prediction can vary from row to row # PNG prediction can vary from row to row
rowlength = columns + 1 rowlength = columns + 1
@ -191,7 +191,7 @@ class ASCII85Decode(object):
break break
else: else:
c = ord(c) - 33 c = ord(c) - 33
assert c >= 0 and c < 85 assert 0 <= c < 85
group += [ c ] group += [ c ]
if len(group) >= 5: if len(group) >= 5:
b = group[0] * (85**4) + \ b = group[0] * (85**4) + \

View File

@ -81,7 +81,7 @@ def readObject(stream, pdf):
return NumberObject.readFromStream(stream) return NumberObject.readFromStream(stream)
peek = stream.read(20) peek = stream.read(20)
stream.seek(-len(peek), 1) # reset to start stream.seek(-len(peek), 1) # reset to start
if re.match(r"(\d+)\s(\d+)\sR[^a-zA-Z]", peek) != None: if re.match(r"(\d+)\s(\d+)\sR[^a-zA-Z]", peek) is not None:
return IndirectObject.readFromStream(stream, pdf) return IndirectObject.readFromStream(stream, pdf)
else: else:
return NumberObject.readFromStream(stream) return NumberObject.readFromStream(stream)
@ -169,7 +169,7 @@ class IndirectObject(PdfObject):
def __eq__(self, other): def __eq__(self, other):
return ( return (
other != None and other is not None and
isinstance(other, IndirectObject) and isinstance(other, IndirectObject) and
self.idnum == other.idnum and self.idnum == other.idnum and
self.generation == other.generation and self.generation == other.generation and
@ -489,7 +489,7 @@ class DictionaryObject(dict, PdfObject):
# return None if no metadata was found on the document root. # return None if no metadata was found on the document root.
def getXmpMetadata(self): def getXmpMetadata(self):
metadata = self.get("/Metadata", None) metadata = self.get("/Metadata", None)
if metadata == None: if metadata is None:
return None return None
metadata = metadata.getObject() metadata = metadata.getObject()
import xmp import xmp

View File

@ -53,13 +53,7 @@ import utils
from generic import * from generic import *
from utils import readNonWhitespace, readUntilWhitespace, ConvertFunctionsToVirtualList from utils import readNonWhitespace, readUntilWhitespace, ConvertFunctionsToVirtualList
if version_info < ( 2, 4 ): from hashlib import md5
from sets import ImmutableSet as frozenset
if version_info < ( 2, 5 ):
from md5 import md5
else:
from hashlib import md5
## ##
# This class supports writing PDF files out, given pages produced by another # This class supports writing PDF files out, given pages produced by another
@ -197,7 +191,7 @@ class PdfFileWriter(object):
# flag is on. # flag is on.
def encrypt(self, user_pwd, owner_pwd = None, use_128bit = True): def encrypt(self, user_pwd, owner_pwd = None, use_128bit = True):
import time, random import time, random
if owner_pwd == None: if owner_pwd is None:
owner_pwd = user_pwd owner_pwd = user_pwd
if use_128bit: if use_128bit:
V = 2 V = 2
@ -251,7 +245,7 @@ class PdfFileWriter(object):
# copying in a new copy of the page object. # copying in a new copy of the page object.
for objIndex in xrange(len(self._objects)): for objIndex in xrange(len(self._objects)):
obj = self._objects[objIndex] obj = self._objects[objIndex]
if isinstance(obj, PageObject) and obj.indirectRef != None: if isinstance(obj, PageObject) and obj.indirectRef is not None:
data = obj.indirectRef data = obj.indirectRef
if not externalReferenceMap.has_key(data.pdf): if not externalReferenceMap.has_key(data.pdf):
externalReferenceMap[data.pdf] = {} externalReferenceMap[data.pdf] = {}
@ -305,7 +299,7 @@ class PdfFileWriter(object):
trailer.writeToStream(stream, None) trailer.writeToStream(stream, None)
# eof # eof
stream.write("\nstartxref\n%s\n%%%%EOF\n" % (xref_location)) stream.write("\nstartxref\n%s\n%%%%EOF\n" % xref_location)
def _sweepIndirectReferences(self, externMap, data): def _sweepIndirectReferences(self, externMap, data):
if isinstance(data, DictionaryObject): if isinstance(data, DictionaryObject):
@ -340,7 +334,7 @@ class PdfFileWriter(object):
return data return data
else: else:
newobj = externMap.get(data.pdf, {}).get(data.generation, {}).get(data.idnum, None) newobj = externMap.get(data.pdf, {}).get(data.generation, {}).get(data.idnum, None)
if newobj == None: if newobj is None:
newobj = data.pdf.getObject(data) newobj = data.pdf.getObject(data)
self._objects.append(None) # placeholder self._objects.append(None) # placeholder
idnum = len(self._objects) idnum = len(self._objects)
@ -426,7 +420,7 @@ class PdfFileReader(object):
# Stability: Added in v1.0, will exist for all v1.x releases. # Stability: Added in v1.0, will exist for all v1.x releases.
# @return Returns an integer. # @return Returns an integer.
def getNumPages(self): def getNumPages(self):
if self.flattenedPages == None: if self.flattenedPages is None:
self._flatten() self._flatten()
return len(self.flattenedPages) return len(self.flattenedPages)
@ -445,7 +439,7 @@ class PdfFileReader(object):
def getPage(self, pageNumber): def getPage(self, pageNumber):
## ensure that we're not trying to access an encrypted PDF ## ensure that we're not trying to access an encrypted PDF
#assert not self.trailer.has_key("/Encrypt") #assert not self.trailer.has_key("/Encrypt")
if self.flattenedPages == None: if self.flattenedPages is None:
self._flatten() self._flatten()
return self.flattenedPages[pageNumber] return self.flattenedPages[pageNumber]
@ -465,7 +459,7 @@ class PdfFileReader(object):
# @return Returns a dict which maps names to {@link #Destination # @return Returns a dict which maps names to {@link #Destination
# destinations}. # destinations}.
def getNamedDestinations(self, tree=None, retval=None): def getNamedDestinations(self, tree=None, retval=None):
if retval == None: if retval is None:
retval = {} retval = {}
catalog = self.trailer["/Root"] catalog = self.trailer["/Root"]
@ -477,7 +471,7 @@ class PdfFileReader(object):
if names.has_key("/Dests"): if names.has_key("/Dests"):
tree = names['/Dests'] tree = names['/Dests']
if tree == None: if tree is None:
return retval return retval
if tree.has_key("/Kids"): if tree.has_key("/Kids"):
@ -493,7 +487,7 @@ class PdfFileReader(object):
if isinstance(val, DictionaryObject) and val.has_key('/D'): if isinstance(val, DictionaryObject) and val.has_key('/D'):
val = val['/D'] val = val['/D']
dest = self._buildDestination(key, val) dest = self._buildDestination(key, val)
if dest != None: if dest is not None:
retval[key] = dest retval[key] = dest
return retval return retval
@ -511,7 +505,7 @@ class PdfFileReader(object):
# Stability: Added in v1.10, will exist for all future v1.x releases. # Stability: Added in v1.10, will exist for all future v1.x releases.
# @return Returns a nested list of {@link #Destination destinations}. # @return Returns a nested list of {@link #Destination destinations}.
def getOutlines(self, node=None, outlines=None): def getOutlines(self, node=None, outlines=None):
if outlines == None: if outlines is None:
outlines = [] outlines = []
catalog = self.trailer["/Root"] catalog = self.trailer["/Root"]
@ -522,7 +516,7 @@ class PdfFileReader(object):
node = lines["/First"] node = lines["/First"]
self._namedDests = self.getNamedDestinations() self._namedDests = self.getNamedDestinations()
if node == None: if node is None:
return outlines return outlines
# see if there are any more outlines # see if there are any more outlines
@ -588,9 +582,9 @@ class PdfFileReader(object):
NameObject("/Resources"), NameObject("/MediaBox"), NameObject("/Resources"), NameObject("/MediaBox"),
NameObject("/CropBox"), NameObject("/Rotate") NameObject("/CropBox"), NameObject("/Rotate")
) )
if inherit == None: if inherit is None:
inherit = dict() inherit = dict()
if pages == None: if pages is None:
self.flattenedPages = [] self.flattenedPages = []
catalog = self.trailer["/Root"].getObject() catalog = self.trailer["/Root"].getObject()
pages = catalog["/Pages"].getObject() pages = catalog["/Pages"].getObject()
@ -616,7 +610,7 @@ class PdfFileReader(object):
def getObject(self, indirectReference): def getObject(self, indirectReference):
retval = self.resolvedObjects.get(indirectReference.generation, {}).get(indirectReference.idnum, None) retval = self.resolvedObjects.get(indirectReference.generation, {}).get(indirectReference.idnum, None)
if retval != None: if retval is not None:
return retval return retval
if indirectReference.generation == 0 and \ if indirectReference.generation == 0 and \
self.xref_objStm.has_key(indirectReference.idnum): self.xref_objStm.has_key(indirectReference.idnum):
@ -844,7 +838,6 @@ class PdfFileReader(object):
else: else:
# no xref table found at specified location # no xref table found at specified location
assert False assert False
break
def _pairs(self, array): def _pairs(self, array):
i = 0 i = 0
@ -959,10 +952,10 @@ def getRectangle(self, name, defaults):
retval = self.get(name) retval = self.get(name)
if isinstance(retval, RectangleObject): if isinstance(retval, RectangleObject):
return retval return retval
if retval == None: if retval is None:
for d in defaults: for d in defaults:
retval = self.get(d) retval = self.get(d)
if retval != None: if retval is not None:
break break
if isinstance(retval, IndirectObject): if isinstance(retval, IndirectObject):
retval = self.pdf.getObject(retval) retval = self.pdf.getObject(retval)
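The reader-side hunks above all touch pyPdf's PdfFileReader, which this tree bundles. A small usage sketch of the methods involved, assuming the upstream pyPdf package and a placeholder file name:

from pyPdf import PdfFileReader

reader = PdfFileReader(open('example.pdf', 'rb'))  # placeholder path
print(reader.getNumPages())           # flattens the page tree on first use
first_page = reader.getPage(0)
print(reader.getNamedDestinations())  # dict mapping names to Destination objects
print(reader.getOutlines())           # nested list of bookmark Destinations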

View File

@ -78,7 +78,7 @@ class ConvertFunctionsToVirtualList(object):
len_self = len(self) len_self = len(self)
if index < 0: if index < 0:
# support negative indexes # support negative indexes
index = len_self + index index += len_self
if index < 0 or index >= len_self: if index < 0 or index >= len_self:
raise IndexError, "sequence index out of range" raise IndexError, "sequence index out of range"
return self.getFunction(index) return self.getFunction(index)

View File

@ -66,7 +66,7 @@ class XmpInformation(PdfObject):
for desc in self.rdfRoot.getElementsByTagNameNS(RDF_NAMESPACE, "Description"): for desc in self.rdfRoot.getElementsByTagNameNS(RDF_NAMESPACE, "Description"):
if desc.getAttributeNS(RDF_NAMESPACE, "about") == aboutUri: if desc.getAttributeNS(RDF_NAMESPACE, "about") == aboutUri:
attr = desc.getAttributeNodeNS(namespace, name) attr = desc.getAttributeNodeNS(namespace, name)
if attr != None: if attr is not None:
yield attr yield attr
for element in desc.getElementsByTagNameNS(namespace, name): for element in desc.getElementsByTagNameNS(namespace, name):
yield element yield element
@ -187,7 +187,7 @@ class XmpInformation(PdfObject):
else: else:
value = self._getText(element) value = self._getText(element)
break break
if value != None: if value is not None:
value = converter(value) value = converter(value)
ns_cache = self.cache.setdefault(namespace, {}) ns_cache = self.cache.setdefault(namespace, {})
ns_cache[name] = value ns_cache[name] = value
@ -353,5 +353,5 @@ class XmpInformation(PdfObject):
custom_properties = property(custom_properties) custom_properties = property(custom_properties)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

View File

@ -391,7 +391,7 @@ class _rml_doc(object):
list_story.append(story_text) list_story.append(story_text)
del f del f
if template.data: if template.data:
tag = '''<img src = '%s' width=80 height=72/>'''%(template.data) tag = '''<img src = '%s' width=80 height=72/>'''% template.data
else: else:
tag = '' tag = ''
self.result +=''' self.result +='''

View File

@ -28,14 +28,14 @@ regex_t = re.compile('\(([0-9\.]*),([0-9\.]*),([0-9\.]*)\)')
regex_h = re.compile('#([0-9a-zA-Z][0-9a-zA-Z])([0-9a-zA-Z][0-9a-zA-Z])([0-9a-zA-Z][0-9a-zA-Z])') regex_h = re.compile('#([0-9a-zA-Z][0-9a-zA-Z])([0-9a-zA-Z][0-9a-zA-Z])([0-9a-zA-Z][0-9a-zA-Z])')
def get(col_str): def get(col_str):
if col_str == None: if col_str is None:
col_str = '' col_str = ''
global allcols global allcols
if col_str in allcols.keys(): if col_str in allcols.keys():
return allcols[col_str] return allcols[col_str]
res = regex_t.search(col_str, 0) res = regex_t.search(col_str, 0)
if res: if res:
return (float(res.group(1)),float(res.group(2)),float(res.group(3))) return float(res.group(1)), float(res.group(2)), float(res.group(3))
res = regex_h.search(col_str, 0) res = regex_h.search(col_str, 0)
if res: if res:
return tuple([ float(int(res.group(i),16))/255 for i in range(1,4)]) return tuple([ float(int(res.group(i),16))/255 for i in range(1,4)])

View File

@ -96,7 +96,7 @@ class NumberedCanvas(canvas.Canvas):
key=self._pageCounter key=self._pageCounter
if not self.pages.get(key,False): if not self.pages.get(key,False):
while not self.pages.get(key,False): while not self.pages.get(key,False):
key = key + 1 key += 1
self.setFont("Helvetica", 8) self.setFont("Helvetica", 8)
self.drawRightString((self._pagesize[0]-30), (self._pagesize[1]-40), self.drawRightString((self._pagesize[0]-30), (self._pagesize[1]-40),
" %(this)i / %(total)i" % { " %(this)i / %(total)i" % {
@ -123,7 +123,7 @@ class PageCount(platypus.Flowable):
self.story_count = story_count self.story_count = story_count
def draw(self): def draw(self):
self.canv.beginForm("pageCount%d" % (self.story_count)) self.canv.beginForm("pageCount%d" % self.story_count)
self.canv.setFont("Helvetica", utils.unit_get(str(8))) self.canv.setFont("Helvetica", utils.unit_get(str(8)))
self.canv.drawString(0, 0, str(self.canv.getPageNumber())) self.canv.drawString(0, 0, str(self.canv.getPageNumber()))
self.canv.endForm() self.canv.endForm()
@ -268,18 +268,18 @@ class _rml_doc(object):
if fontname not in pdfmetrics._fonts: if fontname not in pdfmetrics._fonts:
pdfmetrics.registerFont(TTFont(fontname, filename)) pdfmetrics.registerFont(TTFont(fontname, filename))
if (mode == 'all'): if mode == 'all':
addMapping(face, 0, 0, fontname) #normal addMapping(face, 0, 0, fontname) #normal
addMapping(face, 0, 1, fontname) #italic addMapping(face, 0, 1, fontname) #italic
addMapping(face, 1, 0, fontname) #bold addMapping(face, 1, 0, fontname) #bold
addMapping(face, 1, 1, fontname) #italic and bold addMapping(face, 1, 1, fontname) #italic and bold
elif (mode== 'normal') or (mode == 'regular'): elif (mode== 'normal') or (mode == 'regular'):
addMapping(face, 0, 0, fontname) #normal addMapping(face, 0, 0, fontname) #normal
elif (mode == 'italic'): elif mode == 'italic':
addMapping(face, 0, 1, fontname) #italic addMapping(face, 0, 1, fontname) #italic
elif (mode == 'bold'): elif mode == 'bold':
addMapping(face, 1, 0, fontname) #bold addMapping(face, 1, 0, fontname) #bold
elif (mode == 'bolditalic'): elif mode == 'bolditalic':
addMapping(face, 1, 1, fontname) #italic and bold addMapping(face, 1, 1, fontname) #italic and bold
def _textual_image(self, node): def _textual_image(self, node):
@ -602,7 +602,7 @@ class _rml_Illustration(platypus.flowables.Flowable):
self.height = utils.unit_get(node.get('height')) self.height = utils.unit_get(node.get('height'))
self.self2 = self2 self.self2 = self2
def wrap(self, *args): def wrap(self, *args):
return (self.width, self.height) return self.width, self.height
def draw(self): def draw(self):
drw = _rml_draw(self.localcontext ,self.node,self.styles, images=self.self2.images, path=self.self2.path, title=self.self2.title) drw = _rml_draw(self.localcontext ,self.node,self.styles, images=self.self2.images, path=self.self2.path, title=self.self2.title)
drw.render(self.canv, None) drw.render(self.canv, None)
@ -890,7 +890,7 @@ class TinyDocTemplate(platypus.BaseDocTemplate):
self.canv._storyCount = 0 self.canv._storyCount = 0
def ___handle_pageBegin(self): def ___handle_pageBegin(self):
self.page = self.page + 1 self.page += 1
self.pageTemplate.beforeDrawPage(self.canv,self) self.pageTemplate.beforeDrawPage(self.canv,self)
self.pageTemplate.checkPageSize(self.canv,self) self.pageTemplate.checkPageSize(self.canv,self)
self.pageTemplate.onPage(self.canv,self) self.pageTemplate.onPage(self.canv,self)

View File

@ -29,7 +29,8 @@ import utils
Font_size= 10.0 Font_size= 10.0
def verbose(text): def verbose(text):
sys.stderr.write(text+"\n"); sys.stderr.write(text+"\n")
class textbox(object): class textbox(object):
"""A box containing plain text. """A box containing plain text.
@ -107,11 +108,11 @@ class textbox(object):
def haplines(self,arr,offset,cc= ''): def haplines(self,arr,offset,cc= ''):
""" Horizontaly append lines """ Horizontaly append lines
""" """
while (len(self.lines) < len(arr)): while len(self.lines) < len(arr):
self.lines.append("") self.lines.append("")
for i in range(len(self.lines)): for i in range(len(self.lines)):
while (len(self.lines[i]) < offset): while len(self.lines[i]) < offset:
self.lines[i] += " " self.lines[i] += " "
for i in range(len(arr)): for i in range(len(arr)):
self.lines[i] += cc +arr[i] self.lines[i] += cc +arr[i]
@ -220,7 +221,7 @@ class _flowable(object):
def rec_render(self,node): def rec_render(self,node):
""" Recursive render: fill outarr with text of current node """ Recursive render: fill outarr with text of current node
""" """
if node.tag != None: if node.tag is not None:
if node.tag in self._tags: if node.tag in self._tags:
self._tags[node.tag](node) self._tags[node.tag](node)
else: else:
@ -255,12 +256,10 @@ class _rml_tmpl_frame(_rml_tmpl_tag):
self.posx = posx self.posx = posx
def tag_start(self): def tag_start(self):
return "frame start" return "frame start"
return '<table border="0" width="%d"><tr><td width="%d">&nbsp;</td><td>' % (self.width+self.posx,self.posx)
def tag_end(self): def tag_end(self):
return True return True
def tag_stop(self): def tag_stop(self):
return "frame stop" return "frame stop"
return '</td></tr></table><br/>'
def tag_mergeable(self): def tag_mergeable(self):
return False return False
@ -282,24 +281,7 @@ class _rml_tmpl_draw_string(_rml_tmpl_tag):
def tag_start(self): def tag_start(self):
return "draw string \"%s\" @(%d,%d)..\n" %("txt",self.posx,self.posy) return "draw string \"%s\" @(%d,%d)..\n" %("txt",self.posx,self.posy)
self.pos.sort()
res = '\\table ...'
posx = 0
i = 0
for (x,y,align,txt, style, fs) in self.pos:
if align=="left":
pos2 = len(txt)*fs
res+='<td width="%d"></td><td style="%s" width="%d">%s</td>' % (x - posx, style, pos2, txt)
posx = x+pos2
if align=="right":
res+='<td width="%d" align="right" style="%s">%s</td>' % (x - posx, style, txt)
posx = x
if align=="center":
res+='<td width="%d" align="center" style="%s">%s</td>' % ((x - posx)*2, style, txt)
posx = 2*x-posx
i+=1
res+='\\table end'
return res
def merge(self, ds): def merge(self, ds):
self.pos+=ds.pos self.pos+=ds.pos
@ -316,10 +298,6 @@ class _rml_tmpl_draw_lines(_rml_tmpl_tag):
def tag_start(self): def tag_start(self):
return "draw lines..\n" return "draw lines..\n"
if self.ok:
return '<table border="0" cellpadding="0" cellspacing="0" width="%d"><tr><td width="%d"></td><td><hr width="100%%" style="margin:0px; %s"></td></tr></table>' % (self.posx+self.width,self.posx,self.style)
else:
return ''
class _rml_stylesheet(object): class _rml_stylesheet(object):
def __init__(self, stylesheet, doc): def __init__(self, stylesheet, doc):
@ -456,11 +434,6 @@ class _rml_template(object):
def end(self): def end(self):
return "template end\n" return "template end\n"
result = ''
while not self.loop:
result += self.frame_start()
result += self.frame_stop()
return result
class _rml_doc(object): class _rml_doc(object):
def __init__(self, node, localcontext=None, images=None, path='.', title=None): def __init__(self, node, localcontext=None, images=None, path='.', title=None):

View File

@ -441,7 +441,7 @@ class report_sxw(report_rml, preprocess.report):
raise NotImplementedError(_('Unknown report type: %s') % report_type) raise NotImplementedError(_('Unknown report type: %s') % report_type)
fnct_ret = fnct(cr, uid, ids, data, report_xml, context) fnct_ret = fnct(cr, uid, ids, data, report_xml, context)
if not fnct_ret: if not fnct_ret:
return (False,False) return False, False
return fnct_ret return fnct_ret
def create_source_odt(self, cr, uid, ids, data, report_xml, context=None): def create_source_odt(self, cr, uid, ids, data, report_xml, context=None):
@ -531,7 +531,7 @@ class report_sxw(report_rml, preprocess.report):
logo = base64.decodestring(rml_parser.logo) logo = base64.decodestring(rml_parser.logo)
create_doc = self.generators[report_xml.report_type] create_doc = self.generators[report_xml.report_type]
pdf = create_doc(etree.tostring(processed_rml),rml_parser.localcontext,logo,title.encode('utf8')) pdf = create_doc(etree.tostring(processed_rml),rml_parser.localcontext,logo,title.encode('utf8'))
return (pdf, report_xml.report_type) return pdf, report_xml.report_type
def create_single_odt(self, cr, uid, ids, data, report_xml, context=None): def create_single_odt(self, cr, uid, ids, data, report_xml, context=None):
if not context: if not context:
@ -644,7 +644,7 @@ class report_sxw(report_rml, preprocess.report):
sxw_z.close() sxw_z.close()
final_op = sxw_io.getvalue() final_op = sxw_io.getvalue()
sxw_io.close() sxw_io.close()
return (final_op, mime_type) return final_op, mime_type
def create_single_html2html(self, cr, uid, ids, data, report_xml, context=None): def create_single_html2html(self, cr, uid, ids, data, report_xml, context=None):
if not context: if not context:
@ -666,7 +666,7 @@ class report_sxw(report_rml, preprocess.report):
create_doc = self.generators['html2html'] create_doc = self.generators['html2html']
html = etree.tostring(create_doc(html_dom, html_parser.localcontext)) html = etree.tostring(create_doc(html_dom, html_parser.localcontext))
return (html.replace('&amp;','&').replace('&lt;', '<').replace('&gt;', '>').replace('</br>',''), report_type) return html.replace('&amp;','&').replace('&lt;', '<').replace('&gt;', '>').replace('</br>',''), report_type
def create_single_mako2html(self, cr, uid, ids, data, report_xml, context=None): def create_single_mako2html(self, cr, uid, ids, data, report_xml, context=None):
mako_html = report_xml.report_rml_content mako_html = report_xml.report_rml_content
@ -675,7 +675,7 @@ class report_sxw(report_rml, preprocess.report):
html_parser.set_context(objs, data, ids, 'html') html_parser.set_context(objs, data, ids, 'html')
create_doc = self.generators['makohtml2html'] create_doc = self.generators['makohtml2html']
html = create_doc(mako_html,html_parser.localcontext) html = create_doc(mako_html,html_parser.localcontext)
return (html,'html') return html,'html'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

View File

@ -139,7 +139,7 @@ class OpenERPAuthProvider(AuthProvider):
uid = security.login(db,user,passwd) uid = security.login(db,user,passwd)
if uid is False: if uid is False:
return False return False
return (user, passwd, db, uid) return user, passwd, db, uid
except Exception,e: except Exception,e:
_logger.debug("Fail auth: %s" % e ) _logger.debug("Fail auth: %s" % e )
return False return False

View File

@ -172,13 +172,13 @@ class db(netsvc.ExportService):
def exp_get_progress(self, id): def exp_get_progress(self, id):
if self.actions[id]['thread'].isAlive(): if self.actions[id]['thread'].isAlive():
# return openerp.modules.init_progress[db_name] # return openerp.modules.init_progress[db_name]
return (min(self.actions[id].get('progress', 0),0.95), []) return min(self.actions[id].get('progress', 0),0.95), []
else: else:
clean = self.actions[id]['clean'] clean = self.actions[id]['clean']
if clean: if clean:
users = self.actions[id]['users'] users = self.actions[id]['users']
self.actions.pop(id) self.actions.pop(id)
return (1.0, users) return 1.0, users
else: else:
e = self.actions[id]['exception'] # TODO this seems wrong: actions[id]['traceback'] is set, but not 'exception'. e = self.actions[id]['exception'] # TODO this seems wrong: actions[id]['traceback'] is set, but not 'exception'.
self.actions.pop(id) self.actions.pop(id)
@ -543,7 +543,7 @@ GNU Public Licence.
if os.name == 'posix': if os.name == 'posix':
if platform.system() == 'Linux': if platform.system() == 'Linux':
lsbinfo = os.popen('lsb_release -a').read() lsbinfo = os.popen('lsb_release -a').read()
environment += '%s'%(lsbinfo) environment += '%s'% lsbinfo
else: else:
environment += 'Your System is not lsb compliant\n' environment += 'Your System is not lsb compliant\n'
environment += 'Operating System Release : %s\n' \ environment += 'Operating System Release : %s\n' \

View File

@ -226,9 +226,9 @@ class HttpOptions:
Sometimes, like in special DAV folders, the OPTIONS may contain Sometimes, like in special DAV folders, the OPTIONS may contain
extra keywords, perhaps also dependant on the request url. extra keywords, perhaps also dependant on the request url.
@param the options already. MUST be copied before being altered :param opts: MUST be copied before being altered
@return the updated options. :returns: the updated options.
""" """
return opts return opts

View File

@ -74,8 +74,8 @@ import threading
from inspect import currentframe from inspect import currentframe
import re import re
re_from = re.compile('.* from "?([a-zA-Z_0-9]+)"? .*$'); re_from = re.compile('.* from "?([a-zA-Z_0-9]+)"? .*$')
re_into = re.compile('.* into "?([a-zA-Z_0-9]+)"? .*$'); re_into = re.compile('.* into "?([a-zA-Z_0-9]+)"? .*$')
sql_counter = 0 sql_counter = 0
@ -226,11 +226,11 @@ class Cursor(object):
params = params or None params = params or None
res = self._obj.execute(query, params) res = self._obj.execute(query, params)
except psycopg2.ProgrammingError, pe: except psycopg2.ProgrammingError, pe:
if (self._default_log_exceptions if log_exceptions is None else log_exceptions): if self._default_log_exceptions if log_exceptions is None else log_exceptions:
_logger.error("Programming error: %s, in query %s", pe, query) _logger.error("Programming error: %s, in query %s", pe, query)
raise raise
except Exception: except Exception:
if (self._default_log_exceptions if log_exceptions is None else log_exceptions): if self._default_log_exceptions if log_exceptions is None else log_exceptions:
_logger.exception("bad query: %s", self._obj.query or query) _logger.exception("bad query: %s", self._obj.query or query)
raise raise
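The two logging guards above keep the same behaviour, only without the enclosing parentheses: a per-call log_exceptions flag wins when given, otherwise the cursor-wide default applies. A tiny standalone sketch of that pattern, with hypothetical names:

def should_log(default_flag, override=None):
    # equivalent to writing (default_flag if override is None else override)
    return default_flag if override is None else override

print(should_log(True))         # True  -- falls back to the default
print(should_log(True, False))  # False -- explicit per-call override wins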
@ -357,11 +357,6 @@ class Cursor(object):
def __getattr__(self, name): def __getattr__(self, name):
return getattr(self._obj, name) return getattr(self._obj, name)
""" Set the mode of postgres operations for all cursors
"""
"""Obtain the mode of postgres operations for all cursors
"""
class PsycoConnection(psycopg2.extensions.connection): class PsycoConnection(psycopg2.extensions.connection):
pass pass
@ -521,8 +516,8 @@ def db_connect(db_name):
return Connection(_Pool, db_name) return Connection(_Pool, db_name)
def close_db(db_name): def close_db(db_name):
global _Pool
""" You might want to call openerp.modules.registry.RegistryManager.delete(db_name) along this function.""" """ You might want to call openerp.modules.registry.RegistryManager.delete(db_name) along this function."""
global _Pool
if _Pool: if _Pool:
_Pool.close_all(dsn(db_name)) _Pool.close_all(dsn(db_name))
ct = currentThread() ct = currentThread()

View File

@ -57,9 +57,9 @@ def _convert_nnn_fr(val):
if rem > 0: if rem > 0:
word = to_19_fr[rem] + ' Cent' word = to_19_fr[rem] + ' Cent'
if mod > 0: if mod > 0:
word = word + ' ' word += ' '
if mod > 0: if mod > 0:
word = word + _convert_nn_fr(mod) word += _convert_nn_fr(mod)
return word return word
def french_number(val): def french_number(val):
@ -125,9 +125,9 @@ def _convert_nnn_nl(val):
if rem > 0: if rem > 0:
word = to_19_nl[rem] + ' Honderd' word = to_19_nl[rem] + ' Honderd'
if mod > 0: if mod > 0:
word = word + ' ' word += ' '
if mod > 0: if mod > 0:
word = word + _convert_nn_nl(mod) word += _convert_nn_nl(mod)
return word return word
def dutch_number(val): def dutch_number(val):

View File

@ -60,9 +60,9 @@ def _convert_nnn(val):
if rem > 0: if rem > 0:
word = to_19[rem] + ' Hundred' word = to_19[rem] + ' Hundred'
if mod > 0: if mod > 0:
word = word + ' ' word += ' '
if mod > 0: if mod > 0:
word = word + _convert_nn(mod) word += _convert_nn(mod)
return word return word
def english_number(val): def english_number(val):
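These hunks (here and in the fr/nl variants just above) only fold "word = word + ..." into augmented assignments; the hundreds decomposition itself is unchanged. A worked standalone illustration of that logic, with stand-in lookup tables (the real to_19 and _convert_nn live elsewhere in the module):

to_19 = ('Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven',
         'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen', 'Fourteen',
         'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen')

def convert_nn(val):
    # stand-in for _convert_nn: only the 0..19 range matters for this demo
    return to_19[val]

def convert_nnn(val):
    word = ''
    rem, mod = val // 100, val % 100
    if rem > 0:
        word = to_19[rem] + ' Hundred'
        if mod > 0:
            word += ' '
    if mod > 0:
        word += convert_nn(mod)
    return word

print(convert_nnn(305))  # 'Three Hundred Five'
print(convert_nnn(18))   # 'Eighteen'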

View File

@ -352,7 +352,7 @@ class configmanager(object):
# Check if the config file exists (-c used, but not -s) # Check if the config file exists (-c used, but not -s)
die(not opt.save and opt.config and not os.path.exists(opt.config), die(not opt.save and opt.config and not os.path.exists(opt.config),
"The config file '%s' selected with -c/--config doesn't exist, "\ "The config file '%s' selected with -c/--config doesn't exist, "\
"use -s/--save if you want to generate it"%(opt.config)) "use -s/--save if you want to generate it"% opt.config)
# place/search the config file on Win32 near the server installation # place/search the config file on Win32 near the server installation
# (../etc from the server) # (../etc from the server)

View File

@ -661,7 +661,7 @@ form: module.record_id""" % (xml_id,)
if rec.get('action') and pid: if rec.get('action') and pid:
action = "ir.actions.%s,%d" % (a_type, a_id) action = "ir.actions.%s,%d" % (a_type, a_id)
self.pool.get('ir.model.data').ir_set(cr, self.uid, 'action', 'tree_but_open', 'Menuitem', [('ir.ui.menu', int(pid))], action, True, True, xml_id=rec_id) self.pool.get('ir.model.data').ir_set(cr, self.uid, 'action', 'tree_but_open', 'Menuitem', [('ir.ui.menu', int(pid))], action, True, True, xml_id=rec_id)
return ('ir.ui.menu', pid) return 'ir.ui.menu', pid
def _assert_equals(self, f1, f2, prec=4): def _assert_equals(self, f1, f2, prec=4):
return not round(f1 - f2, prec) return not round(f1 - f2, prec)

View File

@ -45,7 +45,7 @@ def frame_codeinfo(fframe, back=0):
try: try:
if not fframe: if not fframe:
return ("<unknown>", '') return "<unknown>", ''
for i in range(back): for i in range(back):
fframe = fframe.f_back fframe = fframe.f_back
try: try:
@ -53,8 +53,8 @@ def frame_codeinfo(fframe, back=0):
except TypeError: except TypeError:
fname = '<builtin>' fname = '<builtin>'
lineno = fframe.f_lineno or '' lineno = fframe.f_lineno or ''
return (fname, lineno) return fname, lineno
except Exception: except Exception:
return ("<unknown>", '') return "<unknown>", ''
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

View File

@ -51,7 +51,7 @@ class graph(object):
for link in self.links: for link in self.links:
self.edge_wt[link] = self.result[link[1]]['x'] - self.result[link[0]]['x'] self.edge_wt[link] = self.result[link[1]]['x'] - self.result[link[0]]['x']
tot_node = self.partial_order.__len__() tot_node = len(self.partial_order)
#do until all the nodes in the component are searched #do until all the nodes in the component are searched
while self.tight_tree()<tot_node: while self.tight_tree()<tot_node:
list_node = [] list_node = []
@ -68,9 +68,9 @@ class graph(object):
slack = 100 slack = 100
for edge in list_edge: for edge in list_edge:
if ((self.reachable_nodes.__contains__(edge[0]) and edge[1] not in self.reachable_nodes) or if ((edge[0] in self.reachable_nodes and edge[1] not in self.reachable_nodes) or
(self.reachable_nodes.__contains__(edge[1]) and edge[0] not in self.reachable_nodes)): (edge[1] in self.reachable_nodes and edge[0] not in self.reachable_nodes)):
if(slack>self.edge_wt[edge]-1): if slack > self.edge_wt[edge]-1:
slack = self.edge_wt[edge]-1 slack = self.edge_wt[edge]-1
new_edge = edge new_edge = edge
@ -93,7 +93,7 @@ class graph(object):
self.reachable_nodes = [] self.reachable_nodes = []
self.tree_edges = [] self.tree_edges = []
self.reachable_node(self.start) self.reachable_node(self.start)
return self.reachable_nodes.__len__() return len(self.reachable_nodes)
def reachable_node(self, node): def reachable_node(self, node):
@ -117,13 +117,13 @@ class graph(object):
""" """
self.cut_edges = {} self.cut_edges = {}
self.head_nodes = [] self.head_nodes = []
i=0; i=0
for edge in self.tree_edges: for edge in self.tree_edges:
self.head_nodes = [] self.head_nodes = []
rest_edges = [] rest_edges = []
rest_edges += self.tree_edges rest_edges += self.tree_edges
rest_edges.__delitem__(i) del rest_edges[i]
self.head_component(self.start, rest_edges) self.head_component(self.start, rest_edges)
i+=1 i+=1
positive = 0 positive = 0
@ -197,7 +197,7 @@ class graph(object):
des = link[1] des = link[1]
edge_len = self.partial_order[des]['level'] - self.partial_order[src]['level'] edge_len = self.partial_order[des]['level'] - self.partial_order[src]['level']
if edge_len < 0: if edge_len < 0:
self.links.__delitem__(i) del self.links[i]
self.links.insert(i, (des, src)) self.links.insert(i, (des, src))
self.transitions[src].remove(des) self.transitions[src].remove(des)
self.transitions.setdefault(des, []).append(src) self.transitions.setdefault(des, []).append(src)
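The hunk above reverses any link whose computed length (destination level minus source level) is negative, so that every edge points from a lower level to a higher one before the simplex iterations start. A standalone sketch of that normalisation step, using plain dicts instead of the class state (the node names and levels are invented for the demo):

level = {'a': 0, 'b': 1, 'c': 2}   # levels assigned during the partial ordering
links = [('a', 'b'), ('c', 'b')]   # ('c', 'b') points "backwards": 1 - 2 < 0

normalised = []
for src, des in links:
    if level[des] - level[src] < 0:
        src, des = des, src        # flip the edge, as the hunk does in place
    normalised.append((src, des))

print(normalised)  # [('a', 'b'), ('b', 'c')]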
@ -210,10 +210,10 @@ class graph(object):
def exchange(self, e, f): def exchange(self, e, f):
"""Exchange edges to make feasible-tree optimized """Exchange edges to make feasible-tree optimized
@param edge edge with negative cut-value :param e: edge with negative cut-value
@param edge new edge with minimum slack-value :param f: new edge with minimum slack-value
""" """
self.tree_edges.__delitem__(self.tree_edges.index(e)) del self.tree_edges[self.tree_edges.index(e)]
self.tree_edges.append(f) self.tree_edges.append(f)
self.init_cutvalues() self.init_cutvalues()
@ -227,13 +227,13 @@ class graph(object):
self.head_nodes = [] self.head_nodes = []
rest_edges = [] rest_edges = []
rest_edges += self.tree_edges rest_edges += self.tree_edges
rest_edges.__delitem__(rest_edges.index(edge)) del rest_edges[rest_edges.index(edge)]
self.head_component(self.start, rest_edges) self.head_component(self.start, rest_edges)
if self.head_nodes.__contains__(edge[1]): if edge[1] in self.head_nodes:
l = [] l = []
for node in self.result: for node in self.result:
if not self.head_nodes.__contains__(node): if node not in self.head_nodes:
l.append(node) l.append(node)
self.head_nodes = l self.head_nodes = l
@ -243,7 +243,7 @@ class graph(object):
if source_node in self.head_nodes: if source_node in self.head_nodes:
for dest_node in self.transitions[source_node]: for dest_node in self.transitions[source_node]:
if dest_node not in self.head_nodes: if dest_node not in self.head_nodes:
if(slack>(self.edge_wt[edge]-1)): if slack>(self.edge_wt[edge]-1):
slack = self.edge_wt[edge]-1 slack = self.edge_wt[edge]-1
new_edge = (source_node, dest_node) new_edge = (source_node, dest_node)
@ -276,7 +276,7 @@ class graph(object):
least_rank = min(map(lambda x: x['x'], self.result.values())) least_rank = min(map(lambda x: x['x'], self.result.values()))
if(least_rank!=0): if least_rank!=0:
for node in self.result: for node in self.result:
self.result[node]['x']-=least_rank self.result[node]['x']-=least_rank
@ -310,7 +310,7 @@ class graph(object):
""" """
if not self.result[node]['y']: if not self.result[node]['y']:
self.result[node]['y'] = self.order[level] self.result[node]['y'] = self.order[level]
self.order[level] = self.order[level]+1 self.order[level] += 1
for sec_end in self.transitions.get(node, []): for sec_end in self.transitions.get(node, []):
if node!=sec_end: if node!=sec_end:
@ -377,7 +377,7 @@ class graph(object):
if pre_level_nodes: if pre_level_nodes:
for src in pre_level_nodes: for src in pre_level_nodes:
if (self.transitions.get(src) and self.transitions[src].__contains__(node)): if self.transitions.get(src) and node in self.transitions[src]:
adj_nodes.append(self.result[src]['y']) adj_nodes.append(self.result[src]['y'])
return adj_nodes return adj_nodes
@ -455,7 +455,7 @@ class graph(object):
mid_node = l[no/2] mid_node = l[no/2]
self.result[mid_node]['y'] = mid_pos self.result[mid_node]['y'] = mid_pos
if self.transitions.get((mid_node), False): if self.transitions.get(mid_node, False):
if last: if last:
self.result[mid_node]['y'] = last + len(self.transitions[mid_node])/2 + 1 self.result[mid_node]['y'] = last + len(self.transitions[mid_node])/2 + 1
if node!=mid_node: if node!=mid_node:
@ -494,7 +494,7 @@ class graph(object):
if max_level%2: if max_level%2:
self.result[self.start]['y'] = (max_level+1)/2 + self.max_order + (self.max_order and 1) self.result[self.start]['y'] = (max_level+1)/2 + self.max_order + (self.max_order and 1)
else: else:
self.result[self.start]['y'] = (max_level)/2 + self.max_order + (self.max_order and 1) self.result[self.start]['y'] = max_level /2 + self.max_order + (self.max_order and 1)
self.graph_order() self.graph_order()
@ -511,7 +511,7 @@ class graph(object):
for start in self.start_nodes[:index]: for start in self.start_nodes[:index]:
same = True same = True
for edge in self.tree_list[start][1:]: for edge in self.tree_list[start][1:]:
if self.tree_list[self.start].__contains__(edge): if edge in self.tree_list[self.start]:
continue continue
else: else:
same = False same = False
@ -590,9 +590,9 @@ class graph(object):
for edge in largest_tree: for edge in largest_tree:
if rem_nodes.__contains__(edge[0]): if edge[0] in rem_nodes:
rem_nodes.remove(edge[0]) rem_nodes.remove(edge[0])
if rem_nodes.__contains__(edge[1]): if edge[1] in rem_nodes:
rem_nodes.remove(edge[1]) rem_nodes.remove(edge[1])
if not rem_nodes: if not rem_nodes:
@ -601,8 +601,6 @@ class graph(object):
def rank(self): def rank(self):
"""Finds the optimized rank of the nodes using Network-simplex algorithm """Finds the optimized rank of the nodes using Network-simplex algorithm
@param start starting node of the component
""" """
self.levels = {} self.levels = {}
self.critical_edges = [] self.critical_edges = []
@ -641,8 +639,6 @@ class graph(object):
def order_in_rank(self): def order_in_rank(self):
"""Finds optimized order of the nodes within their ranks using median heuristic """Finds optimized order of the nodes within their ranks using median heuristic
@param start: starting node of the component
""" """
self.make_chain() self.make_chain()
@ -716,7 +712,7 @@ class graph(object):
#for flat edges i.e. source and destination nodes are on the same rank #for flat edges i.e. source and destination nodes are on the same rank
for src in self.transitions: for src in self.transitions:
for des in self.transitions[src]: for des in self.transitions[src]:
if (self.result[des]['x'] - self.result[src]['x'] == 0): if self.result[des]['x'] - self.result[src]['x'] == 0:
self.result[src]['x'] += 0.08 self.result[src]['x'] += 0.08
self.result[des]['x'] -= 0.08 self.result[des]['x'] -= 0.08

View File

@ -77,7 +77,7 @@ class LRU(object):
@synchronized() @synchronized()
def __iter__(self): def __iter__(self):
cur = self.first cur = self.first
while cur != None: while cur is not None:
cur2 = cur.next cur2 = cur.next
yield cur.me[1] yield cur.me[1]
cur = cur2 cur = cur2
@ -89,7 +89,7 @@ class LRU(object):
@synchronized() @synchronized()
def iteritems(self): def iteritems(self):
cur = self.first cur = self.first
while cur != None: while cur is not None:
cur2 = cur.next cur2 = cur.next
yield cur.me yield cur.me
cur = cur2 cur = cur2

View File

@ -88,7 +88,7 @@ def exec_pg_command_pipe(name, *args):
pop = subprocess.Popen((prog,) + args, bufsize= -1, pop = subprocess.Popen((prog,) + args, bufsize= -1,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=(os.name=="posix")) close_fds=(os.name=="posix"))
return (pop.stdin, pop.stdout) return pop.stdin, pop.stdout
def exec_command_pipe(name, *args): def exec_command_pipe(name, *args):
prog = find_in_path(name) prog = find_in_path(name)
@ -99,7 +99,7 @@ def exec_command_pipe(name, *args):
pop = subprocess.Popen((prog,) + args, bufsize= -1, pop = subprocess.Popen((prog,) + args, bufsize= -1,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=(os.name=="posix")) close_fds=(os.name=="posix"))
return (pop.stdin, pop.stdout) return pop.stdin, pop.stdout
#---------------------------------------------------------- #----------------------------------------------------------
# File paths # File paths
@ -177,7 +177,7 @@ def _fileopen(path, mode, basedir, pathinfo, basename=None):
if os.path.isfile(name): if os.path.isfile(name):
fo = open(name, mode) fo = open(name, mode)
if pathinfo: if pathinfo:
return (fo, name) return fo, name
return fo return fo
# Support for loading modules in zipped form. # Support for loading modules in zipped form.
@ -204,7 +204,7 @@ def _fileopen(path, mode, basedir, pathinfo, basename=None):
os.sep, '/'))) os.sep, '/')))
fo.seek(0) fo.seek(0)
if pathinfo: if pathinfo:
return (fo, name) return fo, name
return fo return fo
except Exception: except Exception:
pass pass
@ -557,8 +557,8 @@ def human_size(sz):
sz=len(sz) sz=len(sz)
s, i = float(sz), 0 s, i = float(sz), 0
while s >= 1024 and i < len(units)-1: while s >= 1024 and i < len(units)-1:
s = s / 1024 s /= 1024
i = i + 1 i += 1
return "%0.2f %s" % (s, units[i]) return "%0.2f %s" % (s, units[i])
def logged(f): def logged(f):
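The human_size hunk just above only switches its scaling loop to augmented assignments; the arithmetic is unchanged. A self-contained sketch of that loop (the unit labels are assumptions for the demo; only the loop mirrors the hunk):

units = ('bytes', 'KiB', 'MiB', 'GiB')  # assumed labels

def human_size(sz):
    s, i = float(sz), 0
    while s >= 1024 and i < len(units) - 1:
        s /= 1024
        i += 1
    return "%0.2f %s" % (s, units[i])

print(human_size(5242880))  # '5.00 MiB'
print(human_size(512))      # '512.00 bytes'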
@ -721,7 +721,7 @@ def get_win32_timezone():
@return the standard name of the current win32 timezone, or False if it cannot be found. @return the standard name of the current win32 timezone, or False if it cannot be found.
""" """
res = False res = False
if (sys.platform == "win32"): if sys.platform == "win32":
try: try:
import _winreg import _winreg
hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE) hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
@ -752,7 +752,7 @@ def detect_server_timezone():
(time.tzname[0], 'time.tzname'), (time.tzname[0], 'time.tzname'),
(os.environ.get('TZ',False),'TZ environment variable'), ] (os.environ.get('TZ',False),'TZ environment variable'), ]
# Option 4: OS-specific: /etc/timezone on Unix # Option 4: OS-specific: /etc/timezone on Unix
if (os.path.exists("/etc/timezone")): if os.path.exists("/etc/timezone"):
tz_value = False tz_value = False
try: try:
f = open("/etc/timezone") f = open("/etc/timezone")
@ -763,7 +763,7 @@ def detect_server_timezone():
f.close() f.close()
sources.append((tz_value,"/etc/timezone file")) sources.append((tz_value,"/etc/timezone file"))
# Option 5: timezone info from registry on Win32 # Option 5: timezone info from registry on Win32
if (sys.platform == "win32"): if sys.platform == "win32":
# Timezone info is stored in windows registry. # Timezone info is stored in windows registry.
# However this is not likely to work very well as the standard name # However this is not likely to work very well as the standard name
# of timezones in windows is rarely something that is known to pytz. # of timezones in windows is rarely something that is known to pytz.

View File

@ -27,16 +27,16 @@ import os
from os.path import join as opj from os.path import join as opj
def listdir(dir, recursive=False): def listdir(dir, recursive=False):
"""Allow to recursively get the file listing""" """Allow to recursively get the file listing"""
dir = os.path.normpath(dir) dir = os.path.normpath(dir)
if not recursive: if not recursive:
return os.listdir(dir) return os.listdir(dir)
res = [] res = []
for root, dirs, files in walksymlinks(dir): for root, dirs, files in walksymlinks(dir):
root = root[len(dir)+1:] root = root[len(dir)+1:]
res.extend([opj(root, f) for f in files]) res.extend([opj(root, f) for f in files])
return res return res
def walksymlinks(top, topdown=True, onerror=None): def walksymlinks(top, topdown=True, onerror=None):
""" """
@ -58,7 +58,7 @@ def walksymlinks(top, topdown=True, onerror=None):
if __name__ == '__main__': if __name__ == '__main__':
from pprint import pprint as pp from pprint import pprint as pp
pp(listdir('../report', True)) pp(listdir('../report', True))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

View File

@ -93,7 +93,6 @@ _LOCALE2WIN32 = {
'lt_LT': 'Lithuanian_Lithuania', 'lt_LT': 'Lithuanian_Lithuania',
'lat': 'Latvian_Latvia', 'lat': 'Latvian_Latvia',
'ml_IN': 'Malayalam_India', 'ml_IN': 'Malayalam_India',
'id_ID': 'Indonesian_indonesia',
'mi_NZ': 'Maori', 'mi_NZ': 'Maori',
'mn': 'Cyrillic_Mongolian', 'mn': 'Cyrillic_Mongolian',
'no_NO': 'Norwegian_Norway', 'no_NO': 'Norwegian_Norway',
@ -103,7 +102,6 @@ _LOCALE2WIN32 = {
'pt_BR': 'Portuguese_Brazil', 'pt_BR': 'Portuguese_Brazil',
'ro_RO': 'Romanian_Romania', 'ro_RO': 'Romanian_Romania',
'ru_RU': 'Russian_Russia', 'ru_RU': 'Russian_Russia',
'mi_NZ': 'Maori',
'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro', 'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
'sk_SK': 'Slovak_Slovakia', 'sk_SK': 'Slovak_Slovakia',
'sl_SI': 'Slovenian_Slovenia', 'sl_SI': 'Slovenian_Slovenia',
@ -131,7 +129,6 @@ _LOCALE2WIN32 = {
'sv_SE': 'Swedish_Sweden', 'sv_SE': 'Swedish_Sweden',
'ta_IN': 'English_Australia', 'ta_IN': 'English_Australia',
'th_TH': 'Thai_Thailand', 'th_TH': 'Thai_Thailand',
'mi_NZ': 'Maori',
'tr_TR': 'Turkish_Turkey', 'tr_TR': 'Turkish_Turkey',
'uk_UA': 'Ukrainian_Ukraine', 'uk_UA': 'Ukrainian_Ukraine',
'vi_VN': 'Vietnamese_Viet Nam', 'vi_VN': 'Vietnamese_Viet Nam',
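The repeated 'mi_NZ' entries removed above were harmless at runtime, since a duplicated key in a Python dict literal silently collapses to its last occurrence, but they were misleading to read. A two-line illustration (not part of the patch):

d = {'mi_NZ': 'Maori', 'mi_NZ': 'Maori (last value wins)'}
print(d)       # {'mi_NZ': 'Maori (last value wins)'}
print(len(d))  # 1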
@ -275,7 +272,7 @@ class TinyPoFile(object):
def __iter__(self): def __iter__(self):
self.buffer.seek(0) self.buffer.seek(0)
self.lines = self._get_lines() self.lines = self._get_lines()
self.lines_count = len(self.lines); self.lines_count = len(self.lines)
self.first = True self.first = True
self.extra_lines= [] self.extra_lines= []
@ -291,7 +288,7 @@ class TinyPoFile(object):
return lines return lines
def cur_line(self): def cur_line(self):
return (self.lines_count - len(self.lines)) return self.lines_count - len(self.lines)
def next(self): def next(self):
trans_type = name = res_id = source = trad = None trans_type = name = res_id = source = trad = None
@ -304,7 +301,7 @@ class TinyPoFile(object):
targets = [] targets = []
line = None line = None
fuzzy = False fuzzy = False
while (not line): while not line:
if 0 == len(self.lines): if 0 == len(self.lines):
raise StopIteration() raise StopIteration()
line = self.lines.pop(0).strip() line = self.lines.pop(0).strip()
@ -864,7 +861,7 @@ def trans_generate(lang, modules, cr):
frelativepath = fabsolutepath[len(path):] frelativepath = fabsolutepath[len(path):]
display_path = "addons%s" % frelativepath display_path = "addons%s" % frelativepath
module = get_module_from_path(fabsolutepath, mod_paths=mod_paths) module = get_module_from_path(fabsolutepath, mod_paths=mod_paths)
if (('all' in modules) or (module in modules)) and module in installed_modules: if ('all' in modules or module in modules) and module in installed_modules:
return module, fabsolutepath, frelativepath, display_path return module, fabsolutepath, frelativepath, display_path
return None, None, None, None return None, None, None, None

View File

@ -125,7 +125,7 @@ def _execute(cr, workitem, activity, ident, stack):
_state_set(cr, workitem, activity, 'running', ident) _state_set(cr, workitem, activity, 'running', ident)
if activity.get('action', False): if activity.get('action', False):
id_new = wkf_expr.execute(cr, ident, workitem, activity) id_new = wkf_expr.execute(cr, ident, workitem, activity)
if not (id_new): if not id_new:
cr.execute('delete from wkf_workitem where id=%s', (workitem['id'],)) cr.execute('delete from wkf_workitem where id=%s', (workitem['id'],))
return False return False
assert type(id_new)==type(1) or type(id_new)==type(1L), 'Wrong return value: '+str(id_new)+' '+str(type(id_new)) assert type(id_new)==type(1) or type(id_new)==type(1L), 'Wrong return value: '+str(id_new)+' '+str(type(id_new))