[MERGE] Merged with latest trunk

bzr revid: pan@tinyerp.com-20121210084122-n7qyuh8hm6jvgmd9
Anand Patel (OpenERP) 2012-12-10 14:11:22 +05:30
commit b3ab5a63ae
117 changed files with 40820 additions and 22953 deletions


@ -1,19 +0,0 @@
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import sys
if __name__ == "__main__":
print '-' * 70
print "DEPRECATED: you are starting the OpenERP server with its old path,"
print "please use the new executable (available in the parent directory)."
print '-' * 70
# Change to the parent directory ...
os.chdir(os.path.normpath(os.path.dirname(__file__)))
os.chdir('..')
# ... and execute the new executable.
os.execv('openerp-server', sys.argv)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

debian/control vendored

@ -23,6 +23,7 @@ Depends:
python-libxslt1,
python-lxml,
python-mako,
python-mock,
python-openid,
python-psutil,
python-psycopg2,
@ -33,6 +34,7 @@ Depends:
python-reportlab,
python-simplejson,
python-tz,
python-unittest2,
python-vatnumber,
python-vobject,
python-webdav,


@ -40,7 +40,6 @@ import service
import sql_db
import test
import tools
import wizard
import workflow
# backward compatibility
# TODO: This is for the web addons, can be removed later.

67 file diffs suppressed because they are too large


@ -42,7 +42,7 @@ class actions(osv.osv):
_order = 'name'
_columns = {
'name': fields.char('Name', size=64, required=True),
'type': fields.char('Action Type', required=True, size=32,readonly=True),
'type': fields.char('Action Type', required=True, size=32),
'usage': fields.char('Action Usage', size=32),
'help': fields.text('Action description',
help='Optional help text for the users with a description of the target view, such as its usage and purpose.',


@ -502,6 +502,19 @@
<menuitem id="menu_ir_actions_todo" name="Configuration Wizards" parent="menu_custom" sequence="20" groups="base.group_no_one"/>
<menuitem id="menu_ir_actions_todo_form" action="act_ir_actions_todo_form" parent="menu_ir_actions_todo"/>
<record id="action_run_ir_action_todo" model="ir.actions.server">
<field name="name">Run Remaining Action Todo</field>
<field name="condition">True</field>
<field name="type">ir.actions.server</field>
<field name="model_id" ref="model_res_config"/>
<field name="state">code</field>
<field name="code">
config = self.next(cr, uid, [], context=context) or {}
if config.get('type') not in ('ir.actions.act_window_close',):
action = config
</field>
</record>
</data>
</openerp>


@ -64,10 +64,18 @@ class ir_attachment(osv.osv):
return 0
return []
# Work with a set, as list.remove() is prohibitive for large lists of documents
# (takes 20+ seconds on a db with 100k docs during search_count()!)
orig_ids = ids
ids = set(ids)
# For attachments, the permissions of the document they are attached to
# apply, so we must remove attachments for which the user cannot access
# the linked document.
targets = super(ir_attachment,self).read(cr, uid, ids, ['id', 'res_model', 'res_id'])
# Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
# and the permissions are checked in super() and below anyway.
cr.execute("""SELECT id, res_model, res_id FROM ir_attachment WHERE id = ANY(%s)""", (list(ids),))
targets = cr.dictfetchall()
model_attachments = {}
for target_dict in targets:
if not (target_dict['res_id'] and target_dict['res_model']):
@ -92,9 +100,10 @@ class ir_attachment(osv.osv):
for res_id in disallowed_ids:
for attach_id in targets[res_id]:
ids.remove(attach_id)
if count:
return len(ids)
return ids
# sort result according to the original sort ordering
result = [id for id in orig_ids if id in ids]
return len(result) if count else list(result)
def read(self, cr, uid, ids, fields_to_read=None, context=None, load='_classic_read'):
self.check(cr, uid, ids, 'read', context=context)
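
Illustratively, the search() pattern above boils down to the following standalone sketch; fetch_targets and allowed are hypothetical stand-ins for the raw SQL query and the per-document access check:

def _filter_preserving_order(orig_ids, fetch_targets, allowed, count=False):
    # A set makes removal O(1); list.remove() is prohibitive for large lists.
    ids = set(orig_ids)
    for target in fetch_targets(ids):   # e.g. SELECT id, res_model, res_id ...
        if not allowed(target):
            ids.discard(target['id'])
    # Rebuild the result in the original search ordering.
    result = [id_ for id_ in orig_ids if id_ in ids]
    return len(result) if count else result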


@ -31,7 +31,6 @@ import netsvc
import openerp
import pooler
import tools
from openerp.cron import WAKE_UP_NOW
from osv import fields, osv
from tools import DEFAULT_SERVER_DATETIME_FORMAT
from tools.safe_eval import safe_eval as eval
@ -142,130 +141,6 @@ class ir_cron(osv.osv):
except Exception, e:
self._handle_callback_exception(cr, uid, model_name, method_name, args, job_id, e)
def _run_job(self, cr, job, now):
""" Run a given job taking care of the repetition.
The cursor has a lock on the job (acquired by _run_jobs_multithread()) and this
method is run in a worker thread (spawned by _run_jobs_multithread()).
:param job: job to be run (as a dictionary).
:param now: timestamp (result of datetime.now(), no need to call it multiple times).
"""
try:
nextcall = datetime.strptime(job['nextcall'], DEFAULT_SERVER_DATETIME_FORMAT)
numbercall = job['numbercall']
ok = False
while nextcall < now and numbercall:
if numbercall > 0:
numbercall -= 1
if not ok or job['doall']:
self._callback(cr, job['user_id'], job['model'], job['function'], job['args'], job['id'])
if numbercall:
nextcall += _intervalTypes[job['interval_type']](job['interval_number'])
ok = True
addsql = ''
if not numbercall:
addsql = ', active=False'
cr.execute("UPDATE ir_cron SET nextcall=%s, numbercall=%s"+addsql+" WHERE id=%s",
(nextcall.strftime(DEFAULT_SERVER_DATETIME_FORMAT), numbercall, job['id']))
if numbercall:
# Reschedule our own main cron thread if necessary.
# This is really needed if this job runs longer than its rescheduling period.
nextcall = calendar.timegm(nextcall.timetuple())
openerp.cron.schedule_wakeup(nextcall, cr.dbname)
finally:
cr.commit()
cr.close()
openerp.cron.release_thread_slot()
def _run_jobs_multithread(self):
# TODO remove 'check' argument from addons/base_action_rule/base_action_rule.py
""" Process the cron jobs by spawning worker threads.
This selects in database all the jobs that should be processed. It then
tries to lock each of them and, if it succeeds, spawns a thread to run
the cron job (if it doesn't succeed, it means the job was already
locked to be taken care of by another thread).
The cursor used to lock the job in database is given to the worker
thread (which has to close it itself).
"""
db = self.pool.db
cr = db.cursor()
db_name = db.dbname
try:
jobs = {} # mapping job ids to jobs for all jobs being processed.
now = datetime.now()
# Careful to compare timestamps with 'UTC' - everything is UTC as of v6.1.
cr.execute("""SELECT * FROM ir_cron
WHERE numbercall != 0
AND active AND nextcall <= (now() at time zone 'UTC')
ORDER BY priority""")
for job in cr.dictfetchall():
if not openerp.cron.get_thread_slots():
break
jobs[job['id']] = job
task_cr = db.cursor()
try:
# Try to grab an exclusive lock on the job row from within the task transaction
acquired_lock = False
task_cr.execute("""SELECT *
FROM ir_cron
WHERE id=%s
FOR UPDATE NOWAIT""",
(job['id'],), log_exceptions=False)
acquired_lock = True
except psycopg2.OperationalError, e:
if e.pgcode == '55P03':
# Class 55: Object not in prerequisite state; 55P03: lock_not_available
_logger.debug('Another process/thread is already busy executing job `%s`, skipping it.', job['name'])
continue
else:
# Unexpected OperationalError
raise
finally:
if not acquired_lock:
# we're exiting due to an exception while acquiring the lock
task_cr.close()
# Got the lock on the job row, now spawn a thread to execute it in the transaction with the lock
task_thread = threading.Thread(target=self._run_job, name=job['name'], args=(task_cr, job, now))
# force non-daemon task threads (the runner thread must be daemon, and this property is inherited by default)
task_thread.setDaemon(False)
openerp.cron.take_thread_slot()
task_thread.start()
_logger.debug('Cron execution thread for job `%s` spawned', job['name'])
# Find next earliest job ignoring currently processed jobs (by this and other cron threads)
find_next_time_query = """SELECT min(nextcall) AS min_next_call
FROM ir_cron WHERE numbercall != 0 AND active"""
if jobs:
cr.execute(find_next_time_query + " AND id NOT IN %s", (tuple(jobs.keys()),))
else:
cr.execute(find_next_time_query)
next_call = cr.dictfetchone()['min_next_call']
if next_call:
next_call = calendar.timegm(time.strptime(next_call, DEFAULT_SERVER_DATETIME_FORMAT))
else:
# no matching cron job found in database, re-schedule arbitrarily in 1 day,
# this delay will likely be modified when running jobs complete their tasks
next_call = time.time() + (24*3600)
openerp.cron.schedule_wakeup(next_call, db_name)
except Exception, ex:
_logger.warning('Exception in cron:', exc_info=True)
finally:
cr.commit()
cr.close()
def _process_job(self, cr, job):
""" Run a given job taking care of the repetition.
@ -365,19 +240,6 @@ class ir_cron(osv.osv):
return False
def update_running_cron(self, cr):
""" Schedule as soon as possible a wake-up for this database. """
# Verify whether the server is already started and thus whether we need to commit
# immediately our changes and restart the cron agent in order to apply the change
# immediately. The commit() is needed because as soon as the cron is (re)started it
# will query the database with its own cursor, possibly before the end of the
# current transaction.
# This commit() is not an issue in most cases, but we must absolutely avoid it
# when the server is only starting or loading modules (hence the test on pool._init).
if not self.pool._init:
cr.commit()
openerp.cron.schedule_wakeup(WAKE_UP_NOW, self.pool.db.dbname)
def _try_lock(self, cr, uid, ids, context=None):
"""Try to grab a dummy exclusive write-lock to the rows with the given ids,
to make sure a following write() or unlink() will not block due
@ -393,20 +255,16 @@ class ir_cron(osv.osv):
def create(self, cr, uid, vals, context=None):
res = super(ir_cron, self).create(cr, uid, vals, context=context)
self.update_running_cron(cr)
return res
def write(self, cr, uid, ids, vals, context=None):
self._try_lock(cr, uid, ids, context)
res = super(ir_cron, self).write(cr, uid, ids, vals, context=context)
self.update_running_cron(cr)
return res
def unlink(self, cr, uid, ids, context=None):
self._try_lock(cr, uid, ids, context)
res = super(ir_cron, self).unlink(cr, uid, ids, context=context)
self.update_running_cron(cr)
return res
ir_cron()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
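
The removed _run_jobs_multithread() above leaned on PostgreSQL row locks so that concurrent workers never pick the same job. A minimal sketch of that idiom, assuming a psycopg2 cursor inside an open transaction (error class 55P03 is PostgreSQL's lock_not_available):

import psycopg2

def try_lock_job(cr, job_id):
    # Grab an exclusive lock on the ir_cron row inside the current
    # transaction; return False if another worker already holds it.
    try:
        cr.execute("SELECT id FROM ir_cron WHERE id = %s FOR UPDATE NOWAIT",
                   (job_id,))
        return True
    except psycopg2.OperationalError as e:
        if e.pgcode == '55P03':
            # Class 55: object not in prerequisite state; lock_not_available.
            return False
        raise  # unexpected OperationalError: propagate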


@ -70,7 +70,10 @@ class ir_model(osv.osv):
models = self.browse(cr, uid, ids, context=context)
res = dict.fromkeys(ids)
for model in models:
res[model.id] = self.pool.get(model.model).is_transient()
if self.pool.get(model.model):
res[model.id] = self.pool.get(model.model).is_transient()
else:
_logger.error('Missing model %s' % (model.model, ))
return res
def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
@ -206,6 +209,7 @@ class ir_model(osv.osv):
else:
x_name = a._columns.keys()[0]
x_custom_model._rec_name = x_name
a._rec_name = x_name
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'


@ -44,7 +44,7 @@
<field name="selection" attrs="{'required': [('ttype','in',['selection','reference'])], 'readonly': [('ttype','not in',['selection','reference'])]}"/>
<field name="size" attrs="{'required': [('ttype','in',['char','reference'])], 'readonly': [('ttype','not in',['char','reference'])]}"/>
<field name="domain" attrs="{'readonly': [('relation','=','')]}"/>
<field name="serialization_field_id" attrs="{'readonly': [('state','=','base')]}" domain="[('ttype','=','serialized'), ('model_id', '=', model_id)]"/>
<field name="serialization_field_id" attrs="{'readonly': [('state','=','base')]}" domain="[('ttype','=','serialized'), ('model_id', '=', parent.model)]"/>
</group>
<group>
<field name="required"/>


@ -3,7 +3,7 @@
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2011 OpenERP SA (<http://openerp.com>).
# Copyright (C) 2010-2012 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@ -43,7 +43,7 @@ class ir_ui_menu(osv.osv):
def __init__(self, *args, **kwargs):
self.cache_lock = threading.RLock()
self.clear_cache()
self._cache = {}
r = super(ir_ui_menu, self).__init__(*args, **kwargs)
self.pool.get('ir.model.access').register_cache_clearing_method(self._name, 'clear_cache')
return r
@ -51,6 +51,10 @@ class ir_ui_menu(osv.osv):
def clear_cache(self):
with self.cache_lock:
# radical but this doesn't frequently happen
if self._cache:
# Normally this is done by openerp.tools.ormcache
# but since we do not use it, set it by ourself.
self.pool._any_cache_cleared = True
self._cache = {}
def _filter_visible_menus(self, cr, uid, ids, context=None):
@ -62,7 +66,7 @@ class ir_ui_menu(osv.osv):
modelaccess = self.pool.get('ir.model.access')
user_groups = set(self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['groups_id'])['groups_id'])
result = []
for menu in self.browse(cr, uid, ids, context=context):
for menu in self.browse(cr, SUPERUSER_ID, ids, context=context):
# this key works because user access rights are all based on user's groups (cfr ir_model_access.check)
key = (cr.dbname, menu.id, tuple(user_groups))
if key in self._cache:
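
Condensed, the hand-rolled menu cache above looks roughly like this sketch (class and method names are illustrative, not the module's API); the key point is that clearing it also raises the registry-wide _any_cache_cleared flag that openerp.tools.ormcache would otherwise set:

import threading

class MenuVisibilityCache(object):
    def __init__(self, pool):
        self._lock = threading.RLock()
        self._cache = {}
        self._pool = pool
    def clear(self):
        with self._lock:
            if self._cache:
                # Normally openerp.tools.ormcache does this bookkeeping;
                # this cache bypasses it, so raise the flag ourselves.
                self._pool._any_cache_cleared = True
            self._cache = {}
    def lookup(self, key, compute):
        with self._lock:
            if key not in self._cache:
                self._cache[key] = compute()
            return self._cache[key]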


@ -30,6 +30,7 @@ import re
import urllib
import zipimport
import openerp
from openerp import modules, pooler, release, tools, addons
from openerp.modules.db import create_categories
from openerp.tools.parse_version import parse_version
@ -385,6 +386,8 @@ class module(osv.osv):
# Mark them to be installed.
if to_install_ids:
self.button_install(cr, uid, to_install_ids, context=context)
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return dict(ACTION_DICT, name=_('Install'))
def button_immediate_install(self, cr, uid, ids, context=None):


@ -166,7 +166,7 @@
<field name="name">ir.module.module.tree</field>
<field name="model">ir.module.module</field>
<field name="arch" type="xml">
<tree colors="blue:state=='to upgrade' or state=='to install';red:state=='uninstalled';grey:state=='uninstallable';black:state=='installed'" string="Modules">
<tree colors="blue:state=='to upgrade' or state=='to install';red:state=='uninstalled';grey:state=='uninstallable';black:state=='installed'" create="false" string="Modules">
<field name="shortdesc"/>
<field name="name" groups="base.group_no_one"/>
<field name="author"/>


@ -202,7 +202,7 @@ class res_partner(osv.osv, format_address):
help="The partner's timezone, used to output proper date and time values inside printed reports. "
"It is important to set a value for this field. You should use the same timezone "
"that is otherwise used to pick and render date and time values: your computer's timezone."),
'tz_offset': fields.function(_get_tz_offset, type='char', size=5, string='Timezone offset', store=True),
'tz_offset': fields.function(_get_tz_offset, type='char', size=5, string='Timezone offset', invisible=True),
'user_id': fields.many2one('res.users', 'Salesperson', help='The internal user that is in charge of communicating with this contact if any.'),
'vat': fields.char('TIN', size=32, help="Tax Identification Number. Check the box if this contact is subjected to taxes. Used by some of the legal statements."),
'bank_ids': fields.one2many('res.partner.bank', 'partner_id', 'Banks'),


@ -258,7 +258,7 @@
</group>
<group>
<field name="customer"/>
<field name="supplier" invisible="not context.get('default_supplier')"/>
<field name="supplier"/>
</group>
<group>
<field name="ref"/>
@ -345,8 +345,6 @@
<li t-if="record.city.raw_value and !record.country.raw_value"><field name="city"/></li>
<li t-if="!record.city.raw_value and record.country.raw_value"><field name="country"/></li>
<li t-if="record.city.raw_value and record.country.raw_value"><field name="city"/>, <field name="country"/></li>
<li t-if="record.phone.raw_value">Tel: <field name="phone"/></li>
<li t-if="record.mobile.raw_value">Mobile: <field name="mobile"/></li>
<li t-if="record.email.raw_value"><a t-attf-href="mailto:#{record.email.raw_value}"><field name="email"/></a></li>
</ul>
</div>


@ -153,7 +153,7 @@ class res_users(osv.osv):
help="Specify a value only when creating a user or if you're "\
"changing the user's password, otherwise leave empty. After "\
"a change of password, the user has to login again."),
'signature': fields.text('Signature', size=64),
'signature': fields.text('Signature'),
'active': fields.boolean('Active'),
'action_id': fields.many2one('ir.actions.actions', 'Home Action', help="If specified, this action will be opened at logon for this user, in addition to the standard menu."),
'menu_id': fields.many2one('ir.actions.actions', 'Menu Action', help="If specified, the action will replace the standard menu for this user."),
@ -343,7 +343,7 @@ class res_users(osv.osv):
for k in self._all_columns.keys():
if k.startswith('context_'):
context_key = k[8:]
elif k in ['lang', 'tz', 'tz_offset']:
elif k in ['lang', 'tz']:
context_key = k
else:
context_key = False


@ -50,20 +50,25 @@ class test_ir_values(common.TransactionCase):
# Create some action bindings for a non-existing model.
act_id_1 = self.ref('base.act_values_form_action')
act_id_2 = self.ref('base.act_values_form_defaults')
act_id_3 = self.ref('base.action_res_company_form')
act_id_4 = self.ref('base.action_res_company_tree')
ir_values = self.registry('ir.values')
ir_values.set(self.cr, self.uid, 'action', 'tree_but_open', 'OnDblClick Action', ['unexisting_model'], 'ir.actions.act_window,10', isobject=True)
ir_values.set(self.cr, self.uid, 'action', 'tree_but_open', 'OnDblClick Action 2', ['unexisting_model'], 'ir.actions.act_window,11', isobject=True)
ir_values.set(self.cr, self.uid, 'action', 'client_action_multi', 'Side Wizard', ['unexisting_model'], 'ir.actions.act_window,12', isobject=True)
ir_values.set(self.cr, self.uid, 'action', 'tree_but_open', 'OnDblClick Action', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_1, isobject=True)
ir_values.set(self.cr, self.uid, 'action', 'tree_but_open', 'OnDblClick Action 2', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_2, isobject=True)
ir_values.set(self.cr, self.uid, 'action', 'client_action_multi', 'Side Wizard', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_3, isobject=True)
report_ids = self.registry('ir.actions.report.xml').search(self.cr, self.uid, [], {})
reports = self.registry('ir.actions.report.xml').browse(self.cr, self.uid, report_ids, {})
report_id = [report.id for report in reports if not report.groups_id][0] # assume at least one
ir_values.set(self.cr, self.uid, 'action', 'client_print_multi', 'Nice Report', ['unexisting_model'], 'ir.actions.report.xml,%s'%report_id, isobject=True)
ir_values.set(self.cr, self.uid, 'action', 'client_action_relate', 'Related Stuff', ['unexisting_model'], 'ir.actions.act_window,14', isobject=True)
report_id = [report.id for report in reports if not report.groups_id][0] # assume at least one
ir_values.set(self.cr, self.uid, 'action', 'client_print_multi', 'Nice Report', ['unexisting_model'], 'ir.actions.report.xml,%d' % report_id, isobject=True)
ir_values.set(self.cr, self.uid, 'action', 'client_action_relate', 'Related Stuff', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_4, isobject=True)
# Replace one action binding to set a new name.
ir_values = self.registry('ir.values')
ir_values.set(self.cr, self.uid, 'action', 'tree_but_open', 'OnDblClick Action New', ['unexisting_model'], 'ir.actions.act_window,10', isobject=True)
ir_values.set(self.cr, self.uid, 'action', 'tree_but_open', 'OnDblClick Action New', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_1, isobject=True)
# Retrieve the action bindings and check they're correct
@ -73,17 +78,17 @@ class test_ir_values(common.TransactionCase):
#first action
assert len(actions[0]) == 3, "Malformed action definition"
assert actions[0][1] == 'OnDblClick Action 2', 'Bound action does not match definition'
assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == 11, 'Bound action does not match definition'
assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == act_id_2, 'Bound action does not match definition'
#second action - this one comes last because it was re-created with a different name
assert len(actions[1]) == 3, "Malformed action definition"
assert actions[1][1] == 'OnDblClick Action New', 'Re-Registering an action should replace it'
assert isinstance(actions[1][2], dict) and actions[1][2]['id'] == 10, 'Bound action does not match definition'
assert isinstance(actions[1][2], dict) and actions[1][2]['id'] == act_id_1, 'Bound action does not match definition'
actions = ir_values.get(self.cr, self.uid, 'action', 'client_action_multi', ['unexisting_model'])
assert len(actions) == 1, "Mismatching number of bound actions"
assert len(actions[0]) == 3, "Malformed action definition"
assert actions[0][1] == 'Side Wizard', 'Bound action does not match definition'
assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == 12, 'Bound action does not match definition'
assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == act_id_3, 'Bound action does not match definition'
actions = ir_values.get(self.cr, self.uid, 'action', 'client_print_multi', ['unexisting_model'])
assert len(actions) == 1, "Mismatching number of bound actions"
@ -95,7 +100,7 @@ class test_ir_values(common.TransactionCase):
assert len(actions) == 1, "Mismatching number of bound actions"
assert len(actions[0]) == 3, "Malformed action definition"
assert actions[0][1] == 'Related Stuff', 'Bound action does not match definition'
assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == 14, 'Bound action does not match definition'
assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == act_id_4, 'Bound action does not match definition'
if __name__ == '__main__':


@ -2,6 +2,8 @@ import logging
import sys
import openerp
from openerp import tools
from openerp.modules import module
_logger = logging.getLogger(__name__)
@ -32,8 +34,25 @@ import server
def main():
args = sys.argv[1:]
# The only shared option is '--addons-path=' needed to discover additional
# commands from modules
if len(args) > 1 and args[0].startswith('--addons-path=') and not args[1].startswith("-"):
tools.config.parse_config([args[0]])
args = args[1:]
# Default legacy command
command = "server"
# Subcommand discovery
if len(args) and not args[0].startswith("-"):
for m in module.get_modules():
m = 'openerp.addons.' + m
__import__(m)
#try:
#except Exception, e:
# raise
# print e
command = args[0]
args = args[1:]


@ -94,11 +94,7 @@ def setup_pid_file():
def preload_registry(dbname):
""" Preload a registry, and start the cron."""
try:
config = openerp.tools.config
update_module = True if config['init'] or config['update'] else False
db, registry = openerp.pooler.get_db_and_pool(
dbname, update_module=update_module, pooljobs=False,
force_demo=not config['without_demo'])
db, registry = openerp.pooler.get_db_and_pool(dbname, update_module=openerp.tools.config['init'] or openerp.tools.config['update'], pooljobs=False)
# jobs will start to be processed later, when openerp.cron.start_master_thread() is called by openerp.service.start_services()
registry.schedule_cron_jobs()
@ -109,9 +105,7 @@ def run_test_file(dbname, test_file):
""" Preload a registry, possibly run a test file, and start the cron."""
try:
config = openerp.tools.config
update_module = True if config['init'] or config['update'] else False
db, registry = openerp.pooler.get_db_and_pool(
dbname, update_module=update_module, pooljobs=False, force_demo=not config['without_demo'])
db, registry = openerp.pooler.get_db_and_pool(dbname, update_module=config['init'] or config['update'], pooljobs=False)
cr = db.cursor()
_logger.info('loading test file %s', test_file)
openerp.tools.convert_yaml_import(cr, 'base', file(test_file), 'test', {}, 'test', True)


@ -35,10 +35,6 @@ must be used.
import deprecation
# Maximum number of threads processing cron jobs concurrently.
max_cron_threads = 4 # Actually the default value here is meaningless,
# look at tools.config for the default value.
# Paths to search for OpenERP addons.
addons_paths = []


@ -1,212 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Cron jobs scheduling
Cron jobs are defined in the ir_cron table/model. This module deals with all
cron jobs, for all databases of a single OpenERP server instance.
It defines a single master thread that will spawn (a bounded number of)
threads to process individual cron jobs.
The thread runs forever, checking every 60 seconds for new
'database wake-ups'. It maintains a heapq of database wake-ups. At each
wake-up, it will call ir_cron._run_jobs_multithread() for the given database.
_run_jobs_multithread will check the jobs defined in the ir_cron table and
spawn threads to process them.
This module's behavior depends on the following configuration variable:
openerp.conf.max_cron_threads.
"""
import heapq
import logging
import threading
import time
import openerp
import tools
_logger = logging.getLogger(__name__)
# Heapq of database wake-ups. Note that 'database wake-up' meaning is in
# the context of the cron management. This is not originally about loading
# a database, although having the database name in the queue will
# cause it to be loaded when the scheduled time is reached, even if it was
# unloaded in the meantime. Normally a database's wake-up is cancelled by
# the RegistryManager when the database is unloaded - so this should not
# cause it to be reloaded.
#
# TODO: perhaps in the future we could consider a flag on ir.cron jobs
# that would cause database wake-up even if the database has not been
# loaded yet or was already unloaded (e.g. 'force_db_wakeup' or something)
#
# Each element is a triple (timestamp, database-name, boolean). The boolean
# specifies if the wake-up is canceled (so a wake-up can be canceled without
# relying on the heapq implementation detail; no need to remove the job from
# the heapq).
_wakeups = []
# Mapping of database names to the wake-up defined in the heapq,
# so that we can cancel the wake-up without messing with the heapq
# invariant: lookup the wake-up by database-name, then set
# its third element to True.
_wakeup_by_db = {}
# Re-entrant lock to protect the above _wakeups and _wakeup_by_db variables.
# We could use a simple (non-reentrant) lock if the runner function below
# was more fine-grained, but we are fine with the loop owning the lock
# while spawning a few threads.
_wakeups_lock = threading.RLock()
# Maximum number of threads allowed to process cron jobs concurrently. This
# variable is set by start_master_thread using openerp.conf.max_cron_threads.
_thread_slots = None
# A (non re-entrant) lock to protect the above _thread_slots variable.
_thread_slots_lock = threading.Lock()
# Sleep duration limits - must not loop too quickly, but can't sleep too long
# either, because a new job might be inserted in ir_cron with a much sooner
# execution date than current known ones. We won't see it until we wake!
MAX_SLEEP = 60 # 1 min
MIN_SLEEP = 1 # 1 sec
# Dummy wake-up timestamp that can be used to force a database wake-up asap
WAKE_UP_NOW = 1
def get_thread_slots():
""" Return the number of available thread slots """
return _thread_slots
def release_thread_slot():
""" Increment the number of available thread slots """
global _thread_slots
with _thread_slots_lock:
_thread_slots += 1
def take_thread_slot():
""" Decrement the number of available thread slots """
global _thread_slots
with _thread_slots_lock:
_thread_slots -= 1
def cancel(db_name):
""" Cancel the next wake-up of a given database, if any.
:param db_name: database name for which the wake-up is canceled.
"""
_logger.debug("Cancel next wake-up for database '%s'.", db_name)
with _wakeups_lock:
if db_name in _wakeup_by_db:
_wakeup_by_db[db_name][2] = True
def cancel_all():
""" Cancel all database wake-ups. """
_logger.debug("Cancel all database wake-ups")
global _wakeups
global _wakeup_by_db
with _wakeups_lock:
_wakeups = []
_wakeup_by_db = {}
def schedule_wakeup(timestamp, db_name):
""" Schedule a new wake-up for a database.
If an earlier wake-up is already defined, the new wake-up is discarded.
If another wake-up is defined, that wake-up is discarded and the new one
is scheduled.
:param db_name: database name for which a new wake-up is scheduled.
:param timestamp: when the wake-up is scheduled.
"""
if not timestamp:
return
with _wakeups_lock:
if db_name in _wakeup_by_db:
task = _wakeup_by_db[db_name]
if not task[2] and timestamp > task[0]:
# existing wakeup is valid and occurs earlier than new one
return
task[2] = True # cancel existing task
task = [timestamp, db_name, False]
heapq.heappush(_wakeups, task)
_wakeup_by_db[db_name] = task
_logger.debug("Wake-up scheduled for database '%s' @ %s", db_name,
'NOW' if timestamp == WAKE_UP_NOW else timestamp)
def runner():
"""Neverending function (intended to be run in a dedicated thread) that
checks the next database wake-up every 60 seconds. TODO: make this configurable
"""
while True:
runner_body()
def runner_body():
with _wakeups_lock:
while _wakeups and _wakeups[0][0] < time.time() and get_thread_slots():
task = heapq.heappop(_wakeups)
timestamp, db_name, canceled = task
if canceled:
continue
del _wakeup_by_db[db_name]
registry = openerp.pooler.get_pool(db_name)
if not registry._init:
_logger.debug("Database '%s' wake-up! Firing multi-threaded cron job processing", db_name)
registry['ir.cron']._run_jobs_multithread()
amount = MAX_SLEEP
with _wakeups_lock:
# Sleep less than MAX_SLEEP if the next known wake-up will happen before that.
if _wakeups and get_thread_slots():
amount = min(MAX_SLEEP, max(MIN_SLEEP, _wakeups[0][0] - time.time()))
_logger.debug("Going to sleep for %ss", amount)
time.sleep(amount)
def start_master_thread():
""" Start the above runner function in a daemon thread.
The thread is a typical daemon thread: it will never quit and must be
terminated when the main process exits - with no consequence (the processing
threads it spawns are not marked daemon).
"""
global _thread_slots
_thread_slots = openerp.conf.max_cron_threads
db_maxconn = tools.config['db_maxconn']
if _thread_slots >= tools.config.get('db_maxconn', 64):
_logger.warning("Connection pool size (%s) is set lower than max number of cron threads (%s), "
"this may cause trouble if you reach that number of parallel cron tasks.",
db_maxconn, _thread_slots)
t = threading.Thread(target=runner, name="openerp.cron.master_thread")
t.setDaemon(True)
t.start()
_logger.debug("Master cron daemon started!")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
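
A self-contained sketch of the wake-up heapq this removed module describes: entries are mutable [timestamp, db_name, canceled] triples, and cancellation merely flips the flag, so the heap invariant is never disturbed:

import heapq
import threading
import time

_wakeups = []          # heapq of [timestamp, db_name, canceled] triples
_wakeup_by_db = {}     # db_name -> its entry in the heapq
_lock = threading.RLock()

def schedule_wakeup(timestamp, db_name):
    with _lock:
        task = _wakeup_by_db.get(db_name)
        if task:
            if not task[2] and task[0] <= timestamp:
                return          # an earlier valid wake-up already exists
            task[2] = True      # tombstone the superseded entry
        task = [timestamp, db_name, False]
        heapq.heappush(_wakeups, task)
        _wakeup_by_db[db_name] = task

def pop_due_wakeup(now=None):
    now = now or time.time()
    with _lock:
        while _wakeups and _wakeups[0][0] <= now:
            timestamp, db_name, canceled = heapq.heappop(_wakeups)
            if canceled:
                continue        # skip tombstoned entries
            del _wakeup_by_db[db_name]
            return db_name
    return None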


@ -131,12 +131,13 @@ def ustr(value, hint_encoding='utf-8', errors='strict'):
upstream and should be tried first to decode ``value``.
:param str error: optional `errors` flag to pass to the unicode
built-in to indicate how illegal character values should be
treated when converting a string: 'strict', 'ignore' or 'replace'.
treated when converting a string: 'strict', 'ignore' or 'replace'
(see ``unicode()`` constructor).
Passing anything other than 'strict' means that the first
encoding tried will be used, even if it's not the correct
one to use, so be careful! Ignore if value is not a string/unicode.
:rtype: unicode
one to use, so be careful! Ignored if value is not a string/unicode.
:raise: UnicodeError if value cannot be coerced to unicode
:return: unicode string representing the given value
"""
if isinstance(value, Exception):
return exception_to_unicode(value)


@ -87,19 +87,15 @@ class Graph(dict):
for k, v in additional_data[package.name].items():
setattr(package, k, v)
def add_module(self, cr, module, force_demo=False):
self.add_modules(cr, [module], force_demo)
def add_module(self, cr, module, force=None):
self.add_modules(cr, [module], force)
def add_modules(self, cr, module_list, force_demo=False):
def add_modules(self, cr, module_list, force=None):
if force is None:
force = []
packages = []
len_graph = len(self)
for module in module_list:
if force_demo:
cr.execute("""
UPDATE ir_module_module
SET demo='t'
WHERE name = %s""",
(module,))
# This will raise an exception if the descriptor file is missing or unreadable.
# NOTE The call to load_information_from_description_file is already
# done by db.initialize, so it is possible to not do it again here.
@ -125,6 +121,9 @@ class Graph(dict):
current.remove(package)
node = self.add_node(package, info)
node.data = info
for kind in ('init', 'demo', 'update'):
if package in tools.config[kind] or 'all' in tools.config[kind] or kind in force:
setattr(node, kind, True)
else:
later.add(package)
packages.append((package, info))
@ -186,11 +185,18 @@ class Node(Singleton):
node.depth = self.depth + 1
if node not in self.children:
self.children.append(node)
for attr in ('init', 'update', 'demo'):
if hasattr(self, attr):
setattr(node, attr, True)
self.children.sort(lambda x, y: cmp(x.name, y.name))
return node
def __setattr__(self, name, value):
super(Singleton, self).__setattr__(name, value)
if name in ('init', 'update', 'demo'):
tools.config[name][self.name] = 1
for child in self.children:
setattr(child, name, value)
if name == 'depth':
for child in self.children:
setattr(child, name, value + 1)
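
Stripped to its core, the flag propagation in Node.__setattr__ above works like this sketch (illustrative only, not the module's actual class):

class Node(object):
    CASCADED = ('init', 'update', 'demo')   # one-shot flags propagated below
    def __init__(self, name):
        self.name = name
        self.children = []
    def __setattr__(self, name, value):
        object.__setattr__(self, name, value)
        if name in self.CASCADED:
            for child in self.children:
                setattr(child, name, value)   # cascade down the dependency graph

# base = Node('base'); base.children.append(Node('sale'))
# base.init = True    # 'sale' (and its own children, if any) now have init=True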


@ -42,7 +42,6 @@ from openerp import SUPERUSER_ID
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from openerp.tools import assertion_report
from openerp.modules.module import initialize_sys_path, \
load_openerp_module, init_module_models
@ -105,7 +104,10 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=
"""
for filename in package.data[kind]:
_logger.info("module %s: loading %s", module_name, filename)
if kind == 'test':
_logger.log(logging.TEST, "module %s: loading %s", module_name, filename)
else:
_logger.info("module %s: loading %s", module_name, filename)
_, ext = os.path.splitext(filename)
pathname = os.path.join(module_name, filename)
fp = tools.file_open(pathname)
@ -158,7 +160,7 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=
models = pool.load(cr, package)
loaded_modules.append(package.name)
if package.state in ('to install', 'to upgrade'):
if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
init_module_models(cr, package.name, models)
pool._init_modules.add(package.name)
status['progress'] = float(index) / len(graph)
@ -172,19 +174,18 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=
idref = {}
if package.state == 'to install':
mode = 'update'
if hasattr(package, 'init') or package.state == 'to install':
mode = 'init'
else:
mode = 'update'
if package.state in ('to install', 'to upgrade'):
if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
if package.state=='to upgrade':
# upgrading the module information
modobj.write(cr, SUPERUSER_ID, [module_id], modobj.get_values_from_terp(package.data))
load_init_xml(module_name, idref, mode)
load_update_xml(module_name, idref, mode)
load_data(module_name, idref, mode)
if package.dbdemo and package.state != 'installed':
if hasattr(package, 'demo') or (package.dbdemo and package.state != 'installed'):
status['progress'] = (index + 0.75) / len(graph)
load_demo_xml(module_name, idref, mode)
load_demo(module_name, idref, mode)
@ -214,6 +215,9 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=
modobj.update_translations(cr, SUPERUSER_ID, [module_id], None)
package.state = 'installed'
for kind in ('init', 'demo', 'update'):
if hasattr(package, kind):
delattr(package, kind)
cr.commit()
@ -268,7 +272,10 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
if not openerp.modules.db.is_initialized(cr):
_logger.info("init db")
openerp.modules.db.initialize(cr)
update_module = True
tools.config["init"]["all"] = 1
tools.config['update']['all'] = 1
if not tools.config['without_demo']:
tools.config["demo"]['all'] = 1
# This is a brand new pool, just created in pooler.get_db_and_pool()
pool = pooler.get_pool(cr.dbname)
@ -278,51 +285,43 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
# STEP 1: LOAD BASE (must be done before module dependencies can be computed for later steps)
graph = openerp.modules.graph.Graph()
graph.add_module(cr, 'base', force_demo)
graph.add_module(cr, 'base', force)
if not graph:
_logger.critical('module base cannot be loaded! (hint: verify addons-path)')
raise osv.osv.except_osv(_('Could not load base module'), _('module base cannot be loaded! (hint: verify addons-path)'))
# processed_modules: for cleanup step after install
# loaded_modules: to avoid double loading
# After load_module_graph(), 'base' has been installed or updated and its state is 'installed'.
report = assertion_report.assertion_report()
loaded_modules, processed_modules = load_module_graph(cr, graph, status, report=report)
report = pool._assertion_report
loaded_modules, processed_modules = load_module_graph(cr, graph, status, perform_checks=(not update_module), report=report)
if tools.config['load_language']:
for lang in tools.config['load_language'].split(','):
tools.load_language(cr, lang)
# STEP 2: Mark other modules to be loaded/updated
# This is a one-shot use of tools.config[init|update] from the command line
# arguments. It is directly cleared so as not to interfere with later create/update
# issued via RPC.
if update_module:
modobj = pool.get('ir.module.module')
if ('base' in tools.config['init']) or ('base' in tools.config['update']) \
or ('all' in tools.config['init']) or ('all' in tools.config['update']):
if ('base' in tools.config['init']) or ('base' in tools.config['update']):
_logger.info('updating modules list')
modobj.update_list(cr, SUPERUSER_ID)
if 'all' in tools.config['init']:
ids = modobj.search(cr, 1, [])
tools.config['init'] = dict.fromkeys([m['name'] for m in modobj.read(cr, 1, ids, ['name'])], 1)
_check_module_names(cr, itertools.chain(tools.config['init'].keys(), tools.config['update'].keys()))
mods = [k for k in tools.config['init'] if tools.config['init'][k] and k not in ('base', 'all')]
ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'uninstalled'), ('name', 'in', mods)])
if ids:
modobj.button_install(cr, SUPERUSER_ID, ids) # goes from 'uninstalled' to 'to install'
mods = [k for k in tools.config['init'] if tools.config['init'][k]]
if mods:
ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'uninstalled'), ('name', 'in', mods)])
if ids:
modobj.button_install(cr, SUPERUSER_ID, ids)
mods = [k for k in tools.config['update'] if tools.config['update'][k] and k not in ('base', 'all')]
ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'installed'), ('name', 'in', mods)])
if ids:
modobj.button_upgrade(cr, SUPERUSER_ID, ids) # goes from 'installed' to 'to upgrade'
mods = [k for k in tools.config['update'] if tools.config['update'][k]]
if mods:
ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'installed'), ('name', 'in', mods)])
if ids:
modobj.button_upgrade(cr, SUPERUSER_ID, ids)
cr.execute("update ir_module_module set state=%s where name=%s", ('installed', 'base'))
# Remove that funky global one-shot thingy.
for kind in ('init', 'demo', 'update'):
tools.config[kind] = {}
# STEP 3: Load marked modules (skipping base which was done in STEP 1)
# IMPORTANT: this is done in two parts, first loading all installed or
@ -374,6 +373,9 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
# Cleanup orphan records
pool.get('ir.model.data')._process_end(cr, SUPERUSER_ID, processed_modules)
for kind in ('init', 'demo', 'update'):
tools.config[kind] = {}
cr.commit()
# STEP 5: Cleanup menus


@ -433,8 +433,9 @@ def get_modules():
return name
def is_really_module(name):
name = opj(dir, name)
return os.path.isdir(name) or zipfile.is_zipfile(name)
manifest_name = opj(dir, name, '__openerp__.py')
zipfile_name = opj(dir, name)
return os.path.isfile(manifest_name) or zipfile.is_zipfile(zipfile_name)
return map(clean, filter(is_really_module, os.listdir(dir)))
plist = []
@ -553,7 +554,7 @@ def run_unit_tests(module_name):
for m in ms:
suite.addTests(unittest2.TestLoader().loadTestsFromModule(m))
if ms:
_logger.info('module %s: executing %s `fast_suite` and/or `checks` sub-modules', module_name, len(ms))
_logger.log(logging.TEST, 'module %s: executing %s `fast_suite` and/or `checks` sub-modules', module_name, len(ms))
# Use a custom stream object to log the test executions.
class MyStream(object):
def __init__(self):


@ -28,7 +28,6 @@ import threading
import openerp.sql_db
import openerp.osv.orm
import openerp.cron
import openerp.tools
import openerp.modules.db
import openerp.tools.config
@ -58,6 +57,21 @@ class Registry(object):
self.db_name = db_name
self.db = openerp.sql_db.db_connect(db_name)
# In monoprocess cron jobs flag (pooljobs)
self.cron = False
# Inter-process signaling (used only when openerp.multi_process is True):
# The `base_registry_signaling` sequence indicates the whole registry
# must be reloaded.
# The `base_cache_signaling` sequence indicates all caches must be
# invalidated (i.e. cleared).
self.base_registry_signaling_sequence = 1
self.base_cache_signaling_sequence = 1
# Flag indicating if at least one model cache has been cleared.
# Useful only in a multi-process context.
self._any_cache_cleared = False
cr = self.db.cursor()
has_unaccent = openerp.modules.db.has_unaccent(cr)
if openerp.tools.config['unaccent'] and not has_unaccent:
@ -112,7 +126,7 @@ class Registry(object):
monitor the ir.cron model for future jobs. See openerp.cron for
details.
"""
openerp.cron.schedule_wakeup(openerp.cron.WAKE_UP_NOW, self.db.dbname)
self.cron = True
def clear_caches(self):
""" Clear the caches
@ -121,6 +135,36 @@ class Registry(object):
"""
for model in self.models.itervalues():
model.clear_caches()
# Special case for ir_ui_menu which does not use openerp.tools.ormcache.
ir_ui_menu = self.models.get('ir.ui.menu')
if ir_ui_menu:
ir_ui_menu.clear_cache()
# Useful only in a multi-process context.
def reset_any_cache_cleared(self):
self._any_cache_cleared = False
# Useful only in a multi-process context.
def any_cache_cleared(self):
return self._any_cache_cleared
@classmethod
def setup_multi_process_signaling(cls, cr):
if not openerp.multi_process:
return
# Inter-process signaling:
# The `base_registry_signaling` sequence indicates the whole registry
# must be reloaded.
# The `base_cache_signaling` sequence indicates all caches must be
# invalidated (i.e. cleared).
cr.execute("""SELECT sequence_name FROM information_schema.sequences WHERE sequence_name='base_registry_signaling'""")
if not cr.fetchall():
cr.execute("""CREATE SEQUENCE base_registry_signaling INCREMENT BY 1 START WITH 1""")
cr.execute("""SELECT nextval('base_registry_signaling')""")
cr.execute("""CREATE SEQUENCE base_cache_signaling INCREMENT BY 1 START WITH 1""")
cr.execute("""SELECT nextval('base_cache_signaling')""")
@contextmanager
def cursor(self, auto_commit=True):
@ -182,6 +226,7 @@ class RegistryManager(object):
cr = registry.db.cursor()
try:
Registry.setup_multi_process_signaling(cr)
registry.do_parent_store(cr)
registry.get('ir.actions.report.xml').register_all(cr)
cr.commit()
@ -195,20 +240,11 @@ class RegistryManager(object):
@classmethod
def delete(cls, db_name):
"""Delete the registry linked to a given database.
This also cleans the associated caches. For good measure this also
cancels the associated cron job. But please note that the cron job can
be running and take some time before ending, and that you should not
remove a registry if it can still be used by some thread. So it might
be necessary to call yourself openerp.cron.Agent.cancel(db_name) and
and join (i.e. wait for) the thread.
"""
"""Delete the registry linked to a given database. """
with cls.registries_lock:
if db_name in cls.registries:
cls.registries[db_name].clear_caches()
del cls.registries[db_name]
openerp.cron.cancel(db_name)
@classmethod
def delete_all(cls):
@ -232,5 +268,71 @@ class RegistryManager(object):
if db_name in cls.registries:
cls.registries[db_name].clear_caches()
@classmethod
def check_registry_signaling(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
registry = cls.get(db_name, pooljobs=False)
cr = registry.db.cursor()
try:
cr.execute("""
SELECT base_registry_signaling.last_value,
base_cache_signaling.last_value
FROM base_registry_signaling, base_cache_signaling""")
r, c = cr.fetchone()
# Check if the model registry must be reloaded (e.g. after the
# database has been updated by another process).
if registry.base_registry_signaling_sequence != r:
_logger.info("Reloading the model registry after database signaling.")
# Don't run the cron in the Gunicorn worker.
registry = cls.new(db_name, pooljobs=False)
registry.base_registry_signaling_sequence = r
# Check if the model caches must be invalidated (e.g. after a write
# occurred in another process). Don't clear right after the registry
# has been reloaded.
elif registry.base_cache_signaling_sequence != c:
_logger.info("Invalidating all model caches after database signaling.")
registry.base_cache_signaling_sequence = c
registry.clear_caches()
registry.reset_any_cache_cleared()
# One possible reason caches have been invalidated is the
# use of decimal_precision.write(), in which case we need
# to refresh fields.float columns.
for model in registry.models.values():
for column in model._columns.values():
if hasattr(column, 'digits_change'):
column.digits_change(cr)
finally:
cr.close()
@classmethod
def signal_caches_change(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
# Check the registries if any cache has been cleared and signal it
# through the database to other processes.
registry = cls.get(db_name, pooljobs=False)
if registry.any_cache_cleared():
_logger.info("At least one model cache has been cleared, signaling through the database.")
cr = registry.db.cursor()
r = 1
try:
cr.execute("select nextval('base_cache_signaling')")
r = cr.fetchone()[0]
finally:
cr.close()
registry.base_cache_signaling_sequence = r
registry.reset_any_cache_cleared()
@classmethod
def signal_registry_change(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
registry = cls.get(db_name, pooljobs=False)
cr = registry.db.cursor()
r = 1
try:
cr.execute("select nextval('base_registry_signaling')")
r = cr.fetchone()[0]
finally:
cr.close()
registry.base_registry_signaling_sequence = r
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
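
A sketch of the polling side of this sequence-based signaling; reload_registry is a hypothetical callback that rebuilds the model registry for the database:

def check_signaling(cr, registry, reload_registry):
    cr.execute("SELECT base_registry_signaling.last_value, "
               "base_cache_signaling.last_value "
               "FROM base_registry_signaling, base_cache_signaling")
    r, c = cr.fetchone()
    if registry.base_registry_signaling_sequence != r:
        # Another process changed the database contents: full reload.
        registry = reload_registry()
        registry.base_registry_signaling_sequence = r
    elif registry.base_cache_signaling_sequence != c:
        # Another process invalidated its caches: clearing ours is enough.
        registry.clear_caches()
        registry.base_cache_signaling_sequence = c
        registry.reset_any_cache_cleared()
    return registry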


@ -40,23 +40,6 @@ import openerp
_logger = logging.getLogger(__name__)
def close_socket(sock):
""" Closes a socket instance cleanly
:param sock: the network socket to close
:type sock: socket.socket
"""
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error, e:
# On OSX, shutting down one side of a socket shuts down both sides,
# causing an error 57 'Socket is not connected' on shutdown
# of the other side (or something), see
# http://bugs.python.org/issue4397
# note: stdlib fixed test, not behavior
if e.errno != errno.ENOTCONN or platform.system() != 'Darwin':
raise
sock.close()
#.apidoc title: Common Services: netsvc
@ -67,9 +50,9 @@ def abort_response(dummy_1, description, dummy_2, details):
raise openerp.osv.osv.except_osv(description, details)
class Service(object):
""" Base class for *Local* services
Functionality here is trusted, no authentication.
""" Base class for Local services
Functionality here is trusted, no authentication.
Workflow engine and reports subclass this.
"""
_services = {}
def __init__(self, name):
@ -145,7 +128,6 @@ class ColoredFormatter(DBFormatter):
record.levelname = COLOR_PATTERN % (30 + fg_color, 40 + bg_color, record.levelname)
return DBFormatter.format(self, record)
def init_logger():
from tools.translate import resetlocale
resetlocale()
@ -246,85 +228,6 @@ def init_alternative_logger():
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Server:
""" Generic interface for all servers with an event loop etc.
Override this to implement http, net-rpc etc. servers.
Servers here must have threaded behaviour. start() must not block,
there is no run().
"""
__is_started = False
__servers = []
__starter_threads = []
# we don't want blocking server calls (think select()) to
# wait forever and possibly prevent exiting the process,
# but instead we want a form of polling/busy_wait pattern, where
# _server_timeout should be used as the default timeout for
# all I/O blocking operations
_busywait_timeout = 0.5
def __init__(self):
Server.__servers.append(self)
if Server.__is_started:
# raise Exception('All instances of servers must be inited before the startAll()')
# Since the startAll() won't be called again, allow this server to
# init and then start it after 1sec (hopefully). Register that
# timer thread in a list, so that we can abort the start if quitAll
# is called in the meantime
t = threading.Timer(1.0, self._late_start)
t.name = 'Late start timer for %s' % str(self.__class__)
Server.__starter_threads.append(t)
t.start()
def start(self):
_logger.debug("called stub Server.start")
def _late_start(self):
self.start()
for thr in Server.__starter_threads:
if thr.finished.is_set():
Server.__starter_threads.remove(thr)
def stop(self):
_logger.debug("called stub Server.stop")
def stats(self):
""" This function should return statistics about the server """
return "%s: No statistics" % str(self.__class__)
@classmethod
def startAll(cls):
if cls.__is_started:
return
_logger.info("Starting %d services" % len(cls.__servers))
for srv in cls.__servers:
srv.start()
cls.__is_started = True
@classmethod
def quitAll(cls):
if not cls.__is_started:
return
_logger.info("Stopping %d services" % len(cls.__servers))
for thr in cls.__starter_threads:
if not thr.finished.is_set():
thr.cancel()
cls.__starter_threads.remove(thr)
for srv in cls.__servers:
srv.stop()
cls.__is_started = False
@classmethod
def allStats(cls):
res = ["Servers %s" % ('stopped', 'started')[cls.__is_started]]
res.extend(srv.stats() for srv in cls.__servers)
return '\n'.join(res)
def _close_socket(self):
close_socket(self.socket)
def replace_request_password(args):
# password is always 3rd argument in a request, we replace it in RPC logs
# so it's easier to forward logs for diagnostics/debugging purposes...


@ -163,7 +163,7 @@ class boolean(_column):
_logger.debug(
"required=True is deprecated: making a boolean field"
" `required` has no effect, as NULL values are "
"automatically turned into False.")
"automatically turned into False. args: %r",args)
class integer(_column):
_type = 'integer'
@ -706,10 +706,7 @@ class many2many(_column):
if where_c:
where_c = ' AND ' + where_c
if offset or self._limit:
order_by = ' ORDER BY "%s".%s' %(obj._table, obj._order.split(',')[0])
else:
order_by = ''
order_by = ' ORDER BY "%s".%s' %(obj._table, obj._order.split(',')[0])
limit_str = ''
if self._limit is not None:


@ -916,7 +916,16 @@ class BaseModel(object):
else:
new.extend(cls.__dict__.get(s, []))
nattr[s] = new
# Keep links to non-inherited constraints, e.g. useful when exporting translations
nattr['_local_constraints'] = cls.__dict__.get('_constraints', [])
nattr['_local_sql_constraints'] = cls.__dict__.get('_sql_constraints', [])
cls = type(name, (cls, parent_class), dict(nattr, _register=False))
else:
cls._local_constraints = getattr(cls, '_constraints', [])
cls._local_sql_constraints = getattr(cls, '_sql_constraints', [])
if not getattr(cls, '_original_module', None):
cls._original_module = cls._module
obj = object.__new__(cls)
@ -1111,7 +1120,7 @@ class BaseModel(object):
if not model_data.search(cr, uid, [('name', '=', n)]):
break
postfix += 1
model_data.create(cr, uid, {
model_data.create(cr, SUPERUSER_ID, {
'name': n,
'model': self._name,
'res_id': r['id'],
@ -1515,6 +1524,8 @@ class BaseModel(object):
error_msgs = []
for constraint in self._constraints:
fun, msg, fields = constraint
# We don't pass around the context here: validation code
# must always yield the same results.
if not fun(self, cr, uid, ids):
# Check presence of __call__ directly instead of using
# callable() because it will be deprecated as of Python 3.0
@@ -1838,7 +1849,7 @@ class BaseModel(object):
if trans:
node.set('string', trans)
for attr_name in ('confirm', 'sum', 'help', 'placeholder'):
for attr_name in ('confirm', 'sum', 'avg', 'help', 'placeholder'):
attr_value = node.get(attr_name)
if attr_value:
trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], attr_value)
@@ -2498,6 +2509,7 @@ class BaseModel(object):
try:
getattr(self, '_ormcache')
self._ormcache = {}
self.pool._any_cache_cleared = True
except AttributeError:
pass
@@ -2670,13 +2682,19 @@ class BaseModel(object):
order = orderby or groupby
data_ids = self.search(cr, uid, [('id', 'in', alldata.keys())], order=order, context=context)
# the IDS of records that have groupby field value = False or '' should be sorted too
data_ids += filter(lambda x:x not in data_ids, alldata.keys())
data = self.read(cr, uid, data_ids, groupby and [groupby] or ['id'], context=context)
# restore order of the search as read() uses the default _order (this is only for groups, so the size of data_read should be small):
data.sort(lambda x,y: cmp(data_ids.index(x['id']), data_ids.index(y['id'])))
# the IDs of records that have groupby field value = False or '' should be included too
data_ids += set(alldata.keys()).difference(data_ids)
if groupby:
data = self.read(cr, uid, data_ids, [groupby], context=context)
# restore order of the search as read() uses the default _order (this is only for groups, so the footprint of data should be small):
data_dict = dict((d['id'], d[groupby] ) for d in data)
result = [{'id': i, groupby: data_dict[i]} for i in data_ids]
else:
result = [{'id': i} for i in data_ids]
for d in data:
for d in result:
if groupby:
d['__domain'] = [(groupby, '=', alldata[d['id']][groupby] or False)] + domain
if not isinstance(groupby_list, (str, unicode)):
@@ -2697,11 +2715,11 @@ class BaseModel(object):
del d['id']
if groupby and groupby in self._group_by_full:
data = self._read_group_fill_results(cr, uid, domain, groupby, groupby_list,
aggregated_fields, data, read_group_order=order,
context=context)
result = self._read_group_fill_results(cr, uid, domain, groupby, groupby_list,
aggregated_fields, result, read_group_order=order,
context=context)
return data
return result
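The reordering trick used above (read() returns rows in the model's default _order, so rows are mapped by id and re-emitted in search order) works standalone; a minimal sketch:

def reorder_by_ids(rows, ordered_ids):
    # rebuild `rows` following `ordered_ids`, dropping unknown ids
    by_id = dict((row['id'], row) for row in rows)
    return [by_id[i] for i in ordered_ids if i in by_id]

rows = [{'id': 3, 'state': 'done'}, {'id': 1, 'state': 'draft'}]
print reorder_by_ids(rows, [1, 3])
# -> [{'id': 1, 'state': 'draft'}, {'id': 3, 'state': 'done'}]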
def _inherits_join_add(self, current_table, parent_model_name, query):
"""
@@ -3217,7 +3235,7 @@ class BaseModel(object):
def _create_table(self, cr):
cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS' % (self._table,))
cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
_schema.debug("Table '%s': created", self._table)
@@ -3301,7 +3319,7 @@ class BaseModel(object):
raise except_orm('Programming Error', ('Many2Many destination model does not exist: `%s`') % (f._obj,))
dest_model = self.pool.get(f._obj)
ref = dest_model._table
cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s")) WITH OIDS' % (m2m_tbl, col1, col2, col1, col2))
cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
# create foreign key references with ondelete=cascade, unless the targets are SQL views
cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
if not cr.fetchall():
@@ -3898,7 +3916,7 @@ class BaseModel(object):
if isinstance(ids, (int, long)):
ids = [ids]
result_store = self._store_get_values(cr, uid, ids, None, context)
result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
self._check_concurrency(cr, ids, context)
@@ -4305,11 +4323,16 @@ class BaseModel(object):
del vals[self._inherits[table]]
record_id = tocreate[table].pop('id', None)
# When linking/creating parent records, force a context without the
# 'no_store_function' key that defers stored-function computation, as the
# parent's stored fields won't be recomputed in batch at the end of create().
parent_context = dict(context)
parent_context.pop('no_store_function', None)
if record_id is None or not record_id:
record_id = self.pool.get(table).create(cr, user, tocreate[table], context=context)
record_id = self.pool.get(table).create(cr, user, tocreate[table], context=parent_context)
else:
self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=context)
self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=parent_context)
upd0 += ',' + self._inherits[table]
upd1 += ',%s'
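The copy-then-pop idiom above derives a context for the parent record without mutating the caller's dict; in isolation:

context = {'lang': 'en_US', 'no_store_function': True}
parent_context = dict(context)
parent_context.pop('no_store_function', None)
assert 'no_store_function' in context            # caller's dict untouched
assert 'no_store_function' not in parent_context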
@@ -5198,6 +5221,7 @@ class AbstractModel(BaseModel):
"""
_auto = False # don't create any database backend for AbstractModels
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = False
def itemgetter_tuple(items):
""" Fixes itemgetter inconsistency (useful in some cases) of not returning


@@ -23,6 +23,8 @@
from functools import wraps
import logging
import threading
from psycopg2 import IntegrityError, errorcodes
import orm
@@ -168,6 +170,7 @@ class object_proxy(object):
@check
def execute(self, db, uid, obj, method, *args, **kw):
threading.currentThread().dbname = db
cr = pooler.get_db(db).cursor()
try:
try:
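Tagging the current thread with the database name makes it available to any code running in that request, e.g. a logging filter. A plausible consumer (a sketch, not the actual OpenERP logging setup) would be:

import logging
import threading

class DBNameFilter(logging.Filter):
    def filter(self, record):
        # pick up the name stored by execute() above, if any
        record.dbname = getattr(threading.currentThread(), 'dbname', '?')
        return True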


@@ -163,15 +163,30 @@ class report_rml(report_int):
# * (re)build/update the stylesheet with the translated items
def translate(doc, lang):
for node in doc.xpath('//*[@t]'):
if not node.text:
continue
translation = ir_translation_obj._get_source(cr, uid, self.name2, 'xsl', lang, node.text)
if translation:
node.text = translation
translate_aux(doc, lang, False)
def translate_aux(doc, lang, t):
for node in doc:
t = t or node.get("t")
if t:
text = None
tail = None
if node.text:
text = node.text.strip().replace('\n',' ')
if node.tail:
tail = node.tail.strip().replace('\n',' ')
if text:
translation1 = ir_translation_obj._get_source(cr, uid, self.name2, 'xsl', lang, text)
if translation1:
node.text = node.text.replace(text, translation1)
if tail:
translation2 = ir_translation_obj._get_source(cr, uid, self.name2, 'xsl', lang, tail)
if translation2:
node.tail = node.tail.replace(tail, translation2)
translate_aux(node, lang, t)
if context.get('lang', False):
translate(stylesheet, context['lang'])
translate(stylesheet.iter(), context['lang'])
transform = etree.XSLT(stylesheet)
xml = etree.tostring(
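translate_aux above has to handle both .text and .tail because lxml attaches text that follows a closing tag to the preceding sibling, not to the parent; a quick demonstration:

from lxml import etree

root = etree.fromstring('<p t="1">Hello<b>bold</b>world</p>')
b = root.find('b')
print root.text  # 'Hello'
print b.tail     # 'world': trailing text lives on <b>, not on <p>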


@@ -99,7 +99,7 @@ class NumberedCanvas(canvas.Canvas):
key = key + 1
self.setFont("Helvetica", 8)
self.drawRightString((self._pagesize[0]-30), (self._pagesize[1]-40),
"Page %(this)i of %(total)i" % {
" %(this)i / %(total)i" % {
'this': self._pageNumber+1,
'total': self.pages.get(key,False),
}
@@ -118,15 +118,19 @@
self._doc.SaveToFile(self._filename, self)
class PageCount(platypus.Flowable):
def __init__(self, story_count=0):
platypus.Flowable.__init__(self)
self.story_count = story_count
def draw(self):
self.canv.beginForm("pageCount")
self.canv.beginForm("pageCount%d" % (self.story_count))
self.canv.setFont("Helvetica", utils.unit_get(str(8)))
self.canv.drawString(0, 0, str(self.canv.getPageNumber()))
self.canv.endForm()
class PageReset(platypus.Flowable):
def draw(self):
self.canv._pageNumber = 0
self.canv._doPageReset = True
class _rml_styles(object,):
def __init__(self, nodes, localcontext):
@@ -343,7 +347,7 @@ class _rml_canvas(object):
if n.tag == 'pageCount':
if x or y:
self.canvas.translate(x,y)
self.canvas.doForm('pageCount')
self.canvas.doForm('pageCount%s' % (self.canvas._storyCount,))
if x or y:
self.canvas.translate(-x,-y)
if n.tag == 'pageNumber':
@@ -878,6 +882,13 @@ class EndFrameFlowable(ActionFlowable):
ActionFlowable.__init__(self,('frameEnd',resume))
class TinyDocTemplate(platypus.BaseDocTemplate):
def beforeDocument(self):
# Store some useful values directly on the canvas, so they are available
# when flowables are drawn (needed for proper PageCount handling)
self.canv._doPageReset = False
self.canv._storyCount = 0
def ___handle_pageBegin(self):
self.page = self.page + 1
self.pageTemplate.beforeDrawPage(self.canv,self)
@@ -893,12 +904,24 @@ class TinyDocTemplate(platypus.BaseDocTemplate):
self.frame = f
break
self.handle_frameBegin()
def afterFlowable(self, flowable):
if isinstance(flowable, PageReset):
self.canv._pageCount=self.page
self.page=0
self.canv._flag=True
def afterPage(self):
if self.canv._doPageReset:
# Following a <pageReset/> tag:
# - we reset the page number to 0
# - we add a new PageCount flowable (relative to the current
# story number), but not for NumberedCanvas, as it handles the
# page count itself
# NOTE: the _rml_template render() method adds a PageReset flowable at
# the end of each story, so we are sure to pass here at least once per story.
if not isinstance(self.canv, NumberedCanvas):
self.handle_flowable([ PageCount(story_count=self.canv._storyCount) ])
self.canv._pageCount = self.page
self.page = 0
self.canv._flag = True
self.canv._pageNumber = 0
self.canv._doPageReset = False
self.canv._storyCount += 1
class _rml_template(object):
def __init__(self, localcontext, out, node, doc, images=None, path='.', title=None):
@@ -965,7 +988,6 @@ class _rml_template(object):
self.doc_tmpl.afterFlowable(fis)
self.doc_tmpl.build(fis,canvasmaker=NumberedCanvas)
else:
fis.append(PageCount())
self.doc_tmpl.build(fis)
def parseNode(rml, localcontext=None, fout=None, images=None, path='.', title=None):
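The pageCount trick above relies on reportlab form XObjects: doForm() can reference a named form that beginForm()/endForm() only define later, so the total page count is effectively back-patched at save time. A reduced sketch (file name and the literal count are illustrative):

from reportlab.pdfgen import canvas

c = canvas.Canvas('forms_demo.pdf')
c.drawString(100, 750, 'Total pages:')
c.saveState()
c.translate(160, 750)
c.doForm('pageCount0')     # referenced first...
c.restoreState()
c.showPage()
c.beginForm('pageCount0')  # ...defined afterwards, as PageCount.draw() does
c.setFont('Helvetica', 8)
c.drawString(0, 0, '2')
c.endForm()
c.save()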


@@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
##############################################################################
#
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@@ -15,7 +16,7 @@
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
@@ -23,17 +24,16 @@ import logging
import threading
import time
import http_server
import cron
import netrpc_server
import web_services
import websrv_lib
import web_services
import wsgi_server
import openerp.cron
import openerp.modules
import openerp.netsvc
import openerp.osv
import openerp.tools
import openerp.service.wsgi_server
#.apidoc title: RPC Services
@@ -61,6 +61,7 @@ Maybe you forgot to add those addons in your addons_path configuration."""
_logger.exception('Failed to load server-wide module `%s`.%s', m, msg)
start_internal_done = False
main_thread_id = threading.currentThread().ident
def start_internal():
global start_internal_done
@@ -68,38 +69,32 @@ def start_internal():
return
openerp.netsvc.init_logger()
openerp.modules.loading.open_openerp_namespace()
# Instantiate local services (this is a legacy design).
openerp.osv.osv.start_object_proxy()
# Export (for RPC) services.
web_services.start_web_services()
web_services.start_service()
load_server_wide_modules()
start_internal_done = True
def start_services():
""" Start all services including http, netrpc and cron """
start_internal()
# Initialize the HTTP stack.
netrpc_server.init_servers()
# Start the main cron thread.
if openerp.conf.max_cron_threads:
openerp.cron.start_master_thread()
# Start the top-level servers threads (normally HTTP, HTTPS, and NETRPC).
openerp.netsvc.Server.startAll()
# Initialize the NETRPC server.
netrpc_server.start_service()
# Start the WSGI server.
openerp.service.wsgi_server.start_server()
wsgi_server.start_service()
# Start the main cron thread.
cron.start_service()
def stop_services():
""" Stop all services. """
# stop scheduling new jobs; we will have to wait for the jobs to complete below
openerp.cron.cancel_all()
# stop services
cron.stop_service()
netrpc_server.stop_service()
wsgi_server.stop_service()
openerp.netsvc.Server.quitAll()
openerp.service.wsgi_server.stop_server()
config = openerp.tools.config
_logger.info("Initiating shutdown")
_logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")
logging.shutdown()
@@ -107,8 +102,9 @@ def stop_services():
# Manually join() all threads before calling sys.exit() to allow a second signal
# to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
# threading.Thread.join() should not mask signals (at least in python 2.5).
me = threading.currentThread()
for thread in threading.enumerate():
if thread != threading.currentThread() and not thread.isDaemon():
if thread != me and not thread.isDaemon() and thread.ident != main_thread_id:
while thread.isAlive():
# Need a busyloop here as thread.join() masks signals
# and would prevent the forced shutdown.
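The loop body is truncated by this hunk; presumably it joins with a short timeout so signals can still be delivered, something like the following sketch (the 0.05s values are an assumption):

import time

while thread.isAlive():
    # join() with a timeout instead of a bare join(), which masks signals
    thread.join(0.05)
    time.sleep(0.05)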
@@ -119,10 +115,9 @@ def stop_services():
def start_services_workers():
import openerp.service.workers
openerp.multi_process = True # Nah!
openerp.multi_process = True
openerp.service.workers.Multicorn(openerp.service.wsgi_server.application).run()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

openerp/service/cron.py (new file, 74 lines)

@@ -0,0 +1,74 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Cron jobs scheduling
Cron jobs are defined in the ir_cron table/model. This module deals with all
cron jobs, for all databases of a single OpenERP server instance.
"""
import logging
import threading
import time
import openerp
_logger = logging.getLogger(__name__)
SLEEP_INTERVAL = 60 # 1 min
def cron_runner(number):
while True:
time.sleep(SLEEP_INTERVAL + number) # Steve Reich timing style
registries = openerp.modules.registry.RegistryManager.registries
_logger.debug('cron%d polling for jobs', number)
for db_name, registry in registries.items():
while registry.cron:
# acquired = openerp.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)
# TODO: why isn't openerp.addons.base defined here?
import sys
base = sys.modules['addons.base']
acquired = base.ir.ir_cron.ir_cron._acquire_job(db_name)
if not acquired:
break
def start_service():
""" Start the above runner function in a daemon thread.
The thread is a typical daemon thread: it will never quit and must be
terminated when the main process exits - with no consequence (the processing
threads it spawns are not marked daemon).
"""
for i in range(openerp.tools.config['max_cron_threads']):
# Bind the loop variable through args=: a bare closure would look up
# `i` only when the thread runs, so late-starting threads could all
# see the final value of `i`.
t = threading.Thread(target=cron_runner, args=(i,), name="openerp.service.cron.cron%d" % i)
t.setDaemon(True)
t.start()
_logger.debug("cron%d started!", i)
def stop_service():
pass
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
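Passing `i` through args= in start_service() matters because a bare closure captures the variable, not its value; a quick demonstration of the safe form:

import threading

def show(n):
    print n

for i in range(3):
    t = threading.Thread(target=show, args=(i,))
    t.start()
    t.join()  # prints 0, 1, 2: each thread received its own value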

Some files were not shown because too many files have changed in this diff.