[MERGE] merged logging changes: use a top-level `openerp` namespace. Similar changes for the addons will be done after 6.1.

bzr revid: vmt@openerp.com-20120208105913-habqxas406osaz9z
Author: Vo Minh Thu <vmt@openerp.com>
Date:   2012-02-08 11:59:13 +01:00
Commit: 876c61b052
51 changed files with 557 additions and 563 deletions
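The change is mechanical but worth seeing in isolation. A minimal, self-contained sketch of the before/after pattern (module and version strings here are illustrative, not taken from the diff):

import logging
logging.basicConfig(level=logging.INFO)

# Before: ad-hoc channel names ('server', 'init', 'cron', ...) scattered
# across call sites, unrelated to the module hierarchy.
logging.getLogger('server').info('OpenERP version %s', '6.1')

# After: one module-level logger per file, named after the module, so every
# logger hangs under the top-level 'openerp' namespace and can be tuned
# (level, handlers, propagation) from that single root.
_logger = logging.getLogger(__name__)  # e.g. 'openerp.modules.loading'
_logger.info('OpenERP version %s', '6.1')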

View File

@ -30,6 +30,7 @@ GNU Public Licence.
(c) 2003-TODAY, Fabien Pinckaers - OpenERP SA
"""
import imp
import logging
import os
import signal
@ -42,8 +43,8 @@ import openerp
__author__ = openerp.release.author
__version__ = openerp.release.version
import sys
import imp
# Also use the `openerp` logger for the main script.
_logger = logging.getLogger('openerp')
def check_root_user():
""" Exit if the process's user is 'root' (on POSIX system)."""
@ -69,13 +70,12 @@ def report_configuration():
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
logger = logging.getLogger('server')
logger.info("OpenERP version %s", __version__)
_logger.info("OpenERP version %s", __version__)
for name, value in [('addons paths', config['addons_path']),
('database hostname', config['db_host'] or 'localhost'),
('database port', config['db_port'] or '5432'),
('database user', config['db_user'])]:
logger.info("%s: %s", name, value)
_logger.info("%s: %s", name, value)
def setup_pid_file():
""" Create a file with the process id written in it.
@ -97,32 +97,30 @@ def preload_registry(dbname):
# jobs will start to be processed later, when openerp.cron.start_master_thread() is called by openerp.service.start_services()
registry.schedule_cron_jobs()
except Exception:
logging.exception('Failed to initialize database `%s`.', dbname)
_logger.exception('Failed to initialize database `%s`.', dbname)
def run_test_file(dbname, test_file):
""" Preload a registry, possibly run a test file, and start the cron."""
try:
db, registry = openerp.pooler.get_db_and_pool(dbname, update_module=config['init'] or config['update'], pooljobs=False)
cr = db.cursor()
logger = logging.getLogger('server')
logger.info('loading test file %s', test_file)
_logger.info('loading test file %s', test_file)
openerp.tools.convert_yaml_import(cr, 'base', file(test_file), {}, 'test', True)
cr.rollback()
cr.close()
except Exception:
logging.exception('Failed to initialize database `%s` and run test file `%s`.', dbname, test_file)
_logger.exception('Failed to initialize database `%s` and run test file `%s`.', dbname, test_file)
def export_translation():
config = openerp.tools.config
dbname = config['db_name']
logger = logging.getLogger('server')
if config["language"]:
msg = "language %s" % (config["language"],)
else:
msg = "new language"
logger.info('writing translation file for %s to %s', msg,
_logger.info('writing translation file for %s to %s', msg,
config["translate_out"])
fileformat = os.path.splitext(config["translate_out"])[-1][1:].lower()
@ -133,7 +131,7 @@ def export_translation():
cr.close()
buf.close()
logger.info('translation file written successfully')
_logger.info('translation file written successfully')
def import_translation():
config = openerp.tools.config
@ -176,7 +174,7 @@ def dumpstacks(sig, frame):
code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
code.append(" %s" % (line.strip()))
logging.getLogger('dumpstacks').info("\n".join(code))
_logger.info("\n".join(code))
def setup_signal_handlers():
""" Register the signal handler defined above. """
@ -254,7 +252,7 @@ if __name__ == "__main__":
msg = """
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
logging.exception('Failed to load server-wide module `%s`.%s', m, msg)
_logger.exception('Failed to load server-wide module `%s`.%s', m, msg)
if config['db_name']:
for dbname in config['db_name'].split(','):
@ -264,8 +262,7 @@ Maybe you forgot to add those addons in your addons_path configuration."""
sys.exit(0)
setup_pid_file()
logger = logging.getLogger('server')
logger.info('OpenERP server is running, waiting for connections...')
_logger.info('OpenERP server is running, waiting for connections...')
quit_on_signals()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

View File

@ -36,6 +36,8 @@ from tools.safe_eval import safe_eval as eval
from tools.translate import _
from socket import gethostname
_logger = logging.getLogger(__name__)
class actions(osv.osv):
_name = 'ir.actions.actions'
_table = 'ir_actions'
@ -550,7 +552,6 @@ class actions_server(osv.osv):
}
def get_email(self, cr, uid, action, context):
logger = logging.getLogger('Workflow')
obj_pool = self.pool.get(action.model_id.model)
id = context.get('active_id')
obj = obj_pool.browse(cr, uid, id)
@ -566,12 +567,11 @@ class actions_server(osv.osv):
try:
obj = getattr(obj, field)
except Exception:
logger.exception('Failed to parse: %s', field)
_logger.exception('Failed to parse: %s', field)
return obj
def get_mobile(self, cr, uid, action, context):
logger = logging.getLogger('Workflow')
obj_pool = self.pool.get(action.model_id.model)
id = context.get('active_id')
obj = obj_pool.browse(cr, uid, id)
@ -587,7 +587,7 @@ class actions_server(osv.osv):
try:
obj = getattr(obj, field)
except Exception:
logger.exception('Failed to parse: %s', field)
_logger.exception('Failed to parse: %s', field)
return obj
@ -624,7 +624,6 @@ class actions_server(osv.osv):
# FIXME: refactor all the eval() calls in run()!
def run(self, cr, uid, ids, context=None):
logger = logging.getLogger(self._name)
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid)
@ -668,11 +667,11 @@ class actions_server(osv.osv):
pass
if not address:
logger.info('No partner email address specified, not sending any email.')
_logger.info('No partner email address specified, not sending any email.')
continue
if not email_from:
logger.debug('--email-from command line option is not specified, using a fallback value instead.')
_logger.debug('--email-from command line option is not specified, using a fallback value instead.')
if user.user_email:
email_from = user.user_email
else:
@ -685,9 +684,9 @@ class actions_server(osv.osv):
msg = ir_mail_server.build_email(email_from, [address], subject, body)
res_email = ir_mail_server.send_email(cr, uid, msg)
if res_email:
logger.info('Email successfully sent to: %s', address)
_logger.info('Email successfully sent to: %s', address)
else:
logger.warning('Failed to send email to: %s', address)
_logger.warning('Failed to send email to: %s', address)
if action.state == 'trigger':
wf_service = netsvc.LocalService("workflow")
@ -701,7 +700,7 @@ class actions_server(osv.osv):
#TODO: set the user and password from the system
# for the sms gateway user / password
# USE smsclient module from extra-addons
logger.warning('SMS Facility has not been implemented yet. Use smsclient module!')
_logger.warning('SMS Facility has not been implemented yet. Use smsclient module!')
if action.state == 'other':
res = []

View File

@ -37,6 +37,8 @@ from tools import DEFAULT_SERVER_DATETIME_FORMAT
from tools.safe_eval import safe_eval as eval
from tools.translate import _
_logger = logging.getLogger(__name__)
def str2tuple(s):
return eval('tuple(%s)' % (s or ''))
@ -87,8 +89,6 @@ class ir_cron(osv.osv):
'doall' : lambda *a: 1
}
_logger = logging.getLogger('cron')
def _check_args(self, cr, uid, ids, context=None):
try:
for this in self.browse(cr, uid, ids, context):
@ -114,7 +114,7 @@ class ir_cron(osv.osv):
"""
cr.rollback()
self._logger.exception("Call of self.pool.get('%s').%s(cr, uid, *%r) failed in Job %s" % (model_name, method_name, args, job_id))
_logger.exception("Call of self.pool.get('%s').%s(cr, uid, *%r) failed in Job %s" % (model_name, method_name, args, job_id))
def _callback(self, cr, uid, model_name, method_name, args, job_id):
""" Run the method associated to a given job
@ -131,15 +131,14 @@ class ir_cron(osv.osv):
if model and hasattr(model, method_name):
method = getattr(model, method_name)
try:
netsvc.log('cron', (cr.dbname,uid,'*',model_name,method_name)+tuple(args), channel=logging.DEBUG,
depth=(None if self._logger.isEnabledFor(logging.DEBUG_RPC_ANSWER) else 1), fn='object.execute')
logger = logging.getLogger('execution time')
if logger.isEnabledFor(logging.DEBUG):
log_depth = (None if _logger.isEnabledFor(logging.DEBUG) else 1)
netsvc.log(_logger, logging.DEBUG, 'cron.object.execute', (cr.dbname,uid,'*',model_name,method_name)+tuple(args), depth=log_depth)
if _logger.isEnabledFor(logging.DEBUG):
start_time = time.time()
method(cr, uid, *args)
if logger.isEnabledFor(logging.DEBUG):
if _logger.isEnabledFor(logging.DEBUG):
end_time = time.time()
logger.log(logging.DEBUG, '%.3fs (%s, %s)' % (end_time - start_time, model_name, method_name))
_logger.debug('%.3fs (%s, %s)' % (end_time - start_time, model_name, method_name))
except Exception, e:
self._handle_callback_exception(cr, uid, model_name, method_name, args, job_id, e)
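The timing block above guards both time.time() calls behind isEnabledFor, so disabled DEBUG logging costs almost nothing. The same guard, reduced to a runnable sketch (timed_call is a name invented for this example):

import logging
import time

logging.basicConfig(level=logging.DEBUG)
_logger = logging.getLogger('openerp')

def timed_call(method, *args):
    # Pay for time.time() and the extra record only when DEBUG is enabled,
    # mirroring the guard in the cron callback above.
    if _logger.isEnabledFor(logging.DEBUG):
        start_time = time.time()
    result = method(*args)
    if _logger.isEnabledFor(logging.DEBUG):
        _logger.debug('%.3fs (%s)', time.time() - start_time, method.__name__)
    return result

timed_call(sum, [1, 2, 3])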
@ -224,7 +223,7 @@ class ir_cron(osv.osv):
except psycopg2.OperationalError, e:
if e.pgcode == '55P03':
# Class 55: Object not in prerequisite state; 55P03: lock_not_available
self._logger.debug('Another process/thread is already busy executing job `%s`, skipping it.', job['name'])
_logger.debug('Another process/thread is already busy executing job `%s`, skipping it.', job['name'])
continue
else:
# Unexpected OperationalError
@ -240,7 +239,7 @@ class ir_cron(osv.osv):
task_thread.setDaemon(False)
openerp.cron.take_thread_slot()
task_thread.start()
self._logger.debug('Cron execution thread for job `%s` spawned', job['name'])
_logger.debug('Cron execution thread for job `%s` spawned', job['name'])
# Find next earliest job ignoring currently processed jobs (by this and other cron threads)
find_next_time_query = """SELECT min(nextcall) AS min_next_call
@ -261,7 +260,7 @@ class ir_cron(osv.osv):
openerp.cron.schedule_wakeup(next_call, db_name)
except Exception, ex:
self._logger.warning('Exception in cron:', exc_info=True)
_logger.warning('Exception in cron:', exc_info=True)
finally:
cr.commit()

View File

@ -40,7 +40,7 @@ import openerp.tools as tools
# it is moved to loglevels until we refactor tools.
from openerp.loglevels import ustr
_logger = logging.getLogger('ir.mail_server')
_logger = logging.getLogger(__name__)
class MailDeliveryException(osv.except_osv):
"""Specific exception subclass for mail delivery errors"""

View File

@ -32,6 +32,8 @@ from tools import config
from tools.translate import _
import pooler
_logger = logging.getLogger(__name__)
def _get_fields_type(self, cr, uid, context=None):
return sorted([(k,k) for k,v in fields.__dict__.iteritems()
if type(v) == types.TypeType
@ -232,7 +234,7 @@ class ir_model_fields(osv.osv):
try:
selection_list = eval(selection)
except Exception:
logging.getLogger('ir.model').warning('Invalid selection list definition for fields.selection', exc_info=True)
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression." \
"Please provide an expression in the [('key','Label'), ...] format."))
@ -588,7 +590,6 @@ class ir_model_data(osv.osv):
update them seamlessly.
"""
_name = 'ir.model.data'
__logger = logging.getLogger('addons.base.'+_name)
_order = 'module,model,name'
_columns = {
'name': fields.char('External Identifier', required=True, size=128, select=1,
@ -821,13 +822,13 @@ class ir_model_data(osv.osv):
if not config.get('import_partial'):
for (model, res_id) in to_unlink:
if self.pool.get(model):
self.__logger.info('Deleting %s@%s', res_id, model)
_logger.info('Deleting %s@%s', res_id, model)
try:
self.pool.get(model).unlink(cr, uid, [res_id])
cr.commit()
except Exception:
cr.rollback()
self.__logger.warn(
_logger.warning(
'Could not delete obsolete record with id: %d of model %s\n'
'There should be some relation that points to this resource\n'
'You should manually fix this and restart with --update=module',

View File

@ -24,7 +24,7 @@ import time
import openerp
_logger = logging.getLogger('ir_sequence')
_logger = logging.getLogger(__name__)
class ir_sequence_type(openerp.osv.osv.osv):
_name = 'ir.sequence.type'

View File

@ -23,6 +23,8 @@ from osv import fields, osv
import tools
import logging
_logger = logging.getLogger(__name__)
TRANSLATION_TYPE = [
('field', 'Field'),
('model', 'Object'),
@ -87,13 +89,12 @@ class ir_translation_import_cursor(object):
def finish(self):
""" Transfer the data from the temp table to ir.translation
"""
logger = logging.getLogger('orm')
cr = self._cr
if self._debug:
cr.execute("SELECT count(*) FROM %s" % self._table_name)
c = cr.fetchone()[0]
logger.debug("ir.translation.cursor: We have %d entries to process", c)
_logger.debug("ir.translation.cursor: We have %d entries to process", c)
# Step 1: resolve ir.model.data references to res_ids
cr.execute("""UPDATE %s AS ti
@ -109,7 +110,7 @@ class ir_translation_import_cursor(object):
cr.execute("SELECT imd_module, imd_model, imd_name FROM %s " \
"WHERE res_id IS NULL AND imd_module IS NOT NULL" % self._table_name)
for row in cr.fetchall():
logger.debug("ir.translation.cursor: missing res_id for %s. %s/%s ", *row)
_logger.debug("ir.translation.cursor: missing res_id for %s. %s/%s ", *row)
cr.execute("DELETE FROM %s WHERE res_id IS NULL AND imd_module IS NOT NULL" % \
self._table_name)
@ -143,7 +144,7 @@ class ir_translation_import_cursor(object):
cr.execute('SELECT COUNT(*) FROM ONLY %s AS irt, %s AS ti WHERE %s' % \
(self._parent_table, self._table_name, find_expr))
c = cr.fetchone()[0]
logger.debug("ir.translation.cursor: %d entries now in ir.translation, %d common entries with tmp", c1, c)
_logger.debug("ir.translation.cursor: %d entries now in ir.translation, %d common entries with tmp", c1, c)
# Step 4: cleanup
cr.execute("DROP TABLE %s" % self._table_name)

View File

@ -27,6 +27,8 @@ import tools
import os
import logging
_logger = logging.getLogger(__name__)
class view_custom(osv.osv):
_name = 'ir.ui.view.custom'
_order = 'create_date desc' # search(limit=1) should return the last customization
@ -72,7 +74,6 @@ class view(osv.osv):
_order = "priority,name"
def _check_xml(self, cr, uid, ids, context=None):
logger = logging.getLogger('init')
for view in self.browse(cr, uid, ids, context):
eview = etree.fromstring(view.arch.encode('utf8'))
frng = tools.file_open(os.path.join('base','rng','view.rng'))
@ -81,7 +82,7 @@ class view(osv.osv):
relaxng = etree.RelaxNG(relaxng_doc)
if not relaxng.validate(eview):
for error in relaxng.error_log:
logger.error(tools.ustr(error))
_logger.error(tools.ustr(error))
return False
finally:
frng.close()
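The _check_xml constraint above validates a view's architecture against base/rng/view.rng and logs each RelaxNG error. A self-contained sketch of that pattern with lxml (the inline schema and instance are invented for the example; the server loads the .rng file instead):

import logging
from lxml import etree

logging.basicConfig()
_logger = logging.getLogger(__name__)

# A toy schema: a <form> element with a required 'string' attribute.
relaxng = etree.RelaxNG(etree.fromstring(
    '<element name="form" xmlns="http://relaxng.org/ns/structure/1.0">'
    '<attribute name="string"/><empty/></element>'))

eview = etree.fromstring('<form title="misnamed attribute"/>')
if not relaxng.validate(eview):
    for error in relaxng.error_log:
        _logger.error(str(error))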

View File

@ -19,12 +19,15 @@
#
##############################################################################
import logging
import time, os
import netsvc
import report,pooler,tools
from operator import itemgetter
_logger = logging.getLogger(__name__)
def graph_get(cr, graph, wkf_ids, nested, workitem, processed_subflows):
import pydot
cr.execute('select * from wkf_activity where wkf_id in ('+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
@ -126,13 +129,12 @@ def graph_instance_get(cr, graph, inst_id, nested=False):
class report_graph_instance(object):
def __init__(self, cr, uid, ids, data):
logger = netsvc.Logger()
try:
import pydot
except Exception,e:
logger.notifyChannel('workflow', netsvc.LOG_WARNING,
'Import Error for pydot, you will not be able to render workflows\n'
'Consider Installing PyDot or dependencies: http://dkbza.org/pydot.html')
_logger.warning(
'Import Error for pydot, you will not be able to render workflows.\n'
'Consider Installing PyDot or dependencies: http://dkbza.org/pydot.html.')
raise e
self.done = False
@ -168,9 +170,7 @@ showpage'''
graph_instance_get(cr, graph, inst_id, data.get('nested', False))
ps_string = graph.create(prog='dot', format='ps')
except Exception, e:
import traceback, sys
tb_s = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
logger.notifyChannel('workflow', netsvc.LOG_ERROR, 'Exception in call: ' + tb_s)
_logger.exception('Exception in call:')
# string is in PS, like the success message would have been
ps_string = '''%PS-Adobe-3.0
/inch {72 mul} def

View File

@ -40,6 +40,8 @@ from tools.translate import _
from osv import fields, osv, orm
_logger = logging.getLogger(__name__)
ACTION_DICT = {
'view_type': 'form',
'view_mode': 'form',
@ -88,7 +90,6 @@ class module_category(osv.osv):
class module(osv.osv):
_name = "ir.module.module"
_description = "Module"
__logger = logging.getLogger('base.' + _name)
@classmethod
def get_module_info(cls, name):
@ -97,8 +98,8 @@ class module(osv.osv):
info = addons.load_information_from_description_file(name)
info['version'] = release.major_version + '.' + info['version']
except Exception:
cls.__logger.debug('Error when trying to fetch informations for '
'module %s', name, exc_info=True)
_logger.debug('Error when trying to fetch informations for '
'module %s', name, exc_info=True)
return info
def _get_latest_version(self, cr, uid, ids, field_name=None, arg=None, context=None):
@ -156,14 +157,14 @@ class module(osv.osv):
for um in menu_obj.browse(cr, uid, imd_models.get('ir.ui.menu', []), context=context):
res_mod_dic['menus_by_module'].append(um.complete_name)
except KeyError, e:
self.__logger.warning(
'Data not found for items of %s', module_rec.name)
_logger.warning(
'Data not found for items of %s', module_rec.name)
except AttributeError, e:
self.__logger.warning(
'Data not found for items of %s %s', module_rec.name, str(e))
_logger.warning(
'Data not found for items of %s %s', module_rec.name, str(e))
except Exception, e:
self.__logger.warning('Unknown error while fetching data of %s',
module_rec.name, exc_info=True)
_logger.warning('Unknown error while fetching data of %s',
module_rec.name, exc_info=True)
for key, value in res.iteritems():
for k, v in res[key].iteritems():
res[key][k] = "\n".join(sorted(v))
@ -514,8 +515,8 @@ class module(osv.osv):
with open(fname, 'wb') as fp:
fp.write(zip_content)
except Exception:
self.__logger.exception('Error when trying to create module '
'file %s', fname)
_logger.exception('Error when trying to create module '
'file %s', fname)
raise orm.except_orm(_('Error'), _('Can not create the module file:\n %s') % (fname,))
terp = self.get_module_info(mod.name)
self.write(cr, uid, mod.id, self.get_values_from_terp(terp))
@ -568,7 +569,6 @@ class module(osv.osv):
def update_translations(self, cr, uid, ids, filter_lang=None, context=None):
if context is None:
context = {}
logger = logging.getLogger('i18n')
if not filter_lang:
pool = pooler.get_pool(cr.dbname)
lang_obj = pool.get('res.lang')
@ -592,7 +592,7 @@ class module(osv.osv):
iso_lang2 = iso_lang.split('_')[0]
f2 = addons.get_module_resource(mod.name, 'i18n', iso_lang2 + '.po')
if f2:
logger.info('module %s: loading base translation file %s for language %s', mod.name, iso_lang2, lang)
_logger.info('module %s: loading base translation file %s for language %s', mod.name, iso_lang2, lang)
tools.trans_load(cr, f2, lang, verbose=False, context=context)
context2['overwrite'] = True
# Implementation notice: we must first search for the full name of
@ -602,23 +602,22 @@ class module(osv.osv):
iso_lang = iso_lang.split('_')[0]
f = addons.get_module_resource(mod.name, 'i18n', iso_lang + '.po')
if f:
logger.info('module %s: loading translation file (%s) for language %s', mod.name, iso_lang, lang)
_logger.info('module %s: loading translation file (%s) for language %s', mod.name, iso_lang, lang)
tools.trans_load(cr, f, lang, verbose=False, context=context2)
elif iso_lang != 'en':
logger.warning('module %s: no translation for language %s', mod.name, iso_lang)
_logger.warning('module %s: no translation for language %s', mod.name, iso_lang)
def check(self, cr, uid, ids, context=None):
logger = logging.getLogger('init')
for mod in self.browse(cr, uid, ids, context=context):
if not mod.description:
logger.warn('module %s: description is empty !', mod.name)
_logger.warning('module %s: description is empty !', mod.name)
if not mod.certificate or not mod.certificate.isdigit():
logger.info('module %s: no quality certificate', mod.name)
_logger.info('module %s: no quality certificate', mod.name)
else:
val = long(mod.certificate[2:]) % 97 == 29
if not val:
logger.critical('module %s: invalid quality certificate: %s', mod.name, mod.certificate)
_logger.critical('module %s: invalid quality certificate: %s', mod.name, mod.certificate)
raise osv.except_osv(_('Error'), _('Module %s: Invalid Quality Certificate') % (mod.name,))
def root_menus(self, cr, uid, ids, context=None):

View File

@ -27,6 +27,7 @@ import netsvc
from tools import ustr
import pooler
_logger = logging.getLogger(__name__)
class res_config_configurable(osv.osv_memory):
''' Base classes for new-style configuration items
@ -37,11 +38,10 @@ class res_config_configurable(osv.osv_memory):
'''
_name = 'res.config'
_inherit = 'ir.wizard.screen'
__logger = logging.getLogger(_name)
def _next_action(self, cr, uid, context=None):
Todos = self.pool['ir.actions.todo']
self.__logger.info('getting next %s', Todos)
_logger.info('getting next %s', Todos)
active_todos = Todos.browse(cr, uid,
Todos.search(cr, uid, ['&', ('type', '=', 'automatic'), ('state','=','open')]),
@ -63,9 +63,9 @@ class res_config_configurable(osv.osv_memory):
return None
def _next(self, cr, uid, context=None):
self.__logger.info('getting next operation')
_logger.info('getting next operation')
next = self._next_action(cr, uid, context=context)
self.__logger.info('next action is %s', next)
_logger.info('next action is %s', next)
if next:
res = next.action_launch(context=context)
res['nodestroy'] = False
@ -244,7 +244,6 @@ class res_config_installer(osv.osv_memory):
"""
_name = 'res.config.installer'
_inherit = 'res.config'
__logger = logging.getLogger(_name)
_install_if = {}
@ -352,7 +351,7 @@ class res_config_installer(osv.osv_memory):
modules = self.pool.get('ir.module.module')
to_install = list(self.modules_to_install(
cr, uid, ids, context=context))
self.__logger.info('Selecting addons %s to install', to_install)
_logger.info('Selecting addons %s to install', to_install)
modules.state_update(
cr, uid,
modules.search(cr, uid, [('name','in',to_install)]),
@ -374,7 +373,6 @@ class ir_actions_configuration_wizard(osv.osv_memory):
'''
_name='ir.actions.configuration.wizard'
_inherit = 'res.config'
__logger = logging.getLogger(_name)
def _next_action_note(self, cr, uid, ids, context=None):
next = self._next_action(cr, uid)
@ -394,7 +392,7 @@ class ir_actions_configuration_wizard(osv.osv_memory):
}
def execute(self, cr, uid, ids, context=None):
self.__logger.warn(DEPRECATION_MESSAGE)
_logger.warning(DEPRECATION_MESSAGE)
ir_actions_configuration_wizard()

View File

@ -29,6 +29,8 @@ import tools
from tools.safe_eval import safe_eval as eval
from tools.translate import _
_logger = logging.getLogger(__name__)
class lang(osv.osv):
_name = "res.lang"
_description = "Languages"
@ -64,7 +66,6 @@ class lang(osv.osv):
def load_lang(self, cr, uid, lang, lang_name=None):
# create the language with locale information
fail = True
logger = logging.getLogger('i18n')
iso_lang = tools.get_iso_codes(lang)
for ln in tools.get_locales(lang):
try:
@ -76,7 +77,7 @@ class lang(osv.osv):
if fail:
lc = locale.getdefaultlocale()[0]
msg = 'Unable to get information for locale %s. Information from the default locale (%s) have been used.'
logger.warning(msg, lang, lc)
_logger.warning(msg, lang, lc)
if not lang_name:
lang_name = tools.get_languages().get(lang, lang)

View File

@ -35,6 +35,8 @@ from tools.translate import _
import openerp
import openerp.exceptions
_logger = logging.getLogger(__name__)
class groups(osv.osv):
_name = "res.groups"
_description = "Access Groups"
@ -462,7 +464,7 @@ class users(osv.osv):
user_agent_env['base_location'])
cr.commit()
except Exception:
logging.getLogger('res.users').exception("Failed to update web.base.url configuration parameter")
_logger.exception("Failed to update web.base.url configuration parameter")
finally:
cr.close()
return uid

View File

@ -23,7 +23,7 @@ from osv import fields, osv
import re
import logging
_logger = logging.getLogger('mass.mailing')
_logger = logging.getLogger(__name__)
class partner_massmail_wizard(osv.osv_memory):
""" Mass Mailing """

View File

@ -47,6 +47,8 @@ import time
import openerp
import tools
_logger = logging.getLogger(__name__)
# Heapq of database wake-ups. Note that 'database wake-up' meaning is in
# the context of the cron management. This is not originally about loading
# a database, although having the database name in the queue will
@ -84,8 +86,6 @@ _thread_slots = None
# A (non re-entrant) lock to protect the above _thread_slots variable.
_thread_slots_lock = threading.Lock()
_logger = logging.getLogger('cron')
# Sleep duration limits - must not loop too quickly, but can't sleep too long
# either, because a new job might be inserted in ir_cron with a much sooner
# execution date than current known ones. We won't see it until we wake!

View File

@ -21,12 +21,8 @@
import sys
import logging
import warnings
LOG_NOTSET = 'notset'
LOG_DEBUG_SQL = 'debug_sql'
LOG_DEBUG_RPC_ANSWER = 'debug_rpc_answer'
LOG_DEBUG_RPC = 'debug_rpc'
LOG_DEBUG = 'debug'
LOG_TEST = 'test'
LOG_INFO = 'info'
@ -34,32 +30,27 @@ LOG_WARNING = 'warn'
LOG_ERROR = 'error'
LOG_CRITICAL = 'critical'
logging.DEBUG_RPC_ANSWER = logging.DEBUG - 4
logging.addLevelName(logging.DEBUG_RPC_ANSWER, 'DEBUG_RPC_ANSWER')
logging.DEBUG_RPC = logging.DEBUG - 2
logging.addLevelName(logging.DEBUG_RPC, 'DEBUG_RPC')
logging.DEBUG_SQL = logging.DEBUG_RPC - 3
logging.addLevelName(logging.DEBUG_SQL, 'DEBUG_SQL')
logging.TEST = logging.INFO - 5
logging.addLevelName(logging.TEST, 'TEST')
_logger = logging.getLogger(__name__)
class Logger(object):
def __init__(self):
warnings.warn("The netsvc.Logger API shouldn't be used anymore, please "
"use the standard `logging.getLogger` API instead",
PendingDeprecationWarning, stacklevel=2)
_logger.warning(
"The netsvc.Logger API shouldn't be used anymore, please "
"use the standard `logging.getLogger` API instead.")
super(Logger, self).__init__()
def notifyChannel(self, name, level, msg):
warnings.warn("notifyChannel API shouldn't be used anymore, please use "
"the standard `logging` module instead",
PendingDeprecationWarning, stacklevel=2)
_logger.warning(
"notifyChannel API shouldn't be used anymore, please use "
"the standard `logging` module instead.")
from service.web_services import common
log = logging.getLogger(ustr(name))
log = logging.getLogger(__name__ + '.deprecated.' + ustr(name))
if level in [LOG_DEBUG_RPC, LOG_TEST] and not hasattr(log, level):
if level in [LOG_TEST] and not hasattr(log, level):
fct = lambda msg, *args, **kwargs: log.log(getattr(logging, level.upper()), msg, *args, **kwargs)
setattr(log, level, fct)
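This file keeps a single custom level (TEST) and reduces netsvc.Logger to a shim over standard logging. A minimal sketch of both survivals, assuming nothing beyond the stdlib (channel and message are illustrative):

import logging

# Register the surviving custom level, exactly as done above.
logging.TEST = logging.INFO - 5
logging.addLevelName(logging.TEST, 'TEST')

logging.basicConfig(level=logging.TEST)
_logger = logging.getLogger(__name__)

class Logger(object):
    # Shim sketch: the old channel-based API stays callable, but every call
    # is routed to a namespaced standard logger, and the caller is nagged
    # once per instantiation instead of via warnings.warn().
    def __init__(self):
        _logger.warning(
            "The netsvc.Logger API shouldn't be used anymore, please "
            "use the standard `logging.getLogger` API instead.")

    def notifyChannel(self, name, level, msg):
        log = logging.getLogger(__name__ + '.deprecated.' + str(name))
        log.log(getattr(logging, str(level).upper(), logging.INFO), msg)

Logger().notifyChannel('init', 'info', 'module base: registering objects')
logging.getLogger('test').log(logging.TEST, 'custom TEST-level record')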

View File

@ -23,6 +23,8 @@
import openerp.modules
import logging
_logger = logging.getLogger(__name__)
def is_initialized(cr):
""" Check if a database has been initialized for the ORM.
@ -43,7 +45,7 @@ def initialize(cr):
f = openerp.modules.get_module_resource('base', 'base.sql')
if not f:
m = "File not found: 'base.sql' (provided by module 'base')."
logging.getLogger('init').critical(m)
_logger.critical(m)
raise IOError(m)
base_sql_file = openerp.tools.misc.file_open(f)
try:

View File

@ -48,8 +48,7 @@ from cStringIO import StringIO
import logging
logger = netsvc.Logger()
_logger = logging.getLogger(__name__)
class Graph(dict):
""" Modules dependency graph.
@ -104,7 +103,7 @@ class Graph(dict):
if info and info['installable']:
packages.append((module, info)) # TODO directly a dict, like in get_modules_with_version
else:
logger.notifyChannel('init', netsvc.LOG_WARNING, 'module %s: not installable, skipped' % (module))
_logger.warning('module %s: not installable, skipped', module)
dependencies = dict([(p, info['depends']) for p, info in packages])
current, later = set([p for p, info in packages]), set()
@ -134,11 +133,11 @@ class Graph(dict):
for package in later:
unmet_deps = filter(lambda p: p not in self, dependencies[package])
logger.notifyChannel('init', netsvc.LOG_ERROR, 'module %s: Unmet dependencies: %s' % (package, ', '.join(unmet_deps)))
_logger.error('module %s: Unmet dependencies: %s', package, ', '.join(unmet_deps))
result = len(self) - len_graph
if result != len(module_list):
logger.notifyChannel('init', netsvc.LOG_WARNING, 'Not all modules have loaded.')
_logger.warning('Some modules were not loaded.')
return result

View File

@ -60,8 +60,7 @@ from openerp.modules.module import \
get_module_path, initialize_sys_path, \
register_module_classes, init_module_models
logger = netsvc.Logger()
_logger = logging.getLogger(__name__)
def open_openerp_namespace():
# See comment for open_openerp_namespace.
@ -80,7 +79,6 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=
:param skip_modules: optional list of module names (packages) which have previously been loaded and can be skipped
:return: list of modules that were installed or updated
"""
logger = logging.getLogger('init.load')
def process_sql_file(cr, fp):
queries = fp.read().split(';')
for query in queries:
@ -101,7 +99,7 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=
threading.currentThread().testing = True
_load_data(cr, module_name, idref, mode, 'test')
except Exception, e:
logging.getLogger('init.test').exception(
_logger.exception(
'Tests failed to execute in module %s', module_name)
finally:
threading.currentThread().testing = False
@ -120,7 +118,7 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=
"""
for filename in package.data[kind]:
logger.info("module %s: loading %s", module_name, filename)
_logger.info("module %s: loading %s", module_name, filename)
_, ext = os.path.splitext(filename)
pathname = os.path.join(module_name, filename)
fp = tools.file_open(pathname)
@ -148,7 +146,7 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=
loaded_modules = []
pool = pooler.get_pool(cr.dbname)
migrations = openerp.modules.migration.MigrationManager(cr, graph)
logger.debug('loading %d packages...', len(graph))
_logger.debug('loading %d packages...', len(graph))
# get db timestamp
cr.execute("select now()::timestamp")
@ -162,7 +160,7 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=
if skip_modules and module_name in skip_modules:
continue
logger.info('module %s: loading objects', package.name)
_logger.info('module %s: loading objects', package.name)
migrations.migrate_module(package, 'pre')
register_module_classes(package.name)
models = pool.load(cr, package)
@ -240,7 +238,7 @@ def _check_module_names(cr, module_names):
# find out what module name(s) are incorrect:
cr.execute("SELECT name FROM ir_module_module")
incorrect_names = mod_names.difference([x['name'] for x in cr.dictfetchall()])
logging.getLogger('init').warning('invalid module names, ignored: %s', ", ".join(incorrect_names))
_logger.warning('invalid module names, ignored: %s', ", ".join(incorrect_names))
def load_marked_modules(cr, graph, states, force, progressdict, report, loaded_modules):
"""Loads modules marked with ``states``, adding them to ``graph`` and
@ -250,7 +248,7 @@ def load_marked_modules(cr, graph, states, force, progressdict, report, loaded_m
cr.execute("SELECT name from ir_module_module WHERE state IN %s" ,(tuple(states),))
module_list = [name for (name,) in cr.fetchall() if name not in graph]
new_modules_in_graph = graph.add_modules(cr, module_list, force)
logger.notifyChannel('init', netsvc.LOG_DEBUG, 'Updating graph with %d more modules' % (len(module_list)))
_logger.debug('Updating graph with %d more modules', len(module_list))
loaded, processed = load_module_graph(cr, graph, progressdict, report=report, skip_modules=loaded_modules)
processed_modules.extend(processed)
loaded_modules.extend(loaded)
@ -273,7 +271,7 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
cr = db.cursor()
try:
if not openerp.modules.db.is_initialized(cr):
logger.notifyChannel("init", netsvc.LOG_INFO, "init db")
_logger.info("init db")
openerp.modules.db.initialize(cr)
tools.config["init"]["all"] = 1
tools.config['update']['all'] = 1
@ -291,7 +289,7 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
graph = openerp.modules.graph.Graph()
graph.add_module(cr, 'base', force)
if not graph:
logger.notifyChannel('init', netsvc.LOG_CRITICAL, 'module base cannot be loaded! (hint: verify addons-path)')
_logger.critical('module base cannot be loaded! (hint: verify addons-path)')
raise osv.osv.except_osv(_('Could not load base module'), _('module base cannot be loaded! (hint: verify addons-path)'))
# processed_modules: for cleanup step after install
@ -306,7 +304,7 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
if update_module:
modobj = pool.get('ir.module.module')
if ('base' in tools.config['init']) or ('base' in tools.config['update']):
logger.notifyChannel('init', netsvc.LOG_INFO, 'updating modules list')
_logger.info('updating modules list')
modobj.update_list(cr, 1)
_check_module_names(cr, itertools.chain(tools.config['init'].keys(), tools.config['update'].keys()))
@ -350,7 +348,7 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
for (model, name) in cr.fetchall():
model_obj = pool.get(model)
if model_obj and not model_obj.is_transient():
logger.notifyChannel('init', netsvc.LOG_WARNING, 'Model %s (%s) has no access rules!' % (model, name))
_logger.warning('Model %s (%s) has no access rules!', model, name)
# Temporary warning while we remove access rights on osv_memory objects, as they have
# been replaced by owner-only access rights
@ -358,7 +356,7 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
for (model, name) in cr.fetchall():
model_obj = pool.get(model)
if model_obj and model_obj.is_transient():
logger.notifyChannel('init', netsvc.LOG_WARNING, 'The transient model %s (%s) should not have explicit access rules!' % (model, name))
_logger.warning('The transient model %s (%s) should not have explicit access rules!', model, name)
cr.execute("SELECT model from ir_model")
for (model,) in cr.fetchall():
@ -366,14 +364,11 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
if obj:
obj._check_removed_columns(cr, log=True)
else:
logger.notifyChannel('init', netsvc.LOG_WARNING, "Model %s is declared but cannot be loaded! (Perhaps a module was partially removed or renamed)" % model)
_logger.warning("Model %s is declared but cannot be loaded! (Perhaps a module was partially removed or renamed)", model)
# Cleanup orphan records
pool.get('ir.model.data')._process_end(cr, 1, processed_modules)
if report.get_report():
logger.notifyChannel('init', netsvc.LOG_INFO, report)
for kind in ('init', 'demo', 'update'):
tools.config[kind] = {}
@ -391,7 +386,7 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
# TODO group by module so that we can delete multiple ids in a call
rmod_module.unlink(cr, uid, [rid])
else:
logger.notifyChannel('init', netsvc.LOG_ERROR, 'Could not locate %s to remove res=%d' % (rmod,rid))
_logger.error('Could not locate %s to remove res=%d' % (rmod,rid))
cr.execute('delete from ir_model_data where noupdate=%s and module=%s', (False, mod_name,))
cr.commit()
@ -412,11 +407,13 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
if not cr.rowcount:
break
else:
logger.notifyChannel('init', netsvc.LOG_INFO, 'removed %d unused menus' % (cr.rowcount,))
_logger.info('removed %d unused menus', cr.rowcount)
# Pretend that modules to be removed are actually uninstalled.
cr.execute("update ir_module_module set state=%s where state=%s", ('uninstalled', 'to remove',))
cr.commit()
_logger.info('Modules loaded.')
finally:
cr.close()

View File

@ -51,8 +51,7 @@ import logging
import openerp.modules.db
import openerp.modules.graph
logger = netsvc.Logger()
_logger = logging.getLogger(__name__)
class MigrationManager(object):
"""
@ -183,13 +182,13 @@ class MigrationManager(object):
fp2.seek(0)
try:
mod = imp.load_source(name, pyfile, fp2)
logger.notifyChannel('migration', netsvc.LOG_INFO, 'module %(addon)s: Running migration %(version)s %(name)s' % mergedict({'name': mod.__name__}, strfmt))
_logger.info('module %(addon)s: Running migration %(version)s %(name)s' % mergedict({'name': mod.__name__}, strfmt))
mod.migrate(self.cr, pkg.installed_version)
except ImportError:
logger.notifyChannel('migration', netsvc.LOG_ERROR, 'module %(addon)s: Unable to load %(stage)s-migration file %(file)s' % mergedict({'file': pyfile}, strfmt))
_logger.error('module %(addon)s: Unable to load %(stage)s-migration file %(file)s' % mergedict({'file': pyfile}, strfmt))
raise
except AttributeError:
logger.notifyChannel('migration', netsvc.LOG_ERROR, 'module %(addon)s: Each %(stage)s-migration file must have a "migrate(cr, installed_version)" function' % strfmt)
_logger.error('module %(addon)s: Each %(stage)s-migration file must have a "migrate(cr, installed_version)" function' % strfmt)
except:
raise
finally:

View File

@ -56,7 +56,7 @@ ad_paths = []
# Modules already loaded
loaded = []
logger = netsvc.Logger()
_logger = logging.getLogger(__name__)
class AddonsImportHook(object):
"""
@ -100,8 +100,7 @@ class AddonsImportHook(object):
try:
# Check if the bare module name clashes with another module.
f, path, descr = imp.find_module(module_parts[0])
logger = logging.getLogger('init')
logger.warning("""
_logger.warning("""
Ambiguous import: the OpenERP module `%s` is shadowed by another
module (available at %s).
To import it, use `import openerp.addons.<module>.`.""" % (module_name, path))
@ -137,7 +136,7 @@ To import it, use `import openerp.addons.<module>.`.""" % (module_name, path))
# Note: we don't support circular import.
f, path, descr = imp.find_module(module_part, ad_paths)
mod = imp.load_module(module_name, f, path, descr)
mod = imp.load_module('openerp.addons.' + module_part, f, path, descr)
if not is_shadowing:
sys.modules[module_part] = mod
for k in sys.modules.keys():
@ -179,7 +178,7 @@ def get_module_path(module, downloaded=False, display_warning=True):
if downloaded:
return opj(_ad, module)
if display_warning:
logger.notifyChannel('init', netsvc.LOG_WARNING, 'module %s: module not found' % (module,))
_logger.warning('module %s: module not found', module)
return False
@ -359,7 +358,7 @@ def load_information_from_description_file(module):
#TODO: refactor the logger in this file to follow the logging guidelines
# for 6.0
logging.getLogger('modules').debug('module %s: no descriptor file'
_logger.debug('module %s: no descriptor file'
' found: __openerp__.py or __terp__.py (deprecated)', module)
return {}
@ -373,8 +372,7 @@ def init_module_models(cr, module_name, obj_list):
TODO better explanation of _auto_init and init.
"""
logger.notifyChannel('init', netsvc.LOG_INFO,
'module %s: creating or updating database tables' % module_name)
_logger.info('module %s: creating or updating database tables', module_name)
todo = []
for obj in obj_list:
result = obj._auto_init(cr, {'module': module_name})
@ -402,13 +400,13 @@ def register_module_classes(m):
def log(e):
mt = isinstance(e, zipimport.ZipImportError) and 'zip ' or ''
msg = "Couldn't load %smodule %s" % (mt, m)
logger.notifyChannel('init', netsvc.LOG_CRITICAL, msg)
logger.notifyChannel('init', netsvc.LOG_CRITICAL, e)
_logger.critical(msg)
_logger.critical(e)
global loaded
if m in loaded:
return
logger.notifyChannel('init', netsvc.LOG_INFO, 'module %s: registering objects' % m)
_logger.info('module %s: registering objects', m)
mod_path = get_module_path(m)
initialize_sys_path()

View File

@ -22,9 +22,8 @@
""" Models registries.
"""
import threading
import logging
import threading
import openerp.sql_db
import openerp.osv.orm
@ -33,6 +32,8 @@ import openerp.tools
import openerp.modules.db
import openerp.tools.config
_logger = logging.getLogger(__name__)
class Registry(object):
""" Model registry for a particular database.
@ -53,8 +54,7 @@ class Registry(object):
cr = self.db.cursor()
has_unaccent = openerp.modules.db.has_unaccent(cr)
if openerp.tools.config['unaccent'] and not has_unaccent:
logger = logging.getLogger('unaccent')
logger.warning("The option --unaccent was given but no unaccent() function was found in database.")
_logger.warning("The option --unaccent was given but no unaccent() function was found in database.")
self.has_unaccent = openerp.tools.config['unaccent'] and has_unaccent
cr.close()

View File

@ -38,6 +38,8 @@ from loglevels import *
import tools
import openerp
_logger = logging.getLogger(__name__)
def close_socket(sock):
""" Closes a socket instance cleanly
@ -100,12 +102,11 @@ class ExportService(object):
"""
_services = {}
_logger = logging.getLogger('web-services')
def __init__(self, name):
ExportService._services[name] = self
self.__name = name
self._logger.debug("Registered an exported service: %s" % name)
_logger.debug("Registered an exported service: %s" % name)
@classmethod
def getService(cls,name):
@ -124,9 +125,6 @@ COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLOR_PATTERN = "%s%s%%s%s" % (COLOR_SEQ, COLOR_SEQ, RESET_SEQ)
LEVEL_COLOR_MAPPING = {
logging.DEBUG_SQL: (WHITE, MAGENTA),
logging.DEBUG_RPC: (BLUE, WHITE),
logging.DEBUG_RPC_ANSWER: (BLUE, WHITE),
logging.DEBUG: (BLUE, DEFAULT),
logging.INFO: (GREEN, DEFAULT),
logging.TEST: (WHITE, BLUE),
@ -137,6 +135,7 @@ LEVEL_COLOR_MAPPING = {
class DBFormatter(logging.Formatter):
def format(self, record):
record.pid = os.getpid()
record.dbname = getattr(threading.currentThread(), 'dbname', '?')
return logging.Formatter.format(self, record)
@ -146,12 +145,13 @@ class ColoredFormatter(DBFormatter):
record.levelname = COLOR_PATTERN % (30 + fg_color, 40 + bg_color, record.levelname)
return DBFormatter.format(self, record)
def init_logger():
from tools.translate import resetlocale
resetlocale()
# create a format for log messages and dates
format = '[%(asctime)s][%(dbname)s] %(levelname)s:%(name)s:%(message)s'
format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s'
if tools.config['syslog']:
# SysLog Handler
@ -188,11 +188,49 @@ def init_logger():
formatter = DBFormatter(format)
handler.setFormatter(formatter)
# add the handler to the root logger
logger = logging.getLogger()
logger.handlers = []
logger.addHandler(handler)
logger.setLevel(int(tools.config['log_level'] or '0'))
# Configure handlers
default_config = [
'openerp.netsvc.rpc.request:INFO',
'openerp.netsvc.rpc.response:INFO',
'openerp.addons.web.common.http:INFO',
'openerp.addons.web.common.openerplib:INFO',
'openerp.sql_db:INFO',
':INFO',
]
if tools.config['log_level'] == 'info':
pseudo_config = []
elif tools.config['log_level'] == 'debug_rpc':
pseudo_config = ['openerp:DEBUG','openerp.netsvc.rpc.request:DEBUG']
elif tools.config['log_level'] == 'debug_rpc_answer':
pseudo_config = ['openerp:DEBUG','openerp.netsvc.rpc.request:DEBUG', 'openerp.netsvc.rpc.response:DEBUG']
elif tools.config['log_level'] == 'debug':
pseudo_config = ['openerp:DEBUG']
elif tools.config['log_level'] == 'test':
pseudo_config = ['openerp:TEST']
elif tools.config['log_level'] == 'warn':
pseudo_config = ['openerp:WARNING']
elif tools.config['log_level'] == 'error':
pseudo_config = ['openerp:ERROR']
elif tools.config['log_level'] == 'critical':
pseudo_config = ['openerp:CRITICAL']
elif tools.config['log_level'] == 'debug_sql':
pseudo_config = ['openerp.sql_db:DEBUG']
logconfig = tools.config['log_handler']
for logconfig_item in default_config + pseudo_config + logconfig:
loggername, level = logconfig_item.split(':')
level = getattr(logging, level, logging.INFO)
logger = logging.getLogger(loggername)
logger.handlers = []
logger.setLevel(level)
logger.addHandler(handler)
if loggername != '':
logger.propagate = False
for logconfig_item in default_config + pseudo_config + logconfig:
_logger.debug('logger level set: "%s"', logconfig_item)
# An alternative logging scheme for automated runs of the
# server intended to test it.
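The handler wiring above replaces the single root-logger setup with per-logger configuration driven by '<name>:<LEVEL>' entries. A self-contained sketch of the same mechanism (the entry list stands in for tools.config['log_handler'] plus the pseudo-config):

import logging

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    '%(asctime)s %(levelname)s %(name)s: %(message)s'))

# Same shape as the entries above: '<logger name>:<LEVEL>', '' is the root.
logconfig = ['openerp:INFO', 'openerp.sql_db:DEBUG', ':INFO']

for item in logconfig:
    loggername, level = item.split(':')
    logger = logging.getLogger(loggername)
    logger.handlers = []
    logger.setLevel(getattr(logging, level, logging.INFO))
    logger.addHandler(handler)
    if loggername != '':
        # Each configured logger owns a handler, so stop records from
        # bubbling up and being printed twice.
        logger.propagate = False

logging.getLogger('openerp.sql_db').debug('visible: sql_db is at DEBUG')
logging.getLogger('openerp').debug('suppressed: openerp is at INFO')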
@ -202,8 +240,8 @@ def init_alternative_logger():
if record.levelno > 20:
print record.levelno, record.pathname, record.msg
handler = H()
logger = logging.getLogger()
logger.handlers = []
# Add the handler to the 'openerp' logger.
logger = logging.getLogger('openerp')
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
@ -225,9 +263,6 @@ class Server:
# all I/O blocking operations
_busywait_timeout = 0.5
__logger = logging.getLogger('server')
def __init__(self):
Server.__servers.append(self)
if Server.__is_started:
@ -242,7 +277,7 @@ class Server:
t.start()
def start(self):
self.__logger.debug("called stub Server.start")
_logger.debug("called stub Server.start")
def _late_start(self):
self.start()
@ -251,7 +286,7 @@ class Server:
Server.__starter_threads.remove(thr)
def stop(self):
self.__logger.debug("called stub Server.stop")
_logger.debug("called stub Server.stop")
def stats(self):
""" This function should return statistics about the server """
@ -261,7 +296,7 @@ class Server:
def startAll(cls):
if cls.__is_started:
return
cls.__logger.info("Starting %d services" % len(cls.__servers))
_logger.info("Starting %d services" % len(cls.__servers))
for srv in cls.__servers:
srv.start()
cls.__is_started = True
@ -270,7 +305,7 @@ class Server:
def quitAll(cls):
if not cls.__is_started:
return
cls.__logger.info("Stopping %d services" % len(cls.__servers))
_logger.info("Stopping %d services" % len(cls.__servers))
for thr in cls.__starter_threads:
if not thr.finished.is_set():
thr.cancel()
@ -292,19 +327,17 @@ class Server:
def replace_request_password(args):
# password is always 3rd argument in a request, we replace it in RPC logs
# so it's easier to forward logs for diagnostics/debugging purposes...
args = list(args)
if len(args) > 2:
args = list(args)
args[2] = '*'
return args
return tuple(args)
def log(title, msg, channel=logging.DEBUG_RPC, depth=None, fn=""):
logger = logging.getLogger(title)
if logger.isEnabledFor(channel):
indent=''
indent_after=' '*len(fn)
for line in (fn+pformat(msg, depth=depth)).split('\n'):
logger.log(channel, indent+line)
indent=indent_after
def log(logger, level, prefix, msg, depth=None):
indent=''
indent_after=' '*len(prefix)
for line in (prefix+pformat(msg, depth=depth)).split('\n'):
logger.log(level, indent+line)
indent=indent_after
def dispatch_rpc(service_name, method, params):
""" Handle a RPC call.
@ -312,22 +345,25 @@ def dispatch_rpc(service_name, method, params):
This is pure Python code, the actual marshalling (from/to XML-RPC or
NET-RPC) is done in a upper layer.
"""
def _log(title, msg, channel=logging.DEBUG_RPC, depth=None, fn=""):
log(title, msg, channel=channel, depth=depth, fn=fn)
try:
logger = logging.getLogger('result')
start_time = end_time = 0
if logger.isEnabledFor(logging.DEBUG_RPC_ANSWER):
_log('service', tuple(replace_request_password(params)), depth=None, fn='%s.%s'%(service_name,method))
if logger.isEnabledFor(logging.DEBUG_RPC):
rpc_request = logging.getLogger(__name__ + '.rpc.request')
rpc_response = logging.getLogger(__name__ + '.rpc.response')
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
rpc_response_flag = rpc_response.isEnabledFor(logging.DEBUG)
if rpc_request_flag or rpc_response_flag:
start_time = time.time()
if rpc_request and rpc_response_flag:
log(rpc_request,logging.DEBUG,'%s.%s'%(service_name,method), replace_request_password(params))
result = ExportService.getService(service_name).dispatch(method, params)
if logger.isEnabledFor(logging.DEBUG_RPC):
if rpc_request_flag or rpc_response_flag:
end_time = time.time()
if not logger.isEnabledFor(logging.DEBUG_RPC_ANSWER):
_log('service (%.3fs)' % (end_time - start_time), tuple(replace_request_password(params)), depth=1, fn='%s.%s'%(service_name,method))
_log('execution time', '%.3fs' % (end_time - start_time), channel=logging.DEBUG_RPC_ANSWER)
_log('result', result, channel=logging.DEBUG_RPC_ANSWER)
if rpc_response_flag:
log(rpc_response,logging.DEBUG,'%s.%s time:%.3fs '%(service_name,method,end_time - start_time), result)
else:
log(rpc_request,logging.DEBUG,'%s.%s time:%.3fs '%(service_name,method,end_time - start_time), replace_request_password(params), depth=1)
return result
except openerp.exceptions.AccessError:
raise
@ -336,11 +372,11 @@ def dispatch_rpc(service_name, method, params):
except openerp.exceptions.Warning:
raise
except openerp.exceptions.DeferredException, e:
_log('exception', tools.exception_to_unicode(e))
_logger.error(tools.exception_to_unicode(e))
post_mortem(e.traceback)
raise
except Exception, e:
_log('exception', tools.exception_to_unicode(e))
_logger.error(tools.exception_to_unicode(e))
post_mortem(sys.exc_info())
raise
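dispatch_rpc now splits RPC tracing across two dedicated loggers (...rpc.request and ...rpc.response), and the log() helper gained an explicit logger/level signature. A self-contained sketch of the helper and its call shape (the payload is invented):

import logging
from pprint import pformat

logging.basicConfig(level=logging.DEBUG)

def log(logger, level, prefix, msg, depth=None):
    # Same indentation trick as the new netsvc.log(): continuation lines
    # of the pretty-printed payload align under the prefix.
    indent = ''
    indent_after = ' ' * len(prefix)
    for line in (prefix + pformat(msg, depth=depth)).split('\n'):
        logger.log(level, indent + line)
        indent = indent_after

rpc_request = logging.getLogger('openerp.netsvc.rpc.request')
params = ('db_name', 1, '*', {'context': {'lang': 'en_US'}})
log(rpc_request, logging.DEBUG, 'object.execute ', params)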

View File

@ -159,7 +159,7 @@ FALSE_LEAF = (0, '=', 1)
TRUE_DOMAIN = [TRUE_LEAF]
FALSE_DOMAIN = [FALSE_LEAF]
_logger = logging.getLogger('expression')
_logger = logging.getLogger(__name__)
def normalize(domain):
"""Returns a normalized version of ``domain_expr``, where all implicit '&' operators

View File

@ -34,10 +34,10 @@
import base64
import datetime as DT
import logging
import re
import string
import sys
import warnings
import xmlrpclib
from psycopg2 import Binary
@ -48,6 +48,8 @@ from openerp.tools.translate import _
from openerp.tools import float_round, float_repr
import simplejson
_logger = logging.getLogger(__name__)
def _symbol_set(symb):
if symb == None or symb == False:
return None
@ -139,8 +141,10 @@ class boolean(_column):
def __init__(self, string='unknown', required=False, **args):
super(boolean, self).__init__(string=string, required=required, **args)
if required:
warnings.warn("Making a boolean field `required` has no effect, as NULL values are "
"automatically turned into False", PendingDeprecationWarning, stacklevel=2)
_logger.warning(
"required=True is deprecated: making a boolean field"
" `required` has no effect, as NULL values are "
"automatically turned into False.")
class integer(_column):
_type = 'integer'
@ -152,8 +156,10 @@ class integer(_column):
def __init__(self, string='unknown', required=False, **args):
super(integer, self).__init__(string=string, required=required, **args)
if required:
warnings.warn("Making an integer field `required` has no effect, as NULL values are "
"automatically turned into 0", PendingDeprecationWarning, stacklevel=2)
_logger.warning(
"required=True is deprecated: making an integer field"
" `required` has no effect, as NULL values are "
"automatically turned into 0.")
class integer_big(_column):
"""Experimental 64 bit integer column type, currently unused.
@ -176,8 +182,10 @@ class integer_big(_column):
def __init__(self, string='unknown', required=False, **args):
super(integer_big, self).__init__(string=string, required=required, **args)
if required:
warnings.warn("Making an integer_big field `required` has no effect, as NULL values are "
"automatically turned into 0", PendingDeprecationWarning, stacklevel=2)
_logger.warning(
"required=True is deprecated: making an integer_big field"
" `required` has no effect, as NULL values are "
"automatically turned into 0.")
class reference(_column):
_type = 'reference'
@ -238,8 +246,10 @@ class float(_column):
# synopsis: digits_compute(cr) -> (precision, scale)
self.digits_compute = digits_compute
if required:
warnings.warn("Making a float field `required` has no effect, as NULL values are "
"automatically turned into 0.0", PendingDeprecationWarning, stacklevel=2)
_logger.warning(
"required=True is deprecated: making a float field"
" `required` has no effect, as NULL values are "
"automatically turned into 0.0.")
def digits_change(self, cr):
if self.digits_compute:
@ -355,7 +365,7 @@ class one2one(_column):
_deprecated = True
def __init__(self, obj, string='unknown', **args):
warnings.warn("The one2one field doesn't work anymore", DeprecationWarning)
_logger.warning("The one2one field is deprecated and doesn't work anymore.")
_column.__init__(self, string=string, **args)
self._obj = obj
@ -620,8 +630,9 @@ class many2many(_column):
for id in ids:
res[id] = []
if offset:
warnings.warn("Specifying offset at a many2many.get() may produce unpredictable results.",
DeprecationWarning, stacklevel=2)
_logger.warning(
"Specifying offset at a many2many.get() is deprecated and may"
" produce unpredictable results.")
obj = model.pool.get(self._obj)
rel, id1, id2 = self._sql_names(model)
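fields.py swaps warnings.warn(..., PendingDeprecationWarning) for _logger.warning(). One practical consequence, noted here as an aside: the default warning filters silence PendingDeprecationWarning entirely, and warnings are de-duplicated per call site, while log records are emitted on every call. A runnable illustration:

import logging
import warnings

logging.basicConfig()
_logger = logging.getLogger('openerp.osv.fields')

# Silent under default filters: PendingDeprecationWarning is ignored.
warnings.warn('required has no effect', PendingDeprecationWarning)

for _ in range(2):
    # Emitted both times: logging neither filters nor de-duplicates here.
    _logger.warning('required=True is deprecated: it has no effect on a '
                    'boolean field, NULL is turned into False.')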

View File

@ -52,7 +52,6 @@ import re
import simplejson
import time
import types
import warnings
from lxml import etree
import fields
@ -65,6 +64,9 @@ from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from query import Query
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
# List of etree._Element subclasses that we choose to ignore when parsing XML.
from openerp.tools import SKIPPED_ELEMENT_TYPES
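orm.py now routes schema-change messages to a dedicated child logger (__name__ + '.schema'), so DDL chatter can be tuned independently of general ORM output, e.g. through the log_handler entries configured in netsvc.py above. A small sketch of the effect:

import logging
logging.basicConfig(level=logging.INFO)

_logger = logging.getLogger('openerp.osv.orm')
_schema = logging.getLogger('openerp.osv.orm.schema')
_schema.setLevel(logging.DEBUG)  # e.g. via 'openerp.osv.orm.schema:DEBUG'

_schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
              'res_partner', 'name')
_logger.debug('suppressed: the parent orm logger stays at INFO')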
@ -216,8 +218,7 @@ def check_object_name(name):
def raise_on_invalid_object_name(name):
if not check_object_name(name):
msg = "The _name attribute %s is not valid." % name
logger = netsvc.Logger()
logger.notifyChannel('orm', netsvc.LOG_ERROR, msg)
_logger.error(msg)
raise except_orm('ValueError', msg)
POSTGRES_CONFDELTYPES = {
@ -329,8 +330,7 @@ class browse_record(object):
self._table = table # deprecated, use _model!
self._model = table
self._table_name = self._table._name
self.__logger = logging.getLogger(
'openerp.osv.orm.browse_record.' + self._table_name)
self.__logger = logging.getLogger('openerp.osv.orm.browse_record.' + self._table_name)
self._context = context
self._fields_process = fields_process
@ -371,7 +371,7 @@ class browse_record(object):
return attr
else:
error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
self.__logger.warn(error_msg)
self.__logger.warning(error_msg)
raise KeyError(error_msg)
# if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
@ -407,7 +407,7 @@ class browse_record(object):
if not field_values:
# Where did those ids come from? Perhaps old entries in ir_model_dat?
self.__logger.warn("No field_values found for ids %s in %s", ids, self)
_logger.warning("No field_values found for ids %s in %s", ids, self)
raise KeyError('Field %s not found in %s'%(name, self))
# create browse records for 'remote' objects
for result_line in field_values:
@ -584,7 +584,7 @@ def get_pg_type(f, type_override=None):
else:
pg_type = get_pg_type(f, getattr(fields, f._type))
else:
logging.getLogger('orm').warn('%s type not supported!', field_type)
_logger.warning('%s type not supported!', field_type)
pg_type = None
return pg_type
@ -702,8 +702,6 @@ class BaseModel(object):
_log_create = False
_sql_constraints = []
_protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']
__logger = logging.getLogger('orm')
__schema = logging.getLogger('orm.schema')
CONCURRENCY_CHECK_FIELD = '__last_update'
@ -957,8 +955,7 @@ class BaseModel(object):
name = type(self).__name__.split('.')[0]
msg = "The class %s has to have a _name attribute" % name
logger = netsvc.Logger()
logger.notifyChannel('orm', netsvc.LOG_ERROR, msg)
_logger.error(msg)
raise except_orm('ValueError', msg)
if not self._description:
@ -1384,7 +1381,7 @@ class BaseModel(object):
res = key
break
if line[i] and not res:
logging.getLogger('orm.import').warn(
_logger.warning(
_("key '%s' not found in selection field '%s'"),
tools.ustr(line[i]), tools.ustr(field_name))
warning.append(_("Key/value '%s' not found in selection field '%s'") % (
@ -1803,7 +1800,7 @@ class BaseModel(object):
res.insert(0, ("Can't find field '%s' in the following view parts composing the view of object model '%s':" % (field, model), None))
msg = "\n * ".join([r[0] for r in res])
msg += "\n\nEither you wrongly customized this view, or some modules bringing those views are not compatible with your current data model"
netsvc.Logger().notifyChannel('orm', netsvc.LOG_ERROR, msg)
_logger.error(msg)
raise except_orm('View error', msg)
return arch, fields
@ -2625,8 +2622,7 @@ class BaseModel(object):
def _parent_store_compute(self, cr):
if not self._parent_store:
return
logger = netsvc.Logger()
logger.notifyChannel('data', netsvc.LOG_INFO, 'Computing parent left and right for table %s...' % (self._table, ))
_logger.info('Computing parent left and right for table %s...', self._table)
def browse_rec(root, pos=0):
# TODO: set order
where = self._parent_name+'='+str(root)
@ -2650,8 +2646,7 @@ class BaseModel(object):
return True
def _update_store(self, cr, f, k):
logger = netsvc.Logger()
logger.notifyChannel('data', netsvc.LOG_INFO, "storing computed values of fields.function '%s'" % (k,))
_logger.info("storing computed values of fields.function '%s'", k)
ss = self._columns[k]._symbol_set
update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
cr.execute('select id from '+self._table)
@ -2707,12 +2702,12 @@ class BaseModel(object):
for column in cr.dictfetchall():
if log:
self.__logger.debug("column %s is in the table %s but not in the corresponding object %s",
column['attname'], self._table, self._name)
_logger.debug("column %s is in the table %s but not in the corresponding object %s",
column['attname'], self._table, self._name)
if column['attnotnull']:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
self.__schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, column['attname'])
_schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, column['attname'])
# checked version: for direct m2o starting from `self`
def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
@ -2724,14 +2719,14 @@ class BaseModel(object):
# So unless stated otherwise we default them to ondelete=cascade.
ondelete = ondelete or 'cascade'
self._foreign_keys.append((self._table, source_field, dest_model._table, ondelete or 'set null'))
self.__schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
self._table, source_field, dest_model._table, ondelete)
_schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
self._table, source_field, dest_model._table, ondelete)
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
self._foreign_keys.append((source_table, source_field, dest_model._table, ondelete or 'set null'))
self.__schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
source_table, source_field, dest_model._table, ondelete)
_schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
source_table, source_field, dest_model._table, ondelete)
def _auto_init(self, cr, context=None):
"""
@ -2805,8 +2800,8 @@ class BaseModel(object):
cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
res['attname'] = k
column_data[k] = res
self.__schema.debug("Table '%s': renamed column '%s' to '%s'",
self._table, f.oldname, k)
_schema.debug("Table '%s': renamed column '%s' to '%s'",
self._table, f.oldname, k)
# The field already exists in database. Possibly
# change its type, rename it, drop it or change its
@ -2817,12 +2812,12 @@ class BaseModel(object):
f_pg_notnull = res['attnotnull']
if isinstance(f, fields.function) and not f.store and\
not getattr(f, 'nodrop', False):
self.__logger.info('column %s (%s) in table %s removed: converted to a function !\n',
k, f.string, self._table)
_logger.info('column %s (%s) in table %s removed: converted to a function !\n',
k, f.string, self._table)
cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
cr.commit()
self.__schema.debug("Table '%s': dropped column '%s' with cascade",
self._table, k)
_schema.debug("Table '%s': dropped column '%s' with cascade",
self._table, k)
f_obj_type = None
else:
f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
@ -2844,7 +2839,7 @@ class BaseModel(object):
cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
cr.commit()
self.__schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
_schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
self._table, k, f_pg_size, f.size)
for c in casts:
if (f_pg_type==c[0]) and (f._type==c[1]):
@ -2855,7 +2850,7 @@ class BaseModel(object):
cr.execute(('UPDATE "%s" SET "%s"=temp_change_size'+c[3]) % (self._table, k))
cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
cr.commit()
self.__schema.debug("Table '%s': column '%s' changed type from %s to %s",
_schema.debug("Table '%s': column '%s' changed type from %s to %s",
self._table, k, c[0], c[1])
break
@ -2876,7 +2871,7 @@ class BaseModel(object):
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
self.__schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
_schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
self._table, k, f_pg_type, f._type, newname)
# if the field is required and hasn't got a NOT NULL constraint
@ -2897,19 +2892,19 @@ class BaseModel(object):
try:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
cr.commit()
self.__schema.debug("Table '%s': column '%s': added NOT NULL constraint",
self._table, k)
_schema.debug("Table '%s': column '%s': added NOT NULL constraint",
self._table, k)
except Exception:
msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
"If you want to have it, you should update the records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
self.__schema.warn(msg, self._table, k, self._table, k)
_schema.warning(msg, self._table, k, self._table, k)
cr.commit()
elif not f.required and f_pg_notnull == 1:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
cr.commit()
self.__schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, k)
_schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, k)
# Verify index
indexname = '%s_%s_index' % (self._table, k)
cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
@ -2923,12 +2918,12 @@ class BaseModel(object):
"This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
" because there is a length limit for indexable btree values!\n"\
"Use a search view instead if you simply want to make the field searchable."
self.__schema.warn(msg, self._table, k, f._type)
_schema.warning(msg, self._table, k, f._type)
if res2 and not f.select:
cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
cr.commit()
msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
self.__schema.debug(msg, self._table, k, f._type)
_schema.debug(msg, self._table, k, f._type)
if isinstance(f, fields.many2one):
dest_model = self.pool.get(f._obj)
@ -2955,7 +2950,7 @@ class BaseModel(object):
cr.execute('ALTER TABLE "' + self._table + '" DROP CONSTRAINT "' + res2[0]['conname'] + '"')
self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
cr.commit()
self.__schema.debug("Table '%s': column '%s': XXX",
_schema.debug("Table '%s': column '%s': XXX",
self._table, k)
# The field doesn't exist in database. Create it if necessary.
@ -2964,7 +2959,7 @@ class BaseModel(object):
# add the missing field
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
self.__schema.debug("Table '%s': added column '%s' with definition=%s",
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, k, get_pg_type(f)[1])
# initialize it
@ -2978,7 +2973,7 @@ class BaseModel(object):
query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
cr.execute(query, (ss[1](default),))
cr.commit()
netsvc.Logger().notifyChannel('data', netsvc.LOG_DEBUG, "Table '%s': setting default value of new column %s" % (self._table, k))
_logger.debug("Table '%s': setting default value of new column %s", self._table, k)
# remember the functions to call for the stored fields
if isinstance(f, fields.function):
@ -3002,14 +2997,14 @@ class BaseModel(object):
try:
cr.commit()
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
self.__schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
_schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
self._table, k)
except Exception:
msg = "WARNING: unable to set column %s of table %s not null !\n"\
"Try to re-run: openerp-server --update=module\n"\
"If it doesn't work, update records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
self.__logger.warn(msg, k, self._table, self._table, k)
_logger.warning(msg, k, self._table, self._table, k)
cr.commit()
else:
@ -3046,7 +3041,7 @@ class BaseModel(object):
def _create_table(self, cr):
cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS' % (self._table,))
cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
self.__schema.debug("Table '%s': created", self._table)
_schema.debug("Table '%s': created", self._table)
def _parent_columns_exist(self, cr):
@ -3061,24 +3056,24 @@ class BaseModel(object):
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
if 'parent_left' not in self._columns:
self.__logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
self._table)
self.__schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_left', 'INTEGER')
_logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
self._table)
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_left', 'INTEGER')
elif not self._columns['parent_left'].select:
self.__logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
self._table)
_logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
self._table)
if 'parent_right' not in self._columns:
self.__logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
self._table)
self.__schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_right', 'INTEGER')
_logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
self._table)
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_right', 'INTEGER')
elif not self._columns['parent_right'].select:
self.__logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
self._table)
_logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
self._table)
if self._columns[self._parent_name].ondelete != 'cascade':
self.__logger.error("The column %s on object %s must be set as ondelete='cascade'",
self._parent_name, self._name)
_logger.error("The column %s on object %s must be set as ondelete='cascade'",
self._parent_name, self._name)
cr.commit()
@ -3093,8 +3088,8 @@ class BaseModel(object):
if not cr.rowcount:
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, field, field_def))
cr.commit()
self.__schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, field, field_def)
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, field, field_def)
def _select_column_data(self, cr):
@ -3142,7 +3137,7 @@ class BaseModel(object):
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
cr.commit()
self.__schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
_schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
def _add_sql_constraints(self, cr):
@ -3195,9 +3190,9 @@ class BaseModel(object):
try:
cr.execute(sql_action['query'])
cr.commit()
self.__schema.debug(sql_action['msg_ok'])
_schema.debug(sql_action['msg_ok'])
except:
self.__schema.warn(sql_action['msg_err'])
_schema.warning(sql_action['msg_err'])
cr.rollback()
@ -3254,11 +3249,11 @@ class BaseModel(object):
def _inherits_check(self):
for table, field_name in self._inherits.items():
if field_name not in self._columns:
logging.getLogger('init').info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.' % (field_name, self._name))
_logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
required=True, ondelete="cascade")
elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() != "cascade":
logging.getLogger('init').warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade", forcing it.' % (field_name, self._name))
_logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade", forcing it.', field_name, self._name)
self._columns[field_name].required = True
self._columns[field_name].ondelete = "cascade"
@ -3937,7 +3932,7 @@ class BaseModel(object):
self.pool.get(table).write(cr, user, nids, v, context)
if unknown_fields:
self.__logger.warn(
_logger.warning(
'No such field(s) in model %s: %s.',
self._name, ', '.join(unknown_fields))
self._validate(cr, user, ids, context)
@ -4073,7 +4068,7 @@ class BaseModel(object):
del vals[v]
unknown_fields.append(v)
if unknown_fields:
self.__logger.warn(
_logger.warning(
'No such field(s) in model %s: %s.',
self._name, ', '.join(unknown_fields))
@ -4455,9 +4450,9 @@ class BaseModel(object):
assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
logging.getLogger('orm.search').debug("Many2one function/related fields must be stored " \
"to be used as ordering fields! Ignoring sorting for %s.%s",
self._name, order_field)
_logger.debug("Many2one function/related fields must be stored " \
"to be used as ordering fields! Ignoring sorting for %s.%s",
self._name, order_field)
return
# figure out the applicable order_by for the m2o
@ -4753,8 +4748,8 @@ class BaseModel(object):
return [x[0] for x in cr.fetchall()]
def check_recursion(self, cr, uid, ids, context=None, parent=None):
warnings.warn("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
self._name, DeprecationWarning, stacklevel=3)
_logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
self._name)
assert parent is None or parent in self._columns or parent in self._inherit_fields,\
"The 'parent' parameter passed to check_recursion() must be None or a valid field name"
return self._check_recursion(cr, uid, ids, context, parent)
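Taken together, these hunks converge on one module-level logger per file plus a dedicated schema logger, in place of per-class `__logger`/`__schema` attributes and `netsvc.Logger().notifyChannel(...)` calls. A minimal sketch of the pattern (the `_schema` definition is an assumption; it is not visible in these hunks):

    import logging

    _logger = logging.getLogger(__name__)              # e.g. 'openerp.osv.orm'
    _schema = logging.getLogger(__name__ + '.schema')  # schema-change messages

    def example(table, column):
        # Lazy %-style arguments: the message is only formatted if a
        # handler actually emits the record.
        _schema.debug("Table '%s': added column '%s'", table, column)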


@ -34,6 +34,8 @@ from openerp.tools.translate import translate
from openerp.osv.orm import MetaModel, Model, TransientModel, AbstractModel
import openerp.exceptions
_logger = logging.getLogger(__name__)
# Deprecated.
class except_osv(Exception):
def __init__(self, name, value):
@ -45,7 +47,6 @@ service = None
class object_proxy(object):
def __init__(self):
self.logger = logging.getLogger('web-services')
global service
service = self
@ -130,7 +131,7 @@ class object_proxy(object):
tr(osv_pool._sql_error[key], 'sql_constraint') or inst[0])
if inst.pgcode in (errorcodes.NOT_NULL_VIOLATION, errorcodes.FOREIGN_KEY_VIOLATION, errorcodes.RESTRICT_VIOLATION):
msg = _('The operation cannot be completed, probably due to the following:\n- deletion: you may be trying to delete a record while other records still reference it\n- creation/update: a mandatory field is not correctly set')
self.logger.debug("IntegrityError", exc_info=True)
_logger.debug("IntegrityError", exc_info=True)
try:
errortxt = inst.pgerror.replace('«','"').replace('»','"')
if '"public".' in errortxt:
@ -151,7 +152,7 @@ class object_proxy(object):
else:
netsvc.abort_response(1, _('Integrity Error'), 'warning', inst[0])
except Exception:
self.logger.exception("Uncaught exception")
_logger.exception("Uncaught exception")
raise
return wrapper
@ -174,7 +175,7 @@ class object_proxy(object):
raise except_osv('Access Denied', 'Private methods (such as %s) cannot be called remotely.' % (method,))
res = self.execute_cr(cr, uid, obj, method, *args, **kw)
if res is None:
self.logger.warning('The method %s of the object %s can not return `None` !', method, obj)
_logger.warning('The method %s of the object %s can not return `None` !', method, obj)
cr.commit()
except Exception:
cr.rollback()


@ -50,7 +50,6 @@ except ImportError:
import filters
import utils
import warnings
from generic import *
from utils import readNonWhitespace, readUntilWhitespace, ConvertFunctionsToVirtualList


@ -29,7 +29,8 @@ try:
from PIL import Image
except ImportError:
import logging
logging.warning('Python Imaging not installed, you can use only .JPG pictures !')
_logger = logging.getLogger(__name__)
_logger.warning('Python Imaging not installed, you can use only .JPG pictures !')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:


@ -18,6 +18,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import mako
from lxml import etree
from mako.template import Template
@ -25,6 +26,8 @@ from mako.lookup import TemplateLookup
import openerp.netsvc as netsvc
import traceback, sys, os
_logger = logging.getLogger(__name__)
class makohtml2html(object):
def __init__(self, html, localcontext):
self.localcontext = localcontext
@ -125,8 +128,7 @@ class makohtml2html(object):
return final_html
except Exception,e:
tb_s = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
netsvc.Logger().notifyChannel('report', netsvc.LOG_ERROR,
'report :\n%s\n%s\n' % (tb_s, str(e)))
_logger.error('report :\n%s\n%s\n', tb_s, str(e))
def parseNode(html, localcontext = {}):
r = makohtml2html(html, localcontext)
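The replacement above still assembles the traceback by hand via `traceback.format_exception`. For reference, a minimal sketch of the stdlib shortcut used elsewhere in this changeset, which captures the active traceback automatically:

    import logging

    _logger = logging.getLogger(__name__)

    try:
        1 / 0
    except Exception:
        _logger.exception('report failed')             # implies exc_info=True
        _logger.error('report failed', exc_info=True)  # equivalent ERROR record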


@ -40,6 +40,8 @@ Due to an awful configuration that ships with reportlab at many Linux
and Ubuntu distros, we have to override the search path, too.
"""
_logger = logging.getLogger(__name__)
CustomTTFonts = [ ('Helvetica',"DejaVu Sans", "DejaVuSans.ttf", 'normal'),
('Helvetica',"DejaVu Sans Bold", "DejaVuSans-Bold.ttf", 'bold'),
('Helvetica',"DejaVu Sans Oblique", "DejaVuSans-Oblique.ttf", 'italic'),
@ -97,7 +99,6 @@ def FindCustomFonts():
meanwhile, the server must be restarted eventually.
"""
dirpath = []
log = logging.getLogger('report.fonts')
global __foundFonts
__foundFonts = {}
searchpath = []
@ -127,7 +128,7 @@ def FindCustomFonts():
for d in dirpath:
abs_filename = os.path.join(d, filename)
if os.path.exists(abs_filename):
log.debug("Found font %s at %s", filename, abs_filename)
_logger.debug("Found font %s at %s", filename, abs_filename)
__foundFonts[filename] = abs_filename
break


@ -39,13 +39,14 @@ from openerp.tools.misc import file_open
from reportlab.pdfbase import pdfmetrics
from reportlab.lib.pagesizes import A4, letter
try:
from cStringIO import StringIO
_hush_pyflakes = [ StringIO ]
except ImportError:
from StringIO import StringIO
_logger = logging.getLogger(__name__)
encoding = 'utf-8'
def _open_image(filename, path=None):
@ -217,7 +218,7 @@ class _rml_styles(object,):
if sname in self.styles_obj:
style = self.styles_obj[sname]
else:
sys.stderr.write('Warning: style not found, %s - setting default!\n' % (node.get('style'),) )
_logger.warning('style not found, %s - setting default!', node.get('style'))
if not style:
style = self.default_style['Normal']
para_update = self._para_style_update(node)
@ -328,7 +329,6 @@ class _rml_canvas(object):
self.images = images
self.path = path
self.title = title
self._logger = logging.getLogger('report.rml.canvas')
if self.title:
self.canvas.setTitle(self.title)
@ -452,7 +452,7 @@ class _rml_canvas(object):
if not nfile:
if node.get('name'):
image_data = self.images[node.get('name')]
self._logger.debug("Image %s used", node.get('name'))
_logger.debug("Image %s used", node.get('name'))
s = StringIO(image_data)
else:
newtext = node.text
@ -466,7 +466,7 @@ class _rml_canvas(object):
if image_data:
s = StringIO(image_data)
else:
self._logger.debug("No image data!")
_logger.debug("No image data!")
return False
else:
if nfile in self.images:
@ -479,16 +479,16 @@ class _rml_canvas(object):
if up and up.scheme:
# RFC: do we really want to open external URLs?
# Are we safe from cross-site scripting or attacks?
self._logger.debug("Retrieve image from %s", nfile)
_logger.debug("Retrieve image from %s", nfile)
u = urllib.urlopen(str(nfile))
s = StringIO(u.read())
else:
self._logger.debug("Open image file %s ", nfile)
_logger.debug("Open image file %s ", nfile)
s = _open_image(nfile, path=self.path)
try:
img = ImageReader(s)
(sx,sy) = img.getSize()
self._logger.debug("Image is %dx%d", sx, sy)
_logger.debug("Image is %dx%d", sx, sy)
args = { 'x': 0.0, 'y': 0.0 }
for tag in ('width','height','x','y'):
if node.get(tag):
@ -540,7 +540,7 @@ class _rml_canvas(object):
try:
pdfmetrics.getFont(fontname)
except Exception:
logging.getLogger('report.fonts').debug('Could not locate font %s, substituting default: %s',
_logger.debug('Could not locate font %s, substituting default: %s',
fontname,
self.canvas._fontname)
fontname = self.canvas._fontname
@ -613,7 +613,6 @@ class _rml_flowable(object):
self.images = images
self.path = path
self.title = title
self._logger = logging.getLogger('report.rml.flowable')
def _textual(self, node):
rc1 = utils._process_text(self, node.text or '')
@ -753,7 +752,7 @@ class _rml_flowable(object):
from reportlab.graphics.barcode import createBarcodeDrawing
except ImportError:
self._logger.warning("Cannot use barcode renderers:", exc_info=True)
_logger.warning("Cannot use barcode renderers:", exc_info=True)
return None
args = utils.attr_get(node, [], {'ratio':'float','xdim':'unit','height':'unit','checksum':'int','quiet':'int','width':'unit','stop':'bool','bearers':'int','barWidth':'float','barHeight':'float'})
codes = {
@ -801,10 +800,10 @@ class _rml_flowable(object):
if not node.get('file'):
if node.get('name'):
if node.get('name') in self.doc.images:
self._logger.debug("Image %s read ", node.get('name'))
_logger.debug("Image %s read ", node.get('name'))
image_data = self.doc.images[node.get('name')].read()
else:
self._logger.warning("Image %s not defined", node.get('name'))
_logger.warning("Image %s not defined", node.get('name'))
return False
else:
import base64
@ -813,11 +812,11 @@ class _rml_flowable(object):
newtext = utils._process_text(self, node.text or '')
image_data = base64.decodestring(newtext)
if not image_data:
self._logger.debug("No inline image data")
_logger.debug("No inline image data")
return False
image = StringIO(image_data)
else:
self._logger.debug("Image get from file %s", node.get('file'))
_logger.debug("Image get from file %s", node.get('file'))
image = _open_image(node.get('file'), path=self.doc.path)
return platypus.Image(image, mask=(250,255,250,255,250,255), **(utils.attr_get(node, ['width','height'])))
elif node.tag=='spacer':
@ -980,7 +979,7 @@ def parseNode(rml, localcontext=None, fout=None, images=None, path='.', title=No
# means there is no custom fonts mapping in this system.
pass
except Exception:
logging.getLogger('report').warning('Cannot set font mapping', exc_info=True)
_logger.warning('Cannot set font mapping', exc_info=True)
pass
fp = StringIO()
r.render(fp)


@ -47,6 +47,8 @@ import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.misc import ustr
_logger = logging.getLogger(__name__)
_regex = re.compile('\[\[(.+?)\]\]')
def str2xml(s):
@ -68,7 +70,7 @@ def _child_get(node, self=None, tagname=None):
except GeneratorExit:
continue
except Exception, e:
logging.getLogger('report').warning('rml_except: "%s"',n.get('rml_except',''), exc_info=True)
_logger.warning('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if n.get('rml_tag'):
try:
@ -80,7 +82,7 @@ def _child_get(node, self=None, tagname=None):
except GeneratorExit:
yield n
except Exception, e:
logging.getLogger('report').warning('rml_tag: "%s"',n.get('rml_tag',''), exc_info=True)
_logger.warning('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
yield n
else:
yield n
@ -91,7 +93,7 @@ def _child_get(node, self=None, tagname=None):
except GeneratorExit:
continue
except Exception, e:
logging.getLogger('report').warning('rml_except: "%s"',n.get('rml_except',''), exc_info=True)
_logger.warning('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if self and self.localcontext and n.get('rml_tag'):
try:
@ -104,7 +106,7 @@ def _child_get(node, self=None, tagname=None):
except GeneratorExit:
pass
except Exception, e:
logging.getLogger('report').warning('rml_tag: "%s"',n.get('rml_tag',''), exc_info=True)
_logger.warning('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
pass
if (tagname is None) or (n.tag==tagname):
yield n


@ -36,6 +36,8 @@ import common
from openerp.osv.fields import float as float_class, function as function_class
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
DT_FORMAT = '%Y-%m-%d'
DHM_FORMAT = '%Y-%m-%d %H:%M:%S'
HM_FORMAT = '%H:%M:%S'
@ -482,7 +484,7 @@ class report_sxw(report_rml, preprocess.report):
)
except Exception:
#TODO: should probably raise a proper osv_except instead, shouldn't we? see LP bug #325632
logging.getLogger('report').error('Could not create saved report attachment', exc_info=True)
_logger.error('Could not create saved report attachment', exc_info=True)
results.append(result)
if results:
if results[0][1]=='pdf':


@ -46,6 +46,8 @@ import openerp.wsgi
low-level behavior of the wire.
"""
_logger = logging.getLogger(__name__)
def start_services():
""" Start all services.
@ -80,9 +82,8 @@ def stop_services():
openerp.netsvc.Server.quitAll()
openerp.wsgi.stop_server()
config = openerp.tools.config
logger = logging.getLogger('server')
logger.info("Initiating shutdown")
logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")
_logger.info("Initiating shutdown")
_logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")
logging.shutdown()
# Manually join() all threads before calling sys.exit() to allow a second signal


@ -60,6 +60,9 @@ try:
except ImportError:
class SSLError(Exception): pass
_logger = logging.getLogger(__name__)
# TODO delete this for 6.2, it is still needed for 6.1.
class HttpLogHandler:
""" helper class for uniform log handling
Please define self._logger at each class that is derived from this
@ -76,11 +79,12 @@ class HttpLogHandler:
self._logger.exception(format, *args)
def log_request(self, code='-', size='-'):
self._logger.log(netsvc.logging.DEBUG_RPC, '"%s" %s %s',
self.requestline, str(code), str(size))
self._logger.debug('"%s" %s %s',
self.requestline, str(code), str(size))
class StaticHTTPHandler(HttpLogHandler, FixSendError, HttpOptions, HTTPHandler):
_logger = logging.getLogger('httpd')
_logger = logging.getLogger(__name__)
_HTTP_OPTIONS = { 'Allow': ['OPTIONS', 'GET', 'HEAD'] }
def __init__(self,request, client_address, server):
@ -120,8 +124,7 @@ def init_static_http():
reg_http_service(base_path, StaticHTTPHandler)
logging.getLogger("web-services").info("Registered HTTP dir %s for %s" % \
(document_root, base_path))
_logger.info("Registered HTTP dir %s for %s", document_root, base_path)
import security
@ -140,12 +143,9 @@ class OpenERPAuthProvider(AuthProvider):
return False
return (user, passwd, db, uid)
except Exception,e:
logging.getLogger("auth").debug("Fail auth: %s" % e )
_logger.debug("Fail auth: %s" % e )
return False
def log(self, msg, lvl=logging.INFO):
logging.getLogger("auth").log(lvl,msg)
def checkRequest(self,handler,path, db=False):
auth_str = handler.headers.get('Authorization',False)
try:
@ -159,21 +159,21 @@ class OpenERPAuthProvider(AuthProvider):
db = psp[0]
else:
#FIXME!
self.log("Wrong path: %s, failing auth" %path)
_logger.info("Wrong path: %s, failing auth" %path)
raise AuthRejectedExc("Authorization failed. Wrong sub-path.")
if self.auth_creds.get(db):
return True
if auth_str and auth_str.startswith('Basic '):
auth_str=auth_str[len('Basic '):]
(user,passwd) = base64.decodestring(auth_str).split(':')
self.log("Found user=\"%s\", passwd=\"***\" for db=\"%s\"" %(user,db))
_logger.info("Found user=\"%s\", passwd=\"***\" for db=\"%s\"", user, db)
acd = self.authenticate(db,user,passwd,handler.client_address)
if acd != False:
self.auth_creds[db] = acd
self.last_auth = db
return True
if self.auth_tries > 5:
self.log("Failing authorization after 5 requests w/o password")
_logger.info("Failing authorization after 5 requests w/o password")
raise AuthRejectedExc("Authorization failed.")
self.auth_tries += 1
raise AuthRequiredExc(atype='Basic', realm=self.realm)
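A self-contained sketch of the convention stated in HttpLogHandler's docstring — each derived handler class supplies its own class-level `_logger`, as StaticHTTPHandler now does with `logging.getLogger(__name__)` (the stub class below stands in for the real mixin):

    import logging

    class LogHandlerStub(object):
        # Stand-in for HttpLogHandler: log_* methods delegate to the
        # _logger that every subclass is expected to define.
        def log_message(self, format, *args):
            self._logger.debug(format % args)

    class MyHandler(LogHandlerStub):
        _logger = logging.getLogger(__name__)

    MyHandler().log_message('"%s" %s %s', 'GET /static/ HTTP/1.0', 200, 512)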


@ -36,6 +36,8 @@ import openerp.netsvc as netsvc
import openerp.tiny_socket as tiny_socket
import openerp.tools as tools
_logger = logging.getLogger(__name__)
class TinySocketClientThread(threading.Thread):
def __init__(self, sock, threads):
spn = sock and sock.getpeername()
@ -69,12 +71,12 @@ class TinySocketClientThread(threading.Thread):
valid_exception = Exception(netrpc_handle_exception_legacy(e))
valid_traceback = getattr(e, 'traceback', sys.exc_info())
formatted_traceback = "".join(traceback.format_exception(*valid_traceback))
logging.getLogger('web-services').debug("netrpc: communication-level exception", exc_info=True)
_logger.debug("netrpc: communication-level exception", exc_info=True)
ts.mysend(valid_exception, exception=True, traceback=formatted_traceback)
break
except Exception, ex:
#terminate this channel if we can't properly send back the error
logging.getLogger('web-services').exception("netrpc: cannot deliver exception message to client")
_logger.exception("netrpc: cannot deliver exception message to client")
break
netsvc.close_socket(self.sock)
@ -108,8 +110,7 @@ class TinySocketServerThread(threading.Thread,netsvc.Server):
self.socket.bind((self.__interface, self.__port))
self.socket.listen(5)
self.threads = []
netsvc.Logger().notifyChannel("web-services", netsvc.LOG_INFO,
"starting NET-RPC service on %s:%s" % (interface or '0.0.0.0', port,))
_logger.info("starting NET-RPC service on %s:%s", interface or '0.0.0.0', port)
def run(self):
try:
@ -127,11 +128,10 @@ class TinySocketServerThread(threading.Thread,netsvc.Server):
if (lt > 10) and (lt % 10 == 0):
# Not many threads should be serving at the same time, so log
# their abuse.
netsvc.Logger().notifyChannel("web-services", netsvc.LOG_DEBUG,
"Netrpc: %d threads" % len(self.threads))
_logger.debug("Netrpc: %d threads", len(self.threads))
self.socket.close()
except Exception, e:
logging.getLogger('web-services').warning("Netrpc: closing because of exception %s" % str(e))
_logger.warning("Netrpc: closing because of exception %s" % str(e))
self.socket.close()
return False


@ -50,6 +50,8 @@ from openerp.service import http_server
procedures to be called. Each method has its own arguments footprint.
"""
_logger = logging.getLogger(__name__)
RPC_VERSION_1 = {'server_version': '6.1', 'protocol_version': 1}
# This should be moved to openerp.modules.db, along side initialize().
@ -83,7 +85,7 @@ def _initialize_db(serv, id, db_name, demo, lang, user_password):
cr.close()
except Exception, e:
serv.actions[id].update(clean=False, exception=e)
logging.getLogger('db.create').exception('CREATE DATABASE failed:')
_logger.exception('CREATE DATABASE failed:')
serv.actions[id]['traceback'] = traceback.format_exc()
if cr:
cr.close()
@ -134,7 +136,7 @@ class db(netsvc.ExportService):
self._create_empty_database(db_name)
logging.getLogger('db.create').info('CREATE DATABASE %s', db_name.lower())
_logger.info('CREATE DATABASE %s', db_name.lower())
create_thread = threading.Thread(target=_initialize_db,
args=(self, id, db_name, demo, lang, user_password))
create_thread.start()
@ -150,7 +152,7 @@ class db(netsvc.ExportService):
self.actions[id] = {'clean': False}
logging.getLogger('db.create').info('CREATE DATABASE %s', db_name.lower())
_logger.info('CREATE DATABASE %s', db_name.lower())
self._create_empty_database(db_name)
_initialize_db(self, id, db_name, demo, lang, user_password)
return True
@ -173,7 +175,6 @@ class db(netsvc.ExportService):
def exp_drop(self, db_name):
openerp.modules.registry.RegistryManager.delete(db_name)
sql_db.close_db(db_name)
logger = netsvc.Logger()
db = sql_db.db_connect('template1')
cr = db.cursor()
@ -182,12 +183,10 @@ class db(netsvc.ExportService):
try:
cr.execute('DROP DATABASE "%s"' % db_name)
except Exception, e:
logger.notifyChannel("web-services", netsvc.LOG_ERROR,
'DROP DB: %s failed:\n%s' % (db_name, e))
_logger.error('DROP DB: %s failed:\n%s', db_name, e)
raise Exception("Couldn't drop database %s: %s" % (db_name, e))
else:
logger.notifyChannel("web-services", netsvc.LOG_INFO,
'DROP DB: %s' % (db_name))
_logger.info('DROP DB: %s', db_name)
finally:
cr.close()
return True
@ -207,7 +206,6 @@ class db(netsvc.ExportService):
os.environ['PGPASSWORD'] = ''
def exp_dump(self, db_name):
logger = netsvc.Logger()
try:
self._set_pg_psw_env_var()
cmd = ['pg_dump', '--format=c', '--no-owner']
@ -225,27 +223,23 @@ class db(netsvc.ExportService):
res = stdout.close()
if not data or res:
logger.notifyChannel("web-services", netsvc.LOG_ERROR,
_logger.error(
'DUMP DB: %s failed! Please verify the configuration of the database password on the server. '\
'It should be provided as a -w <PASSWD> command-line option, or as `db_password` in the '\
'server configuration file.\n %s' % (db_name, data))
raise Exception, "Couldn't dump database"
logger.notifyChannel("web-services", netsvc.LOG_INFO,
'DUMP DB successful: %s' % (db_name))
_logger.info('DUMP DB successful: %s', db_name)
return base64.encodestring(data)
finally:
self._unset_pg_psw_env_var()
def exp_restore(self, db_name, data):
logger = netsvc.Logger()
try:
self._set_pg_psw_env_var()
if self.exp_db_exist(db_name):
logger.notifyChannel("web-services", netsvc.LOG_WARNING,
'RESTORE DB: %s already exists' % (db_name,))
_logger.warning('RESTORE DB: %s already exists', db_name)
raise Exception, "Database already exists"
self._create_empty_database(db_name)
@ -274,8 +268,7 @@ class db(netsvc.ExportService):
res = stdout.close()
if res:
raise Exception, "Couldn't restore database"
logger.notifyChannel("web-services", netsvc.LOG_INFO,
'RESTORE DB: %s' % (db_name))
_logger.info('RESTORE DB: %s', db_name)
return True
finally:
@ -284,7 +277,6 @@ class db(netsvc.ExportService):
def exp_rename(self, old_name, new_name):
openerp.modules.registry.RegistryManager.delete(old_name)
sql_db.close_db(old_name)
logger = netsvc.Logger()
db = sql_db.db_connect('template1')
cr = db.cursor()
@ -293,16 +285,14 @@ class db(netsvc.ExportService):
try:
cr.execute('ALTER DATABASE "%s" RENAME TO "%s"' % (old_name, new_name))
except Exception, e:
logger.notifyChannel("web-services", netsvc.LOG_ERROR,
'RENAME DB: %s -> %s failed:\n%s' % (old_name, new_name, e))
_logger.error('RENAME DB: %s -> %s failed:\n%s', old_name, new_name, e)
raise Exception("Couldn't rename database %s to %s: %s" % (old_name, new_name, e))
else:
fs = os.path.join(tools.config['root_path'], 'filestore')
if os.path.exists(os.path.join(fs, old_name)):
os.rename(os.path.join(fs, old_name), os.path.join(fs, new_name))
logger.notifyChannel("web-services", netsvc.LOG_INFO,
'RENAME DB: %s -> %s' % (old_name, new_name))
_logger.info('RENAME DB: %s -> %s', old_name, new_name)
finally:
cr.close()
return True
@ -359,10 +349,9 @@ class db(netsvc.ExportService):
from openerp.osv.orm import except_orm
from openerp.osv.osv import except_osv
l = netsvc.Logger()
for db in databases:
try:
l.notifyChannel('migration', netsvc.LOG_INFO, 'migrate database %s' % (db,))
_logger.info('migrate database %s', db)
tools.config['update']['base'] = True
pooler.restart_pool(db, force_demo=False, update_module=True)
except except_orm, inst:
@ -370,14 +359,11 @@ class db(netsvc.ExportService):
except except_osv, inst:
netsvc.abort_response(1, inst.name, 'warning', inst.value)
except Exception:
import traceback
tb_s = reduce(lambda x, y: x+y, traceback.format_exception( sys.exc_type, sys.exc_value, sys.exc_traceback))
l.notifyChannel('web-services', netsvc.LOG_ERROR, tb_s)
_logger.exception('Exception in migrate_databases:')
raise
return True
class common(netsvc.ExportService):
_logger = logging.getLogger('web-services')
def __init__(self,name="common"):
netsvc.ExportService.__init__(self,name)
@ -402,7 +388,7 @@ class common(netsvc.ExportService):
# the res.users model
res = security.login(db, login, password)
msg = res and 'successful login' or 'bad login or password'
self._logger.info("%s from '%s' using database '%s'", msg, login, db.lower())
_logger.info("%s from '%s' using database '%s'", msg, login, db.lower())
return res or False
def exp_authenticate(self, db, login, password, user_agent_env):
@ -449,7 +435,6 @@ GNU Public Licence.
def exp_get_migration_scripts(self, contract_id, contract_password):
l = netsvc.Logger()
import openerp.tools.maintenance as tm
try:
rc = tm.remote_contract(contract_id, contract_password)
@ -458,7 +443,7 @@ GNU Public Licence.
if rc.status != 'full':
raise tm.RemoteContractException('Can not get updates for a partial contract')
l.notifyChannel('migration', netsvc.LOG_INFO, 'starting migration with contract %s' % (rc.name,))
_logger.info('starting migration with contract %s', rc.name)
zips = rc.retrieve_updates(rc.id, openerp.modules.get_modules_with_version())
@ -466,12 +451,12 @@ GNU Public Licence.
backup_directory = os.path.join(tools.config['root_path'], 'backup', time.strftime('%Y-%m-%d-%H-%M'))
if zips and not os.path.isdir(backup_directory):
l.notifyChannel('migration', netsvc.LOG_INFO, 'create a new backup directory to \
store the old modules: %s' % (backup_directory,))
_logger.info('create a new backup directory to \
store the old modules: %s', backup_directory)
os.makedirs(backup_directory)
for module in zips:
l.notifyChannel('migration', netsvc.LOG_INFO, 'upgrade module %s' % (module,))
_logger.info('upgrade module %s', module)
mp = openerp.modules.get_module_path(module)
if mp:
if os.path.isdir(mp):
@ -488,7 +473,7 @@ GNU Public Licence.
try:
base64_decoded = base64.decodestring(zips[module])
except Exception:
l.notifyChannel('migration', netsvc.LOG_ERROR, 'unable to read the module %s' % (module,))
_logger.error('unable to read the module %s', module)
raise
zip_contents = StringIO(base64_decoded)
@ -497,13 +482,13 @@ GNU Public Licence.
try:
tools.extract_zip_file(zip_contents, tools.config['addons_path'] )
except Exception:
l.notifyChannel('migration', netsvc.LOG_ERROR, 'unable to extract the module %s' % (module, ))
_logger.error('unable to extract the module %s', module)
rmtree(module)
raise
finally:
zip_contents.close()
except Exception:
l.notifyChannel('migration', netsvc.LOG_ERROR, 'restore the previous version of the module %s' % (module, ))
_logger.error('restore the previous version of the module %s', module)
nmp = os.path.join(backup_directory, module)
if os.path.isdir(nmp):
copytree(nmp, tools.config['addons_path'])
@ -515,9 +500,7 @@ GNU Public Licence.
except tm.RemoteContractException, e:
netsvc.abort_response(1, 'Migration Error', 'warning', str(e))
except Exception, e:
import traceback
tb_s = reduce(lambda x, y: x+y, traceback.format_exception( sys.exc_type, sys.exc_value, sys.exc_traceback))
l.notifyChannel('migration', netsvc.LOG_ERROR, tb_s)
_logger.exception('Exception in get_migration_script:')
raise
def exp_get_server_environment(self):
@ -548,12 +531,11 @@ GNU Public Licence.
return tools.config.get('login_message', False)
def exp_set_loglevel(self, loglevel, logger=None):
l = netsvc.Logger()
l.set_loglevel(int(loglevel), logger)
# TODO Previously, the level was set on the now deprecated
# `openerp.netsvc.Logger` class.
return True
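For reference, a hedged sketch of what a standard-library replacement for the removed `netsvc.Logger.set_loglevel` could look like (an assumption for illustration, not what the gutted method above does):

    import logging

    def set_loglevel(loglevel, logger=None):
        # None or '' addresses the root logger.
        logging.getLogger(logger or '').setLevel(int(loglevel))

    set_loglevel(logging.DEBUG, 'openerp.sql_db')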
def exp_get_stats(self):
import threading
res = "OpenERP server: %d threads\n" % threading.active_count()
res += netsvc.Server.allStats()
return res
@ -568,9 +550,8 @@ GNU Public Licence.
return os.times()
def exp_get_sqlcount(self):
logger = logging.getLogger('db.cursor')
if not logger.isEnabledFor(logging.DEBUG_SQL):
logger.warning("Counters of SQL will not be reliable unless DEBUG_SQL is set at the server's config.")
if not logging.getLogger('openerp.sql_db').isEnabledFor(logging.DEBUG):
_logger.warning("Counters of SQL will not be reliable unless logger openerp.sql_db is set to level DEBUG or higer.")
return sql_db.sql_counter
@ -686,8 +667,6 @@ class report_spool(netsvc.ExportService):
self._reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
cr = pooler.get_db(db).cursor()
import traceback
import sys
try:
obj = netsvc.LocalService('report.'+object)
(result, format) = obj.create(cr, uid, ids, datas, context)
@ -699,14 +678,11 @@ class report_spool(netsvc.ExportService):
self._reports[id]['state'] = True
except Exception, exception:
tb = sys.exc_info()
tb_s = "".join(traceback.format_exception(*tb))
logger = netsvc.Logger()
logger.notifyChannel('web-services', netsvc.LOG_ERROR,
'Exception: %s\n%s' % (str(exception), tb_s))
_logger.exception('Exception: %s', exception)
if hasattr(exception, 'name') and hasattr(exception, 'value'):
self._reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
else:
tb = sys.exc_info()
self._reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
self._reports[id]['state'] = True
cr.commit()
@ -729,8 +705,6 @@ class report_spool(netsvc.ExportService):
def go(id, uid, ids, datas, context):
cr = pooler.get_db(db).cursor()
import traceback
import sys
try:
obj = netsvc.LocalService('report.'+object)
(result, format) = obj.create(cr, uid, ids, datas, context)
@ -741,15 +715,11 @@ class report_spool(netsvc.ExportService):
self._reports[id]['format'] = format
self._reports[id]['state'] = True
except Exception, exception:
tb = sys.exc_info()
tb_s = "".join(traceback.format_exception(*tb))
logger = netsvc.Logger()
logger.notifyChannel('web-services', netsvc.LOG_ERROR,
'Exception: %s\n%s' % (str(exception), tb_s))
_logger.exception('Exception: %s', exception)
if hasattr(exception, 'name') and hasattr(exception, 'value'):
self._reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
else:
tb = sys.exc_info()
self._reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
self._reports[id]['state'] = True
cr.commit()


@ -32,10 +32,13 @@
usable in other projects, too.
"""
import logging
import SocketServer
from BaseHTTPServer import *
from SimpleHTTPServer import SimpleHTTPRequestHandler
_logger = logging.getLogger(__name__)
class AuthRequiredExc(Exception):
def __init__(self,atype,realm):
Exception.__init__(self)
@ -176,7 +179,7 @@ class FixSendError:
if message is None:
message = short
explain = long
self.log_error("code %d, message %s", code, message)
_logger.error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': explain})


@ -42,10 +42,11 @@ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ
from psycopg2.pool import PoolError
from psycopg2.psycopg1 import cursor as psycopg1cursor
from threading import currentThread
import warnings
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
_logger = logging.getLogger(__name__)
types_mapping = {
'date': (1082,),
'time': (1083,),
@ -139,7 +140,6 @@ class Cursor(object):
"""
IN_MAX = 1000 # decent limit on size of IN queries - guideline = Oracle limit
__logger = None
def check(f):
@wraps(f)
@ -153,14 +153,12 @@ class Cursor(object):
return wrapper
def __init__(self, pool, dbname, serialized=True):
if self.__class__.__logger is None:
self.__class__.__logger = logging.getLogger('db.cursor')
self.sql_from_log = {}
self.sql_into_log = {}
# default log level determined at cursor creation, could be
# overridden later for debugging purposes
self.sql_log = self.__logger.isEnabledFor(logging.DEBUG_SQL)
self.sql_log = _logger.isEnabledFor(logging.DEBUG)
self.sql_log_count = 0
self.__closed = True # avoid the call of close() (by __del__) if an exception
@ -196,15 +194,15 @@ class Cursor(object):
msg += "Cursor was created at %s:%s" % self.__caller
else:
msg += "Please enable sql debugging to trace the caller."
self.__logger.warn(msg)
_logger.warning(msg)
self._close(True)
@check
def execute(self, query, params=None, log_exceptions=None):
if '%d' in query or '%f' in query:
self.__logger.warn(query)
self.__logger.warn("SQL queries cannot contain %d or %f anymore. "
"Use only %s")
_logger.warning(query)
_logger.warning("SQL queries cannot contain %d or %f anymore. "
"Use only %s")
if self.sql_log:
now = mdt.now()
@ -214,18 +212,18 @@ class Cursor(object):
res = self._obj.execute(query, params)
except psycopg2.ProgrammingError, pe:
if (self._default_log_exceptions if log_exceptions is None else log_exceptions):
self.__logger.error("Programming error: %s, in query %s", pe, query)
_logger.error("Programming error: %s, in query %s", pe, query)
raise
except Exception:
if (self._default_log_exceptions if log_exceptions is None else log_exceptions):
self.__logger.exception("bad query: %s", self._obj.query or query)
_logger.exception("bad query: %s", self._obj.query or query)
raise
if self.sql_log:
delay = mdt.now() - now
delay = delay.seconds * 1E6 + delay.microseconds
self.__logger.log(logging.DEBUG_SQL, "query: %s", self._obj.query)
_logger.debug("query: %s", self._obj.query)
self.sql_log_count+=1
res_from = re_from.match(query.lower())
if res_from:
@ -256,16 +254,16 @@ class Cursor(object):
if sqllogs[type]:
sqllogitems = sqllogs[type].items()
sqllogitems.sort(key=lambda k: k[1][1])
self.__logger.log(logging.DEBUG_SQL, "SQL LOG %s:", type)
_logger.debug("SQL LOG %s:", type)
sqllogitems.sort(lambda x,y: cmp(x[1][0], y[1][0]))
for r in sqllogitems:
delay = timedelta(microseconds=r[1][1])
self.__logger.log(logging.DEBUG_SQL, "table: %s: %s/%s",
_logger.debug("table: %s: %s/%s",
r[0], delay, r[1][0])
sum+= r[1][1]
sqllogs[type].clear()
sum = timedelta(microseconds=sum)
self.__logger.log(logging.DEBUG_SQL, "SUM %s:%s/%d [%d]",
_logger.debug("SUM %s:%s/%d [%d]",
type, sum, self.sql_log_count, sql_counter)
sqllogs[type].clear()
process('from')
@ -359,7 +357,6 @@ class ConnectionPool(object):
The connections are *not* automatically closed. Only a close_db()
can trigger that.
"""
__logger = logging.getLogger('db.connection_pool')
def locked(fun):
@wraps(fun)
@ -383,7 +380,7 @@ class ConnectionPool(object):
return "ConnectionPool(used=%d/count=%d/max=%d)" % (used, count, self._maxconn)
def _debug(self, msg, *args):
self.__logger.log(logging.DEBUG_SQL, ('%r ' + msg), self, *args)
_logger.debug(('%r ' + msg), self, *args)
@locked
def borrow(self, dsn):
@ -399,7 +396,7 @@ class ConnectionPool(object):
delattr(cnx, 'leaked')
self._connections.pop(i)
self._connections.append((cnx, False))
self.__logger.warn('%r: Free leaked connection to %r', self, cnx.dsn)
_logger.warning('%r: Free leaked connection to %r', self, cnx.dsn)
for i, (cnx, used) in enumerate(self._connections):
if not used and dsn_are_equals(cnx.dsn, dsn):
@ -423,7 +420,7 @@ class ConnectionPool(object):
try:
result = psycopg2.connect(dsn=dsn, connection_factory=PsycoConnection)
except psycopg2.Error:
self.__logger.exception('Connection to the database failed')
_logger.exception('Connection to the database failed')
raise
self._connections.append((result, True))
self._debug('Create new connection')
@ -447,7 +444,7 @@ class ConnectionPool(object):
@locked
def close_all(self, dsn):
self.__logger.info('%r: Close all connections to %r', self, dsn)
_logger.info('%r: Close all connections to %r', self, dsn)
for i, (cnx, used) in tools.reverse_enumerate(self._connections):
if dsn_are_equals(cnx.dsn, dsn):
cnx.close()
@ -457,7 +454,6 @@ class ConnectionPool(object):
class Connection(object):
""" A lightweight instance of a connection to postgres
"""
__logger = logging.getLogger('db.connection')
def __init__(self, pool, dbname):
self.dbname = dbname
@ -465,7 +461,7 @@ class Connection(object):
def cursor(self, serialized=True):
cursor_type = serialized and 'serialized ' or ''
self.__logger.log(logging.DEBUG_SQL, 'create %scursor to %r', cursor_type, self.dbname)
_logger.debug('create %scursor to %r', cursor_type, self.dbname)
return Cursor(self._pool, self.dbname, serialized=serialized)
# serialized_cursor is deprecated - cursors are serialized by default
@ -474,8 +470,7 @@ class Connection(object):
def __nonzero__(self):
"""Check if connection is possible"""
try:
warnings.warn("You use an expensive function to test a connection.",
DeprecationWarning, stacklevel=1)
_logger.warning("__nonzero__() is deprecated. (It is too expensive to test a connection.)")
cr = self.cursor()
cr.close()
return True
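The `sql_log` assignment above illustrates the gating idiom that replaces the removed custom DEBUG_SQL level: expensive logging work is guarded by a standard-level check on the module logger. A minimal sketch:

    import logging

    _logger = logging.getLogger('openerp.sql_db')

    def log_query(query):
        # isEnabledFor() is a cheap test; formatting and bookkeeping only
        # run when DEBUG records would actually be emitted.
        if _logger.isEnabledFor(logging.DEBUG):
            _logger.debug("query: %s", query)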


@ -19,10 +19,14 @@
#
##############################################################################
import logging
from translate import _
_logger = logging.getLogger(__name__)
#-------------------------------------------------------------
#ENGLISH
#-------------------------------------------------------------
from translate import _
to_19 = ( 'Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six',
'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen',
@ -105,11 +109,11 @@ def amount_to_text(nbr, lang='en', currency='euro'):
"""
import openerp.loglevels as loglevels
# if nbr > 10000000:
# netsvc.Logger().notifyChannel('translate', netsvc.LOG_WARNING, _("Number too large '%d', can not translate it"))
# _logger.warning(_("Number too large '%d', can not translate it"))
# return str(nbr)
if not _translate_funcs.has_key(lang):
loglevels.Logger().notifyChannel('translate', loglevels.LOG_WARNING, _("no translation function found for lang: '%s'" % (lang,)))
_logger.warning(_("no translation function found for lang: '%s'"), lang)
#TODO: (default should be en) same as above
lang = 'en'
return _translate_funcs[lang](abs(nbr), currency)


@ -82,8 +82,7 @@ class configmanager(object):
self.config_file = fname
self.has_ssl = check_ssl()
self._LOGLEVELS = dict([(getattr(loglevels, 'LOG_%s' % x), getattr(logging, x))
for x in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'TEST', 'DEBUG', 'DEBUG_RPC', 'DEBUG_SQL', 'DEBUG_RPC_ANSWER','NOTSET')])
self._LOGLEVELS = dict([(getattr(loglevels, 'LOG_%s' % x), getattr(logging, x)) for x in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'TEST', 'DEBUG', 'NOTSET')])
version = "%s %s" % (release.description, release.version)
self.parser = parser = optparse.OptionParser(version=version, option_class=MyOption)
@ -178,13 +177,20 @@ class configmanager(object):
# Logging Group
group = optparse.OptionGroup(parser, "Logging Configuration")
group.add_option("--logfile", dest="logfile", help="file where the server log will be stored")
group.add_option("--no-logrotate", dest="logrotate", action="store_false", my_default=True,
help="do not rotate the logfile")
group.add_option("--syslog", action="store_true", dest="syslog",
my_default=False, help="Send the log to the syslog server")
group.add_option('--log-level', dest='log_level', type='choice', choices=self._LOGLEVELS.keys(),
my_default='info',
help='specify the level of the logging. Accepted values: ' + str(self._LOGLEVELS.keys()))
group.add_option("--no-logrotate", dest="logrotate", action="store_false", my_default=True, help="do not rotate the logfile")
group.add_option("--syslog", action="store_true", dest="syslog", my_default=False, help="Send the log to the syslog server")
group.add_option('--log-handler', action="append", default=[':INFO'], my_default=[':INFO'], metavar="PREFIX:LEVEL", help='setup a handler at LEVEL for a given PREFIX. An empty PREFIX indicates the root logger. This option can be repeated. Example: "openerp.orm:DEBUG" or "werkzeug:CRITICAL" (default: ":INFO")')
group.add_option('--log-request', action="append_const", dest="log_handler", const="openerp.netsvc.rpc.request:DEBUG", help='shortcut for --log-handler=openerp.netsvc.rpc.request:DEBUG')
group.add_option('--log-response', action="append_const", dest="log_handler", const="openerp.netsvc.rpc.response:DEBUG", help='shortcut for --log-handler=openerp.netsvc.rpc.response:DEBUG')
group.add_option('--log-web', action="append_const", dest="log_handler", const="openerp.addons.web.common.http:DEBUG", help='shortcut for --log-handler=openerp.addons.web.common.http:DEBUG')
group.add_option('--log-sql', action="append_const", dest="log_handler", const="openerp.sql_db:DEBUG", help='shortcut for --log-handler=openerp.sql_db:DEBUG')
# For backward-compatibility, map the old log levels to something
# quite close.
levels = ['info', 'debug_rpc', 'warn', 'test', 'critical',
'debug_sql', 'error', 'debug', 'debug_rpc_answer', 'notset']
group.add_option('--log-level', dest='log_level', type='choice', choices=levels,
my_default='info', help='specify the level of the logging. Accepted values: ' + str(levels) + ' (deprecated option).')
parser.add_option_group(group)
# SMTP Group
@ -361,6 +367,7 @@ class configmanager(object):
if self.options['pidfile'] in ('None', 'False'):
self.options['pidfile'] = False
# if defined dont take the configfile value even if the defined value is None
keys = ['xmlrpc_interface', 'xmlrpc_port', 'db_name', 'db_user', 'db_password', 'db_host',
'db_port', 'db_template', 'logfile', 'pidfile', 'smtp_port', 'cache_timeout',
'email_from', 'smtp_server', 'smtp_user', 'smtp_password',
@ -368,7 +375,7 @@ class configmanager(object):
'netrpc', 'xmlrpc', 'syslog', 'without_demo', 'timezone',
'xmlrpcs_interface', 'xmlrpcs_port', 'xmlrpcs',
'static_http_enable', 'static_http_document_root', 'static_http_url_prefix',
'secure_cert_file', 'secure_pkey_file', 'dbfilter'
'secure_cert_file', 'secure_pkey_file', 'dbfilter', 'log_handler', 'log_level'
]
for arg in keys:
@ -379,6 +386,7 @@ class configmanager(object):
elif isinstance(self.options[arg], basestring) and self.casts[arg].type in optparse.Option.TYPE_CHECKER:
self.options[arg] = optparse.Option.TYPE_CHECKER[self.casts[arg].type](self.casts[arg], arg, self.options[arg])
# if defined but None take the configfile value
keys = [
'language', 'translate_out', 'translate_in', 'overwrite_existing_translations',
'debug_mode', 'smtp_ssl', 'load_language',
@ -402,11 +410,6 @@ class configmanager(object):
else:
self.options['assert_exit_level'] = self._LOGLEVELS.get(self.options['assert_exit_level']) or int(self.options['assert_exit_level'])
if opt.log_level:
self.options['log_level'] = self._LOGLEVELS[opt.log_level]
else:
self.options['log_level'] = self._LOGLEVELS.get(self.options['log_level']) or int(self.options['log_level'])
self.options['root_path'] = os.path.abspath(os.path.expanduser(os.path.expandvars(os.path.dirname(openerp.__file__))))
if not self.options['addons_path'] or self.options['addons_path']=='None':
self.options['addons_path'] = os.path.join(self.options['root_path'], 'addons')
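The new --log-handler option takes repeatable PREFIX:LEVEL pairs. A rough sketch of how one pair maps onto the stdlib (an assumption for illustration; the actual handling code is not part of these hunks):

    import logging

    def apply_handler_spec(spec):
        prefix, _, level = spec.rpartition(':')
        # An empty PREFIX addresses the root logger, per the option's help.
        logging.getLogger(prefix).setLevel(getattr(logging, level))

    apply_handler_spec('openerp.sql_db:DEBUG')  # what --log-sql expands to
    apply_handler_spec(':INFO')                 # the documented default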


@ -29,10 +29,13 @@ import re
# for eval context:
import time
import openerp.release as release
_logger = logging.getLogger(__name__)
try:
import pytz
except:
logging.getLogger("init").warning('could not find pytz library, please install it')
_logger.warning('could not find pytz library, please install it')
class pytzclass(object):
all_timezones=[]
pytz=pytzclass()
@ -135,8 +138,7 @@ def _eval_xml(self, node, pool, cr, uid, idref, context=None):
try:
return unsafe_eval(a_eval, idref2)
except Exception:
logger = logging.getLogger('init')
logger.warning('could not eval(%s) for %s in %s' % (a_eval, node.get('name'), context), exc_info=True)
_logger.warning('could not eval(%s) for %s in %s', a_eval, node.get('name'), context, exc_info=True)
return ""
if t == 'xml':
def _process(s, idref):
@ -228,7 +230,6 @@ class assertion_report(object):
return res
class xml_import(object):
__logger = logging.getLogger('tools.convert.xml_import')
@staticmethod
def nodeattr2bool(node, attr, default=False):
if not node.get(attr):
@ -258,7 +259,7 @@ class xml_import(object):
# client-side, so in that case we keep the original context string
# as it is. We also log it, just in case.
context = ctx
logging.getLogger("init").debug('Context value (%s) for element with id "%s" or its data node does not parse '\
_logger.debug('Context value (%s) for element with id "%s" or its data node does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
ctx, node.get('id','n/a'), exc_info=True)
return context
@@ -281,7 +282,7 @@ form: module.record_id""" % (xml_id,)
assert modcnt == 1, """The ID "%s" refers to an uninstalled module""" % (xml_id,)
if len(id) > 64:
self.logger.error('id: %s is to long (max: 64)', id)
_logger.error('id: %s is too long (max: 64)', id)
def _tag_delete(self, cr, rec, data_node=None):
d_model = rec.get("model",'')
@@ -486,9 +487,9 @@ form: module.record_id""" % (xml_id,)
# Some domains contain references that are only valid at runtime at
# client-side, so in that case we keep the original domain string
# as it is. We also log it, just in case.
logging.getLogger("init").debug('Domain value (%s) for element with id "%s" does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
domain, xml_id or 'n/a', exc_info=True)
_logger.debug('Domain value (%s) for element with id "%s" does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
domain, xml_id or 'n/a', exc_info=True)
res = {
'name': name,
'type': type,
@@ -593,7 +594,7 @@ form: module.record_id""" % (xml_id,)
pid = res[0]
else:
# the menuitem doesn't exist but we are in a branch (not a leaf)
self.logger.warning('Warning no ID for submenu %s of menu %s !', menu_elem, str(m_l))
_logger.warning('No ID for submenu %s of menu %s!', menu_elem, str(m_l))
pid = self.pool.get('ir.ui.menu').create(cr, self.uid, {'parent_id' : pid, 'name' : menu_elem})
values['parent_id'] = pid
else:
@@ -733,7 +734,7 @@ form: module.record_id""" % (xml_id,)
' obtained count: %d\n' \
% (rec_string, count, len(ids))
sevval = getattr(logging, severity.upper())
self.logger.log(sevval, msg)
_logger.log(sevval, msg)
if sevval >= config['assert_exit_level']:
# TODO: define a dedicated exception
raise Exception('Severe assertion failure')
@@ -765,7 +766,7 @@ form: module.record_id""" % (xml_id,)
' obtained value: %r\n' \
% (rec_string, etree.tostring(test), expected_value, expression_value)
sevval = getattr(logging, severity.upper())
self.logger.log(sevval, msg)
_logger.log(sevval, msg)
if sevval >= config['assert_exit_level']:
# TODO: define a dedicated exception
raise Exception('Severe assertion failure')
@@ -876,11 +877,11 @@ form: module.record_id""" % (xml_id,)
def parse(self, de):
if not de.tag in ['terp', 'openerp']:
self.logger.error("Mismatch xml format")
_logger.error("Mismatch xml format")
raise Exception( "Mismatch xml format: only terp or openerp as root tag" )
if de.tag == 'terp':
self.logger.warning("The tag <terp/> is deprecated, use <openerp/>")
_logger.warning("The tag <terp/> is deprecated, use <openerp/>")
for n in de.findall('./data'):
for rec in n:
@@ -888,17 +889,16 @@ form: module.record_id""" % (xml_id,)
try:
self._tags[rec.tag](self.cr, rec, n)
except:
self.__logger.error('Parse error in %s:%d: \n%s',
rec.getroottree().docinfo.URL,
rec.sourceline,
etree.tostring(rec).strip(), exc_info=True)
_logger.error('Parse error in %s:%d: \n%s',
rec.getroottree().docinfo.URL,
rec.sourceline,
etree.tostring(rec).strip(), exc_info=True)
self.cr.rollback()
raise
return True
def __init__(self, cr, module, idref, mode, report=None, noupdate=False):
self.logger = logging.getLogger('init')
self.mode = mode
self.module = module
self.cr = cr
@@ -931,7 +931,6 @@ def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
encoding: utf-8'''
if not idref:
idref={}
logger = logging.getLogger('init')
model = ('.'.join(fname.split('.')[:-1]).split('-'))[0]
#remove folder path from model
head, model = os.path.split(model)
@@ -956,7 +955,7 @@ def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
reader.next()
if not (mode == 'init' or 'id' in fields):
logger.error("Import specification does not contain 'id' and we are in init mode, Cannot continue.")
_logger.error("Import specification does not contain 'id' and we are in init mode, Cannot continue.")
return
uid = 1
@@ -967,7 +966,7 @@ def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
try:
datas.append(map(lambda x: misc.ustr(x), line))
except:
logger.error("Cannot import the line: %s", line)
_logger.error("Cannot import the line: %s", line)
result, rows, warning_msg, dummy = pool.get(model).import_data(cr, uid, fields, datas,mode, module, noupdate, filename=fname_partial)
if result < 0:
# Report failed import and abort module install
@@ -988,9 +987,8 @@ def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=Fa
try:
relaxng.assert_(doc)
except Exception:
logger = loglevels.Logger()
logger.notifyChannel('init', loglevels.LOG_ERROR, 'The XML file does not fit the required schema !')
logger.notifyChannel('init', loglevels.LOG_ERROR, misc.ustr(relaxng.error_log.last_error))
_logger.error('The XML file does not fit the required schema !')
_logger.error(misc.ustr(relaxng.error_log.last_error))
raise
if idref is None:
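
All the removed per-channel loggers ('init', 'tools.convert.xml_import', ...) collapse into one module-level `_logger = logging.getLogger(__name__)`. A minimal sketch of why this matters: `__name__` is a dotted path under the top-level `openerp` package, so every module logger is a child of the `openerp` logger and inherits one central configuration:

import logging

# In openerp/tools/convert.py, __name__ == 'openerp.tools.convert',
# which makes this logger a descendant of the 'openerp' logger:
_logger = logging.getLogger(__name__)

# One call on the ancestor now governs every module logger beneath it:
logging.getLogger('openerp').setLevel(logging.INFO)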


@@ -37,7 +37,6 @@ import socket
import sys
import threading
import time
import warnings
import zipfile
from collections import defaultdict
from datetime import datetime
@@ -66,7 +65,7 @@ from cache import *
# These are moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode
_logger = logging.getLogger('tools')
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
@@ -386,7 +385,7 @@ def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=Non
smtp_server=smtp_server, smtp_port=smtp_port, smtp_user=smtp_user, smtp_password=smtp_password,
smtp_encryption=('ssl' if ssl else None), debug=debug)
except Exception:
_log.exception("tools.email_send failed to deliver email")
_logger.exception("tools.email_send failed to deliver email")
return False
finally:
cr.close()
@@ -706,7 +705,7 @@ def logged(f):
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
loglevels.Logger().notifyChannel('logged', loglevels.LOG_DEBUG, '\n'.join(vector))
_logger.debug('\n'.join(vector))
return res
return wrapper
@@ -876,8 +875,8 @@ def detect_server_timezone():
try:
import pytz
except Exception:
loglevels.Logger().notifyChannel("detect_server_timezone", loglevels.LOG_WARNING,
"Python pytz module is not available. Timezone will be set to UTC by default.")
_logger.warning("Python pytz module is not available. "
"Timezone will be set to UTC by default.")
return 'UTC'
# Option 1: the configuration option (did not exist before, so no backwards compatibility issue)
@@ -910,15 +909,14 @@ def detect_server_timezone():
if value:
try:
tz = pytz.timezone(value)
loglevels.Logger().notifyChannel("detect_server_timezone", loglevels.LOG_INFO,
"Using timezone %s obtained from %s." % (tz.zone,source))
_logger.info("Using timezone %s obtained from %s.", tz.zone, source)
return value
except pytz.UnknownTimeZoneError:
loglevels.Logger().notifyChannel("detect_server_timezone", loglevels.LOG_WARNING,
"The timezone specified in %s (%s) is invalid, ignoring it." % (source,value))
_logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
loglevels.Logger().notifyChannel("detect_server_timezone", loglevels.LOG_WARNING,
"No valid timezone could be detected, using default UTC timezone. You can specify it explicitly with option 'timezone' in the server configuration.")
_logger.warning("No valid timezone could be detected, using default UTC "
"timezone. You can specify it explicitly with option 'timezone' in "
"the server configuration.")
return 'UTC'
def get_server_timezone():
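
The notifyChannel() calls removed above belong to the legacy loglevels wrapper; the replacements use stdlib logging directly. The timezone validation they surround reduces to this small check — a sketch assuming pytz is installed:

import logging
import pytz

_logger = logging.getLogger(__name__)

def validate_timezone(value, source):
    # Accept the value only if pytz recognizes it; log the outcome.
    try:
        tz = pytz.timezone(value)
        _logger.info("Using timezone %s obtained from %s.", tz.zone, source)
        return value
    except pytz.UnknownTimeZoneError:
        _logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
        return None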


@@ -70,7 +70,7 @@ _SAFE_OPCODES = _EXPR_OPCODES.union(set(opmap[x] for x in [
'POP_JUMP_IF_TRUE', 'SETUP_EXCEPT', 'END_FINALLY'
] if x in opmap))
_logger = logging.getLogger('safe_eval')
_logger = logging.getLogger(__name__)
def _get_opcodes(codeobj):
"""_get_opcodes(codeobj) -> [opcodes]
@@ -206,8 +206,9 @@ def safe_eval(expr, globals_dict=None, locals_dict=None, mode="eval", nocopy=Fal
# isinstance() does not work below, we want *exactly* the dict class
if (globals_dict is not None and type(globals_dict) is not dict) \
or (locals_dict is not None and type(locals_dict) is not dict):
logging.getLogger('safe_eval').warning('Looks like you are trying to pass a dynamic environment,"\
"you should probably pass nocopy=True to safe_eval()')
_logger.warning(
"Looks like you are trying to pass a dynamic environment, "
"you should probably pass nocopy=True to safe_eval().")
globals_dict = dict(globals_dict)
if locals_dict is not None:
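
The warning above exists because safe_eval() copies globals_dict and locals_dict by default, so code evaluated in a copied environment cannot hand results back to the caller. A hypothetical illustration, assuming the safe_eval shown in this file with mode="exec" support:

env = {'result': None}

# With the default copy, the assignment would land in a private copy of
# `env` and be lost; nocopy=True lets the caller observe the mutation.
safe_eval("result = 40 + 2", env, mode="exec", nocopy=True)
assert env['result'] == 42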


@@ -34,15 +34,13 @@ from subprocess import Popen, PIPE
import os
import tempfile
_logger = logging.getLogger(__name__)
def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None):
""" Try to render a report <rname> with contents of ids
This function should also check for common pitfalls of reports.
"""
if our_module:
log = logging.getLogger('tests.%s' % our_module)
else:
log = logging.getLogger('tools.test_reports')
if data is None:
data = {}
if context is None:
@@ -51,7 +49,7 @@ def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None):
rname_s = rname[7:]
else:
rname_s = rname
log.log(netsvc.logging.TEST, " - Trying %s.create(%r)", rname, ids)
_logger.log(netsvc.logging.TEST, " - Trying %s.create(%r)", rname, ids)
res = netsvc.LocalService(rname).create(cr, uid, ids, data, context)
if not isinstance(res, tuple):
raise RuntimeError("Result of %s.create() should be a (data,format) tuple, now it is a %s" % \
@@ -64,7 +62,7 @@ def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None):
if tools.config['test_report_directory']:
file(os.path.join(tools.config['test_report_directory'], rname+ '.'+res_format), 'wb+').write(res_data)
log.debug("Have a %s report for %s, will examine it", res_format, rname)
_logger.debug("Have a %s report for %s, will examine it", res_format, rname)
if res_format == 'pdf':
if res_data[:5] != '%PDF-':
raise ValueError("Report %s produced a non-pdf header, %r" % (rname, res_data[:10]))
@@ -79,21 +77,21 @@ def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None):
res_text = tools.ustr(fp.read())
os.unlink(rfname)
except Exception:
log.debug("Unable to parse PDF report: install pdftotext to perform automated tests.")
_logger.debug("Unable to parse PDF report: install pdftotext to perform automated tests.")
if res_text is not False:
for line in res_text.split('\n'):
if ('[[' in line) or ('[ [' in line):
log.error("Report %s may have bad expression near: \"%s\".", rname, line[80:])
_logger.error("Report %s may have bad expression near: \"%s\".", rname, line[80:])
# TODO more checks, what else can be a sign of a faulty report?
elif res_format == 'foobar':
# TODO
pass
else:
log.warning("Report %s produced a \"%s\" chunk, cannot examine it", rname, res_format)
_logger.warning("Report %s produced a \"%s\" chunk, cannot examine it", rname, res_format)
return False
log.log(netsvc.logging.TEST, " + Report %s produced correctly.", rname)
_logger.log(netsvc.logging.TEST, " + Report %s produced correctly.", rname)
return True
def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
@@ -125,13 +123,9 @@ def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
# TODO context fill-up
pool = pooler.get_pool(cr.dbname)
if our_module:
log = logging.getLogger('tests.%s' % our_module)
else:
log = logging.getLogger('tools.test_reports')
def log_test(msg, *args):
log.log(netsvc.logging.TEST, " - " + msg, *args)
_logger.log(netsvc.logging.TEST, " - " + msg, *args)
datas = {}
if active_model:
@@ -195,7 +189,7 @@ def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
view_data.update(datas.get('form'))
if wiz_data:
view_data.update(wiz_data)
log.debug("View data is: %r", view_data)
_logger.debug("View data is: %r", view_data)
for fk, field in view_res.get('fields',{}).items():
# Default fields returns list of int, while at create()
@@ -237,7 +231,7 @@ def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
'weight': button_weight,
})
except Exception, e:
log.warning("Cannot resolve the view arch and locate the buttons!", exc_info=True)
_logger.warning("Cannot resolve the view arch and locate the buttons!", exc_info=True)
raise AssertionError(e.args[0])
if not datas['res_id']:
@@ -249,7 +243,7 @@ def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
raise AssertionError("view form doesn't have any buttons to press!")
buttons.sort(key=lambda b: b['weight'])
log.debug('Buttons are: %s', ', '.join([ '%s: %d' % (b['string'], b['weight']) for b in buttons]))
_logger.debug('Buttons are: %s', ', '.join([ '%s: %d' % (b['string'], b['weight']) for b in buttons]))
res = None
while buttons and not res:
@@ -262,12 +256,12 @@ def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
#there we are! press the button!
fn = getattr(pool.get(datas['res_model']), b['name'])
if not fn:
log.error("The %s model doesn't have a %s attribute!", datas['res_model'], b['name'])
_logger.error("The %s model doesn't have a %s attribute!", datas['res_model'], b['name'])
continue
res = fn(cr, uid, [datas['res_id'],], context)
break
else:
log.warning("in the \"%s\" form, the \"%s\" button has unknown type %s",
_logger.warning("in the \"%s\" form, the \"%s\" button has unknown type %s",
action_name, b['string'], b['type'])
return res
@@ -293,7 +287,7 @@ def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
loop += 1
# This part tries to emulate the loop of the Gtk client
if loop > 100:
log.error("Passed %d loops, giving up", loop)
_logger.error("Passed %d loops, giving up", loop)
raise Exception("Too many loops at action")
log_test("it is an %s action at loop #%d", action.get('type', 'unknown'), loop)
result = _exec_action(action, datas, context)
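
The TEST level used throughout this file (netsvc.logging.TEST) is a custom level the server registers on the stdlib logging module; a sketch of how such a level is declared — the numeric value 25 is an assumption for illustration, not the server's actual constant:

import logging

TEST = 25  # assumed value: between INFO (20) and WARNING (30)
logging.addLevelName(TEST, 'TEST')
logging.TEST = TEST  # what makes `netsvc.logging.TEST` resolvable

logging.getLogger(__name__).log(TEST, " + Report rendered correctly.")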


@@ -44,6 +44,8 @@ from misc import UpdateableStr
from misc import SKIPPED_ELEMENT_TYPES
import osutil
_logger = logging.getLogger(__name__)
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
@@ -153,8 +155,6 @@ def translate(cr, name, source_type, lang, source=None):
res = res_trans and res_trans[0] or False
return res
logger = logging.getLogger('translate')
class GettextAlias(object):
def _get_db(self):
@@ -216,11 +216,11 @@ class GettextAlias(object):
pool = pooler.get_pool(cr.dbname)
res = pool.get('ir.translation')._get_source(cr, 1, None, ('code','sql_constraint'), lang, source)
else:
logger.debug('no context cursor detected, skipping translation for "%r"', source)
_logger.debug('no context cursor detected, skipping translation for "%r"', source)
else:
logger.debug('no translation language detected, skipping translation for "%r" ', source)
_logger.debug('no translation language detected, skipping translation for "%r" ', source)
except Exception:
logger.debug('translation went wrong for "%r", skipped', source)
_logger.debug('translation went wrong for "%r", skipped', source)
# if so, double-check the root/base translations filenames
finally:
if cr and is_new_cr:
@@ -250,11 +250,10 @@ def unquote(str):
# class to handle po files
class TinyPoFile(object):
def __init__(self, buffer):
self.logger = logging.getLogger('i18n')
self.buffer = buffer
def warn(self, msg, *args):
self.logger.warning(msg, *args)
_logger.warning(msg, *args)
def __iter__(self):
self.buffer.seek(0)
@@ -532,7 +531,6 @@ def in_modules(object_name, modules):
return module in modules
def trans_generate(lang, modules, cr):
logger = logging.getLogger('i18n')
dbname = cr.dbname
pool = pooler.get_pool(dbname)
@@ -579,12 +577,12 @@ def trans_generate(lang, modules, cr):
xml_name = "%s.%s" % (module, encode(xml_name))
if not pool.get(model):
logger.error("Unable to find object %r", model)
_logger.error("Unable to find object %r", model)
continue
exists = pool.get(model).exists(cr, uid, res_id)
if not exists:
logger.warning("Unable to find object %r with id %d", model, res_id)
_logger.warning("Unable to find object %r with id %d", model, res_id)
continue
obj = pool.get(model).browse(cr, uid, res_id)
@@ -612,7 +610,7 @@ def trans_generate(lang, modules, cr):
# export fields
if not result.has_key('fields'):
logger.warning("res has no fields: %r", result)
_logger.warning("res has no fields: %r", result)
continue
for field_name, field_def in result['fields'].iteritems():
res_name = name + ',' + field_name
@@ -641,7 +639,7 @@ def trans_generate(lang, modules, cr):
try:
field_name = encode(obj.name)
except AttributeError, exc:
logger.error("name error in %s: %s", xml_name, str(exc))
_logger.error("name error in %s: %s", xml_name, str(exc))
continue
objmodel = pool.get(obj.model)
if not objmodel or not field_name in objmodel._columns:
@@ -693,7 +691,7 @@ def trans_generate(lang, modules, cr):
finally:
report_file.close()
except (IOError, etree.XMLSyntaxError):
logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
_logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
for field_name,field_def in obj._table._columns.items():
if field_def.translate:
@@ -721,7 +719,7 @@ def trans_generate(lang, modules, cr):
model_obj = pool.get(model)
if not model_obj:
logging.getLogger("i18n").error("Unable to find object %r", model)
_logger.error("Unable to find object %r", model)
continue
for constraint in getattr(model_obj, '_constraints', []):
@@ -765,7 +763,7 @@ def trans_generate(lang, modules, cr):
for bin_path in ['osv', 'report' ]:
path_list.append(os.path.join(config.config['root_path'], bin_path))
logger.debug("Scanning modules at paths: ", path_list)
_logger.debug("Scanning modules at paths: ", path_list)
mod_paths = []
join_dquotes = re.compile(r'([^\\])"[\s\\]*"', re.DOTALL)
@@ -779,7 +777,7 @@ def trans_generate(lang, modules, cr):
module = get_module_from_path(fabsolutepath, mod_paths=mod_paths)
is_mod_installed = module in installed_modules
if (('all' in modules) or (module in modules)) and is_mod_installed:
logger.debug("Scanning code of %s at module: %s", frelativepath, module)
_logger.debug("Scanning code of %s at module: %s", frelativepath, module)
src_file = misc.file_open(fabsolutepath, subdir='')
try:
code_string = src_file.read()
@@ -823,7 +821,7 @@ def trans_generate(lang, modules, cr):
code_offset = i.end() # we have counted newlines up to the match end
for path in path_list:
logger.debug("Scanning files of modules at %s", path)
_logger.debug("Scanning files of modules at %s", path)
for root, dummy, files in osutil.walksymlinks(path):
for fname in itertools.chain(fnmatch.filter(files, '*.py')):
export_code_terms_from_file(fname, path, root, 'code')
@@ -841,24 +839,22 @@ def trans_generate(lang, modules, cr):
return out
def trans_load(cr, filename, lang, verbose=True, context=None):
logger = logging.getLogger('i18n')
try:
fileobj = misc.file_open(filename)
logger.info("loading %s", filename)
_logger.info("loading %s", filename)
fileformat = os.path.splitext(filename)[-1][1:].lower()
r = trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, context=context)
fileobj.close()
return r
except IOError:
if verbose:
logger.error("couldn't read translation file %s", filename)
_logger.error("couldn't read translation file %s", filename)
return None
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, context=None):
"""Populates the ir_translation table."""
logger = logging.getLogger('i18n')
if verbose:
logger.info('loading translation file for language %s', lang)
_logger.info('loading translation file for language %s', lang)
if context is None:
context = {}
db_name = cr.dbname
@@ -887,7 +883,7 @@ def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True,
reader = TinyPoFile(fileobj)
f = ['type', 'name', 'res_id', 'src', 'value']
else:
logger.error('Bad file format: %s', fileformat)
_logger.error('Bad file format: %s', fileformat)
raise Exception(_('Bad file format'))
# read the rest of the file
@@ -932,7 +928,7 @@ def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True,
dic['res_id'] = None
except Exception:
logger.warning("Could not decode resource for %s, please fix the po file.",
_logger.warning("Could not decode resource for %s, please fix the po file.",
dic['res_id'], exc_info=True)
dic['res_id'] = None
@@ -940,10 +936,10 @@ def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True,
irt_cursor.finish()
if verbose:
logger.info("translation file loaded succesfully")
_logger.info("translation file loaded succesfully")
except IOError:
filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
logger.exception("couldn't read translation file %s", filename)
_logger.exception("couldn't read translation file %s", filename)
def get_locales(lang=None):
if lang is None:
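
Besides the logger renames, note the one behavioral fix above in the path-scanning debug call: logging interpolates its arguments lazily, so the format string must carry a placeholder. A short illustration:

import logging

_logger = logging.getLogger(__name__)
path_list = ['/srv/openerp/addons']  # illustrative value

_logger.debug("Scanning modules at paths: %s", path_list)  # correct
# Without the %s placeholder, logging fails to format the record at emit
# time and the path list never reaches the log output.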


@@ -19,7 +19,7 @@ from lxml import etree
unsafe_eval = eval
from safe_eval import safe_eval as eval
logger_channel = 'tests'
_logger = logging.getLogger(__name__)
class YamlImportException(Exception):
pass
@@ -133,7 +133,6 @@ class YamlInterpreter(object):
self.filename = filename
self.assert_report = TestReport()
self.noupdate = noupdate
self.logger = logging.getLogger("%s.%s" % (logger_channel, self.module))
self.pool = pooler.get_pool(cr.dbname)
self.uid = 1
self.context = {} # openerp context
@@ -163,7 +162,7 @@ class YamlInterpreter(object):
['&', ('name', '=', module), ('state', 'in', ['installed'])])
assert module_count == 1, 'The ID "%s" refers to an uninstalled module.' % (xml_id,)
if len(id) > 64: # TODO where does 64 come from (DB is 128)? should be a constant or loaded from DB
self.logger.log(logging.ERROR, 'id: %s is to long (max: 64)', id)
_logger.error('id: %s is too long (max: 64)', id)
def get_id(self, xml_id):
if xml_id is False or xml_id is None:
@@ -219,7 +218,7 @@ class YamlInterpreter(object):
level = severity
levelname = logging.getLevelName(level)
self.assert_report.record(False, levelname)
self.logger.log(level, msg, *args)
_logger.log(level, msg, *args)
if level >= config['assert_exit_level']:
raise YamlImportAbortion('Severe assertion failure (%s), aborting.' % levelname)
return
@@ -241,7 +240,7 @@ class YamlInterpreter(object):
assertion, expressions = node, []
if self.isnoupdate(assertion) and self.mode != 'init':
self.logger.warn('This assertion was not evaluated ("%s").' % assertion.string)
_logger.warning('This assertion was not evaluated ("%s").', assertion.string)
return
model = self.get_model(assertion.model)
ids = self._get_assertion_id(assertion)
@@ -260,7 +259,7 @@ class YamlInterpreter(object):
try:
success = unsafe_eval(test, self.eval_context, RecordDictWrapper(record))
except Exception, e:
self.logger.debug('Exception during evaluation of !assert block in yaml_file %s.', self.filename, exc_info=True)
_logger.debug('Exception during evaluation of !assert block in yaml_file %s.', self.filename, exc_info=True)
raise YamlImportAbortion(e)
if not success:
msg = 'Assertion "%s" FAILED\ntest: %s\n'
@@ -348,7 +347,7 @@ class YamlInterpreter(object):
view_id = etree.fromstring(view['arch'].encode('utf-8'))
record_dict = self._create_record(model, fields, view_id, default=default)
self.logger.debug("RECORD_DICT %s" % record_dict)
_logger.debug("RECORD_DICT %s" % record_dict)
id = self.pool.get('ir.model.data')._update(self.cr, 1, record.model, \
self.module, record_dict, record.id, noupdate=self.isnoupdate(record), mode=self.mode, context=context)
self.id_map[record.id] = int(id)
@@ -519,7 +518,7 @@ class YamlInterpreter(object):
def process_python(self, node):
def log(msg, *args):
self.logger.log(logging.TEST, msg, *args)
_logger.log(logging.TEST, msg, *args)
python, statements = node.items()[0]
model = self.get_model(python.model)
statements = statements.replace("\r\n", "\n")
@@ -532,7 +531,7 @@ class YamlInterpreter(object):
self._log_assert_failure(python.severity, 'AssertionError in Python code %s: %s', python.name, e)
return
except Exception, e:
self.logger.debug('Exception during evaluation of !python block in yaml_file %s.', self.filename, exc_info=True)
_logger.debug('Exception during evaluation of !python block in yaml_file %s.', self.filename, exc_info=True)
raise
else:
self.assert_report.record(True, python.severity)
@@ -754,7 +753,7 @@ class YamlInterpreter(object):
if len(ids):
self.pool.get(node.model).unlink(self.cr, self.uid, ids)
else:
self.logger.log(logging.TEST, "Record not deleted.")
_logger.log(logging.TEST, "Record not deleted.")
def process_url(self, node):
self.validate_xml_id(node.id)
@@ -843,9 +842,9 @@ class YamlInterpreter(object):
try:
self._process_node(node)
except YamlImportException, e:
self.logger.exception(e)
_logger.exception(e)
except Exception, e:
self.logger.exception(e)
_logger.exception(e)
raise
def _process_node(self, node):
@@ -889,14 +888,14 @@ class YamlInterpreter(object):
def _log(self, node, is_preceded_by_comment):
if is_comment(node):
is_preceded_by_comment = True
self.logger.log(logging.TEST, node)
_logger.log(logging.TEST, node)
elif not is_preceded_by_comment:
if isinstance(node, types.DictionaryType):
msg = "Creating %s\n with %s"
args = node.items()[0]
self.logger.log(logging.TEST, msg, *args)
_logger.log(logging.TEST, msg, *args)
else:
self.logger.log(logging.TEST, node)
_logger.log(logging.TEST, node)
else:
is_preceded_by_comment = False
return is_preceded_by_comment
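
A condensed, self-contained sketch of the severity handling this importer uses: a failed assertion is logged at its declared level, and anything at or above the configured threshold aborts the import (the module-level constant here stands in for config['assert_exit_level']):

import logging

_logger = logging.getLogger(__name__)
ASSERT_EXIT_LEVEL = logging.ERROR  # stands in for config['assert_exit_level']

class YamlImportAbortion(Exception):
    pass

def log_assert_failure(severity, msg, *args):
    # Record the failure, then abort once it crosses the threshold.
    _logger.log(severity, msg, *args)
    if severity >= ASSERT_EXIT_LEVEL:
        raise YamlImportAbortion('Severe assertion failure, aborting.')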


@@ -20,6 +20,7 @@
##############################################################################
import copy
import logging
import openerp.netsvc as netsvc
from openerp.tools.misc import UpdateableStr, UpdateableDict
@@ -30,9 +31,9 @@ import openerp.pooler as pooler
from openerp.osv.osv import except_osv
from openerp.osv.orm import except_orm
from openerp.netsvc import Logger, LOG_ERROR
import sys
import warnings
_logger = logging.getLogger(__name__)
class except_wizard(Exception):
def __init__(self, name, value):
@@ -49,10 +50,10 @@ class interface(netsvc.Service):
def __init__(self, name):
assert not self.exists('wizard.'+name), 'The wizard "%s" already exists!' % (name,)
warnings.warn(
_logger.warning(
"The wizard %s uses the deprecated openerp.wizard.interface class.\n"
"It must use the openerp.osv.TransientModel class instead." % \
name, DeprecationWarning, stacklevel=3)
name)
super(interface, self).__init__('wizard.'+name)
self.wiz_name = name
@@ -170,9 +171,7 @@ class interface(netsvc.Service):
import traceback
tb_s = reduce(lambda x, y: x+y, traceback.format_exception(
sys.exc_type, sys.exc_value, sys.exc_traceback))
logger = Logger()
logger.notifyChannel("web-services", LOG_ERROR,
'Exception in call: ' + tb_s)
_logger.error('Exception in call: ' + tb_s)
raise
return res
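
The hand-rolled format_exception/reduce chain above predates the logging module's built-in traceback support; the idiomatic stdlib equivalent is logger.exception(), which attaches the active traceback automatically. A sketch:

import logging

_logger = logging.getLogger(__name__)

def guarded_call(fn, *args):
    try:
        return fn(*args)
    except Exception:
        # .exception() logs at ERROR level and appends the current
        # traceback, replacing the manual sys.exc_* bookkeeping.
        _logger.exception('Exception in call')
        raise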


@@ -34,8 +34,6 @@ def log(cr,ident,act_id,info=''):
#info: %s
#""" % (ident[1], ident[2], ident[0], act_id, info)
#netsvc.Logger().notifyChannel('wkf_log', netsvc.LOG_DEBUG, msg)
#cr.execute('insert into wkf_logs (res_type, res_id, uid, act_id, time, info) values (%s,%s,%s,%s,current_time,%s)', (ident[1],int(ident[2]),int(ident[0]),int(act_id),info))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:


@@ -43,6 +43,8 @@ import openerp.modules
import openerp.tools.config as config
import service.websrv_lib as websrv_lib
_logger = logging.getLogger(__name__)
# XML-RPC fault codes. Some care must be taken when changing these: the
# constants are also defined client-side and must remain in sync.
# User code must use the exceptions defined in ``openerp.exceptions`` (not
@@ -422,12 +424,12 @@ def serve():
try:
import werkzeug.serving
httpd = werkzeug.serving.make_server(interface, port, application, threaded=True)
logging.getLogger('wsgi').info('HTTP service (werkzeug) running on %s:%s', interface, port)
_logger.info('HTTP service (werkzeug) running on %s:%s', interface, port)
except ImportError:
import wsgiref.simple_server
logging.getLogger('wsgi').warn('Werkzeug module unavailable, falling back to wsgiref.')
_logger.warning('Werkzeug module unavailable, falling back to wsgiref.')
httpd = wsgiref.simple_server.make_server(interface, port, application)
logging.getLogger('wsgi').info('HTTP service (wsgiref) running on %s:%s', interface, port)
_logger.info('HTTP service (wsgiref) running on %s:%s', interface, port)
httpd.serve_forever()
@@ -473,7 +475,7 @@ def on_starting(server):
msg = """
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
logging.exception('Failed to load server-wide module `%s`.%s', m, msg)
_logger.exception('Failed to load server-wide module `%s`.%s', m, msg)
# Install our own signal handler on the master process.
def when_ready(server):
@@ -503,7 +505,7 @@ def post_request(worker, req, environ):
import psutil
rss, vms = psutil.Process(os.getpid()).get_memory_info()
if vms > config['virtual_memory_reset']:
logging.getLogger('wsgi.worker').info('Virtual memory consumption '
_logger.info('Virtual memory consumption '
'too high, rebooting the worker.')
worker.alive = False # Commit suicide after the request.
@@ -515,7 +517,7 @@ def make_winch_handler(server):
# SIGXCPU (exceeded CPU time) signal handler will raise an exception.
def time_expired(n, stack):
logging.getLogger('wsgi.worker').info('CPU time limit exceeded.')
_logger.info('CPU time limit exceeded.')
raise Exception('CPU time limit exceeded.') # TODO one of openerp.exception
# Kill the workers gracefully (e.g. because we want to clear their cache).
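
For the time_expired handler above to ever run, the worker has to arm a CPU rlimit so the kernel delivers SIGXCPU; a sketch of the usual wiring with the standard library (the function name is assumed, not the server's actual code):

import resource
import signal

def arm_cpu_limit(seconds, handler):
    # Raise SIGXCPU once the soft CPU-time limit is reached; keep the
    # hard limit unchanged so the process can still clean up and exit.
    soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
    resource.setrlimit(resource.RLIMIT_CPU, (seconds, hard))
    signal.signal(signal.SIGXCPU, handler)

A worker would then call something like arm_cpu_limit(cpu_seconds, time_expired) after forking.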