[FIX] *: fix/rationalize db logging to avoid incorrect values during logging
The setting/clearing of the db/uid tracking was not done consistently, causing log messages that appeared to come from one database while actually coming from another one, or from none at all. The tracker is now set at the earliest points of request handling where we can:
 - in the web client, when creating WebRequests (dbname, uid)
 - at RPC dispatching in the server (uid)
 - at cron job acquisition in CronWorker (dbname)
 - at Registry acquisition in RegistryManager (dbname)
The tracker is cleared at the very entrance of the request in the WSGI `application`, ensuring that no logging is produced with an obsolete db name. (It cannot be cleared at the end of the request handling, because the werkzeug wrapper outputs more logging afterwards.)

bzr revid: odo@openerp.com-20130301120744-jfitcmze2jldecod
This commit is contained in:
parent
b762551211
commit
db81edc287
|
@ -18,8 +18,9 @@
|
|||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
##############################################################################
|
||||
import time
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
import psycopg2
|
||||
from datetime import datetime
|
||||
from dateutil.relativedelta import relativedelta
|
||||
|
@ -181,6 +182,7 @@ class ir_cron(osv.osv):
|
|||
If a job was processed, returns True, otherwise returns False.
|
||||
"""
|
||||
db = openerp.sql_db.db_connect(db_name)
|
||||
threading.current_thread().dbname = db_name
|
||||
cr = db.cursor()
|
||||
jobs = []
|
||||
try:
|
||||
|
@ -237,6 +239,9 @@ class ir_cron(osv.osv):
|
|||
# we're exiting due to an exception while acquiring the lock
|
||||
lock_cr.close()
|
||||
|
||||
if hasattr(threading.current_thread(), 'dbname'): # cron job could have removed it as side-effect
|
||||
del threading.current_thread().dbname
|
||||
|
||||
def _try_lock(self, cr, uid, ids, context=None):
|
||||
"""Try to grab a dummy exclusive write-lock to the rows with the given ids,
|
||||
to make sure a following write() or unlink() will not block due
|
||||
|
|
|
@ -190,6 +190,10 @@ class RegistryManager(object):
|
|||
except KeyError:
|
||||
return cls.new(db_name, force_demo, status,
|
||||
update_module)
|
||||
finally:
|
||||
# set db tracker - cleaned up at the WSGI
|
||||
# dispatching phase in openerp.service.wsgi_server.application
|
||||
threading.current_thread().dbname = db_name
|
||||
|
||||
@classmethod
|
||||
def new(cls, db_name, force_demo=False, status=None,
|
||||
|
|
|
@ -290,8 +290,6 @@ def dispatch_rpc(service_name, method, params):
|
|||
if rpc_request and rpc_response_flag:
|
||||
log(rpc_request,logging.DEBUG,'%s.%s'%(service_name,method), replace_request_password(params))
|
||||
|
||||
threading.current_thread().uid = None
|
||||
threading.current_thread().dbname = None
|
||||
result = ExportService.getService(service_name).dispatch(method, params)
|
||||
|
||||
if rpc_request_flag or rpc_response_flag:
|
||||
|
|
|
@ -189,7 +189,6 @@ class object_proxy(object):
|
|||
|
||||
@check
|
||||
def execute(self, db, uid, obj, method, *args, **kw):
|
||||
threading.currentThread().dbname = db
|
||||
cr = pooler.get_db(db).cursor()
|
||||
try:
|
||||
try:
|
||||
|
|
|
@ -601,7 +601,11 @@ class objects_proxy(netsvc.ExportService):
|
|||
|
||||
def dispatch(self, method, params):
|
||||
(db, uid, passwd ) = params[0:3]
|
||||
|
||||
# set uid tracker - cleaned up at the WSGI
|
||||
# dispatching phase in openerp.service.wsgi_server.application
|
||||
threading.current_thread().uid = uid
|
||||
|
||||
params = params[3:]
|
||||
if method == 'obj_list':
|
||||
raise NameError("obj_list has been discontinued via RPC as of 6.0, please query ir.model directly!")
|
||||
|
|
|
@ -384,6 +384,16 @@ def register_wsgi_handler(handler):
|
|||
|
||||
def application_unproxied(environ, start_response):
|
||||
""" WSGI entry point."""
|
||||
# cleanup db/uid trackers - they're set at HTTP dispatch in
|
||||
# web.session.OpenERPSession.send() and at RPC dispatch in
|
||||
# openerp.service.web_services.objects_proxy.dispatch().
|
||||
# /!\ The cleanup cannot be done at the end of this `application`
|
||||
# method because werkzeug still produces relevant logging afterwards
|
||||
if hasattr(threading.current_thread(), 'uid'):
|
||||
del threading.current_thread().uid
|
||||
if hasattr(threading.current_thread(), 'dbname'):
|
||||
del threading.current_thread().dbname
|
||||
|
||||
openerp.service.start_internal()
|
||||
|
||||
# Try all handlers until one returns some result (i.e. not None).
|
||||
|
@ -395,7 +405,6 @@ def application_unproxied(environ, start_response):
|
|||
continue
|
||||
return result
|
||||
|
||||
|
||||
# We never returned from the loop.
|
||||
response = 'No handler found.\n'
|
||||
start_response('404 Not Found', [('Content-Type', 'text/plain'), ('Content-Length', str(len(response)))])
|
||||
|
|
|
@ -41,7 +41,6 @@ import psycopg2.extensions
|
|||
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ
|
||||
from psycopg2.pool import PoolError
|
||||
from psycopg2.psycopg1 import cursor as psycopg1cursor
|
||||
from threading import currentThread
|
||||
|
||||
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
|
||||
|
||||
|
@ -520,7 +519,6 @@ def db_connect(db_name):
|
|||
global _Pool
|
||||
if _Pool is None:
|
||||
_Pool = ConnectionPool(int(tools.config['db_maxconn']))
|
||||
currentThread().dbname = db_name
|
||||
return Connection(_Pool, db_name)
|
||||
|
||||
def close_db(db_name):
|
||||
|
@ -528,9 +526,6 @@ def close_db(db_name):
|
|||
global _Pool
|
||||
if _Pool:
|
||||
_Pool.close_all(dsn(db_name))
|
||||
ct = currentThread()
|
||||
if hasattr(ct, 'dbname'):
|
||||
delattr(ct, 'dbname')
|
||||
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
|
Loading…
Reference in New Issue