[IMP] workers better logging

bzr revid: al@openerp.com-20121209203134-sobl1m3q53bjne2i
Antony Lesuisse 2012-12-09 21:31:34 +01:00
parent a4fcf06301
commit 099ae0f70c
1 changed file with 4 additions and 3 deletions


@@ -251,7 +251,7 @@ class Worker(object):
         # Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
         rss, vms = psutil.Process(os.getpid()).get_memory_info()
         if vms > config['limit_memory_soft']:
-            _logger.info('Virtual memory consumption too high, rebooting the worker.')
+            _logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
             self.alive = False # Commit suicide after the request.
         # VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
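For reference, this soft limit check runs after each request: the worker reads its own virtual memory size and, once over the threshold, finishes the current request and exits so the master can fork a fresh replacement. A minimal standalone sketch of the pattern, assuming psutil's era-appropriate get_memory_info() (renamed memory_info() in psutil 2.x) and a made-up threshold:

    import logging
    import os

    import psutil

    _logger = logging.getLogger(__name__)
    LIMIT_MEMORY_SOFT = 640 * 1024 * 1024  # hypothetical soft limit, bytes of address space

    class WorkerSketch(object):
        def __init__(self):
            self.pid = os.getpid()
            self.alive = True

        def check_memory(self):
            # get_memory_info() returns (rss, vms); newer psutil calls it memory_info().
            rss, vms = psutil.Process(os.getpid()).get_memory_info()
            if vms > LIMIT_MEMORY_SOFT:
                # Logging the pid and the measured size is the point of this
                # commit: the log then shows which worker recycled itself
                # and how big it had grown.
                _logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
                self.alive = False  # finish the current request, then exit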
@@ -262,7 +262,8 @@ class Worker(object):
         r = resource.getrusage(resource.RUSAGE_SELF)
         cpu_time = r.ru_utime + r.ru_stime
         def time_expired(n, stack):
-            _logger.info('CPU time limit exceeded.')
+            _logger.info('Worker (%d) CPU time limit (%s) reached.', self.pid, config['limit_time_cpu'])
+            # We don't commit suicide in this case.
             raise Exception('CPU time limit exceeded.')
         signal.signal(signal.SIGXCPU, time_expired)
         soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
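The CPU budget piece works through the kernel: setrlimit(RLIMIT_CPU) arms a soft limit on consumed CPU time, the kernel delivers SIGXCPU when it is crossed, and the handler turns that into a Python exception so only the request in progress dies, not the worker. A self-contained sketch of the mechanism (the 5-second budget and busy loop are illustrative only):

    import resource
    import signal

    def install_cpu_limit(limit_time_cpu):
        # The budget is added on top of CPU time already consumed, as the
        # worker does, so the limit applies from this point forward.
        r = resource.getrusage(resource.RUSAGE_SELF)
        cpu_time = r.ru_utime + r.ru_stime
        def time_expired(n, stack):
            # Raising here aborts the computation in progress; the worker
            # itself stays alive, which is why the hunk above only logs.
            raise Exception('CPU time limit exceeded.')
        signal.signal(signal.SIGXCPU, time_expired)
        soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
        resource.setrlimit(resource.RLIMIT_CPU, (int(cpu_time + limit_time_cpu), hard))

    install_cpu_limit(5)  # hypothetical 5-second CPU budget
    try:
        while True:
            pass  # burn CPU until SIGXCPU fires
    except Exception as e:
        print(e)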
@@ -358,9 +359,9 @@ class WorkerCron(Worker):
                 # TODO why isnt openerp.addons.base defined ?
                 import base
                 acquired = base.ir.ir_cron.ir_cron._acquire_job(db_name)
-                # TODO Each job should be considered as one request in multiprocessing
                 if not acquired:
                     break
+            # TODO Each job should be considered as one request instead of each db
             self.request_count += 1
 
     def start(self):
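The moved TODO ties into worker recycling: the master restarts a worker once request_count passes the configured request limit, and for the cron worker each acquired job, not each database sweep, arguably deserves to count as one request. A rough sketch of that recycle-by-count idea (class and names below are illustrative, not the actual module API):

    LIMIT_REQUEST = 8192  # hypothetical recycle threshold

    class CronWorkerSketch(object):
        def __init__(self):
            self.alive = True
            self.request_count = 0

        def process_work(self):
            # Stand-in for acquiring and running cron jobs for one database;
            # per the TODO, each job could bump the counter instead.
            self.request_count += 1

        def run(self):
            while self.alive:
                self.process_work()
                if self.request_count >= LIMIT_REQUEST:
                    # Exiting after a bounded number of requests caps the
                    # damage from slow leaks; the master forks a replacement.
                    self.alive = False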