2009-12-02 10:06:58 +00:00
|
|
|
#!/usr/bin/env python
|
2009-10-20 10:52:23 +00:00
|
|
|
# -*- coding: utf-8 -*-
|
2008-06-16 07:24:04 +00:00
|
|
|
##############################################################################
|
|
|
|
#
|
2008-12-14 16:46:47 +00:00
|
|
|
# OpenERP, Open Source Management Solution
|
2009-01-04 22:13:29 +00:00
|
|
|
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
|
2008-12-19 15:10:11 +00:00
|
|
|
# The refactoring about the OpenSSL support come from Tryton
|
2009-01-04 22:13:29 +00:00
|
|
|
# Copyright (C) 2007-2009 Cédric Krier.
|
|
|
|
# Copyright (C) 2007-2009 Bertrand Chenal.
|
2008-12-19 15:10:11 +00:00
|
|
|
# Copyright (C) 2008 B2CK SPRL.
|
2008-06-16 07:24:04 +00:00
|
|
|
#
|
2008-11-03 18:27:16 +00:00
|
|
|
# This program is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
2008-06-16 07:24:04 +00:00
|
|
|
#
|
2008-11-03 18:27:16 +00:00
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
2008-06-16 07:24:04 +00:00
|
|
|
#
|
2008-11-03 18:27:16 +00:00
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2008-06-16 07:24:04 +00:00
|
|
|
#
|
2008-11-03 18:27:16 +00:00
|
|
|
##############################################################################
|
2006-12-07 13:41:40 +00:00
|
|
|
|
2010-05-18 15:41:23 +00:00
|
|
|
import errno
|
2006-12-07 13:41:40 +00:00
|
|
|
import logging
|
2008-11-25 10:35:00 +00:00
|
|
|
import logging.handlers
|
2010-04-21 10:12:00 +00:00
|
|
|
import os
|
2010-05-18 15:41:23 +00:00
|
|
|
import socket
|
2008-12-18 23:54:33 +00:00
|
|
|
import sys
|
|
|
|
import threading
|
|
|
|
import time
|
2008-12-22 23:04:18 +00:00
|
|
|
import release
|
2010-03-03 13:04:35 +00:00
|
|
|
from pprint import pformat
|
2010-03-02 09:33:14 +00:00
|
|
|
import warnings
|
2010-09-20 11:22:23 +00:00
|
|
|
import heapq
|
2006-12-07 13:41:40 +00:00
|
|
|
|
|
|
|
class Service(object):
    """Registry and base class for *local* (in-process) services.

    Each instance registers itself under a name in the class-level
    ``_services`` dict.  Calls made through local services are trusted:
    no authentication is performed.
    """

    # name -> Service instance, shared process-wide
    _services = {}

    def __init__(self, name, audience=''):
        Service._services[name] = self
        self.__name = name
        # exported method name -> callable, filled by exportMethod()
        self._methods = {}

    def joinGroup(self, name):
        # Local services cannot be grouped; kept for interface symmetry
        # with ExportService.
        raise Exception("No group for local services")
        #GROUPS.setdefault(name, {})[self.__name] = self

    @classmethod
    def exists(cls, name):
        """Return True when a service is registered under *name*."""
        return name in cls._services

    @classmethod
    def remove(cls, name):
        """Unregister the service called *name* (no-op when absent)."""
        cls._services.pop(name, None)

    def exportMethod(self, method):
        """Expose *method* (any callable) under its own ``__name__``."""
        if not callable(method):
            return
        self._methods[method.__name__] = method

    def abortResponse(self, error, description, origin, details):
        # In debug mode, re-raise the active exception untouched so the
        # full context is preserved; otherwise wrap it in a generic one.
        if tools.config['debug_mode']:
            raise
        raise Exception("%s -- %s\n\n%s" % (origin, description, details))
|
|
|
|
|
2009-08-29 15:23:46 +00:00
|
|
|
class LocalService(object):
|
|
|
|
""" Proxy for local services.
|
|
|
|
|
2009-11-24 14:44:05 +00:00
|
|
|
Any instance of this class will behave like the single instance
|
|
|
|
of Service(name)
|
|
|
|
"""
|
2010-03-03 13:04:35 +00:00
|
|
|
__logger = logging.getLogger('service')
|
2008-07-22 14:24:36 +00:00
|
|
|
def __init__(self, name):
|
2008-08-12 14:44:56 +00:00
|
|
|
self.__name = name
|
2008-11-27 08:27:20 +00:00
|
|
|
try:
|
2009-08-29 15:23:46 +00:00
|
|
|
self._service = Service._services[name]
|
2009-01-05 18:55:20 +00:00
|
|
|
for method_name, method_definition in self._service._methods.items():
|
|
|
|
setattr(self, method_name, method_definition)
|
2008-11-27 08:27:20 +00:00
|
|
|
except KeyError, keyError:
|
2010-03-03 13:04:35 +00:00
|
|
|
self.__logger.error('This service does not exist: %s' % (str(keyError),) )
|
2008-11-27 08:27:20 +00:00
|
|
|
raise
|
2010-03-03 13:04:35 +00:00
|
|
|
|
2009-01-05 19:14:56 +00:00
|
|
|
def __call__(self, method, *params):
|
|
|
|
return getattr(self, method)(*params)
|
2008-11-27 08:27:20 +00:00
|
|
|
|
2009-08-29 15:23:46 +00:00
|
|
|
class ExportService(object):
    """Base class for services exported over the RPC transports.

    Every exported method is expected to receive an AuthProxy as its
    first parameter; the calling framework appends it automatically.

    There is no attribute-style proxy for these services: callers go
    through ``dispatch(method, auth, params)`` instead of invoking
    ``eservice.method()`` directly.
    """

    # name -> ExportService instance
    _services = {}
    # group name -> {service name -> ExportService instance}
    _groups = {}
    _logger = logging.getLogger('web-services')

    def __init__(self, name, audience=''):
        ExportService._services[name] = self
        self.__name = name
        self._logger.debug("Registered an exported service: %s" % name)

    def joinGroup(self, name):
        """Register this service inside the group called *name*."""
        group = ExportService._groups.setdefault(name, {})
        group[self.__name] = self

    @classmethod
    def getService(cls, name):
        """Return the exported service registered under *name*."""
        return cls._services[name]

    def dispatch(self, method, auth, params):
        # Subclasses must override this; the base implementation only
        # reports that no real dispatcher is available.
        raise Exception("stub dispatch at %s" % self.__name)

    def new_dispatch(self, method, auth, params):
        # Same contract as dispatch(); stub as well.
        raise Exception("stub dispatch at %s" % self.__name)

    def abortResponse(self, error, description, origin, details):
        # Outside debug mode, wrap the failure into a generic exception;
        # in debug mode re-raise the original one unchanged.
        if tools.config['debug_mode']:
            raise
        raise Exception("%s -- %s\n\n%s" % (origin, description, details))
|
2007-06-07 12:00:30 +00:00
|
|
|
|
2008-12-22 23:04:18 +00:00
|
|
|
# Symbolic names for the verbosity levels used throughout the server.
LOG_NOTSET = 'notset'
LOG_DEBUG_SQL = 'debug_sql'
LOG_DEBUG_RPC_ANSWER = 'debug_rpc_answer'
LOG_DEBUG_RPC = 'debug_rpc'
LOG_DEBUG = 'debug'
LOG_TEST = 'test'
LOG_INFO = 'info'
LOG_WARNING = 'warn'
LOG_ERROR = 'error'
LOG_CRITICAL = 'critical'

# Register the extra, finer-grained levels on the logging module itself,
# below DEBUG (most to least verbose: DEBUG_SQL < DEBUG_RPC_ANSWER <
# DEBUG_RPC < DEBUG).
logging.DEBUG_RPC_ANSWER = logging.DEBUG - 4
logging.addLevelName(logging.DEBUG_RPC_ANSWER, 'DEBUG_RPC_ANSWER')
logging.DEBUG_RPC = logging.DEBUG - 2
logging.addLevelName(logging.DEBUG_RPC, 'DEBUG_RPC')
logging.DEBUG_SQL = logging.DEBUG_RPC - 3
logging.addLevelName(logging.DEBUG_SQL, 'DEBUG_SQL')

# TEST sits between DEBUG and INFO so test output can be filtered on its own.
logging.TEST = logging.INFO - 5
logging.addLevelName(logging.TEST, 'TEST')

# ANSI color handling: foreground codes are 30 + color, background 40 + color.
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, _NOTHING, DEFAULT = range(10)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
# Expands to: <fg escape><bg escape>%s<reset> -- fill with (fg, bg, text).
COLOR_PATTERN = "%s%s%%s%s" % (COLOR_SEQ, COLOR_SEQ, RESET_SEQ)
# (foreground, background) pair used to colorize each level's name.
LEVEL_COLOR_MAPPING = {
    logging.DEBUG_SQL: (WHITE, MAGENTA),
    logging.DEBUG_RPC: (BLUE, WHITE),
    logging.DEBUG_RPC_ANSWER: (BLUE, WHITE),
    logging.DEBUG: (BLUE, DEFAULT),
    logging.INFO: (GREEN, DEFAULT),
    logging.TEST: (WHITE, BLUE),
    logging.WARNING: (YELLOW, DEFAULT),
    logging.ERROR: (RED, DEFAULT),
    logging.CRITICAL: (WHITE, RED),
}
|
|
|
|
|
2010-09-03 07:41:32 +00:00
|
|
|
class DBFormatter(logging.Formatter):
    """Formatter that exposes the current database name to log formats.

    Before delegating to the standard Formatter, the ``dbname`` attribute
    of the current thread is copied onto the record (with ``'?'`` as the
    fallback when the thread carries none), so format strings may use
    ``%(dbname)s``.
    """
    def format(self, record):
        thread = threading.currentThread()
        record.dbname = getattr(thread, 'dbname', '?')
        return logging.Formatter.format(self, record)
|
|
|
|
|
2010-09-03 07:41:32 +00:00
|
|
|
class ColoredFormatter(DBFormatter):
    """DBFormatter variant that wraps the level name in ANSI color codes.

    The (foreground, background) pair is looked up per level number in
    LEVEL_COLOR_MAPPING; 30 + color selects the foreground escape and
    40 + color the background escape.
    """
    def format(self, record):
        fg, bg = LEVEL_COLOR_MAPPING[record.levelno]
        colored_levelname = COLOR_PATTERN % (30 + fg, 40 + bg, record.levelname)
        record.levelname = colored_levelname
        return DBFormatter.format(self, record)
|
2010-04-21 10:12:00 +00:00
|
|
|
|
2006-12-07 13:41:40 +00:00
|
|
|
def init_logger():
    """Configure the process-wide root logger from ``tools.config``.

    Chooses a handler — syslog, a (rotating/watched/plain) logfile, or
    standard output — attaches a DBFormatter (colored when writing to a
    tty) and sets the root level from the 'log_level' option.
    """
    import os
    from tools.translate import resetlocale
    # make sure log messages are rendered under a sane, reset locale
    resetlocale()

    # create a format for log messages and dates
    format = '[%(asctime)s][%(dbname)s] %(levelname)s:%(name)s:%(message)s'

    if tools.config['syslog']:
        # SysLog Handler
        if os.name == 'nt':
            # on Windows, log to the NT event log instead of syslog
            handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
        else:
            handler = logging.handlers.SysLogHandler('/dev/log')
        # prefix records with the server identification instead of asctime
        # (presumably because syslog stamps records itself -- confirm)
        format = '%s %s' % (release.description, release.version) \
                + ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'

    elif tools.config['logfile']:
        # LogFile Handler
        logf = tools.config['logfile']
        try:
            # create the log directory on demand before opening the file
            dirname = os.path.dirname(logf)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            if tools.config['logrotate'] is not False:
                # daily rotation, keeping 30 files
                handler = logging.handlers.TimedRotatingFileHandler(logf,'D',1,30)
            elif os.name == 'posix':
                # reopens the file if an external logrotate moves it
                handler = logging.handlers.WatchedFileHandler(logf)
            else:
                handler = logging.handlers.FileHandler(logf)
        except Exception:
            # deliberate best-effort: fall back to stdout when the logfile
            # (or its directory) cannot be created
            sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")
            handler = logging.StreamHandler(sys.stdout)
    else:
        # Normal Handler on standard output
        handler = logging.StreamHandler(sys.stdout)

    # colorize only when actually writing to an interactive terminal
    if isinstance(handler, logging.StreamHandler) and os.isatty(handler.stream.fileno()):
        formatter = ColoredFormatter(format)
    else:
        formatter = DBFormatter(format)
    handler.setFormatter(formatter)

    # add the handler to the root logger
    logger = logging.getLogger()
    logger.addHandler(handler)
    logger.setLevel(int(tools.config['log_level'] or '0'))
|
2006-12-07 13:41:40 +00:00
|
|
|
|
2008-09-12 08:45:21 +00:00
|
|
|
|
2006-12-07 13:41:40 +00:00
|
|
|
class Logger(object):
    """Deprecated logging facade kept for backward compatibility.

    New code should use the standard ``logging.getLogger`` API directly;
    this class only forwards to it and emits PendingDeprecationWarning.
    """

    def __init__(self):
        warnings.warn("The netsvc.Logger API shouldn't be used anymore, please "
                      "use the standard `logging.getLogger` API instead",
                      PendingDeprecationWarning, stacklevel=2)
        super(Logger, self).__init__()

    def notifyChannel(self, name, level, msg):
        """Log *msg* on the logger named *name* at symbolic *level*.

        *level* is one of the LOG_* string constants.  *msg* may be an
        exception, in which case it is converted to text first; multi-line
        messages are emitted one logger call per line with a [NN]: prefix.
        """
        warnings.warn("notifyChannel API shouldn't be used anymore, please use "
                      "the standard `logging` module instead",
                      PendingDeprecationWarning, stacklevel=2)
        # NOTE(review): imported here rather than at module top, presumably
        # to avoid a circular import with service.web_services -- confirm.
        from service.web_services import common

        log = logging.getLogger(tools.ustr(name))

        # the custom levels have no debug_rpc()/test() methods on Logger
        # instances; synthesize one the first time it is needed
        if level in [LOG_DEBUG_RPC, LOG_TEST] and not hasattr(log, level):
            fct = lambda msg, *args, **kwargs: log.log(getattr(logging, level.upper()), msg, *args, **kwargs)
            setattr(log, level, fct)

        level_method = getattr(log, level)

        if isinstance(msg, Exception):
            msg = tools.exception_to_unicode(msg)

        try:
            msg = tools.ustr(msg).strip()
            # optionally prepend the server environment report to errors
            if level in (LOG_ERROR, LOG_CRITICAL) and tools.config.get_misc('debug','env_info',False):
                msg = common().exp_get_server_environment() + "\n" + msg

            result = msg.split('\n')
        except UnicodeDecodeError:
            # fall back to splitting the raw message when it cannot be
            # coerced to unicode
            result = msg.strip().split('\n')
        try:
            if len(result)>1:
                # number the lines of a multi-line message: [01]:, [02]:, ...
                for idx, s in enumerate(result):
                    level_method('[%02d]: %s' % (idx+1, s,))
            elif result:
                level_method(result[0])
        except IOError:
            # TODO: perhaps reset the logger streams?
            #if logrotate closes our files, we end up here..
            pass
        except Exception:
            # better ignore the exception and carry on..
            pass

    def set_loglevel(self, level, logger=None):
        """Change the level of *logger* (the root logger when None)."""
        if logger is not None:
            log = logging.getLogger(str(logger))
        else:
            log = logging.getLogger()
        log.setLevel(logging.INFO) # make sure next msg is printed
        log.info("Log level changed to %s" % logging.getLevelName(level))
        log.setLevel(level)

    def shutdown(self):
        # flush and close all handlers of the logging machinery
        logging.shutdown()
|
|
|
|
|
2008-12-26 15:05:51 +00:00
|
|
|
# NOTE(review): `tools` is imported this late in the module, presumably to
# break a circular import between netsvc and tools -- confirm before moving.
import tools
# configure the root logger as soon as the configuration is available
init_logger()
|
2006-12-07 13:41:40 +00:00
|
|
|
|
|
|
|
class Agent(object):
    """Singleton that keeps track of cancellable tasks to run at a given
    timestamp.

    Each task is characterised by:
        * a timestamp
        * the database on which the task runs
        * the function to call
        * the arguments and keyword arguments to pass to the function

    Implementation details:
      - Tasks are stored as lists, allowing cancellation by setting
        the timestamp to 0.
      - A heapq is used to store tasks, so we don't need to sort
        tasks ourselves.
    """
    # min-heap of [timestamp, db_name, function, args, kwargs] lists
    __tasks = []
    # db name -> list of pending task lists, for per-database cancellation
    __tasks_by_db = {}
    # uses the deprecated Logger facade defined above
    _logger = Logger()

    @classmethod
    def setAlarm(cls, function, timestamp, db_name, *args, **kwargs):
        # Schedule *function* to run at *timestamp* on database *db_name*.
        task = [timestamp, db_name, function, args, kwargs]
        heapq.heappush(cls.__tasks, task)
        cls.__tasks_by_db.setdefault(db_name, []).append(task)

    @classmethod
    def cancel(cls, db_name):
        """Cancel all tasks for a given database. If None is passed, all tasks are cancelled"""
        if db_name is None:
            cls.__tasks, cls.__tasks_by_db = [], {}
        else:
            if db_name in cls.__tasks_by_db:
                # zeroing the timestamp marks the task cancelled; runner()
                # discards it when it is popped from the heap
                for task in cls.__tasks_by_db[db_name]:
                    task[0] = 0

    @classmethod
    def quit(cls):
        # drop every pending task on shutdown
        cls.cancel(None)

    @classmethod
    def runner(cls):
        """Neverending function (intended to be ran in a dedicated thread) that
        checks every 60 seconds tasks to run.
        """
        current_thread = threading.currentThread()
        while True:
            # drain every task whose timestamp is already in the past
            while cls.__tasks and cls.__tasks[0][0] < time.time():
                task = heapq.heappop(cls.__tasks)
                timestamp, dbname, function, args, kwargs = task
                cls.__tasks_by_db[dbname].remove(task)
                if not timestamp:
                    # null timestamp -> cancelled task
                    continue
                current_thread.dbname = dbname   # hack hack
                # NOTE(review): im_class/func_name are Python 2 only
                cls._logger.notifyChannel('timers', LOG_DEBUG, "Run %s.%s(*%r, **%r)" % (function.im_class.__name__, function.func_name, args, kwargs))
                delattr(current_thread, 'dbname')
                task_thread = threading.Thread(target=function, name='netsvc.Agent.task', args=args, kwargs=kwargs)
                # force non-daemon task threads (the runner thread must be daemon, and this property is inherited by default)
                task_thread.setDaemon(False)
                task_thread.start()
                time.sleep(1)
            time.sleep(60)
|
|
|
|
|
2010-09-21 15:34:23 +00:00
|
|
|
# Launch the task scheduler loop in the background at module import time.
agent_runner = threading.Thread(target=Agent.runner, name="netsvc.Agent.runner")
# the agent runner is a typical daemon thread, that will never quit and must be
# terminated when the main process exits - with no consequence (the processing
# threads it spawns are not marked daemon)
agent_runner.setDaemon(True)
agent_runner.start()
|
2010-09-20 11:22:23 +00:00
|
|
|
|
|
|
|
|
2008-12-30 17:30:46 +00:00
|
|
|
import traceback
|
|
|
|
|
2009-08-29 15:23:46 +00:00
|
|
|
class Server:
|
2009-10-27 22:39:37 +00:00
|
|
|
""" Generic interface for all servers with an event loop etc.
|
|
|
|
Override this to impement http, net-rpc etc. servers.
|
2010-03-03 13:04:35 +00:00
|
|
|
|
2009-10-27 22:39:37 +00:00
|
|
|
Servers here must have threaded behaviour. start() must not block,
|
|
|
|
there is no run().
|
|
|
|
"""
|
|
|
|
__is_started = False
|
|
|
|
__servers = []
|
2010-07-26 09:33:36 +00:00
|
|
|
__starter_threads = []
|
2010-03-03 13:04:35 +00:00
|
|
|
|
2010-07-13 15:22:44 +00:00
|
|
|
# we don't want blocking server calls (think select()) to
|
|
|
|
# wait forever and possibly prevent exiting the process,
|
|
|
|
# but instead we want a form of polling/busy_wait pattern, where
|
|
|
|
# _server_timeout should be used as the default timeout for
|
|
|
|
# all I/O blocking operations
|
|
|
|
_busywait_timeout = 0.5
|
|
|
|
|
2010-03-03 13:04:35 +00:00
|
|
|
|
|
|
|
__logger = logging.getLogger('server')
|
|
|
|
|
2009-10-27 22:39:37 +00:00
|
|
|
def __init__(self):
|
|
|
|
Server.__servers.append(self)
|
2010-07-26 09:33:36 +00:00
|
|
|
if Server.__is_started:
|
|
|
|
# raise Exception('All instances of servers must be inited before the startAll()')
|
|
|
|
# Since the startAll() won't be called again, allow this server to
|
|
|
|
# init and then start it after 1sec (hopefully). Register that
|
|
|
|
# timer thread in a list, so that we can abort the start if quitAll
|
|
|
|
# is called in the meantime
|
|
|
|
t = threading.Timer(1.0, self._late_start)
|
2010-07-26 09:33:36 +00:00
|
|
|
t.name = 'Late start timer for %s' % str(self.__class__)
|
2010-07-26 09:33:36 +00:00
|
|
|
Server.__starter_threads.append(t)
|
|
|
|
t.start()
|
2009-10-27 22:39:37 +00:00
|
|
|
|
|
|
|
def start(self):
|
2010-03-03 13:04:35 +00:00
|
|
|
self.__logger.debug("called stub Server.start")
|
2010-07-26 09:33:36 +00:00
|
|
|
|
|
|
|
def _late_start(self):
|
|
|
|
self.start()
|
|
|
|
for thr in Server.__starter_threads:
|
|
|
|
if thr.finished.is_set():
|
|
|
|
Server.__starter_threads.remove(thr)
|
2010-03-03 13:04:35 +00:00
|
|
|
|
2009-10-27 22:39:37 +00:00
|
|
|
def stop(self):
|
2010-03-03 13:04:35 +00:00
|
|
|
self.__logger.debug("called stub Server.stop")
|
2009-08-29 15:23:46 +00:00
|
|
|
|
2009-10-28 00:30:33 +00:00
|
|
|
def stats(self):
|
|
|
|
""" This function should return statistics about the server """
|
|
|
|
return "%s: No statistics" % str(self.__class__)
|
|
|
|
|
2009-10-27 22:39:37 +00:00
|
|
|
@classmethod
|
|
|
|
def startAll(cls):
|
|
|
|
if cls.__is_started:
|
|
|
|
return
|
2010-03-03 13:04:35 +00:00
|
|
|
cls.__logger.info("Starting %d services" % len(cls.__servers))
|
2009-10-27 22:39:37 +00:00
|
|
|
for srv in cls.__servers:
|
|
|
|
srv.start()
|
|
|
|
cls.__is_started = True
|
2010-03-03 13:04:35 +00:00
|
|
|
|
2009-10-27 22:39:37 +00:00
|
|
|
@classmethod
|
|
|
|
def quitAll(cls):
|
|
|
|
if not cls.__is_started:
|
|
|
|
return
|
2010-03-03 13:04:35 +00:00
|
|
|
cls.__logger.info("Stopping %d services" % len(cls.__servers))
|
2010-07-26 09:33:36 +00:00
|
|
|
for thr in cls.__starter_threads:
|
|
|
|
if not thr.finished.is_set():
|
|
|
|
thr.cancel()
|
|
|
|
cls.__starter_threads.remove(thr)
|
|
|
|
|
2009-10-27 22:39:37 +00:00
|
|
|
for srv in cls.__servers:
|
|
|
|
srv.stop()
|
|
|
|
cls.__is_started = False
|
2009-08-29 15:23:46 +00:00
|
|
|
|
2009-10-28 00:30:33 +00:00
|
|
|
@classmethod
|
|
|
|
def allStats(cls):
|
2010-03-03 13:04:35 +00:00
|
|
|
res = ["Servers %s" % ('stopped', 'started')[cls.__is_started]]
|
|
|
|
res.extend(srv.stats() for srv in cls.__servers)
|
|
|
|
return '\n'.join(res)
|
2008-08-12 14:44:56 +00:00
|
|
|
|
2010-05-18 15:41:23 +00:00
|
|
|
def _close_socket(self):
|
|
|
|
if os.name != 'nt':
|
|
|
|
try:
|
|
|
|
self.socket.shutdown(getattr(socket, 'SHUT_RDWR', 2))
|
|
|
|
except socket.error, e:
|
|
|
|
if e.errno != errno.ENOTCONN: raise
|
|
|
|
# OSX, socket shutdowns both sides if any side closes it
|
|
|
|
# causing an error 57 'Socket is not connected' on shutdown
|
|
|
|
# of the other side (or something), see
|
|
|
|
# http://bugs.python.org/issue4397
|
|
|
|
self.__logger.debug(
|
|
|
|
'"%s" when shutting down server socket, '
|
|
|
|
'this is normal under OS X', e)
|
|
|
|
self.socket.close()
|
|
|
|
|
2009-01-05 18:55:20 +00:00
|
|
|
class OpenERPDispatcherException(Exception):
    """Wrapper raised by the RPC dispatcher on failure.

    Keeps both the original exception instance and its traceback, already
    formatted as text, so the transport layer can forward them to the
    client.  Deliberately does not call Exception.__init__, preserving the
    args set by Exception.__new__.
    """
    def __init__(self, exception, traceback):
        # the original exception instance
        self.exception = exception
        # the formatted traceback string
        self.traceback = traceback
|
|
|
|
|
2010-10-13 10:57:13 +00:00
|
|
|
def replace_request_password(args):
    """Return a copy of *args* with the password argument masked.

    The password is always the 3rd positional argument of a request; it is
    replaced by '*' in RPC logs so that logs can be forwarded for
    diagnostics/debugging without leaking credentials.  The input sequence
    itself is never mutated.
    """
    sanitized = list(args)
    if len(sanitized) > 2:
        sanitized[2] = '*'
    return sanitized
|
|
|
|
|
2009-01-05 18:55:20 +00:00
|
|
|
class OpenERPDispatcher:
    """Mixin implementing RPC request logging and dispatching.

    Transport-specific request handlers derive from this class and call
    dispatch() with the decoded service name, method and parameters.
    """

    def log(self, title, msg, channel=logging.DEBUG_RPC, depth=None):
        # Pretty-print *msg* on the logger named *title*, one logger call
        # per text line, but only when *channel* is enabled for it.
        # *depth* limits how deeply pformat renders nested structures.
        logger = logging.getLogger(title)
        if logger.isEnabledFor(channel):
            for line in pformat(msg, depth=depth).split('\n'):
                logger.log(channel, line)

    def dispatch(self, service_name, method, params):
        """Route the call to the ExportService named *service_name*.

        Returns the service's result; on failure, logs the exception and
        re-raises it wrapped (with its formatted traceback) in an
        OpenERPDispatcherException.
        """
        try:
            logger = logging.getLogger('result')
            self.log('service', service_name)
            self.log('method', method)
            # passwords are masked before the parameters hit the logs; the
            # pformat depth is unlimited only when full answers are traced
            self.log('params', replace_request_password(params), depth=(logger.isEnabledFor(logging.DEBUG_RPC_ANSWER) and None or 1))
            auth = getattr(self, 'auth_provider', None)
            result = ExportService.getService(service_name).dispatch(method, auth, params)
            self.log('result', result, channel=logging.DEBUG_RPC_ANSWER)
            return result
        except Exception, e:
            self.log('exception', tools.exception_to_unicode(e))
            # prefer a traceback already attached to the exception (e.g.
            # from a nested dispatch) over the current one
            tb = getattr(e, 'traceback', sys.exc_info())
            tb_s = "".join(traceback.format_exception(*tb))
            if tools.config['debug_mode']:
                # drop into the post-mortem debugger in debug mode
                import pdb
                pdb.post_mortem(tb[2])
            raise OpenERPDispatcherException(e, tb_s)
|
|
|
|
|
2008-07-23 15:01:27 +00:00
|
|
|
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|