[MERGE] trunk-service-thu (remove the class openerp.netsvc.ExportService)
bzr revid: rco@openerp.com-20130212154230-y54s2rbetllwnh4q
This commit is contained in:
commit
e76befd6c6
|
@ -36,6 +36,7 @@ OpenERP Server API
|
|||
:maxdepth: 1
|
||||
|
||||
api_models.rst
|
||||
routing.rst
|
||||
|
||||
Changelog
|
||||
'''''''''
|
||||
|
|
|
@ -0,0 +1,27 @@
|
|||
.. _routing:
|
||||
|
||||
Routing
|
||||
=======
|
||||
|
||||
.. versionchanged:: 7.1
|
||||
|
||||
The OpenERP framework, as an HTTP server, serves a few hard-coded URLs
|
||||
(``models``, ``db``, ...) to expose RPC endpoints. When running the web addons
|
||||
(which is almost always the case), it also serves URLs without them being RPC
|
||||
endpoints.
|
||||
|
||||
In older version of OpenERP, adding RPC endpoints was done by subclassing the
|
||||
``openerp.netsvc.ExportService`` class. Adding WSGI handlers was done by
|
||||
registering them with the :py:func:`openerp.wsgi.register_wsgi_handler`
|
||||
function.
|
||||
|
||||
Starting with OpenERP 7.1, exposing a new arbitrary WSGI handler is done with
|
||||
the :py:func:`openerp.http.handler` decorator while adding an RPC endpoint is
|
||||
done with the :py:func:`openerp.http.rpc` decorator.
|
||||
|
||||
Routing decorators
|
||||
------------------
|
||||
|
||||
.. automodule:: openerp.http
|
||||
:members:
|
||||
:undoc-members:
|
|
@ -28,6 +28,7 @@ SUPERUSER_ID = 1
|
|||
import addons
|
||||
import cli
|
||||
import conf
|
||||
import http
|
||||
import loglevels
|
||||
import modules
|
||||
import netsvc
|
||||
|
|
|
@ -28,6 +28,7 @@ import time
|
|||
from openerp import SUPERUSER_ID
|
||||
from openerp import netsvc, tools
|
||||
from openerp.osv import fields, osv
|
||||
import openerp.report.interface
|
||||
from openerp.report.report_sxw import report_sxw, report_rml
|
||||
from openerp.tools.config import config
|
||||
from openerp.tools.safe_eval import safe_eval as eval
|
||||
|
@ -93,9 +94,9 @@ class report_xml(osv.osv):
|
|||
opj = os.path.join
|
||||
cr.execute("SELECT * FROM ir_act_report_xml WHERE auto=%s ORDER BY id", (True,))
|
||||
result = cr.dictfetchall()
|
||||
svcs = netsvc.Service._services
|
||||
reports = openerp.report.interface.report_int._reports
|
||||
for r in result:
|
||||
if svcs.has_key('report.'+r['report_name']):
|
||||
if reports.has_key('report.'+r['report_name']):
|
||||
continue
|
||||
if r['report_rml'] or r['report_rml_content_data']:
|
||||
report_sxw('report.'+r['report_name'], r['model'],
|
||||
|
|
|
@ -21,7 +21,8 @@
|
|||
|
||||
import time
|
||||
|
||||
from openerp.osv import osv,fields
|
||||
from openerp.osv import osv, fields
|
||||
from openerp.osv.orm import browse_record
|
||||
from openerp.tools.misc import attrgetter
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
|
@ -111,7 +112,7 @@ class ir_property(osv.osv):
|
|||
raise osv.except_osv('Error', 'Invalid type')
|
||||
|
||||
if field == 'value_reference':
|
||||
if isinstance(value, osv.orm.browse_record):
|
||||
if isinstance(value, browse_record):
|
||||
value = '%s,%d' % (value._name, value.id)
|
||||
elif isinstance(value, (int, long)):
|
||||
field_id = values.get('fields_id')
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
``openerp.http`` offers decorators to register WSGI and RPC endpoints handlers.
|
||||
See :ref:`routing`.
|
||||
"""
|
||||
|
||||
from . import service
|
||||
|
||||
def handler():
|
||||
"""
|
||||
Decorator to register a WSGI handler. The handler must return None if it
|
||||
does not handle the request.
|
||||
"""
|
||||
def decorator(f):
|
||||
service.wsgi_server.register_wsgi_handler(f)
|
||||
return decorator
|
||||
|
||||
def route(url):
|
||||
"""
|
||||
Same as then handler() decorator but register the handler under a specific
|
||||
url. Not yet implemented.
|
||||
"""
|
||||
def decorator(f):
|
||||
pass # TODO
|
||||
return decorator
|
||||
|
||||
def rpc(endpoint):
|
||||
"""
|
||||
Decorator to register a RPC endpoint handler. The handler will receive
|
||||
already unmarshalled RCP arguments.
|
||||
"""
|
||||
def decorator(f):
|
||||
service.wsgi_server.register_rpc_endpoint(endpoint, f)
|
||||
return decorator
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -46,7 +46,7 @@ class Logger(object):
|
|||
_logger.warning(
|
||||
"notifyChannel API shouldn't be used anymore, please use "
|
||||
"the standard `logging` module instead.")
|
||||
from service.web_services import common
|
||||
from service import common
|
||||
|
||||
log = logging.getLogger(__name__ + '.deprecated.' + ustr(name))
|
||||
|
||||
|
@ -63,7 +63,7 @@ class Logger(object):
|
|||
try:
|
||||
msg = ustr(msg).strip()
|
||||
if level in (LOG_ERROR, LOG_CRITICAL): # and tools.config.get_misc('debug','env_info',False):
|
||||
msg = common().exp_get_server_environment() + "\n" + msg
|
||||
msg = common.exp_get_server_environment() + "\n" + msg
|
||||
|
||||
result = msg.split('\n')
|
||||
except UnicodeDecodeError:
|
||||
|
|
|
@ -27,7 +27,6 @@ import logging.handlers
|
|||
import os
|
||||
import platform
|
||||
import release
|
||||
import socket
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
|
@ -46,79 +45,13 @@ import openerp
|
|||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def close_socket(sock):
|
||||
""" Closes a socket instance cleanly
|
||||
|
||||
:param sock: the network socket to close
|
||||
:type sock: socket.socket
|
||||
"""
|
||||
try:
|
||||
sock.shutdown(socket.SHUT_RDWR)
|
||||
except socket.error, e:
|
||||
# On OSX, socket shutdowns both sides if any side closes it
|
||||
# causing an error 57 'Socket is not connected' on shutdown
|
||||
# of the other side (or something), see
|
||||
# http://bugs.python.org/issue4397
|
||||
# note: stdlib fixed test, not behavior
|
||||
if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
|
||||
raise
|
||||
sock.close()
|
||||
|
||||
def abort_response(dummy_1, description, dummy_2, details):
|
||||
# TODO Replace except_{osv,orm} with these directly.
|
||||
raise openerp.osv.osv.except_osv(description, details)
|
||||
|
||||
class Service(object):
|
||||
""" Base class for Local services
|
||||
Functionality here is trusted, no authentication.
|
||||
Workflow engine and reports subclass this.
|
||||
"""
|
||||
_services = {}
|
||||
def __init__(self, name):
|
||||
Service._services[name] = self
|
||||
self.__name = name
|
||||
|
||||
@classmethod
|
||||
def exists(cls, name):
|
||||
return name in cls._services
|
||||
|
||||
@classmethod
|
||||
def remove(cls, name):
|
||||
if cls.exists(name):
|
||||
cls._services.pop(name)
|
||||
|
||||
def LocalService(name):
|
||||
# Special case for addons support, will be removed in a few days when addons
|
||||
# are updated to directly use openerp.osv.osv.service.
|
||||
if name == 'object_proxy':
|
||||
return openerp.osv.osv.service
|
||||
if name == 'workflow':
|
||||
return openerp.workflow
|
||||
|
||||
return Service._services[name]
|
||||
|
||||
class ExportService(object):
|
||||
""" Proxy for exported services.
|
||||
|
||||
Note that this class has no direct proxy, capable of calling
|
||||
eservice.method(). Rather, the proxy should call
|
||||
dispatch(method, params)
|
||||
"""
|
||||
|
||||
_services = {}
|
||||
|
||||
def __init__(self, name):
|
||||
ExportService._services[name] = self
|
||||
self.__name = name
|
||||
_logger.debug("Registered an exported service: %s" % name)
|
||||
|
||||
@classmethod
|
||||
def getService(cls,name):
|
||||
return cls._services[name]
|
||||
|
||||
# Dispatch a RPC call w.r.t. the method name. The dispatching
|
||||
# w.r.t. the service (this class) is done by OpenERPDispatcher.
|
||||
def dispatch(self, method, params):
|
||||
raise Exception("stub dispatch at %s" % self.__name)
|
||||
return openerp.report.interface.report_int._reports[name]
|
||||
|
||||
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, _NOTHING, DEFAULT = range(10)
|
||||
#The background is set with 40 plus the number of the color, and the foreground with 30
|
||||
|
@ -290,7 +223,17 @@ def dispatch_rpc(service_name, method, params):
|
|||
|
||||
threading.current_thread().uid = None
|
||||
threading.current_thread().dbname = None
|
||||
result = ExportService.getService(service_name).dispatch(method, params)
|
||||
if service_name == 'common':
|
||||
dispatch = openerp.service.common.dispatch
|
||||
elif service_name == 'db':
|
||||
dispatch = openerp.service.db.dispatch
|
||||
elif service_name == 'object':
|
||||
dispatch = openerp.service.model.dispatch
|
||||
elif service_name == 'report':
|
||||
dispatch = openerp.service.report.dispatch
|
||||
else:
|
||||
dispatch = openerp.service.wsgi_server.rpc_handlers.get(service_name)
|
||||
result = dispatch(method, params)
|
||||
|
||||
if rpc_request_flag or rpc_response_flag:
|
||||
end_time = time.time()
|
||||
|
|
|
@ -20,201 +20,15 @@
|
|||
##############################################################################
|
||||
|
||||
|
||||
from functools import wraps
|
||||
import logging
|
||||
import threading
|
||||
from openerp.osv.orm import except_orm, Model, TransientModel, AbstractModel
|
||||
|
||||
from psycopg2 import IntegrityError, errorcodes
|
||||
# Deprecated, kept for backward compatibility.
|
||||
# openerp.exceptions.Warning should be used instead.
|
||||
except_osv = except_orm
|
||||
|
||||
import orm
|
||||
import openerp
|
||||
import openerp.netsvc as netsvc
|
||||
import openerp.pooler as pooler
|
||||
import openerp.sql_db as sql_db
|
||||
from openerp.tools.translate import translate
|
||||
from openerp.osv.orm import MetaModel, Model, TransientModel, AbstractModel
|
||||
import openerp.exceptions
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
# Deprecated.
|
||||
class except_osv(Exception):
|
||||
def __init__(self, name, value):
|
||||
self.name = name
|
||||
self.value = value
|
||||
self.args = (name, value)
|
||||
|
||||
service = None
|
||||
|
||||
class object_proxy(object):
|
||||
def __init__(self):
|
||||
global service
|
||||
service = self
|
||||
|
||||
def check(f):
|
||||
@wraps(f)
|
||||
def wrapper(self, dbname, *args, **kwargs):
|
||||
""" Wraps around OSV functions and normalises a few exceptions
|
||||
"""
|
||||
|
||||
def tr(src, ttype):
|
||||
# We try to do the same as the _(), but without the frame
|
||||
# inspection, since we aready are wrapping an osv function
|
||||
# trans_obj = self.get('ir.translation') cannot work yet :(
|
||||
ctx = {}
|
||||
if not kwargs:
|
||||
if args and isinstance(args[-1], dict):
|
||||
ctx = args[-1]
|
||||
elif isinstance(kwargs, dict):
|
||||
ctx = kwargs.get('context', {})
|
||||
|
||||
uid = 1
|
||||
if args and isinstance(args[0], (long, int)):
|
||||
uid = args[0]
|
||||
|
||||
lang = ctx and ctx.get('lang')
|
||||
if not (lang or hasattr(src, '__call__')):
|
||||
return src
|
||||
|
||||
# We open a *new* cursor here, one reason is that failed SQL
|
||||
# queries (as in IntegrityError) will invalidate the current one.
|
||||
cr = False
|
||||
|
||||
if hasattr(src, '__call__'):
|
||||
# callable. We need to find the right parameters to call
|
||||
# the orm._sql_message(self, cr, uid, ids, context) function,
|
||||
# or we skip..
|
||||
# our signature is f(osv_pool, dbname [,uid, obj, method, args])
|
||||
try:
|
||||
if args and len(args) > 1:
|
||||
obj = self.get(args[1])
|
||||
if len(args) > 3 and isinstance(args[3], (long, int, list)):
|
||||
ids = args[3]
|
||||
else:
|
||||
ids = []
|
||||
cr = sql_db.db_connect(dbname).cursor()
|
||||
return src(obj, cr, uid, ids, context=(ctx or {}))
|
||||
except Exception:
|
||||
pass
|
||||
finally:
|
||||
if cr: cr.close()
|
||||
|
||||
return False # so that the original SQL error will
|
||||
# be returned, it is the best we have.
|
||||
|
||||
try:
|
||||
cr = sql_db.db_connect(dbname).cursor()
|
||||
res = translate(cr, name=False, source_type=ttype,
|
||||
lang=lang, source=src)
|
||||
if res:
|
||||
return res
|
||||
else:
|
||||
return src
|
||||
finally:
|
||||
if cr: cr.close()
|
||||
|
||||
def _(src):
|
||||
return tr(src, 'code')
|
||||
|
||||
try:
|
||||
if pooler.get_pool(dbname)._init:
|
||||
raise except_osv('Database not ready', 'Currently, this database is not fully loaded and can not be used.')
|
||||
return f(self, dbname, *args, **kwargs)
|
||||
except orm.except_orm, inst:
|
||||
raise except_osv(inst.name, inst.value)
|
||||
except except_osv:
|
||||
raise
|
||||
except IntegrityError, inst:
|
||||
osv_pool = pooler.get_pool(dbname)
|
||||
for key in osv_pool._sql_error.keys():
|
||||
if key in inst[0]:
|
||||
netsvc.abort_response(1, _('Constraint Error'), 'warning',
|
||||
tr(osv_pool._sql_error[key], 'sql_constraint') or inst[0])
|
||||
if inst.pgcode in (errorcodes.NOT_NULL_VIOLATION, errorcodes.FOREIGN_KEY_VIOLATION, errorcodes.RESTRICT_VIOLATION):
|
||||
msg = _('The operation cannot be completed, probably due to the following:\n- deletion: you may be trying to delete a record while other records still reference it\n- creation/update: a mandatory field is not correctly set')
|
||||
_logger.debug("IntegrityError", exc_info=True)
|
||||
try:
|
||||
errortxt = inst.pgerror.replace('«','"').replace('»','"')
|
||||
if '"public".' in errortxt:
|
||||
context = errortxt.split('"public".')[1]
|
||||
model_name = table = context.split('"')[1]
|
||||
else:
|
||||
last_quote_end = errortxt.rfind('"')
|
||||
last_quote_begin = errortxt.rfind('"', 0, last_quote_end)
|
||||
model_name = table = errortxt[last_quote_begin+1:last_quote_end].strip()
|
||||
model = table.replace("_",".")
|
||||
model_obj = osv_pool.get(model)
|
||||
if model_obj:
|
||||
model_name = model_obj._description or model_obj._name
|
||||
msg += _('\n\n[object with reference: %s - %s]') % (model_name, model)
|
||||
except Exception:
|
||||
pass
|
||||
netsvc.abort_response(1, _('Integrity Error'), 'warning', msg)
|
||||
else:
|
||||
netsvc.abort_response(1, _('Integrity Error'), 'warning', inst[0])
|
||||
except Exception:
|
||||
_logger.exception("Uncaught exception")
|
||||
raise
|
||||
|
||||
return wrapper
|
||||
|
||||
def execute_cr(self, cr, uid, obj, method, *args, **kw):
|
||||
object = pooler.get_pool(cr.dbname).get(obj)
|
||||
if not object:
|
||||
raise except_osv('Object Error', 'Object %s doesn\'t exist' % str(obj))
|
||||
return getattr(object, method)(cr, uid, *args, **kw)
|
||||
|
||||
def execute_kw(self, db, uid, obj, method, args, kw=None):
|
||||
return self.execute(db, uid, obj, method, *args, **kw or {})
|
||||
|
||||
@check
|
||||
def execute(self, db, uid, obj, method, *args, **kw):
|
||||
threading.currentThread().dbname = db
|
||||
cr = pooler.get_db(db).cursor()
|
||||
try:
|
||||
try:
|
||||
if method.startswith('_'):
|
||||
raise except_osv('Access Denied', 'Private methods (such as %s) cannot be called remotely.' % (method,))
|
||||
res = self.execute_cr(cr, uid, obj, method, *args, **kw)
|
||||
if res is None:
|
||||
_logger.warning('The method %s of the object %s can not return `None` !', method, obj)
|
||||
cr.commit()
|
||||
except Exception:
|
||||
cr.rollback()
|
||||
raise
|
||||
finally:
|
||||
cr.close()
|
||||
return res
|
||||
|
||||
def exec_workflow_cr(self, cr, uid, obj, signal, *args):
|
||||
object = pooler.get_pool(cr.dbname).get(obj)
|
||||
if not object:
|
||||
raise except_osv('Object Error', 'Object %s doesn\'t exist' % str(obj))
|
||||
res_id = args[0]
|
||||
return object._workflow_signal(cr, uid, [res_id], signal)[res_id]
|
||||
|
||||
@check
|
||||
def exec_workflow(self, db, uid, obj, signal, *args):
|
||||
cr = pooler.get_db(db).cursor()
|
||||
try:
|
||||
try:
|
||||
res = self.exec_workflow_cr(cr, uid, obj, signal, *args)
|
||||
cr.commit()
|
||||
except Exception:
|
||||
cr.rollback()
|
||||
raise
|
||||
finally:
|
||||
cr.close()
|
||||
return res
|
||||
|
||||
# deprecated - for backward compatibility.
|
||||
# Deprecated, kept for backward compatibility.
|
||||
osv = Model
|
||||
osv_memory = TransientModel
|
||||
osv_abstract = AbstractModel # ;-)
|
||||
|
||||
|
||||
def start_object_proxy():
|
||||
object_proxy()
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@ import os
|
|||
import re
|
||||
|
||||
from lxml import etree
|
||||
import openerp.netsvc as netsvc
|
||||
import openerp.pooler as pooler
|
||||
|
||||
import openerp.tools as tools
|
||||
|
@ -40,12 +39,17 @@ def toxml(value):
|
|||
unicode_value = tools.ustr(value)
|
||||
return unicode_value.replace('&', '&').replace('<','<').replace('>','>')
|
||||
|
||||
class report_int(netsvc.Service):
|
||||
class report_int(object):
|
||||
|
||||
_reports = {}
|
||||
|
||||
def __init__(self, name):
|
||||
assert not self.exists(name), 'The report "%s" already exists!' % name
|
||||
super(report_int, self).__init__(name)
|
||||
if not name.startswith('report.'):
|
||||
raise Exception('ConceptionError, bad report name, should start with "report."')
|
||||
assert name not in self._reports, 'The report "%s" already exists!' % name
|
||||
self._reports[name] = self
|
||||
self.__name = name
|
||||
|
||||
self.name = name
|
||||
self.id = 0
|
||||
self.name2 = '.'.join(name.split('.')[1:])
|
||||
|
|
|
@ -29,9 +29,6 @@ import threading
|
|||
import time
|
||||
|
||||
import cron
|
||||
import netrpc_server
|
||||
import web_services
|
||||
import web_services
|
||||
import wsgi_server
|
||||
|
||||
import openerp.modules
|
||||
|
@ -40,6 +37,12 @@ import openerp.osv
|
|||
from openerp.release import nt_service_name
|
||||
import openerp.tools
|
||||
|
||||
import common
|
||||
import db
|
||||
import model
|
||||
import report
|
||||
|
||||
#.apidoc title: RPC Services
|
||||
|
||||
""" Classes of this module implement the network protocols that the
|
||||
OpenERP server uses to communicate with remote clients.
|
||||
|
@ -72,20 +75,14 @@ def start_internal():
|
|||
if start_internal_done:
|
||||
return
|
||||
openerp.netsvc.init_logger()
|
||||
|
||||
# Instantiate local services (this is a legacy design).
|
||||
openerp.osv.osv.start_object_proxy()
|
||||
# Export (for RPC) services.
|
||||
web_services.start_service()
|
||||
openerp.modules.loading.open_openerp_namespace()
|
||||
|
||||
load_server_wide_modules()
|
||||
start_internal_done = True
|
||||
|
||||
def start_services():
|
||||
""" Start all services including http, netrpc and cron """
|
||||
""" Start all services including http, and cron """
|
||||
start_internal()
|
||||
# Initialize the NETRPC server.
|
||||
netrpc_server.start_service()
|
||||
# Start the WSGI server.
|
||||
wsgi_server.start_service()
|
||||
# Start the main cron thread.
|
||||
|
@ -95,7 +92,6 @@ def stop_services():
|
|||
""" Stop all services. """
|
||||
# stop services
|
||||
cron.stop_service()
|
||||
netrpc_server.stop_service()
|
||||
wsgi_server.stop_service()
|
||||
|
||||
_logger.info("Initiating shutdown")
|
||||
|
|
|
@ -0,0 +1,200 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
import logging
|
||||
import threading
|
||||
|
||||
import openerp.osv.orm # TODO use openerp.exceptions
|
||||
import openerp.pooler
|
||||
import openerp.release
|
||||
import openerp.tools
|
||||
|
||||
import security
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
RPC_VERSION_1 = {
|
||||
'server_version': openerp.release.version,
|
||||
'server_version_info': openerp.release.version_info,
|
||||
'server_serie': openerp.release.serie,
|
||||
'protocol_version': 1,
|
||||
}
|
||||
|
||||
def dispatch(method, params):
|
||||
if method in ['login', 'about', 'timezone_get', 'get_server_environment',
|
||||
'login_message','get_stats', 'check_connectivity',
|
||||
'list_http_services', 'version', 'authenticate']:
|
||||
pass
|
||||
elif method in ['get_available_updates', 'get_migration_scripts', 'set_loglevel', 'get_os_time', 'get_sqlcount']:
|
||||
passwd = params[0]
|
||||
params = params[1:]
|
||||
security.check_super(passwd)
|
||||
else:
|
||||
raise Exception("Method not found: %s" % method)
|
||||
|
||||
fn = globals()['exp_' + method]
|
||||
return fn(*params)
|
||||
|
||||
def exp_login(db, login, password):
|
||||
# TODO: legacy indirection through 'security', should use directly
|
||||
# the res.users model
|
||||
res = security.login(db, login, password)
|
||||
msg = res and 'successful login' or 'bad login or password'
|
||||
_logger.info("%s from '%s' using database '%s'", msg, login, db.lower())
|
||||
return res or False
|
||||
|
||||
def exp_authenticate(db, login, password, user_agent_env):
|
||||
res_users = openerp.pooler.get_pool(db).get('res.users')
|
||||
return res_users.authenticate(db, login, password, user_agent_env)
|
||||
|
||||
def exp_version():
|
||||
return RPC_VERSION_1
|
||||
|
||||
def exp_about(extended=False):
|
||||
"""Return information about the OpenERP Server.
|
||||
|
||||
@param extended: if True then return version info
|
||||
@return string if extended is False else tuple
|
||||
"""
|
||||
|
||||
info = _('See http://openerp.com')
|
||||
|
||||
if extended:
|
||||
return info, openerp.release.version
|
||||
return info
|
||||
|
||||
def exp_timezone_get(db, login, password):
|
||||
return openerp.tools.misc.get_server_timezone()
|
||||
|
||||
def exp_get_available_updates(contract_id, contract_password):
|
||||
import openerp.tools.maintenance as tm
|
||||
try:
|
||||
rc = tm.remote_contract(contract_id, contract_password)
|
||||
if not rc.id:
|
||||
raise tm.RemoteContractException('This contract does not exist or is not active')
|
||||
|
||||
return rc.get_available_updates(rc.id, openerp.modules.get_modules_with_version())
|
||||
|
||||
except tm.RemoteContractException, e:
|
||||
raise openerp.osv.orm.except_orm('Migration Error', str(e))
|
||||
|
||||
|
||||
def exp_get_migration_scripts(contract_id, contract_password):
|
||||
import openerp.tools.maintenance as tm
|
||||
try:
|
||||
rc = tm.remote_contract(contract_id, contract_password)
|
||||
if not rc.id:
|
||||
raise tm.RemoteContractException('This contract does not exist or is not active')
|
||||
if rc.status != 'full':
|
||||
raise tm.RemoteContractException('Can not get updates for a partial contract')
|
||||
|
||||
_logger.info('starting migration with contract %s', rc.name)
|
||||
|
||||
zips = rc.retrieve_updates(rc.id, openerp.modules.get_modules_with_version())
|
||||
|
||||
from shutil import rmtree, copytree, copy
|
||||
|
||||
backup_directory = os.path.join(openerp.tools.config['root_path'], 'backup', time.strftime('%Y-%m-%d-%H-%M'))
|
||||
if zips and not os.path.isdir(backup_directory):
|
||||
_logger.info('create a new backup directory to store the old modules: %s', backup_directory)
|
||||
os.makedirs(backup_directory)
|
||||
|
||||
for module in zips:
|
||||
_logger.info('upgrade module %s', module)
|
||||
mp = openerp.modules.get_module_path(module)
|
||||
if mp:
|
||||
if os.path.isdir(mp):
|
||||
copytree(mp, os.path.join(backup_directory, module))
|
||||
if os.path.islink(mp):
|
||||
os.unlink(mp)
|
||||
else:
|
||||
rmtree(mp)
|
||||
else:
|
||||
copy(mp + 'zip', backup_directory)
|
||||
os.unlink(mp + '.zip')
|
||||
|
||||
try:
|
||||
try:
|
||||
base64_decoded = base64.decodestring(zips[module])
|
||||
except Exception:
|
||||
_logger.error('unable to read the module %s', module)
|
||||
raise
|
||||
|
||||
zip_contents = StringIO(base64_decoded)
|
||||
zip_contents.seek(0)
|
||||
try:
|
||||
try:
|
||||
openerp.tools.extract_zip_file(zip_contents, openerp.tools.config['addons_path'] )
|
||||
except Exception:
|
||||
_logger.error('unable to extract the module %s', module)
|
||||
rmtree(module)
|
||||
raise
|
||||
finally:
|
||||
zip_contents.close()
|
||||
except Exception:
|
||||
_logger.error('restore the previous version of the module %s', module)
|
||||
nmp = os.path.join(backup_directory, module)
|
||||
if os.path.isdir(nmp):
|
||||
copytree(nmp, openerp.tools.config['addons_path'])
|
||||
else:
|
||||
copy(nmp+'.zip', openerp.tools.config['addons_path'])
|
||||
raise
|
||||
|
||||
return True
|
||||
except tm.RemoteContractException, e:
|
||||
raise openerp.osv.orm.except_orm('Migration Error', str(e))
|
||||
except Exception, e:
|
||||
_logger.exception('Exception in get_migration_script:')
|
||||
raise
|
||||
|
||||
def exp_get_server_environment():
|
||||
os_lang = '.'.join( [x for x in locale.getdefaultlocale() if x] )
|
||||
if not os_lang:
|
||||
os_lang = 'NOT SET'
|
||||
environment = '\nEnvironment Information : \n' \
|
||||
'System : %s\n' \
|
||||
'OS Name : %s\n' \
|
||||
%(platform.platform(), platform.os.name)
|
||||
if os.name == 'posix':
|
||||
if platform.system() == 'Linux':
|
||||
lsbinfo = os.popen('lsb_release -a').read()
|
||||
environment += '%s'% lsbinfo
|
||||
else:
|
||||
environment += 'Your System is not lsb compliant\n'
|
||||
environment += 'Operating System Release : %s\n' \
|
||||
'Operating System Version : %s\n' \
|
||||
'Operating System Architecture : %s\n' \
|
||||
'Operating System Locale : %s\n'\
|
||||
'Python Version : %s\n'\
|
||||
'OpenERP-Server Version : %s'\
|
||||
%(platform.release(), platform.version(), platform.architecture()[0],
|
||||
os_lang, platform.python_version(), openerp.release.version)
|
||||
return environment
|
||||
|
||||
def exp_login_message():
|
||||
return openerp.tools.config.get('login_message', False)
|
||||
|
||||
def exp_set_loglevel(loglevel, logger=None):
|
||||
# TODO Previously, the level was set on the now deprecated
|
||||
# `openerp.netsvc.Logger` class.
|
||||
return True
|
||||
|
||||
def exp_get_stats():
|
||||
res = "OpenERP server: %d threads\n" % threading.active_count()
|
||||
res += netsvc.Server.allStats()
|
||||
return res
|
||||
|
||||
def exp_list_http_services():
|
||||
return http_server.list_http_services()
|
||||
|
||||
def exp_check_connectivity():
|
||||
return bool(sql_db.db_connect('postgres'))
|
||||
|
||||
def exp_get_os_time():
|
||||
return os.times()
|
||||
|
||||
def exp_get_sqlcount():
|
||||
if not logging.getLogger('openerp.sql_db').isEnabledFor(logging.DEBUG):
|
||||
_logger.warning("Counters of SQL will not be reliable unless logger openerp.sql_db is set to level DEBUG or higer.")
|
||||
return sql_db.sql_counter
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -0,0 +1,349 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
import contextlib
|
||||
import logging
|
||||
import threading
|
||||
import traceback
|
||||
|
||||
from openerp import SUPERUSER_ID
|
||||
import openerp.pooler
|
||||
import openerp.sql_db
|
||||
import openerp.tools
|
||||
|
||||
import security
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
self_actions = {}
|
||||
self_id = 0
|
||||
self_id_protect = threading.Semaphore()
|
||||
|
||||
# This should be moved to openerp.modules.db, along side initialize().
|
||||
def _initialize_db(id, db_name, demo, lang, user_password):
|
||||
cr = None
|
||||
try:
|
||||
self_actions[id]['progress'] = 0
|
||||
cr = openerp.sql_db.db_connect(db_name).cursor()
|
||||
openerp.modules.db.initialize(cr) # TODO this should be removed as it is done by pooler.restart_pool.
|
||||
openerp.tools.config['lang'] = lang
|
||||
cr.commit()
|
||||
cr.close()
|
||||
|
||||
pool = openerp.pooler.restart_pool(db_name, demo, self_actions[id],
|
||||
update_module=True)[1]
|
||||
|
||||
cr = openerp.sql_db.db_connect(db_name).cursor()
|
||||
|
||||
if lang:
|
||||
modobj = pool.get('ir.module.module')
|
||||
mids = modobj.search(cr, SUPERUSER_ID, [('state', '=', 'installed')])
|
||||
modobj.update_translations(cr, SUPERUSER_ID, mids, lang)
|
||||
|
||||
# update admin's password and lang
|
||||
values = {'password': user_password, 'lang': lang}
|
||||
pool.get('res.users').write(cr, SUPERUSER_ID, [SUPERUSER_ID], values)
|
||||
|
||||
cr.execute('SELECT login, password FROM res_users ORDER BY login')
|
||||
self_actions[id].update(users=cr.dictfetchall(), clean=True)
|
||||
cr.commit()
|
||||
cr.close()
|
||||
except Exception, e:
|
||||
self_actions[id].update(clean=False, exception=e)
|
||||
_logger.exception('CREATE DATABASE failed:')
|
||||
self_actions[id]['traceback'] = traceback.format_exc()
|
||||
if cr:
|
||||
cr.close()
|
||||
|
||||
def dispatch(method, params):
|
||||
if method in [ 'create', 'get_progress', 'drop', 'dump',
|
||||
'restore', 'rename',
|
||||
'change_admin_password', 'migrate_databases',
|
||||
'create_database', 'duplicate_database' ]:
|
||||
passwd = params[0]
|
||||
params = params[1:]
|
||||
security.check_super(passwd)
|
||||
elif method in [ 'db_exist', 'list', 'list_lang', 'server_version' ]:
|
||||
# params = params
|
||||
# No security check for these methods
|
||||
pass
|
||||
else:
|
||||
raise KeyError("Method not found: %s" % method)
|
||||
fn = globals()['exp_' + method]
|
||||
return fn(*params)
|
||||
|
||||
def _create_empty_database(name):
|
||||
db = openerp.sql_db.db_connect('postgres')
|
||||
cr = db.cursor()
|
||||
chosen_template = openerp.tools.config['db_template']
|
||||
cr.execute("""SELECT datname
|
||||
FROM pg_database
|
||||
WHERE datname = %s """,
|
||||
(name,))
|
||||
if cr.fetchall():
|
||||
raise openerp.exceptions.Warning(" %s database already exists!" % name )
|
||||
try:
|
||||
cr.autocommit(True) # avoid transaction block
|
||||
cr.execute("""CREATE DATABASE "%s" ENCODING 'unicode' TEMPLATE "%s" """ % (name, chosen_template))
|
||||
finally:
|
||||
cr.close()
|
||||
|
||||
def exp_create(db_name, demo, lang, user_password='admin'):
|
||||
self_id_protect.acquire()
|
||||
global self_id
|
||||
self_id += 1
|
||||
id = self_id
|
||||
self_id_protect.release()
|
||||
|
||||
self_actions[id] = {'clean': False}
|
||||
|
||||
_create_empty_database(db_name)
|
||||
|
||||
_logger.info('CREATE DATABASE %s', db_name.lower())
|
||||
create_thread = threading.Thread(target=_initialize_db,
|
||||
args=(id, db_name, demo, lang, user_password))
|
||||
create_thread.start()
|
||||
self_actions[id]['thread'] = create_thread
|
||||
return id
|
||||
|
||||
def exp_create_database(db_name, demo, lang, user_password='admin'):
|
||||
""" Similar to exp_create but blocking."""
|
||||
self_id_protect.acquire()
|
||||
global self_id
|
||||
self_id += 1
|
||||
id = self_id
|
||||
self_id_protect.release()
|
||||
|
||||
self_actions[id] = {'clean': False}
|
||||
|
||||
_logger.info('Create database `%s`.', db_name)
|
||||
_create_empty_database(db_name)
|
||||
_initialize_db(id, db_name, demo, lang, user_password)
|
||||
return True
|
||||
|
||||
def exp_duplicate_database(db_original_name, db_name):
|
||||
_logger.info('Duplicate database `%s` to `%s`.', db_original_name, db_name)
|
||||
openerp.sql_db.close_db(db_original_name)
|
||||
db = openerp.sql_db.db_connect('postgres')
|
||||
cr = db.cursor()
|
||||
try:
|
||||
cr.autocommit(True) # avoid transaction block
|
||||
cr.execute("""CREATE DATABASE "%s" ENCODING 'unicode' TEMPLATE "%s" """ % (db_name, db_original_name))
|
||||
finally:
|
||||
cr.close()
|
||||
return True
|
||||
|
||||
def exp_get_progress(id):
|
||||
if self_actions[id]['thread'].isAlive():
|
||||
# return openerp.modules.init_progress[db_name]
|
||||
return min(self_actions[id].get('progress', 0),0.95), []
|
||||
else:
|
||||
clean = self_actions[id]['clean']
|
||||
if clean:
|
||||
users = self_actions[id]['users']
|
||||
for user in users:
|
||||
# Remove the None passwords as they can't be marshalled by XML-RPC.
|
||||
if user['password'] is None:
|
||||
user['password'] = ''
|
||||
self_actions.pop(id)
|
||||
return 1.0, users
|
||||
else:
|
||||
e = self_actions[id]['exception'] # TODO this seems wrong: actions[id]['traceback'] is set, but not 'exception'.
|
||||
self_actions.pop(id)
|
||||
raise Exception, e
|
||||
|
||||
def exp_drop(db_name):
    """Drop database ``db_name``; return False when it does not exist."""
    if not exp_db_exist(db_name):
        return False
    openerp.modules.registry.RegistryManager.delete(db_name)
    openerp.sql_db.close_db(db_name)

    cr = openerp.sql_db.db_connect('postgres').cursor()
    cr.autocommit(True) # avoid transaction block
    try:
        # Best effort: terminate all other connections that might prevent
        # dropping the database.
        try:
            # PostgreSQL 9.2 renamed pg_stat_activity.procpid to pid:
            # http://www.postgresql.org/docs/9.2/static/release-9-2.html#AEN110389
            pid_col = 'pid' if cr._cnx.server_version >= 90200 else 'procpid'

            cr.execute("""SELECT pg_terminate_backend(%(pid_col)s)
                      FROM pg_stat_activity
                      WHERE datname = %%s AND
                            %(pid_col)s != pg_backend_pid()""" % {'pid_col': pid_col},
                       (db_name,))
        except Exception:
            pass

        try:
            cr.execute('DROP DATABASE "%s"' % db_name)
        except Exception as e:
            _logger.error('DROP DB: %s failed:\n%s', db_name, e)
            raise Exception("Couldn't drop database %s: %s" % (db_name, e))
        else:
            _logger.info('DROP DB: %s', db_name)
    finally:
        cr.close()
    return True
|
||||
|
||||
@contextlib.contextmanager
def _set_pg_password_in_environment():
    """ On Win32, pg_dump (and pg_restore) require that
    :envvar:`PGPASSWORD` be set

    This context management method handles setting
    :envvar:`PGPASSWORD` iif win32 and the envvar is not already
    set, and removing it afterwards.
    """
    needs_password = os.name == 'nt' and not os.environ.get('PGPASSWORD')
    if not needs_password:
        yield
        return
    os.environ['PGPASSWORD'] = openerp.tools.config['db_password']
    try:
        yield
    finally:
        del os.environ['PGPASSWORD']
|
||||
|
||||
|
||||
def exp_dump(db_name):
    """Run pg_dump on ``db_name`` and return the dump, base64-encoded."""
    with _set_pg_password_in_environment():
        config = openerp.tools.config
        cmd = ['pg_dump', '--format=c', '--no-owner']
        if config['db_user']:
            cmd.append('--username=' + config['db_user'])
        if config['db_host']:
            cmd.append('--host=' + config['db_host'])
        if config['db_port']:
            cmd.append('--port=' + str(config['db_port']))
        cmd.append(db_name)

        stdin, stdout = openerp.tools.exec_pg_command_pipe(*tuple(cmd))
        stdin.close()
        data = stdout.read()
        res = stdout.close()

        # An empty dump or a non-zero exit status both mean failure.
        if not data or res:
            _logger.error(
                    'DUMP DB: %s failed! Please verify the configuration of the database password on the server. '
                    'It should be provided as a -w <PASSWD> command-line option, or as `db_password` in the '
                    'server configuration file.\n %s', db_name, data)
            raise Exception("Couldn't dump database")
        _logger.info('DUMP DB successful: %s', db_name)

        return base64.encodestring(data)
|
||||
|
||||
def exp_restore(db_name, data):
    """Restore a base64-encoded pg_dump into a fresh database ``db_name``.

    Raises Exception if the database already exists or pg_restore fails.
    """
    # Local import: only needed for the win32 temp-file path below.
    import tempfile

    with _set_pg_password_in_environment():
        if exp_db_exist(db_name):
            _logger.warning('RESTORE DB: %s already exists', db_name)
            raise Exception("Database already exists")

        _create_empty_database(db_name)

        cmd = ['pg_restore', '--no-owner']
        if openerp.tools.config['db_user']:
            cmd.append('--username=' + openerp.tools.config['db_user'])
        if openerp.tools.config['db_host']:
            cmd.append('--host=' + openerp.tools.config['db_host'])
        if openerp.tools.config['db_port']:
            cmd.append('--port=' + str(openerp.tools.config['db_port']))
        cmd.append('--dbname=' + db_name)
        args2 = tuple(cmd)

        buf = base64.decodestring(data)
        tmpfile = None
        if os.name == "nt":
            # On win32 pg_restore reads from a file argument instead of stdin.
            # Fixed: use tempfile.mkstemp instead of the deprecated, race-prone
            # os.tmpnam, and close the handle explicitly.
            fd, tmpfile = tempfile.mkstemp()
            try:
                os.write(fd, buf)
            finally:
                os.close(fd)
            args2 = args2 + (tmpfile,)
        try:
            stdin, stdout = openerp.tools.exec_pg_command_pipe(*args2)
            if os.name != "nt":
                # Feed the dump through the pipe; reuse the already-decoded
                # buffer instead of decoding the payload a second time.
                stdin.write(buf)
            stdin.close()
            res = stdout.close()
            if res:
                raise Exception("Couldn't restore database")
        finally:
            # Fixed: the temporary dump file was previously never removed.
            if tmpfile:
                try:
                    os.unlink(tmpfile)
                except OSError:
                    pass
        _logger.info('RESTORE DB: %s', db_name)

        return True
|
||||
|
||||
def exp_rename(old_name, new_name):
    """Rename database ``old_name`` to ``new_name`` and move its filestore."""
    openerp.modules.registry.RegistryManager.delete(old_name)
    openerp.sql_db.close_db(old_name)

    cr = openerp.sql_db.db_connect('postgres').cursor()
    cr.autocommit(True) # avoid transaction block
    try:
        try:
            cr.execute('ALTER DATABASE "%s" RENAME TO "%s"' % (old_name, new_name))
        except Exception as e:
            _logger.error('RENAME DB: %s -> %s failed:\n%s', old_name, new_name, e)
            raise Exception("Couldn't rename database %s to %s: %s" % (old_name, new_name, e))
        else:
            # Keep the on-disk attachment store in sync with the new db name.
            fs = os.path.join(openerp.tools.config['root_path'], 'filestore')
            old_fs = os.path.join(fs, old_name)
            if os.path.exists(old_fs):
                os.rename(old_fs, os.path.join(fs, new_name))
            _logger.info('RENAME DB: %s -> %s', old_name, new_name)
    finally:
        cr.close()
    return True
|
||||
|
||||
def exp_db_exist(db_name):
    ## Not True: in fact, check if connection to database is possible. The database may exists
    connection = openerp.sql_db.db_connect(db_name)
    return bool(connection)
|
||||
|
||||
def exp_list(document=False):
    """Return the sorted list of databases owned by the configured db user
    (all non-template databases when the user cannot be determined).

    Raises AccessDenied when database listing is disabled in the config.
    """
    if not openerp.tools.config['list_db'] and not document:
        raise openerp.exceptions.AccessDenied()
    chosen_template = openerp.tools.config['db_template']
    templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
    cr = openerp.sql_db.db_connect('postgres').cursor()
    try:
        try:
            db_user = openerp.tools.config["db_user"]
            if not db_user and os.name == 'posix':
                import pwd
                db_user = pwd.getpwuid(os.getuid())[0]
            if not db_user:
                cr.execute("select usename from pg_user where usesysid=(select datdba from pg_database where datname=%s)", (openerp.tools.config["db_name"],))
                res = cr.fetchone()
                db_user = res and str(res[0])
            if db_user:
                cr.execute("select datname from pg_database where datdba=(select usesysid from pg_user where usename=%s) and datname not in %s order by datname", (db_user, templates_list))
            else:
                cr.execute("select datname from pg_database where datname not in %s order by datname", (templates_list,))
            res = [str(name) for (name,) in cr.fetchall()]
        except Exception:
            # Catalogue may be unreadable for this role: degrade to empty.
            res = []
    finally:
        cr.close()
    res.sort()
    return res
|
||||
|
||||
def exp_change_admin_password(new_password):
    """Persist a new master (admin) password in the server configuration."""
    config = openerp.tools.config
    config['admin_passwd'] = new_password
    config.save()
    return True
|
||||
|
||||
def exp_list_lang():
    """Return the installable languages known to the server."""
    return openerp.tools.scan_languages()
|
||||
|
||||
def exp_server_version():
    """Return the version of the server.

    Used by the client to verify the compatibility with its own version.
    """
    return release.version
|
||||
|
||||
def exp_migrate_databases(databases):
    """Force an upgrade of module `base` (and hence everything) on each db."""
    for db in databases:
        _logger.info('migrate database %s', db)
        # Re-set per iteration: restart_pool may consume the update flags
        # while loading — TODO confirm.
        openerp.tools.config['update']['base'] = True
        openerp.pooler.restart_pool(db, force_demo=False, update_module=True)
    return True
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -0,0 +1,185 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
from functools import wraps
|
||||
import logging
|
||||
from psycopg2 import IntegrityError, errorcodes
|
||||
import threading
|
||||
|
||||
import openerp
|
||||
from openerp.tools.translate import translate
|
||||
from openerp.osv.orm import except_orm
|
||||
|
||||
import security
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
def dispatch(method, params):
    """RPC entry point of the `object` service.

    params starts with (db, uid, password); the remainder is forwarded to
    execute / execute_kw / exec_workflow after the password check.
    """
    db, uid, passwd = params[0], params[1], params[2]
    threading.current_thread().uid = uid
    params = params[3:]
    if method == 'obj_list':
        raise NameError("obj_list has been discontinued via RPC as of 6.0, please query ir.model directly!")
    if method not in ['execute', 'execute_kw', 'exec_workflow']:
        raise NameError("Method not available %s" % method)
    security.check(db, uid, passwd)
    openerp.modules.registry.RegistryManager.check_registry_signaling(db)
    res = globals()[method](db, uid, *params)
    openerp.modules.registry.RegistryManager.signal_caches_change(db)
    return res
|
||||
|
||||
def check(f):
    """Decorator for service entry points ``f(dbname, *args, **kwargs)``.

    Refuses to run while the registry of ``dbname`` is still loading and
    normalises psycopg2 IntegrityErrors (and known SQL-constraint messages)
    into translated ``except_orm`` exceptions.
    """
    @wraps(f)
    def wrapper(dbname, *args, **kwargs):
        """ Wraps around OSV functions and normalises a few exceptions
        """

        def tr(src, ttype):
            # We try to do the same as the _(), but without the frame
            # inspection, since we aready are wrapping an osv function
            # trans_obj = self.get('ir.translation') cannot work yet :(
            ctx = {}
            if not kwargs:
                if args and isinstance(args[-1], dict):
                    ctx = args[-1]
            elif isinstance(kwargs, dict):
                ctx = kwargs.get('context', {})

            uid = 1
            if args and isinstance(args[0], (long, int)):
                uid = args[0]

            lang = ctx and ctx.get('lang')
            if not (lang or hasattr(src, '__call__')):
                return src

            # We open a *new* cursor here, one reason is that failed SQL
            # queries (as in IntegrityError) will invalidate the current one.
            cr = False

            if hasattr(src, '__call__'):
                # callable. We need to find the right parameters to call
                # the orm._sql_message(self, cr, uid, ids, context) function,
                # or we skip..
                # our signature is f(osv_pool, dbname [,uid, obj, method, args])
                try:
                    if args and len(args) > 1:
                        # Fixed: previous code read `self.get(args[1])` but no
                        # `self` exists in this scope (the in-code TODO noted
                        # it), so the lookup always raised and was swallowed.
                        # Resolve the model through the registry instead.
                        obj = openerp.pooler.get_pool(dbname).get(args[1])
                        if len(args) > 3 and isinstance(args[3], (long, int, list)):
                            ids = args[3]
                        else:
                            ids = []
                        cr = openerp.sql_db.db_connect(dbname).cursor()
                        return src(obj, cr, uid, ids, context=(ctx or {}))
                except Exception:
                    pass
                finally:
                    if cr: cr.close()

                return False # so that the original SQL error will
                             # be returned, it is the best we have.

            try:
                cr = openerp.sql_db.db_connect(dbname).cursor()
                res = translate(cr, name=False, source_type=ttype,
                                lang=lang, source=src)
                if res:
                    return res
                else:
                    return src
            finally:
                if cr: cr.close()

        def _(src):
            return tr(src, 'code')

        try:
            if openerp.pooler.get_pool(dbname)._init:
                raise openerp.exceptions.Warning('Currently, this database is not fully loaded and can not be used.')
            return f(dbname, *args, **kwargs)
        except except_orm:
            raise
        except IntegrityError as inst:
            osv_pool = openerp.pooler.get_pool(dbname)
            # First try the model-declared _sql_error messages.
            for key in osv_pool._sql_error.keys():
                if key in inst[0]:
                    raise openerp.osv.orm.except_orm(_('Constraint Error'), tr(osv_pool._sql_error[key], 'sql_constraint') or inst[0])
            if inst.pgcode in (errorcodes.NOT_NULL_VIOLATION, errorcodes.FOREIGN_KEY_VIOLATION, errorcodes.RESTRICT_VIOLATION):
                msg = _('The operation cannot be completed, probably due to the following:\n- deletion: you may be trying to delete a record while other records still reference it\n- creation/update: a mandatory field is not correctly set')
                _logger.debug("IntegrityError", exc_info=True)
                try:
                    # Best effort: extract the offending table name from the
                    # (possibly localised) pgerror text to name the model.
                    errortxt = inst.pgerror.replace('«','"').replace('»','"')
                    if '"public".' in errortxt:
                        context = errortxt.split('"public".')[1]
                        model_name = table = context.split('"')[1]
                    else:
                        last_quote_end = errortxt.rfind('"')
                        last_quote_begin = errortxt.rfind('"', 0, last_quote_end)
                        model_name = table = errortxt[last_quote_begin+1:last_quote_end].strip()
                    model = table.replace("_",".")
                    model_obj = osv_pool.get(model)
                    if model_obj:
                        model_name = model_obj._description or model_obj._name
                    msg += _('\n\n[object with reference: %s - %s]') % (model_name, model)
                except Exception:
                    pass
                raise openerp.osv.orm.except_orm(_('Integrity Error'), msg)
            else:
                raise openerp.osv.orm.except_orm(_('Integrity Error'), inst[0])
        except Exception:
            _logger.exception("Uncaught exception")
            raise

    return wrapper
|
||||
|
||||
def execute_cr(cr, uid, obj, method, *args, **kw):
    """Invoke ``method`` of model ``obj`` on an already-open cursor."""
    model = openerp.pooler.get_pool(cr.dbname).get(obj)
    if not model:
        raise except_orm('Object Error', 'Object %s doesn\'t exist' % str(obj))
    return getattr(model, method)(cr, uid, *args, **kw)
|
||||
|
||||
def execute_kw(db, uid, obj, method, args, kw=None):
    """Keyword-argument flavour of :func:`execute`."""
    return execute(db, uid, obj, method, *args, **(kw or {}))
|
||||
|
||||
@check
def execute(db, uid, obj, method, *args, **kw):
    """Open a cursor on ``db`` and run ``method`` on model ``obj``.

    Commits on success, rolls back on any exception; rejects private
    (underscore-prefixed) methods.
    """
    threading.currentThread().dbname = db
    cr = openerp.pooler.get_db(db).cursor()
    try:
        if method.startswith('_'):
            raise except_orm('Access Denied', 'Private methods (such as %s) cannot be called remotely.' % (method,))
        res = execute_cr(cr, uid, obj, method, *args, **kw)
        if res is None:
            _logger.warning('The method %s of the object %s can not return `None` !', method, obj)
        cr.commit()
    except Exception:
        cr.rollback()
        raise
    finally:
        cr.close()
    return res
|
||||
|
||||
def exec_workflow_cr(cr, uid, obj, signal, *args):
    """Send workflow ``signal`` to record ``args[0]`` of model ``obj``."""
    model = openerp.pooler.get_pool(cr.dbname).get(obj)
    if not model:
        raise except_orm('Object Error', 'Object %s doesn\'t exist' % str(obj))
    res_id = args[0]
    return model._workflow_signal(cr, uid, [res_id], signal)[res_id]
|
||||
|
||||
@check
def exec_workflow(db, uid, obj, signal, *args):
    """Like :func:`exec_workflow_cr` but manages its own cursor:
    commits on success, rolls back on failure."""
    cr = openerp.pooler.get_db(db).cursor()
    try:
        res = exec_workflow_cr(cr, uid, obj, signal, *args)
        cr.commit()
    except Exception:
        cr.rollback()
        raise
    finally:
        cr.close()
    return res
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -1,244 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
#
|
||||
# Copyright P. Christeas <p_christ@hol.gr> 2008,2009
|
||||
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
##############################################################################
|
||||
|
||||
|
||||
""" This file contains instance of the net-rpc server
|
||||
"""
|
||||
import logging
|
||||
import select
|
||||
import socket
|
||||
import sys
|
||||
import threading
|
||||
import traceback
|
||||
import openerp
|
||||
import openerp.service.netrpc_socket
|
||||
import openerp.netsvc as netsvc
|
||||
import openerp.tools as tools
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
class Server:
    """ Generic interface for all servers with an event loop etc.
        Override this to impement http, net-rpc etc. servers.

        Servers here must have threaded behaviour. start() must not block,
        there is no run().
    """
    __is_started = False
    __servers = []
    __starter_threads = []

    # we don't want blocking server calls (think select()) to
    # wait forever and possibly prevent exiting the process,
    # but instead we want a form of polling/busy_wait pattern, where
    # _server_timeout should be used as the default timeout for
    # all I/O blocking operations
    _busywait_timeout = 0.5

    def __init__(self):
        Server.__servers.append(self)
        if Server.__is_started:
            # startAll() already ran and will not run again: schedule a
            # delayed start for this late-registered server and remember the
            # timer so quitAll() can cancel it if we shut down first.
            timer = threading.Timer(1.0, self._late_start)
            timer.name = 'Late start timer for %s' % str(self.__class__)
            Server.__starter_threads.append(timer)
            timer.start()

    def start(self):
        _logger.debug("called stub Server.start")

    def _late_start(self):
        self.start()
        # NOTE(review): removing from the list while iterating it may skip
        # entries — preserved as-is from the original implementation.
        for thr in Server.__starter_threads:
            if thr.finished.is_set():
                Server.__starter_threads.remove(thr)

    def stop(self):
        _logger.debug("called stub Server.stop")

    def stats(self):
        """ This function should return statistics about the server """
        return "%s: No statistics" % str(self.__class__)

    @classmethod
    def startAll(cls):
        if cls.__is_started:
            return
        _logger.info("Starting %d services" % len(cls.__servers))
        for srv in cls.__servers:
            srv.start()
        cls.__is_started = True

    @classmethod
    def quitAll(cls):
        if not cls.__is_started:
            return
        _logger.info("Stopping %d services" % len(cls.__servers))
        # Cancel pending late-start timers before stopping the servers.
        for thr in cls.__starter_threads:
            if not thr.finished.is_set():
                thr.cancel()
            cls.__starter_threads.remove(thr)

        for srv in cls.__servers:
            srv.stop()
        cls.__is_started = False

    @classmethod
    def allStats(cls):
        res = ["Servers %s" % ('stopped', 'started')[cls.__is_started]]
        res.extend(srv.stats() for srv in cls.__servers)
        return '\n'.join(res)

    def _close_socket(self):
        netsvc.close_socket(self.socket)
|
||||
|
||||
class TinySocketClientThread(threading.Thread):
    """Serves NET-RPC requests from one client socket until the peer goes
    away, a timeout fires, or the server is stopped."""

    def __init__(self, sock, threads):
        spn = sock and sock.getpeername()
        spn = 'netrpc-client-%s:%s' % spn[0:2]
        threading.Thread.__init__(self, name=spn)
        self.sock = sock
        # Only at the server side, use a big timeout: close the
        # clients connection when they're idle for 20min.
        self.sock.settimeout(1200)
        self.threads = threads

    def run(self):
        self.running = True
        try:
            # Fixed: the wrapper module lives at openerp.service.netrpc_socket
            # (see the import at the top of this file); `openerp.server` does
            # not exist, so every connection previously died here with an
            # AttributeError that was silently swallowed below.
            ts = openerp.service.netrpc_socket.mysocket(self.sock)
        except Exception:
            self.threads.remove(self)
            self.running = False
            return False

        while self.running:
            try:
                msg = ts.myreceive()
                result = netsvc.dispatch_rpc(msg[0], msg[1], msg[2:])
                ts.mysend(result)
            except socket.timeout:
                # terminate this channel because other endpoint is gone
                break
            except Exception as e:
                try:
                    valid_exception = Exception(netrpc_handle_exception_legacy(e))
                    valid_traceback = getattr(e, 'traceback', sys.exc_info())
                    formatted_traceback = "".join(traceback.format_exception(*valid_traceback))
                    _logger.debug("netrpc: communication-level exception", exc_info=True)
                    ts.mysend(valid_exception, exception=True, traceback=formatted_traceback)
                    break
                except Exception:
                    # terminate this channel if we can't properly send back the error
                    _logger.exception("netrpc: cannot deliver exception message to client")
                    break

        netsvc.close_socket(self.sock)
        self.sock = None
        self.threads.remove(self)
        self.running = False
        return True

    def stop(self):
        self.running = False
|
||||
|
||||
def netrpc_handle_exception_legacy(e):
    """Map known exception types onto the legacy NET-RPC error strings."""
    if isinstance(e, openerp.osv.osv.except_osv):
        return 'warning -- ' + e.name + '\n\n' + e.value
    if isinstance(e, openerp.exceptions.Warning):
        return 'warning -- Warning\n\n' + str(e)
    if isinstance(e, openerp.exceptions.AccessError):
        return 'warning -- AccessError\n\n' + str(e)
    if isinstance(e, openerp.exceptions.AccessDenied):
        return 'AccessDenied ' + str(e)
    # Anything else: best-effort unicode rendering of the exception.
    return openerp.tools.exception_to_unicode(e)
|
||||
|
||||
class TinySocketServerThread(threading.Thread, Server):
    """Accept loop of the NET-RPC protocol: spawns one
    TinySocketClientThread per incoming connection."""

    def __init__(self, interface, port, secure=False):
        threading.Thread.__init__(self, name="NetRPCDaemon-%d" % port)
        Server.__init__(self)
        self.__port = port
        self.__interface = interface
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind((self.__interface, self.__port))
        self.socket.listen(5)
        self.threads = []
        _logger.info("starting NET-RPC service on %s:%s", interface or '0.0.0.0', port)

    def run(self):
        try:
            self.running = True
            while self.running:
                # Poll with a short timeout so stop() takes effect promptly.
                ready = select.select([self.socket], [], [], self._busywait_timeout)[0]
                if not ready:
                    continue
                clientsocket, _address = self.socket.accept()
                worker = TinySocketClientThread(clientsocket, self.threads)
                clientsocket = None
                self.threads.append(worker)
                worker.start()
                count = len(self.threads)
                if count > 10 and count % 10 == 0:
                    # Not many threads should be serving at the same time, so log
                    # their abuse.
                    _logger.debug("Netrpc: %d threads", len(self.threads))
            self.socket.close()
        except Exception as e:
            _logger.warning("Netrpc: closing because of exception %s", e)
            self.socket.close()
            return False

    def stop(self):
        self.running = False
        for worker in self.threads:
            worker.stop()
        self._close_socket()

    def stats(self):
        res = "Net-RPC: " + ((self.running and "running") or "stopped")
        for index, worker in enumerate(self.threads, start=1):
            res += "\nNet-RPC #%d: %s " % (index, worker.name)
            if worker.isAlive():
                res += "running"
            else:
                res += "finished"
            if worker.sock:
                res += ", socket"
        return res
|
||||
|
||||
# Singleton NET-RPC daemon instance, created on demand by start_service().
netrpcd = None

def start_service():
    """Start the NET-RPC daemon when enabled in the configuration."""
    global netrpcd
    if tools.config.get('netrpc', False):
        interface = tools.config.get('netrpc_interface', '')
        port = int(tools.config.get('netrpc_port', 8070))
        netrpcd = TinySocketServerThread(interface, port)
|
||||
|
||||
def stop_service():
    """Stop every registered server (delegates to Server.quitAll)."""
    Server.quitAll()
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -1,99 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
##############################################################################
|
||||
#
|
||||
# OpenERP, Open Source Management Solution
|
||||
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
##############################################################################
|
||||
|
||||
import socket
|
||||
import cPickle
|
||||
import cStringIO
|
||||
|
||||
import openerp.netsvc as netsvc
|
||||
|
||||
# Pickle protocol version 2 is optimized compared to default (version 0)
|
||||
PICKLE_PROTOCOL = 2
|
||||
|
||||
class Myexception(Exception):
    """
    custom exception object store
    * faultcode
    * faulestring
    * args
    """

    def __init__(self, faultCode, faultString):
        # Delegating to Exception sets .args to (faultCode, faultString),
        # exactly as the previous manual assignment did.
        super(Myexception, self).__init__(faultCode, faultString)
        self.faultCode = faultCode
        self.faultString = faultString
|
||||
|
||||
class mysocket:
    """Length-prefixed pickle transport used by the legacy NET-RPC protocol.

    Wire format: 8 ASCII digits (payload length), one digit for the
    exception flag, then the pickled [payload, traceback] pair.
    """

    def __init__(self, sock=None):
        self.sock = sock if sock is not None else socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # self.sock.settimeout(120)
        # prepare this socket for long operations: it may block for infinite
        # time, but should exit as soon as the net is down
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

    def connect(self, host, port=False):
        if not port:
            # Accept "proto://host:port" URLs when no explicit port is given.
            protocol, buf = host.split('//')
            host, port = buf.split(':')
        self.sock.connect((host, int(port)))

    def disconnect(self):
        netsvc.close_socket(self.sock)

    def mysend(self, msg, exception=False, traceback=None):
        payload = cPickle.dumps([msg, traceback], PICKLE_PROTOCOL)
        self.sock.sendall('%8d%d%s' % (len(payload), bool(exception), payload))

    def _read_exact(self, count):
        # Keep recv()-ing until `count` bytes arrived; an empty chunk means
        # the peer closed the connection, reported as socket.timeout as the
        # original code did.
        data = ''
        while len(data) < count:
            chunk = self.sock.recv(count - len(data))
            if not chunk:
                raise socket.timeout
            data += chunk
        return data

    def myreceive(self):
        header = self._read_exact(9)
        size = int(header[:8])
        exception = header[8] if header[8] != "0" else False
        payload = self._read_exact(size)
        unpickler = cPickle.Unpickler(cStringIO.StringIO(payload))
        # Forbid resolving any global while unpickling: guards against
        # pickle-driven code execution from the peer.
        unpickler.find_global = None
        res = unpickler.load()

        if isinstance(res[0], Exception):
            if exception:
                raise Myexception(str(res[0]), str(res[1]))
            raise res[0]
        else:
            return res[0]
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -0,0 +1,143 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
import logging
|
||||
import threading
|
||||
|
||||
import openerp.netsvc
|
||||
import openerp.pooler
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
# TODO: set a maximum report number per user to avoid DOS attacks
|
||||
#
|
||||
# Report state:
|
||||
# False -> True
|
||||
|
||||
# In-flight report jobs keyed by an increasing integer id; the counter is
# guarded by self_id_protect (see exp_report / _check_report).
self_reports = {}
self_id = 0
self_id_protect = threading.Semaphore()

def dispatch(method, params):
    """RPC entry point of the `report` service.

    params starts with (db, uid, password); the remainder is forwarded to
    exp_report / exp_report_get / exp_render_report after the password check.
    """
    (db, uid, passwd ) = params[0:3]
    threading.current_thread().uid = uid
    params = params[3:]
    if method not in ['report', 'report_get', 'render_report']:
        raise KeyError("Method not supported %s" % method)
    # Fixed: `security` was never imported in this module, so every dispatch
    # failed with a NameError before the password was even checked.
    import openerp.service.security as security
    security.check(db, uid, passwd)
    openerp.modules.registry.RegistryManager.check_registry_signaling(db)
    fn = globals()['exp_' + method]
    res = fn(db, uid, *params)
    openerp.modules.registry.RegistryManager.signal_caches_change(db)
    return res
|
||||
|
||||
def exp_render_report(db, uid, object, ids, datas=None, context=None):
    """Render report named ``object`` synchronously and return the payload
    dict built by :func:`_check_report` (result/format/state)."""
    # Fixed: `sys` and `tools` were referenced below but never imported in
    # this module, so the error paths raised NameError.
    import sys
    from openerp import tools

    if not datas:
        datas={}
    if not context:
        context={}

    # Reserve a job id under the module-level semaphore.
    self_id_protect.acquire()
    global self_id
    self_id += 1
    id = self_id
    self_id_protect.release()

    self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}

    cr = openerp.pooler.get_db(db).cursor()
    try:
        obj = openerp.netsvc.LocalService('report.'+object)
        (result, format) = obj.create(cr, uid, ids, datas, context)
        if not result:
            tb = sys.exc_info()
            self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
        self_reports[id]['result'] = result
        self_reports[id]['format'] = format
        self_reports[id]['state'] = True
    except Exception as exception:
        _logger.exception('Exception: %s\n', exception)
        if hasattr(exception, 'name') and hasattr(exception, 'value'):
            self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
        else:
            tb = sys.exc_info()
            self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
        self_reports[id]['state'] = True
    finally:
        # Fixed: commit/close now run even if the error handler itself fails,
        # so the cursor can no longer leak.
        cr.commit()
        cr.close()

    return _check_report(id)
|
||||
|
||||
def exp_report(db, uid, object, ids, datas=None, context=None):
    """Start rendering report ``object`` in a background thread; return a
    job id to poll with :func:`exp_report_get`."""
    # Fixed: `thread`, `sys` and `tools` were referenced below but never
    # imported in this module (NameError at runtime).
    import sys
    import thread
    from openerp import tools

    if not datas:
        datas={}
    if not context:
        context={}

    # Reserve a job id under the module-level semaphore.
    global self_id
    with self_id_protect:
        self_id += 1
        id = self_id

    self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}

    def go(id, uid, ids, datas, context):
        # Worker: stores the outcome (or the failure) in self_reports[id].
        cr = openerp.pooler.get_db(db).cursor()
        try:
            obj = openerp.netsvc.LocalService('report.'+object)
            (result, format) = obj.create(cr, uid, ids, datas, context)
            if not result:
                tb = sys.exc_info()
                self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
            self_reports[id]['result'] = result
            self_reports[id]['format'] = format
            self_reports[id]['state'] = True
        except Exception as exception:
            _logger.exception('Exception: %s\n', exception)
            if hasattr(exception, 'name') and hasattr(exception, 'value'):
                self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
            else:
                tb = sys.exc_info()
                self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
            self_reports[id]['state'] = True
        finally:
            cr.commit()
            cr.close()
        return True

    thread.start_new_thread(go, (id, uid, ids, datas, context))
    return id
|
||||
|
||||
def _check_report(report_id):
    """Translate a report job into its RPC payload.

    Re-raises a stored failure; for a finished job returns
    {'state': True, 'result': <base64>, 'format': ..., ['code': 'zlib']}
    and forgets the job; for a pending job returns {'state': False}.
    """
    # Fixed: `base64` and `tools` were used below but never imported in this
    # module (NameError at runtime).
    import base64
    from openerp import tools

    result = self_reports[report_id]
    exc = result['exception']
    if exc:
        raise openerp.osv.orm.except_orm(exc.message, exc.traceback)
    res = {'state': result['state']}
    if res['state']:
        if tools.config['reportgz']:
            import zlib
            res2 = zlib.compress(result['result'])
            res['code'] = 'zlib'
        else:
            #CHECKME: why is this needed???
            if isinstance(result['result'], unicode):
                res2 = result['result'].encode('latin1', 'replace')
            else:
                res2 = result['result']
        if res2:
            res['result'] = base64.encodestring(res2)
        res['format'] = result['format']
        del self_reports[report_id]
    return res
|
||||
|
||||
def exp_report_get(db, uid, report_id):
    """Fetch the outcome of a previously submitted report job.

    Only the user who submitted the job may read it.
    """
    report = self_reports.get(report_id)
    if report is None:
        raise Exception('ReportNotFound')
    if report['uid'] != uid:
        raise Exception('AccessDenied')
    return _check_report(report_id)
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -1,760 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
##############################################################################
|
||||
#
|
||||
# OpenERP, Open Source Management Solution
|
||||
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
##############################################################################
|
||||
from __future__ import with_statement
|
||||
import contextlib
|
||||
import base64
|
||||
import locale
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
import security
|
||||
import sys
|
||||
import thread
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from cStringIO import StringIO
|
||||
from openerp.tools.translate import _
|
||||
import openerp.netsvc as netsvc
|
||||
import openerp.pooler as pooler
|
||||
import openerp.release as release
|
||||
import openerp.sql_db as sql_db
|
||||
import openerp.tools as tools
|
||||
import openerp.modules
|
||||
import openerp.exceptions
|
||||
from openerp.service import http_server
|
||||
from openerp import SUPERUSER_ID
|
||||
|
||||
|
||||
""" This python module defines the RPC methods available to remote clients.
|
||||
|
||||
Each 'Export Service' is a group of 'methods', which in turn are RPC
|
||||
procedures to be called. Each method has its own arguments footprint.
|
||||
"""
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
RPC_VERSION_1 = {
|
||||
'server_version': release.version,
|
||||
'server_version_info': release.version_info,
|
||||
'server_serie': release.serie,
|
||||
'protocol_version': 1,
|
||||
}
|
||||
|
||||
# This should be moved to openerp.modules.db, along side initialize().
|
||||
def _initialize_db(serv, id, db_name, demo, lang, user_password):
|
||||
cr = None
|
||||
try:
|
||||
serv.actions[id]['progress'] = 0
|
||||
cr = sql_db.db_connect(db_name).cursor()
|
||||
openerp.modules.db.initialize(cr) # TODO this should be removed as it is done by pooler.restart_pool.
|
||||
tools.config['lang'] = lang
|
||||
cr.commit()
|
||||
cr.close()
|
||||
|
||||
pool = pooler.restart_pool(db_name, demo, serv.actions[id],
|
||||
update_module=True)[1]
|
||||
|
||||
cr = sql_db.db_connect(db_name).cursor()
|
||||
|
||||
if lang:
|
||||
modobj = pool.get('ir.module.module')
|
||||
mids = modobj.search(cr, SUPERUSER_ID, [('state', '=', 'installed')])
|
||||
modobj.update_translations(cr, SUPERUSER_ID, mids, lang)
|
||||
|
||||
# update admin's password and lang
|
||||
values = {'password': user_password, 'lang': lang}
|
||||
pool.get('res.users').write(cr, SUPERUSER_ID, [SUPERUSER_ID], values)
|
||||
|
||||
cr.execute('SELECT login, password FROM res_users ORDER BY login')
|
||||
serv.actions[id].update(users=cr.dictfetchall(), clean=True)
|
||||
cr.commit()
|
||||
cr.close()
|
||||
except Exception, e:
|
||||
serv.actions[id].update(clean=False, exception=e)
|
||||
_logger.exception('CREATE DATABASE failed:')
|
||||
serv.actions[id]['traceback'] = traceback.format_exc()
|
||||
if cr:
|
||||
cr.close()
|
||||
|
||||
class db(netsvc.ExportService):
|
||||
def __init__(self, name="db"):
|
||||
netsvc.ExportService.__init__(self, name)
|
||||
self.actions = {}
|
||||
self.id = 0
|
||||
self.id_protect = threading.Semaphore()
|
||||
|
||||
def dispatch(self, method, params):
|
||||
if method in [ 'create', 'get_progress', 'drop', 'dump',
|
||||
'restore', 'rename',
|
||||
'change_admin_password', 'migrate_databases',
|
||||
'create_database', 'duplicate_database' ]:
|
||||
passwd = params[0]
|
||||
params = params[1:]
|
||||
security.check_super(passwd)
|
||||
elif method in [ 'db_exist', 'list', 'list_lang', 'server_version' ]:
|
||||
# params = params
|
||||
# No security check for these methods
|
||||
pass
|
||||
else:
|
||||
raise KeyError("Method not found: %s" % method)
|
||||
fn = getattr(self, 'exp_'+method)
|
||||
return fn(*params)
|
||||
|
||||
def _create_empty_database(self, name):
|
||||
db = sql_db.db_connect('postgres')
|
||||
cr = db.cursor()
|
||||
chosen_template = tools.config['db_template']
|
||||
cr.execute("""SELECT datname
|
||||
FROM pg_database
|
||||
WHERE datname = %s """,
|
||||
(name,))
|
||||
if cr.fetchall():
|
||||
raise openerp.exceptions.Warning(" %s database already exists!" % name )
|
||||
try:
|
||||
cr.autocommit(True) # avoid transaction block
|
||||
cr.execute("""CREATE DATABASE "%s" ENCODING 'unicode' TEMPLATE "%s" """ % (name, chosen_template))
|
||||
finally:
|
||||
cr.close()
|
||||
|
||||
def exp_create(self, db_name, demo, lang, user_password='admin'):
|
||||
self.id_protect.acquire()
|
||||
self.id += 1
|
||||
id = self.id
|
||||
self.id_protect.release()
|
||||
|
||||
self.actions[id] = {'clean': False}
|
||||
|
||||
self._create_empty_database(db_name)
|
||||
|
||||
_logger.info('CREATE DATABASE %s', db_name.lower())
|
||||
create_thread = threading.Thread(target=_initialize_db,
|
||||
args=(self, id, db_name, demo, lang, user_password))
|
||||
create_thread.start()
|
||||
self.actions[id]['thread'] = create_thread
|
||||
return id
|
||||
|
||||
def exp_create_database(self, db_name, demo, lang, user_password='admin'):
|
||||
""" Similar to exp_create but blocking."""
|
||||
self.id_protect.acquire()
|
||||
self.id += 1
|
||||
id = self.id
|
||||
self.id_protect.release()
|
||||
|
||||
self.actions[id] = {'clean': False}
|
||||
|
||||
_logger.info('Create database `%s`.', db_name)
|
||||
self._create_empty_database(db_name)
|
||||
_initialize_db(self, id, db_name, demo, lang, user_password)
|
||||
return True
|
||||
|
||||
def exp_duplicate_database(self, db_original_name, db_name):
|
||||
_logger.info('Duplicate database `%s` to `%s`.', db_original_name, db_name)
|
||||
sql_db.close_db(db_original_name)
|
||||
db = sql_db.db_connect('postgres')
|
||||
cr = db.cursor()
|
||||
try:
|
||||
cr.autocommit(True) # avoid transaction block
|
||||
cr.execute("""CREATE DATABASE "%s" ENCODING 'unicode' TEMPLATE "%s" """ % (db_name, db_original_name))
|
||||
finally:
|
||||
cr.close()
|
||||
return True
|
||||
|
||||
def exp_get_progress(self, id):
|
||||
if self.actions[id]['thread'].isAlive():
|
||||
# return openerp.modules.init_progress[db_name]
|
||||
return min(self.actions[id].get('progress', 0),0.95), []
|
||||
else:
|
||||
clean = self.actions[id]['clean']
|
||||
if clean:
|
||||
users = self.actions[id]['users']
|
||||
self.actions.pop(id)
|
||||
return 1.0, users
|
||||
else:
|
||||
e = self.actions[id]['exception'] # TODO this seems wrong: actions[id]['traceback'] is set, but not 'exception'.
|
||||
self.actions.pop(id)
|
||||
raise Exception, e
|
||||
|
||||
def exp_drop(self, db_name):
|
||||
if not self.exp_db_exist(db_name):
|
||||
return False
|
||||
openerp.modules.registry.RegistryManager.delete(db_name)
|
||||
sql_db.close_db(db_name)
|
||||
|
||||
db = sql_db.db_connect('postgres')
|
||||
cr = db.cursor()
|
||||
cr.autocommit(True) # avoid transaction block
|
||||
try:
|
||||
# Try to terminate all other connections that might prevent
|
||||
# dropping the database
|
||||
try:
|
||||
|
||||
# PostgreSQL 9.2 renamed pg_stat_activity.procpid to pid:
|
||||
# http://www.postgresql.org/docs/9.2/static/release-9-2.html#AEN110389
|
||||
pid_col = 'pid' if cr._cnx.server_version >= 90200 else 'procpid'
|
||||
|
||||
cr.execute("""SELECT pg_terminate_backend(%(pid_col)s)
|
||||
FROM pg_stat_activity
|
||||
WHERE datname = %%s AND
|
||||
%(pid_col)s != pg_backend_pid()""" % {'pid_col': pid_col},
|
||||
(db_name,))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
cr.execute('DROP DATABASE "%s"' % db_name)
|
||||
except Exception, e:
|
||||
_logger.error('DROP DB: %s failed:\n%s', db_name, e)
|
||||
raise Exception("Couldn't drop database %s: %s" % (db_name, e))
|
||||
else:
|
||||
_logger.info('DROP DB: %s', db_name)
|
||||
finally:
|
||||
cr.close()
|
||||
return True
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _set_pg_password_in_environment(self):
|
||||
""" On Win32, pg_dump (and pg_restore) require that
|
||||
:envvar:`PGPASSWORD` be set
|
||||
|
||||
This context management method handles setting
|
||||
:envvar:`PGPASSWORD` iif win32 and the envvar is not already
|
||||
set, and removing it afterwards.
|
||||
"""
|
||||
if os.name != 'nt' or os.environ.get('PGPASSWORD'):
|
||||
yield
|
||||
else:
|
||||
os.environ['PGPASSWORD'] = tools.config['db_password']
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
del os.environ['PGPASSWORD']
|
||||
|
||||
|
||||
def exp_dump(self, db_name):
|
||||
logger = logging.getLogger('openerp.service.web_services.db.dump')
|
||||
with self._set_pg_password_in_environment():
|
||||
cmd = ['pg_dump', '--format=c', '--no-owner']
|
||||
if tools.config['db_user']:
|
||||
cmd.append('--username=' + tools.config['db_user'])
|
||||
if tools.config['db_host']:
|
||||
cmd.append('--host=' + tools.config['db_host'])
|
||||
if tools.config['db_port']:
|
||||
cmd.append('--port=' + str(tools.config['db_port']))
|
||||
cmd.append(db_name)
|
||||
|
||||
stdin, stdout = tools.exec_pg_command_pipe(*tuple(cmd))
|
||||
stdin.close()
|
||||
data = stdout.read()
|
||||
res = stdout.close()
|
||||
|
||||
if not data or res:
|
||||
logger.error(
|
||||
'DUMP DB: %s failed! Please verify the configuration of the database password on the server. '
|
||||
'It should be provided as a -w <PASSWD> command-line option, or as `db_password` in the '
|
||||
'server configuration file.\n %s', db_name, data)
|
||||
raise Exception, "Couldn't dump database"
|
||||
logger.info('DUMP DB successful: %s', db_name)
|
||||
|
||||
return base64.encodestring(data)
|
||||
|
||||
def exp_restore(self, db_name, data):
|
||||
logger = logging.getLogger('openerp.service.web_services.db.restore')
|
||||
with self._set_pg_password_in_environment():
|
||||
if self.exp_db_exist(db_name):
|
||||
logger.warning('RESTORE DB: %s already exists', db_name)
|
||||
raise Exception, "Database already exists"
|
||||
|
||||
self._create_empty_database(db_name)
|
||||
|
||||
cmd = ['pg_restore', '--no-owner']
|
||||
if tools.config['db_user']:
|
||||
cmd.append('--username=' + tools.config['db_user'])
|
||||
if tools.config['db_host']:
|
||||
cmd.append('--host=' + tools.config['db_host'])
|
||||
if tools.config['db_port']:
|
||||
cmd.append('--port=' + str(tools.config['db_port']))
|
||||
cmd.append('--dbname=' + db_name)
|
||||
args2 = tuple(cmd)
|
||||
|
||||
buf=base64.decodestring(data)
|
||||
if os.name == "nt":
|
||||
tmpfile = (os.environ['TMP'] or 'C:\\') + os.tmpnam()
|
||||
file(tmpfile, 'wb').write(buf)
|
||||
args2=list(args2)
|
||||
args2.append(tmpfile)
|
||||
args2=tuple(args2)
|
||||
stdin, stdout = tools.exec_pg_command_pipe(*args2)
|
||||
if not os.name == "nt":
|
||||
stdin.write(base64.decodestring(data))
|
||||
stdin.close()
|
||||
res = stdout.close()
|
||||
if res:
|
||||
raise Exception, "Couldn't restore database"
|
||||
logger.info('RESTORE DB: %s', db_name)
|
||||
|
||||
return True
|
||||
|
||||
def exp_rename(self, old_name, new_name):
|
||||
openerp.modules.registry.RegistryManager.delete(old_name)
|
||||
sql_db.close_db(old_name)
|
||||
|
||||
db = sql_db.db_connect('postgres')
|
||||
cr = db.cursor()
|
||||
cr.autocommit(True) # avoid transaction block
|
||||
try:
|
||||
try:
|
||||
cr.execute('ALTER DATABASE "%s" RENAME TO "%s"' % (old_name, new_name))
|
||||
except Exception, e:
|
||||
_logger.error('RENAME DB: %s -> %s failed:\n%s', old_name, new_name, e)
|
||||
raise Exception("Couldn't rename database %s to %s: %s" % (old_name, new_name, e))
|
||||
else:
|
||||
fs = os.path.join(tools.config['root_path'], 'filestore')
|
||||
if os.path.exists(os.path.join(fs, old_name)):
|
||||
os.rename(os.path.join(fs, old_name), os.path.join(fs, new_name))
|
||||
|
||||
_logger.info('RENAME DB: %s -> %s', old_name, new_name)
|
||||
finally:
|
||||
cr.close()
|
||||
return True
|
||||
|
||||
def exp_db_exist(self, db_name):
|
||||
## Not True: in fact, check if connection to database is possible. The database may exists
|
||||
return bool(sql_db.db_connect(db_name))
|
||||
|
||||
def exp_list(self, document=False):
|
||||
if not tools.config['list_db'] and not document:
|
||||
raise openerp.exceptions.AccessDenied()
|
||||
chosen_template = tools.config['db_template']
|
||||
templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
|
||||
db = sql_db.db_connect('postgres')
|
||||
cr = db.cursor()
|
||||
try:
|
||||
try:
|
||||
db_user = tools.config["db_user"]
|
||||
if not db_user and os.name == 'posix':
|
||||
import pwd
|
||||
db_user = pwd.getpwuid(os.getuid())[0]
|
||||
if not db_user:
|
||||
cr.execute("select usename from pg_user where usesysid=(select datdba from pg_database where datname=%s)", (tools.config["db_name"],))
|
||||
res = cr.fetchone()
|
||||
db_user = res and str(res[0])
|
||||
if db_user:
|
||||
cr.execute("select datname from pg_database where datdba=(select usesysid from pg_user where usename=%s) and datname not in %s order by datname", (db_user, templates_list))
|
||||
else:
|
||||
cr.execute("select datname from pg_database where datname not in %s order by datname", (templates_list,))
|
||||
res = [str(name) for (name,) in cr.fetchall()]
|
||||
except Exception:
|
||||
res = []
|
||||
finally:
|
||||
cr.close()
|
||||
res.sort()
|
||||
return res
|
||||
|
||||
def exp_change_admin_password(self, new_password):
|
||||
tools.config['admin_passwd'] = new_password
|
||||
tools.config.save()
|
||||
return True
|
||||
|
||||
def exp_list_lang(self):
|
||||
return tools.scan_languages()
|
||||
|
||||
def exp_server_version(self):
|
||||
""" Return the version of the server
|
||||
Used by the client to verify the compatibility with its own version
|
||||
"""
|
||||
return release.version
|
||||
|
||||
def exp_migrate_databases(self,databases):
|
||||
|
||||
from openerp.osv.orm import except_orm
|
||||
from openerp.osv.osv import except_osv
|
||||
|
||||
for db in databases:
|
||||
try:
|
||||
_logger.info('migrate database %s', db)
|
||||
tools.config['update']['base'] = True
|
||||
pooler.restart_pool(db, force_demo=False, update_module=True)
|
||||
except except_orm, inst:
|
||||
netsvc.abort_response(1, inst.name, 'warning', inst.value)
|
||||
except except_osv, inst:
|
||||
netsvc.abort_response(1, inst.name, 'warning', inst.value)
|
||||
except Exception:
|
||||
_logger.exception('Exception in migrate_databases:')
|
||||
raise
|
||||
return True
|
||||
|
||||
class common(netsvc.ExportService):
|
||||
|
||||
def __init__(self,name="common"):
|
||||
netsvc.ExportService.__init__(self,name)
|
||||
|
||||
def dispatch(self, method, params):
|
||||
if method in ['login', 'about', 'timezone_get', 'get_server_environment',
|
||||
'login_message','get_stats', 'check_connectivity',
|
||||
'list_http_services', 'version', 'authenticate']:
|
||||
pass
|
||||
elif method in ['get_available_updates', 'get_migration_scripts', 'set_loglevel', 'get_os_time', 'get_sqlcount']:
|
||||
passwd = params[0]
|
||||
params = params[1:]
|
||||
security.check_super(passwd)
|
||||
else:
|
||||
raise Exception("Method not found: %s" % method)
|
||||
|
||||
fn = getattr(self, 'exp_'+method)
|
||||
return fn(*params)
|
||||
|
||||
def exp_login(self, db, login, password):
|
||||
# TODO: legacy indirection through 'security', should use directly
|
||||
# the res.users model
|
||||
res = security.login(db, login, password)
|
||||
msg = res and 'successful login' or 'bad login or password'
|
||||
_logger.info("%s from '%s' using database '%s'", msg, login, db.lower())
|
||||
return res or False
|
||||
|
||||
def exp_authenticate(self, db, login, password, user_agent_env):
|
||||
res_users = pooler.get_pool(db).get('res.users')
|
||||
return res_users.authenticate(db, login, password, user_agent_env)
|
||||
|
||||
def exp_version(self):
|
||||
return RPC_VERSION_1
|
||||
|
||||
def exp_about(self, extended=False):
|
||||
"""Return information about the OpenERP Server.
|
||||
|
||||
@param extended: if True then return version info
|
||||
@return string if extended is False else tuple
|
||||
"""
|
||||
|
||||
info = _('''
|
||||
|
||||
OpenERP is an ERP+CRM program for small and medium businesses.
|
||||
|
||||
The whole source code is distributed under the terms of the
|
||||
GNU Public Licence.
|
||||
|
||||
(c) 2003-TODAY - OpenERP SA''')
|
||||
|
||||
if extended:
|
||||
return info, release.version
|
||||
return info
|
||||
|
||||
def exp_timezone_get(self, db, login, password):
|
||||
return tools.misc.get_server_timezone()
|
||||
|
||||
def exp_get_available_updates(self, contract_id, contract_password):
|
||||
import openerp.tools.maintenance as tm
|
||||
try:
|
||||
rc = tm.remote_contract(contract_id, contract_password)
|
||||
if not rc.id:
|
||||
raise tm.RemoteContractException('This contract does not exist or is not active')
|
||||
|
||||
return rc.get_available_updates(rc.id, openerp.modules.get_modules_with_version())
|
||||
|
||||
except tm.RemoteContractException, e:
|
||||
netsvc.abort_response(1, 'Migration Error', 'warning', str(e))
|
||||
|
||||
|
||||
def exp_get_migration_scripts(self, contract_id, contract_password):
|
||||
import openerp.tools.maintenance as tm
|
||||
try:
|
||||
rc = tm.remote_contract(contract_id, contract_password)
|
||||
if not rc.id:
|
||||
raise tm.RemoteContractException('This contract does not exist or is not active')
|
||||
if rc.status != 'full':
|
||||
raise tm.RemoteContractException('Can not get updates for a partial contract')
|
||||
|
||||
_logger.info('starting migration with contract %s', rc.name)
|
||||
|
||||
zips = rc.retrieve_updates(rc.id, openerp.modules.get_modules_with_version())
|
||||
|
||||
from shutil import rmtree, copytree, copy
|
||||
|
||||
backup_directory = os.path.join(tools.config['root_path'], 'backup', time.strftime('%Y-%m-%d-%H-%M'))
|
||||
if zips and not os.path.isdir(backup_directory):
|
||||
_logger.info('create a new backup directory to store the old modules: %s', backup_directory)
|
||||
os.makedirs(backup_directory)
|
||||
|
||||
for module in zips:
|
||||
_logger.info('upgrade module %s', module)
|
||||
mp = openerp.modules.get_module_path(module)
|
||||
if mp:
|
||||
if os.path.isdir(mp):
|
||||
copytree(mp, os.path.join(backup_directory, module))
|
||||
if os.path.islink(mp):
|
||||
os.unlink(mp)
|
||||
else:
|
||||
rmtree(mp)
|
||||
else:
|
||||
copy(mp + 'zip', backup_directory)
|
||||
os.unlink(mp + '.zip')
|
||||
|
||||
try:
|
||||
try:
|
||||
base64_decoded = base64.decodestring(zips[module])
|
||||
except Exception:
|
||||
_logger.error('unable to read the module %s', module)
|
||||
raise
|
||||
|
||||
zip_contents = StringIO(base64_decoded)
|
||||
zip_contents.seek(0)
|
||||
try:
|
||||
try:
|
||||
tools.extract_zip_file(zip_contents, tools.config['addons_path'] )
|
||||
except Exception:
|
||||
_logger.error('unable to extract the module %s', module)
|
||||
rmtree(module)
|
||||
raise
|
||||
finally:
|
||||
zip_contents.close()
|
||||
except Exception:
|
||||
_logger.error('restore the previous version of the module %s', module)
|
||||
nmp = os.path.join(backup_directory, module)
|
||||
if os.path.isdir(nmp):
|
||||
copytree(nmp, tools.config['addons_path'])
|
||||
else:
|
||||
copy(nmp+'.zip', tools.config['addons_path'])
|
||||
raise
|
||||
|
||||
return True
|
||||
except tm.RemoteContractException, e:
|
||||
netsvc.abort_response(1, 'Migration Error', 'warning', str(e))
|
||||
except Exception, e:
|
||||
_logger.exception('Exception in get_migration_script:')
|
||||
raise
|
||||
|
||||
def exp_get_server_environment(self):
|
||||
os_lang = '.'.join( [x for x in locale.getdefaultlocale() if x] )
|
||||
if not os_lang:
|
||||
os_lang = 'NOT SET'
|
||||
environment = '\nEnvironment Information : \n' \
|
||||
'System : %s\n' \
|
||||
'OS Name : %s\n' \
|
||||
%(platform.platform(), platform.os.name)
|
||||
if os.name == 'posix':
|
||||
if platform.system() == 'Linux':
|
||||
lsbinfo = os.popen('lsb_release -a').read()
|
||||
environment += '%s'% lsbinfo
|
||||
else:
|
||||
environment += 'Your System is not lsb compliant\n'
|
||||
environment += 'Operating System Release : %s\n' \
|
||||
'Operating System Version : %s\n' \
|
||||
'Operating System Architecture : %s\n' \
|
||||
'Operating System Locale : %s\n'\
|
||||
'Python Version : %s\n'\
|
||||
'OpenERP-Server Version : %s'\
|
||||
%(platform.release(), platform.version(), platform.architecture()[0],
|
||||
os_lang, platform.python_version(),release.version)
|
||||
return environment
|
||||
|
||||
def exp_login_message(self):
|
||||
return tools.config.get('login_message', False)
|
||||
|
||||
def exp_set_loglevel(self, loglevel, logger=None):
|
||||
# TODO Previously, the level was set on the now deprecated
|
||||
# `openerp.netsvc.Logger` class.
|
||||
return True
|
||||
|
||||
def exp_get_stats(self):
|
||||
res = "OpenERP server: %d threads\n" % threading.active_count()
|
||||
res += netsvc.Server.allStats()
|
||||
return res
|
||||
|
||||
def exp_list_http_services(self):
|
||||
return http_server.list_http_services()
|
||||
|
||||
def exp_check_connectivity(self):
|
||||
return bool(sql_db.db_connect('postgres'))
|
||||
|
||||
def exp_get_os_time(self):
|
||||
return os.times()
|
||||
|
||||
def exp_get_sqlcount(self):
|
||||
if not logging.getLogger('openerp.sql_db').isEnabledFor(logging.DEBUG):
|
||||
_logger.warning("Counters of SQL will not be reliable unless logger openerp.sql_db is set to level DEBUG or higer.")
|
||||
return sql_db.sql_counter
|
||||
|
||||
|
||||
class objects_proxy(netsvc.ExportService):
|
||||
def __init__(self, name="object"):
|
||||
netsvc.ExportService.__init__(self,name)
|
||||
|
||||
def dispatch(self, method, params):
|
||||
(db, uid, passwd ) = params[0:3]
|
||||
threading.current_thread().uid = uid
|
||||
params = params[3:]
|
||||
if method == 'obj_list':
|
||||
raise NameError("obj_list has been discontinued via RPC as of 6.0, please query ir.model directly!")
|
||||
if method not in ['execute', 'execute_kw', 'exec_workflow']:
|
||||
raise NameError("Method not available %s" % method)
|
||||
security.check(db,uid,passwd)
|
||||
assert openerp.osv.osv.service, "The object_proxy class must be started with start_object_proxy."
|
||||
openerp.modules.registry.RegistryManager.check_registry_signaling(db)
|
||||
fn = getattr(openerp.osv.osv.service, method)
|
||||
res = fn(db, uid, *params)
|
||||
openerp.modules.registry.RegistryManager.signal_caches_change(db)
|
||||
return res
|
||||
|
||||
|
||||
#
|
||||
# TODO: set a maximum report number per user to avoid DOS attacks
|
||||
#
|
||||
# Report state:
|
||||
# False -> True
|
||||
#
|
||||
|
||||
class report_spool(netsvc.ExportService):
|
||||
def __init__(self, name='report'):
|
||||
netsvc.ExportService.__init__(self, name)
|
||||
self._reports = {}
|
||||
self.id = 0
|
||||
self.id_protect = threading.Semaphore()
|
||||
|
||||
def dispatch(self, method, params):
|
||||
(db, uid, passwd ) = params[0:3]
|
||||
threading.current_thread().uid = uid
|
||||
params = params[3:]
|
||||
if method not in ['report', 'report_get', 'render_report']:
|
||||
raise KeyError("Method not supported %s" % method)
|
||||
security.check(db,uid,passwd)
|
||||
openerp.modules.registry.RegistryManager.check_registry_signaling(db)
|
||||
fn = getattr(self, 'exp_' + method)
|
||||
res = fn(db, uid, *params)
|
||||
openerp.modules.registry.RegistryManager.signal_caches_change(db)
|
||||
return res
|
||||
|
||||
def exp_render_report(self, db, uid, object, ids, datas=None, context=None):
|
||||
if not datas:
|
||||
datas={}
|
||||
if not context:
|
||||
context={}
|
||||
|
||||
self.id_protect.acquire()
|
||||
self.id += 1
|
||||
id = self.id
|
||||
self.id_protect.release()
|
||||
|
||||
self._reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
|
||||
|
||||
cr = pooler.get_db(db).cursor()
|
||||
try:
|
||||
obj = netsvc.LocalService('report.'+object)
|
||||
(result, format) = obj.create(cr, uid, ids, datas, context)
|
||||
if not result:
|
||||
tb = sys.exc_info()
|
||||
self._reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
|
||||
self._reports[id]['result'] = result
|
||||
self._reports[id]['format'] = format
|
||||
self._reports[id]['state'] = True
|
||||
except Exception, exception:
|
||||
|
||||
_logger.exception('Exception: %s\n', exception)
|
||||
if hasattr(exception, 'name') and hasattr(exception, 'value'):
|
||||
self._reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
|
||||
else:
|
||||
tb = sys.exc_info()
|
||||
self._reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
|
||||
self._reports[id]['state'] = True
|
||||
cr.commit()
|
||||
cr.close()
|
||||
|
||||
return self._check_report(id)
|
||||
|
||||
def exp_report(self, db, uid, object, ids, datas=None, context=None):
|
||||
if not datas:
|
||||
datas={}
|
||||
if not context:
|
||||
context={}
|
||||
|
||||
self.id_protect.acquire()
|
||||
self.id += 1
|
||||
id = self.id
|
||||
self.id_protect.release()
|
||||
|
||||
self._reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
|
||||
|
||||
def go(id, uid, ids, datas, context):
|
||||
cr = pooler.get_db(db).cursor()
|
||||
try:
|
||||
obj = netsvc.LocalService('report.'+object)
|
||||
(result, format) = obj.create(cr, uid, ids, datas, context)
|
||||
if not result:
|
||||
tb = sys.exc_info()
|
||||
self._reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
|
||||
self._reports[id]['result'] = result
|
||||
self._reports[id]['format'] = format
|
||||
self._reports[id]['state'] = True
|
||||
except Exception, exception:
|
||||
_logger.exception('Exception: %s\n', exception)
|
||||
if hasattr(exception, 'name') and hasattr(exception, 'value'):
|
||||
self._reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
|
||||
else:
|
||||
tb = sys.exc_info()
|
||||
self._reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
|
||||
self._reports[id]['state'] = True
|
||||
cr.commit()
|
||||
cr.close()
|
||||
return True
|
||||
|
||||
thread.start_new_thread(go, (id, uid, ids, datas, context))
|
||||
return id
|
||||
|
||||
def _check_report(self, report_id):
|
||||
result = self._reports[report_id]
|
||||
exc = result['exception']
|
||||
if exc:
|
||||
netsvc.abort_response(exc, exc.message, 'warning', exc.traceback)
|
||||
res = {'state': result['state']}
|
||||
if res['state']:
|
||||
if tools.config['reportgz']:
|
||||
import zlib
|
||||
res2 = zlib.compress(result['result'])
|
||||
res['code'] = 'zlib'
|
||||
else:
|
||||
#CHECKME: why is this needed???
|
||||
if isinstance(result['result'], unicode):
|
||||
res2 = result['result'].encode('latin1', 'replace')
|
||||
else:
|
||||
res2 = result['result']
|
||||
if res2:
|
||||
res['result'] = base64.encodestring(res2)
|
||||
res['format'] = result['format']
|
||||
del self._reports[report_id]
|
||||
return res
|
||||
|
||||
def exp_report_get(self, db, uid, report_id):
|
||||
if report_id in self._reports:
|
||||
if self._reports[report_id]['uid'] == uid:
|
||||
return self._check_report(report_id)
|
||||
else:
|
||||
raise Exception, 'AccessDenied'
|
||||
else:
|
||||
raise Exception, 'ReportNotFound'
|
||||
|
||||
|
||||
def start_service():
|
||||
db()
|
||||
common()
|
||||
objects_proxy()
|
||||
report_spool()
|
||||
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -367,7 +367,7 @@ class WorkerCron(Worker):
|
|||
if config['db_name']:
|
||||
db_names = config['db_name'].split(',')
|
||||
else:
|
||||
db_names = openerp.netsvc.ExportService._services['db'].exp_list(True)
|
||||
db_names = openerp.service.db.exp_list(True)
|
||||
for db_name in db_names:
|
||||
if rpc_request_flag:
|
||||
start_time = time.time()
|
||||
|
|
|
@ -34,6 +34,7 @@ import errno
|
|||
import logging
|
||||
import os
|
||||
import signal
|
||||
import socket
|
||||
import sys
|
||||
import threading
|
||||
import traceback
|
||||
|
@ -90,7 +91,7 @@ def xmlrpc_return(start_response, service, method, params, legacy_exceptions=Fal
|
|||
return [response]
|
||||
|
||||
def xmlrpc_handle_exception(e):
|
||||
if isinstance(e, openerp.osv.osv.except_osv): # legacy
|
||||
if isinstance(e, openerp.osv.orm.except_orm): # legacy
|
||||
fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, openerp.tools.ustr(e.value))
|
||||
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
|
||||
elif isinstance(e, openerp.exceptions.Warning):
|
||||
|
@ -123,7 +124,7 @@ def xmlrpc_handle_exception(e):
|
|||
return response
|
||||
|
||||
def xmlrpc_handle_exception_legacy(e):
|
||||
if isinstance(e, openerp.osv.osv.except_osv):
|
||||
if isinstance(e, openerp.osv.orm.except_orm):
|
||||
fault = xmlrpclib.Fault('warning -- ' + e.name + '\n\n' + e.value, '')
|
||||
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
|
||||
elif isinstance(e, openerp.exceptions.Warning):
|
||||
|
@ -373,6 +374,8 @@ def parse_http_response(s):
|
|||
|
||||
# WSGI handlers registered through the register_wsgi_handler() function below.
|
||||
module_handlers = []
|
||||
# RPC endpoints registered through the register_rpc_endpoint() function below.
|
||||
rpc_handlers = {}
|
||||
|
||||
def register_wsgi_handler(handler):
|
||||
""" Register a WSGI handler.
|
||||
|
@ -382,6 +385,11 @@ def register_wsgi_handler(handler):
|
|||
"""
|
||||
module_handlers.append(handler)
|
||||
|
||||
def register_rpc_endpoint(endpoint, handler):
|
||||
""" Register a handler for a given RPC enpoint.
|
||||
"""
|
||||
rpc_handlers[endpoint] = handler
|
||||
|
||||
def application_unproxied(environ, start_response):
|
||||
""" WSGI entry point."""
|
||||
openerp.service.start_internal()
|
||||
|
@ -439,6 +447,25 @@ def stop_service():
|
|||
"""
|
||||
if httpd:
|
||||
httpd.shutdown()
|
||||
openerp.netsvc.close_socket(httpd.socket)
|
||||
close_socket(httpd.socket)
|
||||
|
||||
def close_socket(sock):
|
||||
""" Closes a socket instance cleanly
|
||||
|
||||
:param sock: the network socket to close
|
||||
:type sock: socket.socket
|
||||
"""
|
||||
try:
|
||||
sock.shutdown(socket.SHUT_RDWR)
|
||||
except socket.error, e:
|
||||
# On OSX, socket shutdowns both sides if any side closes it
|
||||
# causing an error 57 'Socket is not connected' on shutdown
|
||||
# of the other side (or something), see
|
||||
# http://bugs.python.org/issue4397
|
||||
# note: stdlib fixed test, not behavior
|
||||
if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
|
||||
raise
|
||||
sock.close()
|
||||
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
|
|
@ -22,6 +22,8 @@ import test_osv
|
|||
import test_translate
|
||||
import test_uninstall
|
||||
import test_view_validation
|
||||
# This need a change in `oe run-tests` to only run fast_suite + checks by default.
|
||||
# import test_xmlrpc
|
||||
|
||||
fast_suite = [
|
||||
test_ir_sequence,
|
||||
|
|
|
@ -133,6 +133,7 @@ class RpcCase(unittest2.TestCase):
|
|||
self.proxy.common_60 = xmlrpclib.ServerProxy(url_60 + 'common')
|
||||
self.proxy.db_60 = xmlrpclib.ServerProxy(url_60 + 'db')
|
||||
self.proxy.object_60 = xmlrpclib.ServerProxy(url_60 + 'object')
|
||||
#self.proxy.edi_60 = xmlrpclib.ServerProxy(url_60 + 'edi')
|
||||
|
||||
# Use the new (6.1) API.
|
||||
self.proxy.url_61 = url_61 = 'http://%s:%d/openerp/xmlrpc/1/' % (HOST, PORT)
|
||||
|
|
|
@ -64,6 +64,14 @@ class test_xmlrpc(common.RpcCase):
|
|||
ids = proxy.execute(ADMIN_USER_ID, ADMIN_PASSWORD, 'search', [], {})
|
||||
assert ids
|
||||
|
||||
# This test was written to test the creation of a new RPC endpoint, not
|
||||
# really for the EDI itself.
|
||||
#def test_xmlrpc_import_edi_document(self):
|
||||
# """ Try to call an EDI method. """
|
||||
# msg_re = 'EDI Document is empty!'
|
||||
# with self.assertRaisesRegexp(Exception, msg_re):
|
||||
# self.proxy.edi_60.import_edi_document(DB, ADMIN_USER_ID, ADMIN_PASSWORD, {})
|
||||
|
||||
def test_zz_xmlrpc_drop_database(self):
|
||||
"""
|
||||
Simulate a OpenERP client requesting the deletion of a database.
|
||||
|
|
|
@ -674,49 +674,7 @@ def trans_generate(lang, modules, cr):
|
|||
for t in trans_parse_view(d):
|
||||
push_translation(module, 'view', encode(obj.model), 0, t)
|
||||
elif model=='ir.actions.wizard':
|
||||
service_name = 'wizard.'+encode(obj.wiz_name)
|
||||
import openerp.netsvc as netsvc
|
||||
if netsvc.Service._services.get(service_name):
|
||||
obj2 = netsvc.Service._services[service_name]
|
||||
for state_name, state_def in obj2.states.iteritems():
|
||||
if 'result' in state_def:
|
||||
result = state_def['result']
|
||||
if result['type'] != 'form':
|
||||
continue
|
||||
name = "%s,%s" % (encode(obj.wiz_name), state_name)
|
||||
|
||||
def_params = {
|
||||
'string': ('wizard_field', lambda s: [encode(s)]),
|
||||
'selection': ('selection', lambda s: [encode(e[1]) for e in ((not callable(s)) and s or [])]),
|
||||
'help': ('help', lambda s: [encode(s)]),
|
||||
}
|
||||
|
||||
# export fields
|
||||
if not result.has_key('fields'):
|
||||
_logger.warning("res has no fields: %r", result)
|
||||
continue
|
||||
for field_name, field_def in result['fields'].iteritems():
|
||||
res_name = name + ',' + field_name
|
||||
|
||||
for fn in def_params:
|
||||
if fn in field_def:
|
||||
transtype, modifier = def_params[fn]
|
||||
for val in modifier(field_def[fn]):
|
||||
push_translation(module, transtype, res_name, 0, val)
|
||||
|
||||
# export arch
|
||||
arch = result['arch']
|
||||
if arch and not isinstance(arch, UpdateableStr):
|
||||
d = etree.XML(arch)
|
||||
for t in trans_parse_view(d):
|
||||
push_translation(module, 'wizard_view', name, 0, t)
|
||||
|
||||
# export button labels
|
||||
for but_args in result['state']:
|
||||
button_name = but_args[0]
|
||||
button_label = but_args[1]
|
||||
res_name = name + ',' + button_name
|
||||
push_translation(module, 'wizard_button', res_name, 0, button_label)
|
||||
pass # TODO Can model really be 'ir.actions.wizard' ?
|
||||
|
||||
elif model=='ir.model.fields':
|
||||
try:
|
||||
|
|
|
@ -19,9 +19,123 @@
|
|||
#
|
||||
##############################################################################
|
||||
|
||||
import wkf_service
|
||||
import instance
|
||||
|
||||
# Per-database cache of the workflow ids to instantiate when a record is
# created: {dbname: {model_name: [(wkf_id,), ...]}} (filled lazily by
# trg_create below).
wkf_on_create_cache = {}

def clear_cache(cr, uid):
    """ Reset the on-create workflow cache for the cursor's database. """
    wkf_on_create_cache[cr.dbname] = {}
|
||||
|
||||
def trg_write(uid, res_type, res_id, cr):
    """ Re-evaluate the active workflow instance(s) of a record.

    If any condition guarding a transition has been changed in the
    backend, running ``trg_write`` pushes the workflow over that
    transition.

    :param uid: id of the user on whose behalf the workflow is evaluated
    :param res_type: the model name
    :param res_id: the model instance id the workflow belongs to
    :param cr: a database cursor
    """
    record = (uid, res_type, res_id)
    cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (res_id or None, res_type or None, 'active'))
    active_ids = [row[0] for row in cr.fetchall()]
    for instance_id in active_ids:
        instance.update(cr, instance_id, record)
|
||||
|
||||
def trg_trigger(uid, res_type, res_id, cr):
    """ Activate the triggers waiting on a record.

    A workflow instance may be waiting for a trigger tied to another
    model; calling this re-evaluates every such waiting instance so it
    can advance if its conditions are now met.

    :param uid: id of the user on whose behalf the workflow is evaluated
    :param res_type: the model name
    :param res_id: the model instance id the trigger is bound to
    :param cr: a database cursor
    """
    cr.execute('select instance_id from wkf_triggers where res_id=%s and model=%s', (res_id, res_type))
    waiting = [row[0] for row in cr.fetchall()]
    for instance_id in waiting:
        # rebuild the (uid, res_type, res_id) ident of the waiting instance
        cr.execute('select %s,res_type,res_id from wkf_instance where id=%s', (uid, instance_id,))
        ident = cr.fetchone()
        instance.update(cr, instance_id, ident)
|
||||
|
||||
def trg_delete(uid, res_type, res_id, cr):
    """ Delete the workflow instances bound to a record.

    :param uid: id of the user requesting the deletion
    :param res_type: the model name
    :param res_id: the model instance id the workflow belongs to
    :param cr: a database cursor
    """
    instance.delete(cr, (uid, res_type, res_id))
|
||||
|
||||
def trg_create(uid, res_type, res_id, cr):
    """ Instantiate every workflow that starts automatically on creation
    of a ``res_type`` record.

    :param uid: id of the user owning the new workflow instance(s)
    :param res_type: the model name
    :param res_id: the model instance id to own the created workflow
        instance
    :param cr: a database cursor
    """
    ident = (uid, res_type, res_id)
    db_cache = wkf_on_create_cache.setdefault(cr.dbname, {})
    try:
        # per-database cache of the workflows flagged on_create, by model
        wkf_ids = db_cache[res_type]
    except KeyError:
        cr.execute('select id from wkf where osv=%s and on_create=True', (res_type,))
        wkf_ids = cr.fetchall()
        db_cache[res_type] = wkf_ids
    for (wkf_id,) in wkf_ids:
        instance.create(cr, ident, wkf_id)
|
||||
|
||||
def trg_validate(uid, res_type, res_id, signal, cr):
    """ Fire a signal on every active workflow instance of a record.

    :param uid: id of the user firing the signal
    :param res_type: the model name
    :param res_id: the model instance id the workflow belongs to
    :param signal: the name of the signal to fire
    :param cr: a database cursor
    :return: the first truthy value returned by a validation, or
        ``False`` if none was truthy
    """
    ident = (uid, res_type, res_id)
    # every active workflow instance attached to the (model, id) resource
    cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (res_id, res_type, 'active'))
    result = False
    for (instance_id,) in cr.fetchall():
        validated = instance.validate(cr, instance_id, ident, signal)
        # keep the first truthy result (matches the original accumulation)
        result = result or validated
    return result
|
||||
|
||||
def trg_redirect(uid, res_type, res_id, new_rid, cr):
    """ Re-bind a workflow instance to another instance of the same model.

    Make all workitems which are waiting for a (subflow) workflow
    instance of the old resource point to the (first active) workflow
    instance of the new resource.

    :param uid: id of the user performing the redirection (unused here)
    :param res_type: the model name
    :param res_id: the model instance id the workflow belongs to
    :param new_rid: the model instance id to own the workflow instance
    :param cr: a database cursor
    """
    # wkf instances bound to the old resource (res_id)
    # CHECKME: shouldn't we restrict this to active instances?
    cr.execute('select id, wkf_id from wkf_instance where res_id=%s and res_type=%s', (res_id, res_type))
    for old_inst_id, wkf_id in cr.fetchall():
        # first active instance of the same workflow on the new resource
        cr.execute(
            'SELECT id '\
            'FROM wkf_instance '\
            'WHERE res_id=%s AND res_type=%s AND wkf_id=%s AND state=%s',
            (new_rid, res_type, wkf_id, 'active'))
        new_id = cr.fetchone()
        if not new_id:
            continue
        # workitems currently "waiting" for the old instance ...
        cr.execute('select id from wkf_workitem where subflow_id=%s', (old_inst_id,))
        waiting_items = [row[0] for row in cr.fetchall()]
        # ... are repointed to the instance of the new resource
        for item_id in waiting_items:
            cr.execute('update wkf_workitem set subflow_id=%s where id=%s', (new_id[0], item_id))
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
||||
|
|
|
@ -1,156 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
##############################################################################
|
||||
#
|
||||
# OpenERP, Open Source Management Solution
|
||||
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
##############################################################################
|
||||
import instance
|
||||
import openerp.netsvc as netsvc
|
||||
|
||||
class workflow_service(netsvc.Service):
|
||||
"""
|
||||
Sometimes you might want to fire a signal or re-evaluate the current state
|
||||
of a workflow using the service's API. You can access the workflow services
|
||||
using:
|
||||
|
||||
>>> import netsvc
|
||||
>>> wf_service = netsvc.LocalService("workflow")
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, name='workflow'):
|
||||
netsvc.Service.__init__(self, name)
|
||||
self.wkf_on_create_cache={}
|
||||
|
||||
def clear_cache(self, cr, uid):
|
||||
self.wkf_on_create_cache[cr.dbname]={}
|
||||
|
||||
def trg_write(self, uid, res_type, res_id, cr):
|
||||
"""
|
||||
Reevaluates the specified workflow instance. Thus if any condition for
|
||||
a transition have been changed in the backend, then running ``trg_write``
|
||||
will move the workflow over that transition.
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
ident = (uid,res_type,res_id)
|
||||
cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (res_id or None,res_type or None, 'active'))
|
||||
for (id,) in cr.fetchall():
|
||||
instance.update(cr, id, ident)
|
||||
|
||||
def trg_trigger(self, uid, res_type, res_id, cr):
|
||||
"""
|
||||
Activate a trigger.
|
||||
|
||||
If a workflow instance is waiting for a trigger from another model, then this
|
||||
trigger can be activated if its conditions are met.
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
cr.execute('select instance_id from wkf_triggers where res_id=%s and model=%s', (res_id,res_type))
|
||||
res = cr.fetchall()
|
||||
for (instance_id,) in res:
|
||||
cr.execute('select %s,res_type,res_id from wkf_instance where id=%s', (uid, instance_id,))
|
||||
ident = cr.fetchone()
|
||||
instance.update(cr, instance_id, ident)
|
||||
|
||||
def trg_delete(self, uid, res_type, res_id, cr):
|
||||
"""
|
||||
Delete a workflow instance
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
ident = (uid,res_type,res_id)
|
||||
instance.delete(cr, ident)
|
||||
|
||||
def trg_create(self, uid, res_type, res_id, cr):
|
||||
"""
|
||||
Create a new workflow instance
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id to own the created worfklow instance
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
ident = (uid,res_type,res_id)
|
||||
self.wkf_on_create_cache.setdefault(cr.dbname, {})
|
||||
if res_type in self.wkf_on_create_cache[cr.dbname]:
|
||||
wkf_ids = self.wkf_on_create_cache[cr.dbname][res_type]
|
||||
else:
|
||||
cr.execute('select id from wkf where osv=%s and on_create=True', (res_type,))
|
||||
wkf_ids = cr.fetchall()
|
||||
self.wkf_on_create_cache[cr.dbname][res_type] = wkf_ids
|
||||
for (wkf_id,) in wkf_ids:
|
||||
instance.create(cr, ident, wkf_id)
|
||||
|
||||
def trg_validate(self, uid, res_type, res_id, signal, cr):
|
||||
"""
|
||||
Fire a signal on a given workflow instance
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:signal: the signal name to be fired
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
result = False
|
||||
ident = (uid,res_type,res_id)
|
||||
# ids of all active workflow instances for a corresponding resource (id, model_nam)
|
||||
cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (res_id, res_type, 'active'))
|
||||
for (id,) in cr.fetchall():
|
||||
res2 = instance.validate(cr, id, ident, signal)
|
||||
result = result or res2
|
||||
return result
|
||||
|
||||
def trg_redirect(self, uid, res_type, res_id, new_rid, cr):
|
||||
"""
|
||||
Re-bind a workflow instance to another instance of the same model.
|
||||
|
||||
Make all workitems which are waiting for a (subflow) workflow instance
|
||||
for the old resource point to the (first active) workflow instance for
|
||||
the new resource.
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:param new_rid: the model instance id to own the worfklow instance
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
# get ids of wkf instances for the old resource (res_id)
|
||||
#CHECKME: shouldn't we get only active instances?
|
||||
cr.execute('select id, wkf_id from wkf_instance where res_id=%s and res_type=%s', (res_id, res_type))
|
||||
for old_inst_id, wkf_id in cr.fetchall():
|
||||
# first active instance for new resource (new_rid), using same wkf
|
||||
cr.execute(
|
||||
'SELECT id '\
|
||||
'FROM wkf_instance '\
|
||||
'WHERE res_id=%s AND res_type=%s AND wkf_id=%s AND state=%s',
|
||||
(new_rid, res_type, wkf_id, 'active'))
|
||||
new_id = cr.fetchone()
|
||||
if new_id:
|
||||
# select all workitems which "wait" for the old instance
|
||||
cr.execute('select id from wkf_workitem where subflow_id=%s', (old_inst_id,))
|
||||
for (item_id,) in cr.fetchall():
|
||||
# redirect all those workitems to the wkf instance of the new resource
|
||||
cr.execute('update wkf_workitem set subflow_id=%s where id=%s', (new_id[0], item_id))
|
||||
workflow_service()
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
Loading…
Reference in New Issue