[MERGE] trunk
bzr revid: abo@openerp.com-20130213105620-xyez3q7ddgc0x5x8
This commit is contained in:
commit
f929ebc73b
|
@ -0,0 +1,12 @@
|
|||
.. _changelog:
|
||||
|
||||
Changelog
|
||||
=========
|
||||
|
||||
`trunk`
|
||||
-------
|
||||
|
||||
- Removed support for `__terp__.py` descriptor files.
|
||||
- Removed support for `<terp>` root element in XML files.
|
||||
- Removed support for the non-openerp namespace (e.g. importing `tools` instead
|
||||
of `openerp.tools` in an addons).
|
|
@ -36,6 +36,15 @@ OpenERP Server API
|
|||
:maxdepth: 1
|
||||
|
||||
api_models.rst
|
||||
routing.rst
|
||||
|
||||
Changelog
|
||||
'''''''''
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
changelog.rst
|
||||
|
||||
Concepts
|
||||
''''''''
|
||||
|
|
|
@ -0,0 +1,27 @@
|
|||
.. _routing:
|
||||
|
||||
Routing
|
||||
=======
|
||||
|
||||
.. versionchanged:: 7.1
|
||||
|
||||
The OpenERP framework, as an HTTP server, serves a few hard-coded URLs
|
||||
(``models``, ``db``, ...) to expose RPC endpoints. When running the web addons
|
||||
(which is almost always the case), it also serves URLs without them being RPC
|
||||
endpoints.
|
||||
|
||||
In older version of OpenERP, adding RPC endpoints was done by subclassing the
|
||||
``openerp.netsvc.ExportService`` class. Adding WSGI handlers was done by
|
||||
registering them with the :py:func:`openerp.wsgi.register_wsgi_handler`
|
||||
function.
|
||||
|
||||
Starting with OpenERP 7.1, exposing a new arbitrary WSGI handler is done with
|
||||
the :py:func:`openerp.http.handler` decorator while adding an RPC endpoint is
|
||||
done with the :py:func:`openerp.http.rpc` decorator.
|
||||
|
||||
Routing decorators
|
||||
------------------
|
||||
|
||||
.. automodule:: openerp.http
|
||||
:members:
|
||||
:undoc-members:
|
|
@ -1 +0,0 @@
|
|||
excludes: pychart release openerp-server test run_tests addons/base_quality_interrogation
|
|
@ -28,7 +28,7 @@ SUPERUSER_ID = 1
|
|||
import addons
|
||||
import cli
|
||||
import conf
|
||||
import exceptions
|
||||
import http
|
||||
import loglevels
|
||||
import modules
|
||||
import netsvc
|
||||
|
|
|
@ -28,6 +28,7 @@ import time
|
|||
from openerp import SUPERUSER_ID
|
||||
from openerp import netsvc, tools
|
||||
from openerp.osv import fields, osv
|
||||
import openerp.report.interface
|
||||
from openerp.report.report_sxw import report_sxw, report_rml
|
||||
from openerp.tools.config import config
|
||||
from openerp.tools.safe_eval import safe_eval as eval
|
||||
|
@ -93,9 +94,9 @@ class report_xml(osv.osv):
|
|||
opj = os.path.join
|
||||
cr.execute("SELECT * FROM ir_act_report_xml WHERE auto=%s ORDER BY id", (True,))
|
||||
result = cr.dictfetchall()
|
||||
svcs = netsvc.Service._services
|
||||
reports = openerp.report.interface.report_int._reports
|
||||
for r in result:
|
||||
if svcs.has_key('report.'+r['report_name']):
|
||||
if reports.has_key('report.'+r['report_name']):
|
||||
continue
|
||||
if r['report_rml'] or r['report_rml_content_data']:
|
||||
report_sxw('report.'+r['report_name'], r['model'],
|
||||
|
|
|
@ -1,74 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
##############################################################################
|
||||
#
|
||||
# OpenERP, Open Source Management Solution
|
||||
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
##############################################################################
|
||||
import os
|
||||
import glob
|
||||
import imp
|
||||
import zipfile
|
||||
|
||||
from openerp import tools
|
||||
from openerp.osv import osv
|
||||
|
||||
class base_module_scan(osv.osv_memory):
|
||||
""" scan module """
|
||||
|
||||
_name = "base.module.scan"
|
||||
_description = "scan module"
|
||||
|
||||
def watch_dir(self, cr, uid, ids, context):
|
||||
mod_obj = self.pool.get('ir.module.module')
|
||||
all_mods = mod_obj.read(cr, uid, mod_obj.search(cr, uid, []), ['name', 'state'])
|
||||
known_modules = [x['name'] for x in all_mods]
|
||||
ls_ad = glob.glob(os.path.join(tools.config['addons_path'], '*', '__terp__.py'))
|
||||
modules = [module_name_re.match(name).group(1) for name in ls_ad]
|
||||
for fname in os.listdir(tools.config['addons_path']):
|
||||
if zipfile.is_zipfile(fname):
|
||||
modules.append( fname.split('.')[0])
|
||||
for module in modules:
|
||||
if module in known_modules:
|
||||
continue
|
||||
terp = mod_obj.get_module_info(module)
|
||||
if not terp.get('installable', True):
|
||||
continue
|
||||
|
||||
# XXX check if this code is correct...
|
||||
fm = imp.find_module(module)
|
||||
try:
|
||||
imp.load_module(module, *fm)
|
||||
finally:
|
||||
if fm[0]:
|
||||
fm[0].close()
|
||||
|
||||
values = mod_obj.get_values_from_terp(terp)
|
||||
mod_id = mod_obj.create(cr, uid, dict(name=module, state='uninstalled', **values))
|
||||
dependencies = terp.get('depends', [])
|
||||
for d in dependencies:
|
||||
cr.execute('insert into ir_module_module_dependency (module_id,name) values (%s, %s)', (mod_id, d))
|
||||
for module in known_modules:
|
||||
terp = mod_obj.get_module_info(module)
|
||||
if terp.get('installable', True):
|
||||
for mod in all_mods:
|
||||
if mod['name'] == module and mod['state'] == 'uninstallable':
|
||||
mod_obj.write(cr, uid, [mod['id']], {'state': 'uninstalled'})
|
||||
return {}
|
||||
|
||||
base_module_scan()
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -21,7 +21,8 @@
|
|||
|
||||
import time
|
||||
|
||||
from openerp.osv import osv,fields
|
||||
from openerp.osv import osv, fields
|
||||
from openerp.osv.orm import browse_record
|
||||
from openerp.tools.misc import attrgetter
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
|
@ -111,7 +112,7 @@ class ir_property(osv.osv):
|
|||
raise osv.except_osv('Error', 'Invalid type')
|
||||
|
||||
if field == 'value_reference':
|
||||
if isinstance(value, osv.orm.browse_record):
|
||||
if isinstance(value, browse_record):
|
||||
value = '%s,%d' % (value._name, value.id)
|
||||
elif isinstance(value, (int, long)):
|
||||
field_id = values.get('fields_id')
|
||||
|
|
|
@ -1,352 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
##############################################################################
|
||||
#
|
||||
# OpenERP, Open Source Management Solution
|
||||
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
##############################################################################
|
||||
|
||||
import xmlrpclib
|
||||
import optparse
|
||||
import sys
|
||||
import threading
|
||||
import os
|
||||
import time
|
||||
import base64
|
||||
import socket
|
||||
import string
|
||||
|
||||
admin_passwd = 'admin'
|
||||
waittime = 10
|
||||
wait_count = 0
|
||||
wait_limit = 12
|
||||
|
||||
def to_decode(s):
|
||||
try:
|
||||
return s.encode('utf-8')
|
||||
except UnicodeError:
|
||||
try:
|
||||
return s.encode('latin')
|
||||
except UnicodeError:
|
||||
try:
|
||||
return s.decode('ascii')
|
||||
except UnicodeError:
|
||||
return s
|
||||
|
||||
def start_server(root_path, port, netport, addons_path):
|
||||
os.system('python2.5 %sopenerp-server --pidfile=openerp.pid --no-xmlrpcs --xmlrpc-port=%s --netrpc-port=%s --addons-path=%s' %(root_path, str(port),str(netport),addons_path))
|
||||
def clean():
|
||||
if os.path.isfile('openerp.pid'):
|
||||
ps = open('openerp.pid')
|
||||
if ps:
|
||||
pid = int(ps.read())
|
||||
ps.close()
|
||||
if pid:
|
||||
os.kill(pid,9)
|
||||
|
||||
def execute(connector, method, *args):
|
||||
global wait_count
|
||||
res = False
|
||||
try:
|
||||
res = getattr(connector,method)(*args)
|
||||
except socket.error,e:
|
||||
if e.args[0] == 111:
|
||||
if wait_count > wait_limit:
|
||||
print "Server is taking too long to start, it has exceeded the maximum limit of %d seconds." % wait_limit
|
||||
clean()
|
||||
sys.exit(1)
|
||||
print 'Please wait %d sec to start server....' % waittime
|
||||
wait_count += 1
|
||||
time.sleep(waittime)
|
||||
res = execute(connector, method, *args)
|
||||
else:
|
||||
raise e
|
||||
wait_count = 0
|
||||
return res
|
||||
|
||||
def login(uri, dbname, user, pwd):
|
||||
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/common')
|
||||
uid = execute(conn,'login',dbname, user, pwd)
|
||||
return uid
|
||||
|
||||
def import_translate(uri, user, pwd, dbname, translate_in):
|
||||
uid = login(uri, dbname, user, pwd)
|
||||
if uid:
|
||||
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
|
||||
wiz_id = execute(conn,'create',dbname, uid, pwd, 'base.language.import')
|
||||
for trans_in in translate_in:
|
||||
lang,ext = os.path.splitext(trans_in.split('/')[-1])
|
||||
state = 'init'
|
||||
datas = {'form':{}}
|
||||
while state!='end':
|
||||
res = execute(conn,'execute',dbname, uid, pwd, wiz_id, datas, state, {})
|
||||
if 'datas' in res:
|
||||
datas['form'].update( res['datas'].get('form',{}) )
|
||||
if res['type']=='form':
|
||||
for field in res['fields'].keys():
|
||||
datas['form'][field] = res['fields'][field].get('value', False)
|
||||
state = res['state'][-1][0]
|
||||
trans_obj = open(trans_in)
|
||||
datas['form'].update({
|
||||
'name': lang,
|
||||
'code': lang,
|
||||
'data' : base64.encodestring(trans_obj.read())
|
||||
})
|
||||
trans_obj.close()
|
||||
elif res['type']=='action':
|
||||
state = res['state']
|
||||
|
||||
|
||||
def check_quality(uri, user, pwd, dbname, modules, quality_logs):
|
||||
uid = login(uri, dbname, user, pwd)
|
||||
quality_logs += 'quality-logs'
|
||||
if uid:
|
||||
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
|
||||
final = {}
|
||||
for module in modules:
|
||||
qualityresult = {}
|
||||
test_detail = {}
|
||||
quality_result = execute(conn,'execute', dbname, uid, pwd,'module.quality.check','check_quality',module)
|
||||
detail_html = ''
|
||||
html = '''<html><body><a name="TOP"></a>'''
|
||||
html +="<h1> Module: %s </h1>"%(quality_result['name'])
|
||||
html += "<h2> Final score: %s</h2>"%(quality_result['final_score'])
|
||||
html += "<div id='tabs'>"
|
||||
html += "<ul>"
|
||||
for x,y,detail in quality_result['check_detail_ids']:
|
||||
test = detail.get('name')
|
||||
msg = detail.get('message','')
|
||||
score = round(float(detail.get('score',0)),2)
|
||||
html += "<li><a href=\"#%s\">%s</a></li>"%(test.replace(' ','-'),test)
|
||||
detail_html +='''<div id=\"%s\"><h3>%s (Score : %s)</h3><font color=red><h5>%s</h5></font>%s</div>'''%(test.replace(' ', '-'), test, score, msg, detail.get('detail', ''))
|
||||
test_detail[test] = (score,msg,detail.get('detail',''))
|
||||
html += "</ul>"
|
||||
html += "%s"% detail_html
|
||||
html += "</div></body></html>"
|
||||
if not os.path.isdir(quality_logs):
|
||||
os.mkdir(quality_logs)
|
||||
fp = open('%s/%s.html'%(quality_logs,module),'wb')
|
||||
fp.write(to_decode(html))
|
||||
fp.close()
|
||||
#final[quality_result['name']] = (quality_result['final_score'],html,test_detail)
|
||||
|
||||
#fp = open('quality_log.pck','wb')
|
||||
#pck_obj = pickle.dump(final,fp)
|
||||
#fp.close()
|
||||
#print "LOG PATH%s"%(os.path.realpath('quality_log.pck'))
|
||||
return True
|
||||
else:
|
||||
print 'Login Failed...'
|
||||
clean()
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
|
||||
def wait(id,url=''):
|
||||
progress=0.0
|
||||
sock2 = xmlrpclib.ServerProxy(url+'/xmlrpc/db')
|
||||
while not progress==1.0:
|
||||
progress,users = execute(sock2,'get_progress',admin_passwd, id)
|
||||
return True
|
||||
|
||||
|
||||
def create_db(uri, dbname, user='admin', pwd='admin', lang='en_US'):
|
||||
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
|
||||
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
|
||||
wiz_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
|
||||
login_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/common')
|
||||
db_list = execute(conn, 'list')
|
||||
if dbname in db_list:
|
||||
drop_db(uri, dbname)
|
||||
id = execute(conn,'create',admin_passwd, dbname, True, lang)
|
||||
wait(id,uri)
|
||||
install_module(uri, dbname, ['base_module_quality'],user=user,pwd=pwd)
|
||||
return True
|
||||
|
||||
def drop_db(uri, dbname):
|
||||
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
|
||||
db_list = execute(conn,'list')
|
||||
if dbname in db_list:
|
||||
execute(conn, 'drop', admin_passwd, dbname)
|
||||
return True
|
||||
|
||||
def make_links(uri, uid, dbname, source, destination, module, user, pwd):
|
||||
if module in ('base','quality_integration_server'):
|
||||
return True
|
||||
if os.path.islink(destination + '/' + module):
|
||||
os.unlink(destination + '/' + module)
|
||||
for path in source:
|
||||
if os.path.isdir(path + '/' + module):
|
||||
os.symlink(path + '/' + module, destination + '/' + module)
|
||||
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
|
||||
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'update_list')
|
||||
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','=',module)])
|
||||
if len(module_ids):
|
||||
data = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'read', module_ids[0],['name','dependencies_id'])
|
||||
dep_datas = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module.dependency', 'read', data['dependencies_id'],['name'])
|
||||
for dep_data in dep_datas:
|
||||
make_links(uri, uid, dbname, source, destination, dep_data['name'], user, pwd)
|
||||
return False
|
||||
|
||||
def install_module(uri, dbname, modules, addons='', extra_addons='', user='admin', pwd='admin'):
|
||||
uid = login(uri, dbname, user, pwd)
|
||||
if extra_addons:
|
||||
extra_addons = extra_addons.split(',')
|
||||
if uid:
|
||||
if addons and extra_addons:
|
||||
for module in modules:
|
||||
make_links(uri, uid, dbname, extra_addons, addons, module, user, pwd)
|
||||
|
||||
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
|
||||
wizard_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
|
||||
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','in',modules)])
|
||||
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'button_install', module_ids)
|
||||
wiz_id = execute(wizard_conn, 'create', dbname, uid, pwd, 'module.upgrade.simple')
|
||||
state = 'init'
|
||||
datas = {}
|
||||
#while state!='menu':
|
||||
while state!='end':
|
||||
res = execute(wizard_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
|
||||
if state == 'init':
|
||||
state = 'start'
|
||||
elif state == 'start':
|
||||
state = 'end'
|
||||
return True
|
||||
|
||||
def upgrade_module(uri, dbname, modules, user='admin', pwd='admin'):
|
||||
uid = login(uri, dbname, user, pwd)
|
||||
if uid:
|
||||
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
|
||||
wizard_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
|
||||
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','in',modules)])
|
||||
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'button_upgrade', module_ids)
|
||||
wiz_id = execute(wizard_conn, 'create', dbname, uid, pwd, 'module.upgrade.simple')
|
||||
state = 'init'
|
||||
datas = {}
|
||||
#while state!='menu':
|
||||
while state!='end':
|
||||
res = execute(wizard_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
|
||||
if state == 'init':
|
||||
state = 'start'
|
||||
elif state == 'start':
|
||||
state = 'end'
|
||||
|
||||
return True
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
usage = """%prog command [options]
|
||||
|
||||
Basic Commands:
|
||||
start-server Start Server
|
||||
create-db Create new database
|
||||
drop-db Drop database
|
||||
install-module Install module
|
||||
upgrade-module Upgrade module
|
||||
install-translation Install translation file
|
||||
check-quality Calculate quality and dump quality result into quality_log.pck using pickle
|
||||
"""
|
||||
parser = optparse.OptionParser(usage)
|
||||
parser.add_option("--modules", dest="modules",
|
||||
help="specify modules to install or check quality")
|
||||
parser.add_option("--addons-path", dest="addons_path", help="specify the addons path")
|
||||
parser.add_option("--quality-logs", dest="quality_logs", help="specify the path of quality logs files which has to stores")
|
||||
parser.add_option("--root-path", dest="root_path", help="specify the root path")
|
||||
parser.add_option("-p", "--port", dest="port", help="specify the TCP port", type="int")
|
||||
parser.add_option("--net_port", dest="netport",help="specify the TCP port for netrpc")
|
||||
parser.add_option("-d", "--database", dest="db_name", help="specify the database name")
|
||||
parser.add_option("--login", dest="login", help="specify the User Login")
|
||||
parser.add_option("--password", dest="pwd", help="specify the User Password")
|
||||
parser.add_option("--translate-in", dest="translate_in",
|
||||
help="specify .po files to import translation terms")
|
||||
parser.add_option("--extra-addons", dest="extra_addons",
|
||||
help="specify extra_addons and trunkCommunity modules path ")
|
||||
|
||||
(opt, args) = parser.parse_args()
|
||||
if len(args) != 1:
|
||||
parser.error("incorrect number of arguments")
|
||||
command = args[0]
|
||||
if command not in ('start-server','create-db','drop-db','install-module','upgrade-module','check-quality','install-translation'):
|
||||
parser.error("incorrect command")
|
||||
|
||||
def die(cond, msg):
|
||||
if cond:
|
||||
print msg
|
||||
sys.exit(1)
|
||||
|
||||
die(opt.modules and (not opt.db_name),
|
||||
"the modules option cannot be used without the database (-d) option")
|
||||
|
||||
die(opt.translate_in and (not opt.db_name),
|
||||
"the translate-in option cannot be used without the database (-d) option")
|
||||
|
||||
options = {
|
||||
'addons-path' : opt.addons_path or 'addons',
|
||||
'quality-logs' : opt.quality_logs or '',
|
||||
'root-path' : opt.root_path or '',
|
||||
'translate-in': [],
|
||||
'port' : opt.port or 8069,
|
||||
'netport':opt.netport or 8070,
|
||||
'database': opt.db_name or 'terp',
|
||||
'modules' : map(string.strip, opt.modules.split(',')) if opt.modules else [],
|
||||
'login' : opt.login or 'admin',
|
||||
'pwd' : opt.pwd or '',
|
||||
'extra-addons':opt.extra_addons or []
|
||||
}
|
||||
# Hint:i18n-import=purchase:ar_AR.po+sale:fr_FR.po,nl_BE.po
|
||||
if opt.translate_in:
|
||||
translate = opt.translate_in
|
||||
for module_name,po_files in map(lambda x:tuple(x.split(':')),translate.split('+')):
|
||||
for po_file in po_files.split(','):
|
||||
if module_name == 'base':
|
||||
po_link = '%saddons/%s/i18n/%s'%(options['root-path'],module_name,po_file)
|
||||
else:
|
||||
po_link = '%s/%s/i18n/%s'%(options['addons-path'], module_name, po_file)
|
||||
options['translate-in'].append(po_link)
|
||||
|
||||
uri = 'http://localhost:' + str(options['port'])
|
||||
|
||||
server_thread = threading.Thread(target=start_server,
|
||||
args=(options['root-path'], options['port'],options['netport'], options['addons-path']))
|
||||
try:
|
||||
server_thread.start()
|
||||
if command == 'create-db':
|
||||
create_db(uri, options['database'], options['login'], options['pwd'])
|
||||
if command == 'drop-db':
|
||||
drop_db(uri, options['database'])
|
||||
if command == 'install-module':
|
||||
install_module(uri, options['database'], options['modules'],options['addons-path'],options['extra-addons'],options['login'], options['pwd'])
|
||||
if command == 'upgrade-module':
|
||||
upgrade_module(uri, options['database'], options['modules'], options['login'], options['pwd'])
|
||||
if command == 'check-quality':
|
||||
check_quality(uri, options['login'], options['pwd'], options['database'], options['modules'], options['quality-logs'])
|
||||
if command == 'install-translation':
|
||||
import_translate(uri, options['login'], options['pwd'], options['database'], options['translate-in'])
|
||||
clean()
|
||||
sys.exit(0)
|
||||
|
||||
except xmlrpclib.Fault, e:
|
||||
print e.faultString
|
||||
clean()
|
||||
sys.exit(1)
|
||||
except Exception, e:
|
||||
print e
|
||||
clean()
|
||||
sys.exit(1)
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -0,0 +1,37 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
``openerp.http`` offers decorators to register WSGI and RPC endpoints handlers.
|
||||
See :ref:`routing`.
|
||||
"""
|
||||
|
||||
from . import service
|
||||
|
||||
def handler():
|
||||
"""
|
||||
Decorator to register a WSGI handler. The handler must return None if it
|
||||
does not handle the request.
|
||||
"""
|
||||
def decorator(f):
|
||||
service.wsgi_server.register_wsgi_handler(f)
|
||||
return decorator
|
||||
|
||||
def route(url):
|
||||
"""
|
||||
Same as then handler() decorator but register the handler under a specific
|
||||
url. Not yet implemented.
|
||||
"""
|
||||
def decorator(f):
|
||||
pass # TODO
|
||||
return decorator
|
||||
|
||||
def rpc(endpoint):
|
||||
"""
|
||||
Decorator to register a RPC endpoint handler. The handler will receive
|
||||
already unmarshalled RCP arguments.
|
||||
"""
|
||||
def decorator(f):
|
||||
service.wsgi_server.register_rpc_endpoint(endpoint, f)
|
||||
return decorator
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -246,17 +246,10 @@
|
|||
</rng:define>
|
||||
|
||||
<rng:start>
|
||||
<rng:choice>
|
||||
<rng:element name="openerp">
|
||||
<rng:oneOrMore>
|
||||
<rng:ref name="data" />
|
||||
</rng:oneOrMore>
|
||||
</rng:element>
|
||||
<rng:element name="terp">
|
||||
<rng:oneOrMore>
|
||||
<rng:ref name="data" />
|
||||
</rng:oneOrMore>
|
||||
</rng:element>
|
||||
</rng:choice>
|
||||
</rng:start>
|
||||
</rng:grammar>
|
||||
|
|
|
@ -46,7 +46,7 @@ class Logger(object):
|
|||
_logger.warning(
|
||||
"notifyChannel API shouldn't be used anymore, please use "
|
||||
"the standard `logging` module instead.")
|
||||
from service.web_services import common
|
||||
from service import common
|
||||
|
||||
log = logging.getLogger(__name__ + '.deprecated.' + ustr(name))
|
||||
|
||||
|
@ -63,7 +63,7 @@ class Logger(object):
|
|||
try:
|
||||
msg = ustr(msg).strip()
|
||||
if level in (LOG_ERROR, LOG_CRITICAL): # and tools.config.get_misc('debug','env_info',False):
|
||||
msg = common().exp_get_server_environment() + "\n" + msg
|
||||
msg = common.exp_get_server_environment() + "\n" + msg
|
||||
|
||||
result = msg.split('\n')
|
||||
except UnicodeDecodeError:
|
||||
|
|
|
@ -226,7 +226,7 @@ def zip_directory(directory, b64enc=True, src=True):
|
|||
base = os.path.basename(path)
|
||||
for f in osutil.listdir(path, True):
|
||||
bf = os.path.basename(f)
|
||||
if not RE_exclude.search(bf) and (src or bf in ('__openerp__.py', '__terp__.py') or not bf.endswith('.py')):
|
||||
if not RE_exclude.search(bf) and (src or bf == '__openerp__.py' or not bf.endswith('.py')):
|
||||
archive.write(os.path.join(path, f), os.path.join(base, f))
|
||||
|
||||
archname = StringIO()
|
||||
|
@ -310,8 +310,6 @@ def load_information_from_description_file(module):
|
|||
"""
|
||||
|
||||
terp_file = get_module_resource(module, '__openerp__.py')
|
||||
if not terp_file:
|
||||
terp_file = get_module_resource(module, '__terp__.py')
|
||||
mod_path = get_module_path(module)
|
||||
if terp_file:
|
||||
info = {}
|
||||
|
@ -354,8 +352,7 @@ def load_information_from_description_file(module):
|
|||
|
||||
#TODO: refactor the logger in this file to follow the logging guidelines
|
||||
# for 6.0
|
||||
_logger.debug('module %s: no descriptor file'
|
||||
' found: __openerp__.py or __terp__.py (deprecated)', module)
|
||||
_logger.debug('module %s: no __openerp__.py file found.', module)
|
||||
return {}
|
||||
|
||||
|
||||
|
|
|
@ -20,8 +20,6 @@
|
|||
#
|
||||
##############################################################################
|
||||
|
||||
#.apidoc title: Common Services: netsvc
|
||||
#.apidoc module-mods: member-order: bysource
|
||||
|
||||
import errno
|
||||
import logging
|
||||
|
@ -29,7 +27,6 @@ import logging.handlers
|
|||
import os
|
||||
import platform
|
||||
import release
|
||||
import socket
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
|
@ -48,79 +45,13 @@ import openerp
|
|||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def close_socket(sock):
|
||||
""" Closes a socket instance cleanly
|
||||
|
||||
:param sock: the network socket to close
|
||||
:type sock: socket.socket
|
||||
"""
|
||||
try:
|
||||
sock.shutdown(socket.SHUT_RDWR)
|
||||
except socket.error, e:
|
||||
# On OSX, socket shutdowns both sides if any side closes it
|
||||
# causing an error 57 'Socket is not connected' on shutdown
|
||||
# of the other side (or something), see
|
||||
# http://bugs.python.org/issue4397
|
||||
# note: stdlib fixed test, not behavior
|
||||
if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
|
||||
raise
|
||||
sock.close()
|
||||
|
||||
def abort_response(dummy_1, description, dummy_2, details):
|
||||
# TODO Replace except_{osv,orm} with these directly.
|
||||
raise openerp.osv.osv.except_osv(description, details)
|
||||
|
||||
class Service(object):
|
||||
""" Base class for Local services
|
||||
Functionality here is trusted, no authentication.
|
||||
Workflow engine and reports subclass this.
|
||||
"""
|
||||
_services = {}
|
||||
def __init__(self, name):
|
||||
Service._services[name] = self
|
||||
self.__name = name
|
||||
|
||||
@classmethod
|
||||
def exists(cls, name):
|
||||
return name in cls._services
|
||||
|
||||
@classmethod
|
||||
def remove(cls, name):
|
||||
if cls.exists(name):
|
||||
cls._services.pop(name)
|
||||
|
||||
def LocalService(name):
|
||||
# Special case for addons support, will be removed in a few days when addons
|
||||
# are updated to directly use openerp.osv.osv.service.
|
||||
if name == 'object_proxy':
|
||||
return openerp.osv.osv.service
|
||||
if name == 'workflow':
|
||||
return openerp.workflow
|
||||
|
||||
return Service._services[name]
|
||||
|
||||
class ExportService(object):
|
||||
""" Proxy for exported services.
|
||||
|
||||
Note that this class has no direct proxy, capable of calling
|
||||
eservice.method(). Rather, the proxy should call
|
||||
dispatch(method, params)
|
||||
"""
|
||||
|
||||
_services = {}
|
||||
|
||||
def __init__(self, name):
|
||||
ExportService._services[name] = self
|
||||
self.__name = name
|
||||
_logger.debug("Registered an exported service: %s" % name)
|
||||
|
||||
@classmethod
|
||||
def getService(cls,name):
|
||||
return cls._services[name]
|
||||
|
||||
# Dispatch a RPC call w.r.t. the method name. The dispatching
|
||||
# w.r.t. the service (this class) is done by OpenERPDispatcher.
|
||||
def dispatch(self, method, params):
|
||||
raise Exception("stub dispatch at %s" % self.__name)
|
||||
return openerp.report.interface.report_int._reports[name]
|
||||
|
||||
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, _NOTHING, DEFAULT = range(10)
|
||||
#The background is set with 40 plus the number of the color, and the foreground with 30
|
||||
|
@ -292,7 +223,17 @@ def dispatch_rpc(service_name, method, params):
|
|||
|
||||
threading.current_thread().uid = None
|
||||
threading.current_thread().dbname = None
|
||||
result = ExportService.getService(service_name).dispatch(method, params)
|
||||
if service_name == 'common':
|
||||
dispatch = openerp.service.common.dispatch
|
||||
elif service_name == 'db':
|
||||
dispatch = openerp.service.db.dispatch
|
||||
elif service_name == 'object':
|
||||
dispatch = openerp.service.model.dispatch
|
||||
elif service_name == 'report':
|
||||
dispatch = openerp.service.report.dispatch
|
||||
else:
|
||||
dispatch = openerp.service.wsgi_server.rpc_handlers.get(service_name)
|
||||
result = dispatch(method, params)
|
||||
|
||||
if rpc_request_flag or rpc_response_flag:
|
||||
end_time = time.time()
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
import osv
|
||||
import fields
|
||||
|
||||
#.apidoc title: Object Services and Relational Mapping
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
||||
|
|
|
@ -141,7 +141,6 @@ from openerp.osv import fields
|
|||
from openerp.osv.orm import MAGIC_COLUMNS
|
||||
import openerp.tools as tools
|
||||
|
||||
#.apidoc title: Domain Expressions
|
||||
|
||||
# Domain operators.
|
||||
NOT_OPERATOR = '!'
|
||||
|
|
|
@ -19,8 +19,6 @@
|
|||
#
|
||||
##############################################################################
|
||||
|
||||
#.apidoc title: Object Relational Mapping
|
||||
#.apidoc module-mods: member-order: bysource
|
||||
|
||||
"""
|
||||
Object relational mapping to database (postgresql) module
|
||||
|
|
|
@ -19,204 +19,15 @@
|
|||
#
|
||||
##############################################################################
|
||||
|
||||
#.apidoc title: Objects Services (OSV)
|
||||
from openerp.osv.orm import except_orm, Model, TransientModel, AbstractModel
|
||||
|
||||
from functools import wraps
|
||||
import logging
|
||||
import threading
|
||||
# Deprecated, kept for backward compatibility.
|
||||
# openerp.exceptions.Warning should be used instead.
|
||||
except_osv = except_orm
|
||||
|
||||
from psycopg2 import IntegrityError, errorcodes
|
||||
|
||||
import orm
|
||||
import openerp
|
||||
import openerp.netsvc as netsvc
|
||||
import openerp.pooler as pooler
|
||||
import openerp.sql_db as sql_db
|
||||
from openerp.tools.translate import translate
|
||||
from openerp.osv.orm import MetaModel, Model, TransientModel, AbstractModel
|
||||
import openerp.exceptions
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
# Deprecated.
|
||||
# Have a look at exceptions.py instead.
|
||||
class except_osv(Exception):
|
||||
def __init__(self, name, value):
|
||||
self.name = name
|
||||
self.value = value
|
||||
self.args = (name, value)
|
||||
|
||||
service = None
|
||||
|
||||
class object_proxy(object):
|
||||
def __init__(self):
|
||||
global service
|
||||
service = self
|
||||
|
||||
def check(f):
|
||||
@wraps(f)
|
||||
def wrapper(self, dbname, *args, **kwargs):
|
||||
""" Wraps around OSV functions and normalises a few exceptions
|
||||
"""
|
||||
|
||||
def tr(src, ttype):
|
||||
# We try to do the same as the _(), but without the frame
|
||||
# inspection, since we aready are wrapping an osv function
|
||||
# trans_obj = self.get('ir.translation') cannot work yet :(
|
||||
ctx = {}
|
||||
if not kwargs:
|
||||
if args and isinstance(args[-1], dict):
|
||||
ctx = args[-1]
|
||||
elif isinstance(kwargs, dict):
|
||||
ctx = kwargs.get('context', {})
|
||||
|
||||
uid = 1
|
||||
if args and isinstance(args[0], (long, int)):
|
||||
uid = args[0]
|
||||
|
||||
lang = ctx and ctx.get('lang')
|
||||
if not (lang or hasattr(src, '__call__')):
|
||||
return src
|
||||
|
||||
# We open a *new* cursor here, one reason is that failed SQL
|
||||
# queries (as in IntegrityError) will invalidate the current one.
|
||||
cr = False
|
||||
|
||||
if hasattr(src, '__call__'):
|
||||
# callable. We need to find the right parameters to call
|
||||
# the orm._sql_message(self, cr, uid, ids, context) function,
|
||||
# or we skip..
|
||||
# our signature is f(osv_pool, dbname [,uid, obj, method, args])
|
||||
try:
|
||||
if args and len(args) > 1:
|
||||
obj = self.get(args[1])
|
||||
if len(args) > 3 and isinstance(args[3], (long, int, list)):
|
||||
ids = args[3]
|
||||
else:
|
||||
ids = []
|
||||
cr = sql_db.db_connect(dbname).cursor()
|
||||
return src(obj, cr, uid, ids, context=(ctx or {}))
|
||||
except Exception:
|
||||
pass
|
||||
finally:
|
||||
if cr: cr.close()
|
||||
|
||||
return False # so that the original SQL error will
|
||||
# be returned, it is the best we have.
|
||||
|
||||
try:
|
||||
cr = sql_db.db_connect(dbname).cursor()
|
||||
res = translate(cr, name=False, source_type=ttype,
|
||||
lang=lang, source=src)
|
||||
if res:
|
||||
return res
|
||||
else:
|
||||
return src
|
||||
finally:
|
||||
if cr: cr.close()
|
||||
|
||||
def _(src):
|
||||
return tr(src, 'code')
|
||||
|
||||
try:
|
||||
if pooler.get_pool(dbname)._init:
|
||||
raise except_osv('Database not ready', 'Currently, this database is not fully loaded and can not be used.')
|
||||
return f(self, dbname, *args, **kwargs)
|
||||
except orm.except_orm, inst:
|
||||
raise except_osv(inst.name, inst.value)
|
||||
except except_osv:
|
||||
raise
|
||||
except IntegrityError, inst:
|
||||
osv_pool = pooler.get_pool(dbname)
|
||||
for key in osv_pool._sql_error.keys():
|
||||
if key in inst[0]:
|
||||
netsvc.abort_response(1, _('Constraint Error'), 'warning',
|
||||
tr(osv_pool._sql_error[key], 'sql_constraint') or inst[0])
|
||||
if inst.pgcode in (errorcodes.NOT_NULL_VIOLATION, errorcodes.FOREIGN_KEY_VIOLATION, errorcodes.RESTRICT_VIOLATION):
|
||||
msg = _('The operation cannot be completed, probably due to the following:\n- deletion: you may be trying to delete a record while other records still reference it\n- creation/update: a mandatory field is not correctly set')
|
||||
_logger.debug("IntegrityError", exc_info=True)
|
||||
try:
|
||||
errortxt = inst.pgerror.replace('«','"').replace('»','"')
|
||||
if '"public".' in errortxt:
|
||||
context = errortxt.split('"public".')[1]
|
||||
model_name = table = context.split('"')[1]
|
||||
else:
|
||||
last_quote_end = errortxt.rfind('"')
|
||||
last_quote_begin = errortxt.rfind('"', 0, last_quote_end)
|
||||
model_name = table = errortxt[last_quote_begin+1:last_quote_end].strip()
|
||||
model = table.replace("_",".")
|
||||
model_obj = osv_pool.get(model)
|
||||
if model_obj:
|
||||
model_name = model_obj._description or model_obj._name
|
||||
msg += _('\n\n[object with reference: %s - %s]') % (model_name, model)
|
||||
except Exception:
|
||||
pass
|
||||
netsvc.abort_response(1, _('Integrity Error'), 'warning', msg)
|
||||
else:
|
||||
netsvc.abort_response(1, _('Integrity Error'), 'warning', inst[0])
|
||||
except Exception:
|
||||
_logger.exception("Uncaught exception")
|
||||
raise
|
||||
|
||||
return wrapper
|
||||
|
||||
def execute_cr(self, cr, uid, obj, method, *args, **kw):
|
||||
object = pooler.get_pool(cr.dbname).get(obj)
|
||||
if not object:
|
||||
raise except_osv('Object Error', 'Object %s doesn\'t exist' % str(obj))
|
||||
return getattr(object, method)(cr, uid, *args, **kw)
|
||||
|
||||
def execute_kw(self, db, uid, obj, method, args, kw=None):
|
||||
return self.execute(db, uid, obj, method, *args, **kw or {})
|
||||
|
||||
@check
|
||||
def execute(self, db, uid, obj, method, *args, **kw):
|
||||
threading.currentThread().dbname = db
|
||||
cr = pooler.get_db(db).cursor()
|
||||
try:
|
||||
try:
|
||||
if method.startswith('_'):
|
||||
raise except_osv('Access Denied', 'Private methods (such as %s) cannot be called remotely.' % (method,))
|
||||
res = self.execute_cr(cr, uid, obj, method, *args, **kw)
|
||||
if res is None:
|
||||
_logger.warning('The method %s of the object %s can not return `None` !', method, obj)
|
||||
cr.commit()
|
||||
except Exception:
|
||||
cr.rollback()
|
||||
raise
|
||||
finally:
|
||||
cr.close()
|
||||
return res
|
||||
|
||||
def exec_workflow_cr(self, cr, uid, obj, signal, *args):
|
||||
object = pooler.get_pool(cr.dbname).get(obj)
|
||||
if not object:
|
||||
raise except_osv('Object Error', 'Object %s doesn\'t exist' % str(obj))
|
||||
res_id = args[0]
|
||||
return object._workflow_signal(cr, uid, [res_id], signal)[res_id]
|
||||
|
||||
@check
|
||||
def exec_workflow(self, db, uid, obj, signal, *args):
|
||||
cr = pooler.get_db(db).cursor()
|
||||
try:
|
||||
try:
|
||||
res = self.exec_workflow_cr(cr, uid, obj, signal, *args)
|
||||
cr.commit()
|
||||
except Exception:
|
||||
cr.rollback()
|
||||
raise
|
||||
finally:
|
||||
cr.close()
|
||||
return res
|
||||
|
||||
# deprecated - for backward compatibility.
|
||||
# Deprecated, kept for backward compatibility.
|
||||
osv = Model
|
||||
osv_memory = TransientModel
|
||||
osv_abstract = AbstractModel # ;-)
|
||||
|
||||
|
||||
def start_object_proxy():
|
||||
object_proxy()
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
#
|
||||
##############################################################################
|
||||
|
||||
#.apidoc title: Query object
|
||||
|
||||
|
||||
def _quote(to_quote):
|
||||
|
|
|
@ -30,7 +30,6 @@ import report_sxw
|
|||
|
||||
import printscreen
|
||||
|
||||
#.apidoc title: Reporting Support and Engines
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@ import os
|
|||
import re
|
||||
|
||||
from lxml import etree
|
||||
import openerp.netsvc as netsvc
|
||||
import openerp.pooler as pooler
|
||||
|
||||
import openerp.tools as tools
|
||||
|
@ -40,12 +39,17 @@ def toxml(value):
|
|||
unicode_value = tools.ustr(value)
|
||||
return unicode_value.replace('&', '&').replace('<','<').replace('>','>')
|
||||
|
||||
class report_int(netsvc.Service):
|
||||
class report_int(object):
|
||||
|
||||
_reports = {}
|
||||
|
||||
def __init__(self, name):
|
||||
assert not self.exists(name), 'The report "%s" already exists!' % name
|
||||
super(report_int, self).__init__(name)
|
||||
if not name.startswith('report.'):
|
||||
raise Exception('ConceptionError, bad report name, should start with "report."')
|
||||
assert name not in self._reports, 'The report "%s" already exists!' % name
|
||||
self._reports[name] = self
|
||||
self.__name = name
|
||||
|
||||
self.name = name
|
||||
self.id = 0
|
||||
self.name2 = '.'.join(name.split('.')[1:])
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
import ps_list
|
||||
import ps_form
|
||||
|
||||
#.apidoc title: Printscreen Support
|
||||
|
||||
""" A special report, that is automatically formatted to look like the
|
||||
screen contents of Form/List Views.
|
||||
|
|
|
@ -28,7 +28,6 @@ from lxml import etree
|
|||
|
||||
import time, os
|
||||
|
||||
#.apidoc title: Printscreen for Form Views
|
||||
|
||||
class report_printscreen_list(report_int):
|
||||
def __init__(self, name):
|
||||
|
|
|
@ -31,7 +31,6 @@ import time, os
|
|||
from operator import itemgetter
|
||||
from datetime import datetime
|
||||
|
||||
#.apidoc title: Printscreen for List Views
|
||||
|
||||
class report_printscreen_list(report_int):
|
||||
def __init__(self, name):
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
from pdf import PdfFileReader, PdfFileWriter
|
||||
#.apidoc title: pyPdf Engine
|
||||
|
||||
__all__ = ["pdf"]
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@ from simple import simple
|
|||
from rml import rml, rml2html, rml2txt, odt2odt , html2html, makohtml2html
|
||||
from render import render
|
||||
|
||||
#.apidoc title: Report Rendering
|
||||
|
||||
try:
|
||||
from PIL import Image
|
||||
|
|
|
@ -21,7 +21,6 @@
|
|||
|
||||
from html2html import parseString
|
||||
|
||||
#.apidoc title: HTML to HTML engine
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
||||
|
|
|
@ -21,7 +21,6 @@
|
|||
|
||||
from makohtml2html import parseNode
|
||||
|
||||
#.apidoc title: MAKO to HTML engine
|
||||
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
|
|
@ -21,6 +21,5 @@
|
|||
|
||||
from odt2odt import parseNode
|
||||
|
||||
#.apidoc title: ODT to ODT engine
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -21,7 +21,6 @@
|
|||
|
||||
from rml2html import parseString
|
||||
|
||||
#.apidoc title: RML to HTML engine
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
||||
|
|
|
@ -21,7 +21,6 @@
|
|||
|
||||
from trml2pdf import parseString, parseNode
|
||||
|
||||
#.apidoc title: RML to PDF engine
|
||||
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
|
|
@ -28,7 +28,6 @@ from reportlab import rl_config
|
|||
|
||||
from openerp.tools import config
|
||||
|
||||
#.apidoc title: TTF Font Table
|
||||
|
||||
"""This module allows the mapping of some system-available TTF fonts to
|
||||
the reportlab engine.
|
||||
|
|
|
@ -21,7 +21,6 @@
|
|||
|
||||
from rml2txt import parseString, parseNode
|
||||
|
||||
#.apidoc title: RML to TXT engine
|
||||
|
||||
""" This engine is the minimalistic renderer of RML documents into text files,
|
||||
using spaces and newlines to format.
|
||||
|
|
|
@ -29,9 +29,6 @@ import threading
|
|||
import time
|
||||
|
||||
import cron
|
||||
import netrpc_server
|
||||
import web_services
|
||||
import web_services
|
||||
import wsgi_server
|
||||
|
||||
import openerp.modules
|
||||
|
@ -40,6 +37,11 @@ import openerp.osv
|
|||
from openerp.release import nt_service_name
|
||||
import openerp.tools
|
||||
|
||||
import common
|
||||
import db
|
||||
import model
|
||||
import report
|
||||
|
||||
#.apidoc title: RPC Services
|
||||
|
||||
""" Classes of this module implement the network protocols that the
|
||||
|
@ -74,19 +76,12 @@ def start_internal():
|
|||
return
|
||||
openerp.netsvc.init_logger()
|
||||
|
||||
# Instantiate local services (this is a legacy design).
|
||||
openerp.osv.osv.start_object_proxy()
|
||||
# Export (for RPC) services.
|
||||
web_services.start_service()
|
||||
|
||||
load_server_wide_modules()
|
||||
start_internal_done = True
|
||||
|
||||
def start_services():
|
||||
""" Start all services including http, netrpc and cron """
|
||||
""" Start all services including http, and cron """
|
||||
start_internal()
|
||||
# Initialize the NETRPC server.
|
||||
netrpc_server.start_service()
|
||||
# Start the WSGI server.
|
||||
wsgi_server.start_service()
|
||||
# Start the main cron thread.
|
||||
|
@ -96,7 +91,6 @@ def stop_services():
|
|||
""" Stop all services. """
|
||||
# stop services
|
||||
cron.stop_service()
|
||||
netrpc_server.stop_service()
|
||||
wsgi_server.stop_service()
|
||||
|
||||
_logger.info("Initiating shutdown")
|
||||
|
|
|
@ -0,0 +1,200 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
import logging
|
||||
import threading
|
||||
|
||||
import openerp.osv.orm # TODO use openerp.exceptions
|
||||
import openerp.pooler
|
||||
import openerp.release
|
||||
import openerp.tools
|
||||
|
||||
import security
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
RPC_VERSION_1 = {
|
||||
'server_version': openerp.release.version,
|
||||
'server_version_info': openerp.release.version_info,
|
||||
'server_serie': openerp.release.serie,
|
||||
'protocol_version': 1,
|
||||
}
|
||||
|
||||
def dispatch(method, params):
|
||||
if method in ['login', 'about', 'timezone_get', 'get_server_environment',
|
||||
'login_message','get_stats', 'check_connectivity',
|
||||
'list_http_services', 'version', 'authenticate']:
|
||||
pass
|
||||
elif method in ['get_available_updates', 'get_migration_scripts', 'set_loglevel', 'get_os_time', 'get_sqlcount']:
|
||||
passwd = params[0]
|
||||
params = params[1:]
|
||||
security.check_super(passwd)
|
||||
else:
|
||||
raise Exception("Method not found: %s" % method)
|
||||
|
||||
fn = globals()['exp_' + method]
|
||||
return fn(*params)
|
||||
|
||||
def exp_login(db, login, password):
|
||||
# TODO: legacy indirection through 'security', should use directly
|
||||
# the res.users model
|
||||
res = security.login(db, login, password)
|
||||
msg = res and 'successful login' or 'bad login or password'
|
||||
_logger.info("%s from '%s' using database '%s'", msg, login, db.lower())
|
||||
return res or False
|
||||
|
||||
def exp_authenticate(db, login, password, user_agent_env):
|
||||
res_users = openerp.pooler.get_pool(db).get('res.users')
|
||||
return res_users.authenticate(db, login, password, user_agent_env)
|
||||
|
||||
def exp_version():
|
||||
return RPC_VERSION_1
|
||||
|
||||
def exp_about(extended=False):
|
||||
"""Return information about the OpenERP Server.
|
||||
|
||||
@param extended: if True then return version info
|
||||
@return string if extended is False else tuple
|
||||
"""
|
||||
|
||||
info = _('See http://openerp.com')
|
||||
|
||||
if extended:
|
||||
return info, openerp.release.version
|
||||
return info
|
||||
|
||||
def exp_timezone_get(db, login, password):
|
||||
return openerp.tools.misc.get_server_timezone()
|
||||
|
||||
def exp_get_available_updates(contract_id, contract_password):
|
||||
import openerp.tools.maintenance as tm
|
||||
try:
|
||||
rc = tm.remote_contract(contract_id, contract_password)
|
||||
if not rc.id:
|
||||
raise tm.RemoteContractException('This contract does not exist or is not active')
|
||||
|
||||
return rc.get_available_updates(rc.id, openerp.modules.get_modules_with_version())
|
||||
|
||||
except tm.RemoteContractException, e:
|
||||
raise openerp.osv.orm.except_orm('Migration Error', str(e))
|
||||
|
||||
|
||||
def exp_get_migration_scripts(contract_id, contract_password):
|
||||
import openerp.tools.maintenance as tm
|
||||
try:
|
||||
rc = tm.remote_contract(contract_id, contract_password)
|
||||
if not rc.id:
|
||||
raise tm.RemoteContractException('This contract does not exist or is not active')
|
||||
if rc.status != 'full':
|
||||
raise tm.RemoteContractException('Can not get updates for a partial contract')
|
||||
|
||||
_logger.info('starting migration with contract %s', rc.name)
|
||||
|
||||
zips = rc.retrieve_updates(rc.id, openerp.modules.get_modules_with_version())
|
||||
|
||||
from shutil import rmtree, copytree, copy
|
||||
|
||||
backup_directory = os.path.join(openerp.tools.config['root_path'], 'backup', time.strftime('%Y-%m-%d-%H-%M'))
|
||||
if zips and not os.path.isdir(backup_directory):
|
||||
_logger.info('create a new backup directory to store the old modules: %s', backup_directory)
|
||||
os.makedirs(backup_directory)
|
||||
|
||||
for module in zips:
|
||||
_logger.info('upgrade module %s', module)
|
||||
mp = openerp.modules.get_module_path(module)
|
||||
if mp:
|
||||
if os.path.isdir(mp):
|
||||
copytree(mp, os.path.join(backup_directory, module))
|
||||
if os.path.islink(mp):
|
||||
os.unlink(mp)
|
||||
else:
|
||||
rmtree(mp)
|
||||
else:
|
||||
copy(mp + 'zip', backup_directory)
|
||||
os.unlink(mp + '.zip')
|
||||
|
||||
try:
|
||||
try:
|
||||
base64_decoded = base64.decodestring(zips[module])
|
||||
except Exception:
|
||||
_logger.error('unable to read the module %s', module)
|
||||
raise
|
||||
|
||||
zip_contents = StringIO(base64_decoded)
|
||||
zip_contents.seek(0)
|
||||
try:
|
||||
try:
|
||||
openerp.tools.extract_zip_file(zip_contents, openerp.tools.config['addons_path'] )
|
||||
except Exception:
|
||||
_logger.error('unable to extract the module %s', module)
|
||||
rmtree(module)
|
||||
raise
|
||||
finally:
|
||||
zip_contents.close()
|
||||
except Exception:
|
||||
_logger.error('restore the previous version of the module %s', module)
|
||||
nmp = os.path.join(backup_directory, module)
|
||||
if os.path.isdir(nmp):
|
||||
copytree(nmp, openerp.tools.config['addons_path'])
|
||||
else:
|
||||
copy(nmp+'.zip', openerp.tools.config['addons_path'])
|
||||
raise
|
||||
|
||||
return True
|
||||
except tm.RemoteContractException, e:
|
||||
raise openerp.osv.orm.except_orm('Migration Error', str(e))
|
||||
except Exception, e:
|
||||
_logger.exception('Exception in get_migration_script:')
|
||||
raise
|
||||
|
||||
def exp_get_server_environment():
|
||||
os_lang = '.'.join( [x for x in locale.getdefaultlocale() if x] )
|
||||
if not os_lang:
|
||||
os_lang = 'NOT SET'
|
||||
environment = '\nEnvironment Information : \n' \
|
||||
'System : %s\n' \
|
||||
'OS Name : %s\n' \
|
||||
%(platform.platform(), platform.os.name)
|
||||
if os.name == 'posix':
|
||||
if platform.system() == 'Linux':
|
||||
lsbinfo = os.popen('lsb_release -a').read()
|
||||
environment += '%s'% lsbinfo
|
||||
else:
|
||||
environment += 'Your System is not lsb compliant\n'
|
||||
environment += 'Operating System Release : %s\n' \
|
||||
'Operating System Version : %s\n' \
|
||||
'Operating System Architecture : %s\n' \
|
||||
'Operating System Locale : %s\n'\
|
||||
'Python Version : %s\n'\
|
||||
'OpenERP-Server Version : %s'\
|
||||
%(platform.release(), platform.version(), platform.architecture()[0],
|
||||
os_lang, platform.python_version(), openerp.release.version)
|
||||
return environment
|
||||
|
||||
def exp_login_message():
|
||||
return openerp.tools.config.get('login_message', False)
|
||||
|
||||
def exp_set_loglevel(loglevel, logger=None):
|
||||
# TODO Previously, the level was set on the now deprecated
|
||||
# `openerp.netsvc.Logger` class.
|
||||
return True
|
||||
|
||||
def exp_get_stats():
|
||||
res = "OpenERP server: %d threads\n" % threading.active_count()
|
||||
res += netsvc.Server.allStats()
|
||||
return res
|
||||
|
||||
def exp_list_http_services():
|
||||
return http_server.list_http_services()
|
||||
|
||||
def exp_check_connectivity():
|
||||
return bool(sql_db.db_connect('postgres'))
|
||||
|
||||
def exp_get_os_time():
|
||||
return os.times()
|
||||
|
||||
def exp_get_sqlcount():
|
||||
if not logging.getLogger('openerp.sql_db').isEnabledFor(logging.DEBUG):
|
||||
_logger.warning("Counters of SQL will not be reliable unless logger openerp.sql_db is set to level DEBUG or higer.")
|
||||
return sql_db.sql_counter
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -0,0 +1,349 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
import contextlib
|
||||
import logging
|
||||
import threading
|
||||
import traceback
|
||||
|
||||
from openerp import SUPERUSER_ID
|
||||
import openerp.pooler
|
||||
import openerp.sql_db
|
||||
import openerp.tools
|
||||
|
||||
import security
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
self_actions = {}
|
||||
self_id = 0
|
||||
self_id_protect = threading.Semaphore()
|
||||
|
||||
# This should be moved to openerp.modules.db, along side initialize().
|
||||
def _initialize_db(id, db_name, demo, lang, user_password):
|
||||
cr = None
|
||||
try:
|
||||
self_actions[id]['progress'] = 0
|
||||
cr = openerp.sql_db.db_connect(db_name).cursor()
|
||||
openerp.modules.db.initialize(cr) # TODO this should be removed as it is done by pooler.restart_pool.
|
||||
openerp.tools.config['lang'] = lang
|
||||
cr.commit()
|
||||
cr.close()
|
||||
|
||||
pool = openerp.pooler.restart_pool(db_name, demo, self_actions[id],
|
||||
update_module=True)[1]
|
||||
|
||||
cr = openerp.sql_db.db_connect(db_name).cursor()
|
||||
|
||||
if lang:
|
||||
modobj = pool.get('ir.module.module')
|
||||
mids = modobj.search(cr, SUPERUSER_ID, [('state', '=', 'installed')])
|
||||
modobj.update_translations(cr, SUPERUSER_ID, mids, lang)
|
||||
|
||||
# update admin's password and lang
|
||||
values = {'password': user_password, 'lang': lang}
|
||||
pool.get('res.users').write(cr, SUPERUSER_ID, [SUPERUSER_ID], values)
|
||||
|
||||
cr.execute('SELECT login, password FROM res_users ORDER BY login')
|
||||
self_actions[id].update(users=cr.dictfetchall(), clean=True)
|
||||
cr.commit()
|
||||
cr.close()
|
||||
except Exception, e:
|
||||
self_actions[id].update(clean=False, exception=e)
|
||||
_logger.exception('CREATE DATABASE failed:')
|
||||
self_actions[id]['traceback'] = traceback.format_exc()
|
||||
if cr:
|
||||
cr.close()
|
||||
|
||||
def dispatch(method, params):
|
||||
if method in [ 'create', 'get_progress', 'drop', 'dump',
|
||||
'restore', 'rename',
|
||||
'change_admin_password', 'migrate_databases',
|
||||
'create_database', 'duplicate_database' ]:
|
||||
passwd = params[0]
|
||||
params = params[1:]
|
||||
security.check_super(passwd)
|
||||
elif method in [ 'db_exist', 'list', 'list_lang', 'server_version' ]:
|
||||
# params = params
|
||||
# No security check for these methods
|
||||
pass
|
||||
else:
|
||||
raise KeyError("Method not found: %s" % method)
|
||||
fn = globals()['exp_' + method]
|
||||
return fn(*params)
|
||||
|
||||
def _create_empty_database(name):
|
||||
db = openerp.sql_db.db_connect('postgres')
|
||||
cr = db.cursor()
|
||||
chosen_template = openerp.tools.config['db_template']
|
||||
cr.execute("""SELECT datname
|
||||
FROM pg_database
|
||||
WHERE datname = %s """,
|
||||
(name,))
|
||||
if cr.fetchall():
|
||||
raise openerp.exceptions.Warning(" %s database already exists!" % name )
|
||||
try:
|
||||
cr.autocommit(True) # avoid transaction block
|
||||
cr.execute("""CREATE DATABASE "%s" ENCODING 'unicode' TEMPLATE "%s" """ % (name, chosen_template))
|
||||
finally:
|
||||
cr.close()
|
||||
|
||||
def exp_create(db_name, demo, lang, user_password='admin'):
|
||||
self_id_protect.acquire()
|
||||
global self_id
|
||||
self_id += 1
|
||||
id = self_id
|
||||
self_id_protect.release()
|
||||
|
||||
self_actions[id] = {'clean': False}
|
||||
|
||||
_create_empty_database(db_name)
|
||||
|
||||
_logger.info('CREATE DATABASE %s', db_name.lower())
|
||||
create_thread = threading.Thread(target=_initialize_db,
|
||||
args=(id, db_name, demo, lang, user_password))
|
||||
create_thread.start()
|
||||
self_actions[id]['thread'] = create_thread
|
||||
return id
|
||||
|
||||
def exp_create_database(db_name, demo, lang, user_password='admin'):
|
||||
""" Similar to exp_create but blocking."""
|
||||
self_id_protect.acquire()
|
||||
global self_id
|
||||
self_id += 1
|
||||
id = self_id
|
||||
self_id_protect.release()
|
||||
|
||||
self_actions[id] = {'clean': False}
|
||||
|
||||
_logger.info('Create database `%s`.', db_name)
|
||||
_create_empty_database(db_name)
|
||||
_initialize_db(id, db_name, demo, lang, user_password)
|
||||
return True
|
||||
|
||||
def exp_duplicate_database(db_original_name, db_name):
|
||||
_logger.info('Duplicate database `%s` to `%s`.', db_original_name, db_name)
|
||||
openerp.sql_db.close_db(db_original_name)
|
||||
db = openerp.sql_db.db_connect('postgres')
|
||||
cr = db.cursor()
|
||||
try:
|
||||
cr.autocommit(True) # avoid transaction block
|
||||
cr.execute("""CREATE DATABASE "%s" ENCODING 'unicode' TEMPLATE "%s" """ % (db_name, db_original_name))
|
||||
finally:
|
||||
cr.close()
|
||||
return True
|
||||
|
||||
def exp_get_progress(id):
|
||||
if self_actions[id]['thread'].isAlive():
|
||||
# return openerp.modules.init_progress[db_name]
|
||||
return min(self_actions[id].get('progress', 0),0.95), []
|
||||
else:
|
||||
clean = self_actions[id]['clean']
|
||||
if clean:
|
||||
users = self_actions[id]['users']
|
||||
for user in users:
|
||||
# Remove the None passwords as they can't be marshalled by XML-RPC.
|
||||
if user['password'] is None:
|
||||
user['password'] = ''
|
||||
self_actions.pop(id)
|
||||
return 1.0, users
|
||||
else:
|
||||
e = self_actions[id]['exception'] # TODO this seems wrong: actions[id]['traceback'] is set, but not 'exception'.
|
||||
self_actions.pop(id)
|
||||
raise Exception, e
|
||||
|
||||
def exp_drop(db_name):
|
||||
if not exp_db_exist(db_name):
|
||||
return False
|
||||
openerp.modules.registry.RegistryManager.delete(db_name)
|
||||
openerp.sql_db.close_db(db_name)
|
||||
|
||||
db = openerp.sql_db.db_connect('postgres')
|
||||
cr = db.cursor()
|
||||
cr.autocommit(True) # avoid transaction block
|
||||
try:
|
||||
# Try to terminate all other connections that might prevent
|
||||
# dropping the database
|
||||
try:
|
||||
|
||||
# PostgreSQL 9.2 renamed pg_stat_activity.procpid to pid:
|
||||
# http://www.postgresql.org/docs/9.2/static/release-9-2.html#AEN110389
|
||||
pid_col = 'pid' if cr._cnx.server_version >= 90200 else 'procpid'
|
||||
|
||||
cr.execute("""SELECT pg_terminate_backend(%(pid_col)s)
|
||||
FROM pg_stat_activity
|
||||
WHERE datname = %%s AND
|
||||
%(pid_col)s != pg_backend_pid()""" % {'pid_col': pid_col},
|
||||
(db_name,))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
cr.execute('DROP DATABASE "%s"' % db_name)
|
||||
except Exception, e:
|
||||
_logger.error('DROP DB: %s failed:\n%s', db_name, e)
|
||||
raise Exception("Couldn't drop database %s: %s" % (db_name, e))
|
||||
else:
|
||||
_logger.info('DROP DB: %s', db_name)
|
||||
finally:
|
||||
cr.close()
|
||||
return True
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _set_pg_password_in_environment():
|
||||
""" On Win32, pg_dump (and pg_restore) require that
|
||||
:envvar:`PGPASSWORD` be set
|
||||
|
||||
This context management method handles setting
|
||||
:envvar:`PGPASSWORD` iif win32 and the envvar is not already
|
||||
set, and removing it afterwards.
|
||||
"""
|
||||
if os.name != 'nt' or os.environ.get('PGPASSWORD'):
|
||||
yield
|
||||
else:
|
||||
os.environ['PGPASSWORD'] = openerp.tools.config['db_password']
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
del os.environ['PGPASSWORD']
|
||||
|
||||
|
||||
def exp_dump(db_name):
|
||||
with _set_pg_password_in_environment():
|
||||
cmd = ['pg_dump', '--format=c', '--no-owner']
|
||||
if openerp.tools.config['db_user']:
|
||||
cmd.append('--username=' + openerp.tools.config['db_user'])
|
||||
if openerp.tools.config['db_host']:
|
||||
cmd.append('--host=' + openerp.tools.config['db_host'])
|
||||
if openerp.tools.config['db_port']:
|
||||
cmd.append('--port=' + str(openerp.tools.config['db_port']))
|
||||
cmd.append(db_name)
|
||||
|
||||
stdin, stdout = openerp.tools.exec_pg_command_pipe(*tuple(cmd))
|
||||
stdin.close()
|
||||
data = stdout.read()
|
||||
res = stdout.close()
|
||||
|
||||
if not data or res:
|
||||
_logger.error(
|
||||
'DUMP DB: %s failed! Please verify the configuration of the database password on the server. '
|
||||
'It should be provided as a -w <PASSWD> command-line option, or as `db_password` in the '
|
||||
'server configuration file.\n %s', db_name, data)
|
||||
raise Exception, "Couldn't dump database"
|
||||
_logger.info('DUMP DB successful: %s', db_name)
|
||||
|
||||
return base64.encodestring(data)
|
||||
|
||||
def exp_restore(db_name, data):
|
||||
with _set_pg_password_in_environment():
|
||||
if exp_db_exist(db_name):
|
||||
_logger.warning('RESTORE DB: %s already exists', db_name)
|
||||
raise Exception, "Database already exists"
|
||||
|
||||
_create_empty_database(db_name)
|
||||
|
||||
cmd = ['pg_restore', '--no-owner']
|
||||
if openerp.tools.config['db_user']:
|
||||
cmd.append('--username=' + openerp.tools.config['db_user'])
|
||||
if openerp.tools.config['db_host']:
|
||||
cmd.append('--host=' + openerp.tools.config['db_host'])
|
||||
if openerp.tools.config['db_port']:
|
||||
cmd.append('--port=' + str(openerp.tools.config['db_port']))
|
||||
cmd.append('--dbname=' + db_name)
|
||||
args2 = tuple(cmd)
|
||||
|
||||
buf=base64.decodestring(data)
|
||||
if os.name == "nt":
|
||||
tmpfile = (os.environ['TMP'] or 'C:\\') + os.tmpnam()
|
||||
file(tmpfile, 'wb').write(buf)
|
||||
args2=list(args2)
|
||||
args2.append(tmpfile)
|
||||
args2=tuple(args2)
|
||||
stdin, stdout = openerp.tools.exec_pg_command_pipe(*args2)
|
||||
if not os.name == "nt":
|
||||
stdin.write(base64.decodestring(data))
|
||||
stdin.close()
|
||||
res = stdout.close()
|
||||
if res:
|
||||
raise Exception, "Couldn't restore database"
|
||||
_logger.info('RESTORE DB: %s', db_name)
|
||||
|
||||
return True
|
||||
|
||||
def exp_rename(old_name, new_name):
|
||||
openerp.modules.registry.RegistryManager.delete(old_name)
|
||||
openerp.sql_db.close_db(old_name)
|
||||
|
||||
db = openerp.sql_db.db_connect('postgres')
|
||||
cr = db.cursor()
|
||||
cr.autocommit(True) # avoid transaction block
|
||||
try:
|
||||
try:
|
||||
cr.execute('ALTER DATABASE "%s" RENAME TO "%s"' % (old_name, new_name))
|
||||
except Exception, e:
|
||||
_logger.error('RENAME DB: %s -> %s failed:\n%s', old_name, new_name, e)
|
||||
raise Exception("Couldn't rename database %s to %s: %s" % (old_name, new_name, e))
|
||||
else:
|
||||
fs = os.path.join(openerp.tools.config['root_path'], 'filestore')
|
||||
if os.path.exists(os.path.join(fs, old_name)):
|
||||
os.rename(os.path.join(fs, old_name), os.path.join(fs, new_name))
|
||||
|
||||
_logger.info('RENAME DB: %s -> %s', old_name, new_name)
|
||||
finally:
|
||||
cr.close()
|
||||
return True
|
||||
|
||||
def exp_db_exist(db_name):
|
||||
## Not True: in fact, check if connection to database is possible. The database may exists
|
||||
return bool(openerp.sql_db.db_connect(db_name))
|
||||
|
||||
def exp_list(document=False):
|
||||
if not openerp.tools.config['list_db'] and not document:
|
||||
raise openerp.exceptions.AccessDenied()
|
||||
chosen_template = openerp.tools.config['db_template']
|
||||
templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
|
||||
db = openerp.sql_db.db_connect('postgres')
|
||||
cr = db.cursor()
|
||||
try:
|
||||
try:
|
||||
db_user = openerp.tools.config["db_user"]
|
||||
if not db_user and os.name == 'posix':
|
||||
import pwd
|
||||
db_user = pwd.getpwuid(os.getuid())[0]
|
||||
if not db_user:
|
||||
cr.execute("select usename from pg_user where usesysid=(select datdba from pg_database where datname=%s)", (openerp.tools.config["db_name"],))
|
||||
res = cr.fetchone()
|
||||
db_user = res and str(res[0])
|
||||
if db_user:
|
||||
cr.execute("select datname from pg_database where datdba=(select usesysid from pg_user where usename=%s) and datname not in %s order by datname", (db_user, templates_list))
|
||||
else:
|
||||
cr.execute("select datname from pg_database where datname not in %s order by datname", (templates_list,))
|
||||
res = [str(name) for (name,) in cr.fetchall()]
|
||||
except Exception:
|
||||
res = []
|
||||
finally:
|
||||
cr.close()
|
||||
res.sort()
|
||||
return res
|
||||
|
||||
def exp_change_admin_password(new_password):
|
||||
openerp.tools.config['admin_passwd'] = new_password
|
||||
openerp.tools.config.save()
|
||||
return True
|
||||
|
||||
def exp_list_lang():
|
||||
return openerp.tools.scan_languages()
|
||||
|
||||
def exp_server_version():
|
||||
""" Return the version of the server
|
||||
Used by the client to verify the compatibility with its own version
|
||||
"""
|
||||
return release.version
|
||||
|
||||
def exp_migrate_databases(databases):
|
||||
for db in databases:
|
||||
_logger.info('migrate database %s', db)
|
||||
openerp.tools.config['update']['base'] = True
|
||||
openerp.pooler.restart_pool(db, force_demo=False, update_module=True)
|
||||
return True
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -26,7 +26,6 @@
|
|||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
###############################################################################
|
||||
|
||||
#.apidoc title: HTTP and XML-RPC Server
|
||||
|
||||
""" This module offers the family of HTTP-based servers. These are not a single
|
||||
class/functionality, but a set of network stack layers, implementing
|
||||
|
|
|
@ -0,0 +1,185 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
from functools import wraps
|
||||
import logging
|
||||
from psycopg2 import IntegrityError, errorcodes
|
||||
import threading
|
||||
|
||||
import openerp
|
||||
from openerp.tools.translate import translate
|
||||
from openerp.osv.orm import except_orm
|
||||
|
||||
import security
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
def dispatch(method, params):
    """RPC dispatcher for the model service.

    Expects *params* to start with the (db, uid, password) triple; the
    remaining elements are forwarded to the target function, which is looked
    up by name in this module's globals (`execute`, `execute_kw`,
    `exec_workflow`).

    :param method: RPC method name
    :param params: sequence ``(db, uid, passwd, *method_args)``
    :raises NameError: for discontinued or unknown method names
    :return: whatever the dispatched function returns
    """
    (db, uid, passwd ) = params[0:3]
    # Expose the caller's uid on the handling thread (used for logging/audit
    # elsewhere — NOTE(review): exact consumers not visible from here).
    threading.current_thread().uid = uid
    params = params[3:]
    if method == 'obj_list':
        raise NameError("obj_list has been discontinued via RPC as of 6.0, please query ir.model directly!")
    if method not in ['execute', 'execute_kw', 'exec_workflow']:
        raise NameError("Method not available %s" % method)
    security.check(db,uid,passwd)
    # Reload the registry if another process signalled a change.
    openerp.modules.registry.RegistryManager.check_registry_signaling(db)
    fn = globals()[method]
    res = fn(db, uid, *params)
    # Propagate any cache invalidation caused by this call to other workers.
    openerp.modules.registry.RegistryManager.signal_caches_change(db)
    return res
|
||||
|
||||
def check(f):
    """Decorator for service-level entry points taking ``(dbname, *args)``.

    Wraps *f* so that database-level errors are normalised into
    ``except_orm`` exceptions with (best-effort) translated messages:

    * refuses calls while the registry for *dbname* is still initializing;
    * re-raises ``except_orm`` untouched;
    * maps psycopg2 ``IntegrityError`` onto model-defined ``_sql_error``
      messages or a generic constraint-violation message;
    * logs and re-raises anything else.
    """
    @wraps(f)
    def wrapper(dbname, *args, **kwargs):
        """ Wraps around OSV functions and normalises a few exceptions
        """

        def tr(src, ttype):
            # Translate *src* for the caller's language without frame
            # inspection (we are already wrapping an osv function).
            # trans_obj = self.get('ir.translation') cannot work yet :(
            ctx = {}
            if not kwargs:
                # Positional convention: a trailing dict is the context.
                if args and isinstance(args[-1], dict):
                    ctx = args[-1]
            elif isinstance(kwargs, dict):
                ctx = kwargs.get('context', {})

            uid = 1
            # Positional convention: first int-like argument is the uid.
            if args and isinstance(args[0], (long, int)):
                uid = args[0]

            lang = ctx and ctx.get('lang')
            if not (lang or hasattr(src, '__call__')):
                # Nothing to translate and nothing to call: return as-is.
                return src

            # We open a *new* cursor here, one reason is that failed SQL
            # queries (as in IntegrityError) will invalidate the current one.
            cr = False

            if hasattr(src, '__call__'):
                # callable. We need to find the right parameters to call
                # the orm._sql_message(self, cr, uid, ids, context) function,
                # or we skip..
                # our signature is f(osv_pool, dbname [,uid, obj, method, args])
                try:
                    if args and len(args) > 1:
                        # TODO self doesn't exist, but was already wrong before (it was not a registry but just the object_service.
                        obj = self.get(args[1])
                        if len(args) > 3 and isinstance(args[3], (long, int, list)):
                            ids = args[3]
                        else:
                            ids = []
                    cr = openerp.sql_db.db_connect(dbname).cursor()
                    return src(obj, cr, uid, ids, context=(ctx or {}))
                except Exception:
                    # Best effort only: fall through and let the raw SQL
                    # error surface instead.
                    pass
                finally:
                    if cr: cr.close()

                return False # so that the original SQL error will
                             # be returned, it is the best we have.

            try:
                cr = openerp.sql_db.db_connect(dbname).cursor()
                res = translate(cr, name=False, source_type=ttype,
                                lang=lang, source=src)
                if res:
                    return res
                else:
                    # No translation found: keep the original string.
                    return src
            finally:
                if cr: cr.close()

        def _(src):
            # Local shorthand mirroring the usual gettext alias.
            return tr(src, 'code')

        try:
            if openerp.pooler.get_pool(dbname)._init:
                raise openerp.exceptions.Warning('Currently, this database is not fully loaded and can not be used.')
            return f(dbname, *args, **kwargs)
        except except_orm:
            raise
        except IntegrityError, inst:
            osv_pool = openerp.pooler.get_pool(dbname)
            # First, look for a model-declared custom message keyed on a
            # substring of the error text.
            for key in osv_pool._sql_error.keys():
                if key in inst[0]:
                    raise openerp.osv.orm.except_orm(_('Constraint Error'), tr(osv_pool._sql_error[key], 'sql_constraint') or inst[0])
            if inst.pgcode in (errorcodes.NOT_NULL_VIOLATION, errorcodes.FOREIGN_KEY_VIOLATION, errorcodes.RESTRICT_VIOLATION):
                msg = _('The operation cannot be completed, probably due to the following:\n- deletion: you may be trying to delete a record while other records still reference it\n- creation/update: a mandatory field is not correctly set')
                _logger.debug("IntegrityError", exc_info=True)
                try:
                    # Heuristically extract the offending table name from the
                    # (possibly French-quoted) PostgreSQL error text.
                    errortxt = inst.pgerror.replace('«','"').replace('»','"')
                    if '"public".' in errortxt:
                        context = errortxt.split('"public".')[1]
                        model_name = table = context.split('"')[1]
                    else:
                        last_quote_end = errortxt.rfind('"')
                        last_quote_begin = errortxt.rfind('"', 0, last_quote_end)
                        model_name = table = errortxt[last_quote_begin+1:last_quote_end].strip()
                    model = table.replace("_",".")
                    model_obj = osv_pool.get(model)
                    if model_obj:
                        model_name = model_obj._description or model_obj._name
                    msg += _('\n\n[object with reference: %s - %s]') % (model_name, model)
                except Exception:
                    # Extraction is best-effort; keep the generic message.
                    pass
                raise openerp.osv.orm.except_orm(_('Integrity Error'), msg)
            else:
                raise openerp.osv.orm.except_orm(_('Integrity Error'), inst[0])
        except Exception:
            _logger.exception("Uncaught exception")
            raise

    return wrapper
|
||||
|
||||
def execute_cr(cr, uid, obj, method, *args, **kw):
    """Invoke *method* on model *obj* using an already-open cursor.

    :param cr: database cursor
    :param uid: id of the calling user
    :param obj: model name, e.g. ``'res.partner'``
    :param method: name of the model method to call
    :raises except_orm: if the model is not found in the registry
    :return: whatever the model method returns
    """
    # Renamed the local from `object` to `model`: it shadowed the builtin.
    model = openerp.pooler.get_pool(cr.dbname).get(obj)
    if not model:
        raise except_orm('Object Error', 'Object %s doesn\'t exist' % str(obj))
    return getattr(model, method)(cr, uid, *args, **kw)
|
||||
|
||||
def execute_kw(db, uid, obj, method, args, kw=None):
    """Variant of `execute` taking the method arguments packed as a
    positional list plus an optional keyword dict (the XML-RPC-friendly
    calling convention)."""
    keyword_args = kw or {}
    return execute(db, uid, obj, method, *args, **keyword_args)
|
||||
|
||||
@check
def execute(db, uid, obj, method, *args, **kw):
    """Call *method* on model *obj* inside its own transaction.

    Opens a cursor on *db*, delegates to `execute_cr`, commits on success
    and rolls back on any exception; the cursor is always closed.

    :raises except_orm: when a private method (leading underscore) is
        requested remotely
    :return: the model method's result
    """
    threading.currentThread().dbname = db
    cr = openerp.pooler.get_db(db).cursor()
    try:
        # Underscore-prefixed methods are internal and must not be reachable
        # over RPC.
        if method.startswith('_'):
            raise except_orm('Access Denied', 'Private methods (such as %s) cannot be called remotely.' % (method,))
        res = execute_cr(cr, uid, obj, method, *args, **kw)
        if res is None:
            _logger.warning('The method %s of the object %s can not return `None` !', method, obj)
        cr.commit()
    except Exception:
        cr.rollback()
        raise
    finally:
        cr.close()
    return res
|
||||
|
||||
def exec_workflow_cr(cr, uid, obj, signal, *args):
    """Send workflow *signal* to one record of model *obj*.

    :param cr: database cursor
    :param uid: id of the calling user
    :param obj: model name
    :param signal: workflow signal name
    :param args: first element is the target record id
    :raises except_orm: if the model is not found in the registry
    :return: the per-record result of ``_workflow_signal``
    """
    # Renamed the local from `object` to `model`: it shadowed the builtin.
    model = openerp.pooler.get_pool(cr.dbname).get(obj)
    if not model:
        raise except_orm('Object Error', 'Object %s doesn\'t exist' % str(obj))
    res_id = args[0]
    return model._workflow_signal(cr, uid, [res_id], signal)[res_id]
|
||||
|
||||
@check
def exec_workflow(db, uid, obj, signal, *args):
    """Send a workflow signal inside its own transaction.

    Opens a cursor on *db*, delegates to `exec_workflow_cr`, commits on
    success and rolls back on any exception; the cursor is always closed.
    """
    cr = openerp.pooler.get_db(db).cursor()
    try:
        res = exec_workflow_cr(cr, uid, obj, signal, *args)
        cr.commit()
    except Exception:
        cr.rollback()
        raise
    finally:
        cr.close()
    return res
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -1,99 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
##############################################################################
|
||||
#
|
||||
# OpenERP, Open Source Management Solution
|
||||
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
##############################################################################
|
||||
|
||||
import socket
|
||||
import cPickle
|
||||
import cStringIO
|
||||
|
||||
import openerp.netsvc as netsvc
|
||||
|
||||
# Pickle protocol version 2 is optimized compared to default (version 0)
|
||||
PICKLE_PROTOCOL = 2
|
||||
|
||||
class Myexception(Exception):
    """Fault carrier for the NET-RPC wire protocol.

    Mirrors XML-RPC fault conventions and exposes:
      * faultCode   -- short error identifier
      * faultString -- human-readable error text
      * args        -- the (faultCode, faultString) pair
    """

    def __init__(self, faultCode, faultString):
        self.args = (faultCode, faultString)
        self.faultCode, self.faultString = self.args
|
||||
|
||||
class mysocket:
    """Thin wrapper around a TCP socket speaking the legacy NET-RPC
    pickle-based protocol.

    Wire format (Python 2 byte strings): an 8-digit decimal payload length,
    one digit exception flag, then a pickled ``[message, traceback]`` pair.
    """

    def __init__(self, sock=None):
        # Either adopt an existing connected socket or create a fresh one.
        if sock is None:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.sock = sock
        # self.sock.settimeout(120)
        # prepare this socket for long operations: it may block for infinite
        # time, but should exit as soon as the net is down
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

    def connect(self, host, port=False):
        """Connect to ``host:port``; *host* may also be a full
        ``protocol://host:port`` URL when *port* is falsy."""
        if not port:
            protocol, buf = host.split('//')
            host, port = buf.split(':')
        self.sock.connect((host, int(port)))

    def disconnect(self):
        # Delegate the careful shutdown/close dance to netsvc.
        netsvc.close_socket(self.sock)

    def mysend(self, msg, exception=False, traceback=None):
        """Pickle and send *msg* (with optional *traceback*), framed with an
        8-digit length header and a 1-digit exception flag."""
        msg = cPickle.dumps([msg, traceback], PICKLE_PROTOCOL)
        self.sock.sendall('%8d%d%s' % (len(msg), bool(exception), msg))

    def myreceive(self):
        """Receive one framed message and return the unpickled payload.

        :raises socket.timeout: on premature peer disconnect
        :raises Myexception: when the peer flagged an exception frame
        :raises Exception: re-raises an exception object sent un-flagged
        """
        # Read exactly the 9-byte header (8-digit size + 1 flag digit).
        buf=''
        while len(buf) < 9:
            chunk = self.sock.recv(9 - len(buf))
            if not chunk:
                raise socket.timeout
            buf += chunk
        size = int(buf[:8])
        if buf[8] != "0":
            exception = buf[8]
        else:
            exception = False
        # Read the payload until the advertised size is reached.
        msg = ''
        while len(msg) < size:
            chunk = self.sock.recv(size-len(msg))
            if not chunk:
                raise socket.timeout
            msg = msg + chunk
        msgio = cStringIO.StringIO(msg)
        unpickler = cPickle.Unpickler(msgio)
        # Security hardening: forbid resolution of arbitrary globals while
        # unpickling remote data.
        unpickler.find_global = None
        res = unpickler.load()

        if isinstance(res[0],Exception):
            if exception:
                raise Myexception(str(res[0]), str(res[1]))
            raise res[0]
        else:
            return res[0]
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -0,0 +1,143 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
import base64
import logging
import sys
import thread
import threading

import openerp.exceptions
import openerp.modules.registry
import openerp.netsvc
import openerp.osv.orm
import openerp.pooler
import openerp.tools as tools

import security
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
# TODO: set a maximum report number per user to avoid DOS attacks
#
# Report state:
#   False -> rendering still in progress
#   True  -> finished (result/format filled in, or exception recorded)

# In-memory registry of report jobs, keyed by integer job id.
self_reports = {}
# Monotonically increasing job id counter (guarded by self_id_protect).
self_id = 0
# Serialises increments of self_id across request threads.
self_id_protect = threading.Semaphore()
|
||||
|
||||
def dispatch(method, params):
    """RPC dispatcher for the report service.

    Expects *params* to start with the (db, uid, password) triple; the rest
    is forwarded to the ``exp_``-prefixed handler looked up in this module's
    globals.

    :param method: one of 'report', 'report_get', 'render_report'
    :param params: sequence ``(db, uid, passwd, *method_args)``
    :raises KeyError: for unsupported method names
    :return: the handler's result
    """
    (db, uid, passwd ) = params[0:3]
    # Expose the caller's uid on the handling thread.
    threading.current_thread().uid = uid
    params = params[3:]
    if method not in ['report', 'report_get', 'render_report']:
        raise KeyError("Method not supported %s" % method)
    # NOTE(review): `security` and `openerp.modules.registry` must be
    # importable here — verify the module's import block.
    security.check(db,uid,passwd)
    openerp.modules.registry.RegistryManager.check_registry_signaling(db)
    fn = globals()['exp_' + method]
    res = fn(db, uid, *params)
    openerp.modules.registry.RegistryManager.signal_caches_change(db)
    return res
|
||||
|
||||
def exp_render_report(db, uid, object, ids, datas=None, context=None):
|
||||
if not datas:
|
||||
datas={}
|
||||
if not context:
|
||||
context={}
|
||||
|
||||
self_id_protect.acquire()
|
||||
global self_id
|
||||
self_id += 1
|
||||
id = self_id
|
||||
self_id_protect.release()
|
||||
|
||||
self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
|
||||
|
||||
cr = openerp.pooler.get_db(db).cursor()
|
||||
try:
|
||||
obj = openerp.netsvc.LocalService('report.'+object)
|
||||
(result, format) = obj.create(cr, uid, ids, datas, context)
|
||||
if not result:
|
||||
tb = sys.exc_info()
|
||||
self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
|
||||
self_reports[id]['result'] = result
|
||||
self_reports[id]['format'] = format
|
||||
self_reports[id]['state'] = True
|
||||
except Exception, exception:
|
||||
|
||||
_logger.exception('Exception: %s\n', exception)
|
||||
if hasattr(exception, 'name') and hasattr(exception, 'value'):
|
||||
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
|
||||
else:
|
||||
tb = sys.exc_info()
|
||||
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
|
||||
self_reports[id]['state'] = True
|
||||
cr.commit()
|
||||
cr.close()
|
||||
|
||||
return _check_report(id)
|
||||
|
||||
def exp_report(db, uid, object, ids, datas=None, context=None):
    """Start rendering a report in a background thread.

    Registers a job in ``self_reports`` and returns its integer id
    immediately; the caller polls `exp_report_get` for completion.

    :param db: database name
    :param uid: id of the requesting user
    :param object: report service name suffix
    :param ids: record ids to render
    :param datas: optional report data dict (defaults to {})
    :param context: optional context dict (defaults to {})
    :return: the job id to poll with `exp_report_get`
    """
    if not datas:
        datas={}
    if not context:
        context={}

    # Allocate a unique job id under the module-level semaphore.
    self_id_protect.acquire()
    global self_id
    self_id += 1
    id = self_id
    self_id_protect.release()

    self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}

    def go(id, uid, ids, datas, context):
        # Worker body: renders the report and records outcome or exception
        # in the shared self_reports entry. `db` and `object` are closed over.
        cr = openerp.pooler.get_db(db).cursor()
        try:
            obj = openerp.netsvc.LocalService('report.'+object)
            (result, format) = obj.create(cr, uid, ids, datas, context)
            if not result:
                tb = sys.exc_info()
                self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
            self_reports[id]['result'] = result
            self_reports[id]['format'] = format
            self_reports[id]['state'] = True
        except Exception, exception:
            _logger.exception('Exception: %s\n', exception)
            if hasattr(exception, 'name') and hasattr(exception, 'value'):
                # OpenERP-style exceptions carry a translated name/value pair.
                self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
            else:
                tb = sys.exc_info()
                self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
            self_reports[id]['state'] = True
        cr.commit()
        cr.close()
        return True

    # NOTE(review): requires the Py2 `thread` module — verify it is imported.
    thread.start_new_thread(go, (id, uid, ids, datas, context))
    return id
|
||||
|
||||
def _check_report(report_id):
    """Return the status (and, when done, the encoded payload) of a job.

    Re-raises any exception recorded by the worker. When the job is
    finished, the result is optionally zlib-compressed (per the 'reportgz'
    config flag), base64-encoded, and the job entry is removed.

    :param report_id: id previously returned by exp_report/allocated by
        exp_render_report
    :raises except_orm: if the worker recorded an exception
    :return: dict with 'state' and, when state is True, 'result', 'format'
        and possibly 'code' == 'zlib'
    """
    result = self_reports[report_id]
    exc = result['exception']
    if exc:
        raise openerp.osv.orm.except_orm(exc.message, exc.traceback)
    res = {'state': result['state']}
    if res['state']:
        if tools.config['reportgz']:
            import zlib
            res2 = zlib.compress(result['result'])
            # Tell the client how to decode the payload.
            res['code'] = 'zlib'
        else:
            #CHECKME: why is this needed???
            if isinstance(result['result'], unicode):
                res2 = result['result'].encode('latin1', 'replace')
            else:
                res2 = result['result']
        if res2:
            res['result'] = base64.encodestring(res2)
        res['format'] = result['format']
        # The job is consumed: drop it from the registry.
        del self_reports[report_id]
    return res
|
||||
|
||||
def exp_report_get(db, uid, report_id):
    """Poll the status of a background report job started by `exp_report`.

    Only the user who started the job may query it.

    :raises Exception: 'ReportNotFound' for unknown ids, 'AccessDenied'
        when *uid* does not own the job
    :return: the status dict produced by `_check_report`
    """
    entry = self_reports.get(report_id)
    if entry is None:
        raise Exception('ReportNotFound')
    if entry['uid'] != uid:
        raise Exception('AccessDenied')
    return _check_report(report_id)
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -23,7 +23,6 @@ import openerp.exceptions
|
|||
import openerp.pooler as pooler
|
||||
import openerp.tools as tools
|
||||
|
||||
#.apidoc title: Authentication helpers
|
||||
|
||||
def login(db, login, password):
|
||||
pool = pooler.get_pool(db)
|
||||
|
|
|
@ -1,762 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
##############################################################################
|
||||
#
|
||||
# OpenERP, Open Source Management Solution
|
||||
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
##############################################################################
|
||||
from __future__ import with_statement
|
||||
import contextlib
|
||||
import base64
|
||||
import locale
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
import security
|
||||
import sys
|
||||
import thread
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from cStringIO import StringIO
|
||||
from openerp.tools.translate import _
|
||||
import openerp.netsvc as netsvc
|
||||
import openerp.pooler as pooler
|
||||
import openerp.release as release
|
||||
import openerp.sql_db as sql_db
|
||||
import openerp.tools as tools
|
||||
import openerp.modules
|
||||
import openerp.exceptions
|
||||
from openerp.service import http_server
|
||||
from openerp import SUPERUSER_ID
|
||||
|
||||
#.apidoc title: Exported Service methods
|
||||
#.apidoc module-mods: member-order: bysource
|
||||
|
||||
""" This python module defines the RPC methods available to remote clients.
|
||||
|
||||
Each 'Export Service' is a group of 'methods', which in turn are RPC
|
||||
procedures to be called. Each method has its own arguments footprint.
|
||||
"""
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
RPC_VERSION_1 = {
|
||||
'server_version': release.version,
|
||||
'server_version_info': release.version_info,
|
||||
'server_serie': release.serie,
|
||||
'protocol_version': 1,
|
||||
}
|
||||
|
||||
# This should be moved to openerp.modules.db, along side initialize().
|
||||
def _initialize_db(serv, id, db_name, demo, lang, user_password):
    """Populate a freshly created database (runs in a worker thread).

    Seeds the base schema, loads modules through a registry restart,
    installs translations for *lang*, sets the admin password/lang, and
    records progress/outcome in ``serv.actions[id]``. Never raises: failures
    are logged and stored on the action entry for `exp_get_progress`.

    :param serv: the `db` service instance owning the actions registry
    :param id: action id allocated by the caller
    :param db_name: database to initialize
    :param demo: whether to load demo data
    :param lang: language code for translations (may be falsy)
    :param user_password: password to set on the admin user
    """
    cr = None
    try:
        serv.actions[id]['progress'] = 0
        cr = sql_db.db_connect(db_name).cursor()
        openerp.modules.db.initialize(cr) # TODO this should be removed as it is done by pooler.restart_pool.
        tools.config['lang'] = lang
        cr.commit()
        cr.close()

        # Load/install modules; serv.actions[id] doubles as the progress dict.
        pool = pooler.restart_pool(db_name, demo, serv.actions[id],
                update_module=True)[1]

        # Second cursor: post-installation setup.
        cr = sql_db.db_connect(db_name).cursor()

        if lang:
            modobj = pool.get('ir.module.module')
            mids = modobj.search(cr, SUPERUSER_ID, [('state', '=', 'installed')])
            modobj.update_translations(cr, SUPERUSER_ID, mids, lang)

        # update admin's password and lang
        values = {'password': user_password, 'lang': lang}
        pool.get('res.users').write(cr, SUPERUSER_ID, [SUPERUSER_ID], values)

        cr.execute('SELECT login, password FROM res_users ORDER BY login')
        serv.actions[id].update(users=cr.dictfetchall(), clean=True)
        cr.commit()
        cr.close()
    except Exception, e:
        # Store the failure for the polling client instead of raising.
        serv.actions[id].update(clean=False, exception=e)
        _logger.exception('CREATE DATABASE failed:')
        serv.actions[id]['traceback'] = traceback.format_exc()
        if cr:
            cr.close()
|
||||
|
||||
class db(netsvc.ExportService):
    """RPC service exposing database lifecycle operations (create, drop,
    dump, restore, rename, list, migrate, ...).

    Privileged methods require the master password (``security.check_super``);
    a few informational methods are unauthenticated. Long-running creation is
    tracked through the ``self.actions`` dict, polled via `exp_get_progress`.
    """

    def __init__(self, name="db"):
        netsvc.ExportService.__init__(self, name)
        # Action registry for async jobs: id -> progress/outcome dict.
        self.actions = {}
        # Action id counter, guarded by id_protect.
        self.id = 0
        self.id_protect = threading.Semaphore()

    def dispatch(self, method, params):
        """Route an RPC call to the matching ``exp_`` method, enforcing the
        master password for privileged operations."""
        if method in [ 'create', 'get_progress', 'drop', 'dump',
            'restore', 'rename',
            'change_admin_password', 'migrate_databases',
            'create_database', 'duplicate_database' ]:
            passwd = params[0]
            params = params[1:]
            security.check_super(passwd)
        elif method in [ 'db_exist', 'list', 'list_lang', 'server_version' ]:
            # params = params
            # No security check for these methods
            pass
        else:
            raise KeyError("Method not found: %s" % method)
        fn = getattr(self, 'exp_'+method)
        return fn(*params)

    def _create_empty_database(self, name):
        """Create database *name* from the configured template; raise a
        Warning if it already exists."""
        db = sql_db.db_connect('postgres')
        cr = db.cursor()
        chosen_template = tools.config['db_template']
        cr.execute("""SELECT datname
                      FROM pg_database
                      WHERE datname = %s """,
                   (name,))
        if cr.fetchall():
            raise openerp.exceptions.Warning(" %s database already exists!" % name )
        try:
            cr.autocommit(True) # avoid transaction block
            cr.execute("""CREATE DATABASE "%s" ENCODING 'unicode' TEMPLATE "%s" """ % (name, chosen_template))
        finally:
            cr.close()

    def exp_create(self, db_name, demo, lang, user_password='admin'):
        """Create and initialize a database asynchronously; returns an action
        id to poll with `exp_get_progress`."""
        self.id_protect.acquire()
        self.id += 1
        id = self.id
        self.id_protect.release()

        self.actions[id] = {'clean': False}

        self._create_empty_database(db_name)

        _logger.info('CREATE DATABASE %s', db_name.lower())
        create_thread = threading.Thread(target=_initialize_db,
                args=(self, id, db_name, demo, lang, user_password))
        create_thread.start()
        self.actions[id]['thread'] = create_thread
        return id

    def exp_create_database(self, db_name, demo, lang, user_password='admin'):
        """ Similar to exp_create but blocking."""
        self.id_protect.acquire()
        self.id += 1
        id = self.id
        self.id_protect.release()

        self.actions[id] = {'clean': False}

        _logger.info('Create database `%s`.', db_name)
        self._create_empty_database(db_name)
        _initialize_db(self, id, db_name, demo, lang, user_password)
        return True

    def exp_duplicate_database(self, db_original_name, db_name):
        """Duplicate a database by using it as a CREATE DATABASE template."""
        _logger.info('Duplicate database `%s` to `%s`.', db_original_name, db_name)
        sql_db.close_db(db_original_name)
        db = sql_db.db_connect('postgres')
        cr = db.cursor()
        try:
            cr.autocommit(True) # avoid transaction block
            cr.execute("""CREATE DATABASE "%s" ENCODING 'unicode' TEMPLATE "%s" """ % (db_name, db_original_name))
        finally:
            cr.close()
        return True

    def exp_get_progress(self, id):
        """Poll an async creation action: returns (progress, users) and pops
        the action when finished; re-raises a stored failure."""
        if self.actions[id]['thread'].isAlive():
            # return openerp.modules.init_progress[db_name]
            return min(self.actions[id].get('progress', 0),0.95), []
        else:
            clean = self.actions[id]['clean']
            if clean:
                users = self.actions[id]['users']
                self.actions.pop(id)
                return 1.0, users
            else:
                e = self.actions[id]['exception'] # TODO this seems wrong: actions[id]['traceback'] is set, but not 'exception'.
                self.actions.pop(id)
                raise Exception, e

    def exp_drop(self, db_name):
        """Drop *db_name*, first terminating other backend connections."""
        if not self.exp_db_exist(db_name):
            return False
        openerp.modules.registry.RegistryManager.delete(db_name)
        sql_db.close_db(db_name)

        db = sql_db.db_connect('postgres')
        cr = db.cursor()
        cr.autocommit(True) # avoid transaction block
        try:
            # Try to terminate all other connections that might prevent
            # dropping the database
            try:

                # PostgreSQL 9.2 renamed pg_stat_activity.procpid to pid:
                # http://www.postgresql.org/docs/9.2/static/release-9-2.html#AEN110389
                pid_col = 'pid' if cr._cnx.server_version >= 90200 else 'procpid'

                cr.execute("""SELECT pg_terminate_backend(%(pid_col)s)
                              FROM pg_stat_activity
                              WHERE datname = %%s AND
                                    %(pid_col)s != pg_backend_pid()""" % {'pid_col': pid_col},
                           (db_name,))
            except Exception:
                pass

            try:
                cr.execute('DROP DATABASE "%s"' % db_name)
            except Exception, e:
                _logger.error('DROP DB: %s failed:\n%s', db_name, e)
                raise Exception("Couldn't drop database %s: %s" % (db_name, e))
            else:
                _logger.info('DROP DB: %s', db_name)
        finally:
            cr.close()
        return True

    @contextlib.contextmanager
    def _set_pg_password_in_environment(self):
        """ On Win32, pg_dump (and pg_restore) require that
        :envvar:`PGPASSWORD` be set

        This context management method handles setting
        :envvar:`PGPASSWORD` iif win32 and the envvar is not already
        set, and removing it afterwards.
        """
        if os.name != 'nt' or os.environ.get('PGPASSWORD'):
            yield
        else:
            os.environ['PGPASSWORD'] = tools.config['db_password']
            try:
                yield
            finally:
                del os.environ['PGPASSWORD']

    def exp_dump(self, db_name):
        """Dump *db_name* with pg_dump and return it base64-encoded."""
        logger = logging.getLogger('openerp.service.web_services.db.dump')
        with self._set_pg_password_in_environment():
            cmd = ['pg_dump', '--format=c', '--no-owner']
            if tools.config['db_user']:
                cmd.append('--username=' + tools.config['db_user'])
            if tools.config['db_host']:
                cmd.append('--host=' + tools.config['db_host'])
            if tools.config['db_port']:
                cmd.append('--port=' + str(tools.config['db_port']))
            cmd.append(db_name)

            stdin, stdout = tools.exec_pg_command_pipe(*tuple(cmd))
            stdin.close()
            data = stdout.read()
            res = stdout.close()

            if not data or res:
                logger.error(
                        'DUMP DB: %s failed! Please verify the configuration of the database password on the server. '
                        'It should be provided as a -w <PASSWD> command-line option, or as `db_password` in the '
                        'server configuration file.\n %s', db_name, data)
                raise Exception, "Couldn't dump database"
            logger.info('DUMP DB successful: %s', db_name)

            return base64.encodestring(data)

    def exp_restore(self, db_name, data):
        """Restore base64-encoded pg_dump *data* into a new database."""
        logger = logging.getLogger('openerp.service.web_services.db.restore')
        with self._set_pg_password_in_environment():
            if self.exp_db_exist(db_name):
                logger.warning('RESTORE DB: %s already exists', db_name)
                raise Exception, "Database already exists"

            self._create_empty_database(db_name)

            cmd = ['pg_restore', '--no-owner']
            if tools.config['db_user']:
                cmd.append('--username=' + tools.config['db_user'])
            if tools.config['db_host']:
                cmd.append('--host=' + tools.config['db_host'])
            if tools.config['db_port']:
                cmd.append('--port=' + str(tools.config['db_port']))
            cmd.append('--dbname=' + db_name)
            args2 = tuple(cmd)

            buf=base64.decodestring(data)
            # On Windows the dump is passed via a temp file instead of stdin.
            if os.name == "nt":
                tmpfile = (os.environ['TMP'] or 'C:\\') + os.tmpnam()
                file(tmpfile, 'wb').write(buf)
                args2=list(args2)
                args2.append(tmpfile)
                args2=tuple(args2)
            stdin, stdout = tools.exec_pg_command_pipe(*args2)
            if not os.name == "nt":
                stdin.write(base64.decodestring(data))
            stdin.close()
            res = stdout.close()
            if res:
                raise Exception, "Couldn't restore database"
            logger.info('RESTORE DB: %s', db_name)

            return True

    def exp_rename(self, old_name, new_name):
        """Rename a database and move its filestore directory along."""
        openerp.modules.registry.RegistryManager.delete(old_name)
        sql_db.close_db(old_name)

        db = sql_db.db_connect('postgres')
        cr = db.cursor()
        cr.autocommit(True) # avoid transaction block
        try:
            try:
                cr.execute('ALTER DATABASE "%s" RENAME TO "%s"' % (old_name, new_name))
            except Exception, e:
                _logger.error('RENAME DB: %s -> %s failed:\n%s', old_name, new_name, e)
                raise Exception("Couldn't rename database %s to %s: %s" % (old_name, new_name, e))
            else:
                fs = os.path.join(tools.config['root_path'], 'filestore')
                if os.path.exists(os.path.join(fs, old_name)):
                    os.rename(os.path.join(fs, old_name), os.path.join(fs, new_name))

                _logger.info('RENAME DB: %s -> %s', old_name, new_name)
        finally:
            cr.close()
        return True

    def exp_db_exist(self, db_name):
        ## Not True: in fact, check if connection to database is possible. The database may exists
        return bool(sql_db.db_connect(db_name))

    def exp_list(self, document=False):
        """List databases owned by the configured (or current) PG user,
        excluding templates; gated by the 'list_db' config flag."""
        if not tools.config['list_db'] and not document:
            raise openerp.exceptions.AccessDenied()
        chosen_template = tools.config['db_template']
        templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
        db = sql_db.db_connect('postgres')
        cr = db.cursor()
        try:
            try:
                db_user = tools.config["db_user"]
                if not db_user and os.name == 'posix':
                    import pwd
                    db_user = pwd.getpwuid(os.getuid())[0]
                if not db_user:
                    cr.execute("select usename from pg_user where usesysid=(select datdba from pg_database where datname=%s)", (tools.config["db_name"],))
                    res = cr.fetchone()
                    db_user = res and str(res[0])
                if db_user:
                    cr.execute("select datname from pg_database where datdba=(select usesysid from pg_user where usename=%s) and datname not in %s order by datname", (db_user, templates_list))
                else:
                    cr.execute("select datname from pg_database where datname not in %s order by datname", (templates_list,))
                res = [str(name) for (name,) in cr.fetchall()]
            except Exception:
                # Best effort: an unreadable catalog yields an empty list.
                res = []
        finally:
            cr.close()
        res.sort()
        return res

    def exp_change_admin_password(self, new_password):
        """Replace the master password and persist the configuration."""
        tools.config['admin_passwd'] = new_password
        tools.config.save()
        return True

    def exp_list_lang(self):
        """Return the list of available translation languages."""
        return tools.scan_languages()

    def exp_server_version(self):
        """ Return the version of the server
            Used by the client to verify the compatibility with its own version
        """
        return release.version

    def exp_migrate_databases(self,databases):
        """Run a 'base' update on each listed database, converting ORM
        errors into RPC fault responses."""

        from openerp.osv.orm import except_orm
        from openerp.osv.osv import except_osv

        for db in databases:
            try:
                _logger.info('migrate database %s', db)
                tools.config['update']['base'] = True
                pooler.restart_pool(db, force_demo=False, update_module=True)
            except except_orm, inst:
                netsvc.abort_response(1, inst.name, 'warning', inst.value)
            except except_osv, inst:
                netsvc.abort_response(1, inst.name, 'warning', inst.value)
            except Exception:
                _logger.exception('Exception in migrate_databases:')
                raise
        return True
|
||||
|
||||
class common(netsvc.ExportService):
|
||||
|
||||
def __init__(self,name="common"):
|
||||
netsvc.ExportService.__init__(self,name)
|
||||
|
||||
def dispatch(self, method, params):
|
||||
if method in ['login', 'about', 'timezone_get', 'get_server_environment',
|
||||
'login_message','get_stats', 'check_connectivity',
|
||||
'list_http_services', 'version', 'authenticate']:
|
||||
pass
|
||||
elif method in ['get_available_updates', 'get_migration_scripts', 'set_loglevel', 'get_os_time', 'get_sqlcount']:
|
||||
passwd = params[0]
|
||||
params = params[1:]
|
||||
security.check_super(passwd)
|
||||
else:
|
||||
raise Exception("Method not found: %s" % method)
|
||||
|
||||
fn = getattr(self, 'exp_'+method)
|
||||
return fn(*params)
|
||||
|
||||
def exp_login(self, db, login, password):
|
||||
# TODO: legacy indirection through 'security', should use directly
|
||||
# the res.users model
|
||||
res = security.login(db, login, password)
|
||||
msg = res and 'successful login' or 'bad login or password'
|
||||
_logger.info("%s from '%s' using database '%s'", msg, login, db.lower())
|
||||
return res or False
|
||||
|
||||
def exp_authenticate(self, db, login, password, user_agent_env):
|
||||
res_users = pooler.get_pool(db).get('res.users')
|
||||
return res_users.authenticate(db, login, password, user_agent_env)
|
||||
|
||||
def exp_version(self):
|
||||
return RPC_VERSION_1
|
||||
|
||||
def exp_about(self, extended=False):
|
||||
"""Return information about the OpenERP Server.
|
||||
|
||||
@param extended: if True then return version info
|
||||
@return string if extended is False else tuple
|
||||
"""
|
||||
|
||||
info = _('''
|
||||
|
||||
OpenERP is an ERP+CRM program for small and medium businesses.
|
||||
|
||||
The whole source code is distributed under the terms of the
|
||||
GNU Public Licence.
|
||||
|
||||
(c) 2003-TODAY - OpenERP SA''')
|
||||
|
||||
if extended:
|
||||
return info, release.version
|
||||
return info
|
||||
|
||||
def exp_timezone_get(self, db, login, password):
|
||||
return tools.misc.get_server_timezone()
|
||||
|
||||
def exp_get_available_updates(self, contract_id, contract_password):
|
||||
import openerp.tools.maintenance as tm
|
||||
try:
|
||||
rc = tm.remote_contract(contract_id, contract_password)
|
||||
if not rc.id:
|
||||
raise tm.RemoteContractException('This contract does not exist or is not active')
|
||||
|
||||
return rc.get_available_updates(rc.id, openerp.modules.get_modules_with_version())
|
||||
|
||||
except tm.RemoteContractException, e:
|
||||
netsvc.abort_response(1, 'Migration Error', 'warning', str(e))
|
||||
|
||||
|
||||
def exp_get_migration_scripts(self, contract_id, contract_password):
|
||||
import openerp.tools.maintenance as tm
|
||||
try:
|
||||
rc = tm.remote_contract(contract_id, contract_password)
|
||||
if not rc.id:
|
||||
raise tm.RemoteContractException('This contract does not exist or is not active')
|
||||
if rc.status != 'full':
|
||||
raise tm.RemoteContractException('Can not get updates for a partial contract')
|
||||
|
||||
_logger.info('starting migration with contract %s', rc.name)
|
||||
|
||||
zips = rc.retrieve_updates(rc.id, openerp.modules.get_modules_with_version())
|
||||
|
||||
from shutil import rmtree, copytree, copy
|
||||
|
||||
backup_directory = os.path.join(tools.config['root_path'], 'backup', time.strftime('%Y-%m-%d-%H-%M'))
|
||||
if zips and not os.path.isdir(backup_directory):
|
||||
_logger.info('create a new backup directory to store the old modules: %s', backup_directory)
|
||||
os.makedirs(backup_directory)
|
||||
|
||||
for module in zips:
|
||||
_logger.info('upgrade module %s', module)
|
||||
mp = openerp.modules.get_module_path(module)
|
||||
if mp:
|
||||
if os.path.isdir(mp):
|
||||
copytree(mp, os.path.join(backup_directory, module))
|
||||
if os.path.islink(mp):
|
||||
os.unlink(mp)
|
||||
else:
|
||||
rmtree(mp)
|
||||
else:
|
||||
copy(mp + 'zip', backup_directory)
|
||||
os.unlink(mp + '.zip')
|
||||
|
||||
try:
|
||||
try:
|
||||
base64_decoded = base64.decodestring(zips[module])
|
||||
except Exception:
|
||||
_logger.error('unable to read the module %s', module)
|
||||
raise
|
||||
|
||||
zip_contents = StringIO(base64_decoded)
|
||||
zip_contents.seek(0)
|
||||
try:
|
||||
try:
|
||||
tools.extract_zip_file(zip_contents, tools.config['addons_path'] )
|
||||
except Exception:
|
||||
_logger.error('unable to extract the module %s', module)
|
||||
rmtree(module)
|
||||
raise
|
||||
finally:
|
||||
zip_contents.close()
|
||||
except Exception:
|
||||
_logger.error('restore the previous version of the module %s', module)
|
||||
nmp = os.path.join(backup_directory, module)
|
||||
if os.path.isdir(nmp):
|
||||
copytree(nmp, tools.config['addons_path'])
|
||||
else:
|
||||
copy(nmp+'.zip', tools.config['addons_path'])
|
||||
raise
|
||||
|
||||
return True
|
||||
except tm.RemoteContractException, e:
|
||||
netsvc.abort_response(1, 'Migration Error', 'warning', str(e))
|
||||
except Exception, e:
|
||||
_logger.exception('Exception in get_migration_script:')
|
||||
raise
|
||||
|
||||
def exp_get_server_environment(self):
|
||||
os_lang = '.'.join( [x for x in locale.getdefaultlocale() if x] )
|
||||
if not os_lang:
|
||||
os_lang = 'NOT SET'
|
||||
environment = '\nEnvironment Information : \n' \
|
||||
'System : %s\n' \
|
||||
'OS Name : %s\n' \
|
||||
%(platform.platform(), platform.os.name)
|
||||
if os.name == 'posix':
|
||||
if platform.system() == 'Linux':
|
||||
lsbinfo = os.popen('lsb_release -a').read()
|
||||
environment += '%s'% lsbinfo
|
||||
else:
|
||||
environment += 'Your System is not lsb compliant\n'
|
||||
environment += 'Operating System Release : %s\n' \
|
||||
'Operating System Version : %s\n' \
|
||||
'Operating System Architecture : %s\n' \
|
||||
'Operating System Locale : %s\n'\
|
||||
'Python Version : %s\n'\
|
||||
'OpenERP-Server Version : %s'\
|
||||
%(platform.release(), platform.version(), platform.architecture()[0],
|
||||
os_lang, platform.python_version(),release.version)
|
||||
return environment
|
||||
|
||||
def exp_login_message(self):
|
||||
return tools.config.get('login_message', False)
|
||||
|
||||
def exp_set_loglevel(self, loglevel, logger=None):
|
||||
# TODO Previously, the level was set on the now deprecated
|
||||
# `openerp.netsvc.Logger` class.
|
||||
return True
|
||||
|
||||
def exp_get_stats(self):
|
||||
res = "OpenERP server: %d threads\n" % threading.active_count()
|
||||
res += netsvc.Server.allStats()
|
||||
return res
|
||||
|
||||
def exp_list_http_services(self):
|
||||
return http_server.list_http_services()
|
||||
|
||||
def exp_check_connectivity(self):
|
||||
return bool(sql_db.db_connect('postgres'))
|
||||
|
||||
def exp_get_os_time(self):
|
||||
return os.times()
|
||||
|
||||
def exp_get_sqlcount(self):
|
||||
if not logging.getLogger('openerp.sql_db').isEnabledFor(logging.DEBUG):
|
||||
_logger.warning("Counters of SQL will not be reliable unless logger openerp.sql_db is set to level DEBUG or higer.")
|
||||
return sql_db.sql_counter
|
||||
|
||||
|
||||
class objects_proxy(netsvc.ExportService):
|
||||
def __init__(self, name="object"):
|
||||
netsvc.ExportService.__init__(self,name)
|
||||
|
||||
def dispatch(self, method, params):
|
||||
(db, uid, passwd ) = params[0:3]
|
||||
threading.current_thread().uid = uid
|
||||
params = params[3:]
|
||||
if method == 'obj_list':
|
||||
raise NameError("obj_list has been discontinued via RPC as of 6.0, please query ir.model directly!")
|
||||
if method not in ['execute', 'execute_kw', 'exec_workflow']:
|
||||
raise NameError("Method not available %s" % method)
|
||||
security.check(db,uid,passwd)
|
||||
assert openerp.osv.osv.service, "The object_proxy class must be started with start_object_proxy."
|
||||
openerp.modules.registry.RegistryManager.check_registry_signaling(db)
|
||||
fn = getattr(openerp.osv.osv.service, method)
|
||||
res = fn(db, uid, *params)
|
||||
openerp.modules.registry.RegistryManager.signal_caches_change(db)
|
||||
return res
|
||||
|
||||
|
||||
#
|
||||
# TODO: set a maximum report number per user to avoid DOS attacks
|
||||
#
|
||||
# Report state:
|
||||
# False -> True
|
||||
#
|
||||
|
||||
class report_spool(netsvc.ExportService):
|
||||
def __init__(self, name='report'):
|
||||
netsvc.ExportService.__init__(self, name)
|
||||
self._reports = {}
|
||||
self.id = 0
|
||||
self.id_protect = threading.Semaphore()
|
||||
|
||||
def dispatch(self, method, params):
|
||||
(db, uid, passwd ) = params[0:3]
|
||||
threading.current_thread().uid = uid
|
||||
params = params[3:]
|
||||
if method not in ['report', 'report_get', 'render_report']:
|
||||
raise KeyError("Method not supported %s" % method)
|
||||
security.check(db,uid,passwd)
|
||||
openerp.modules.registry.RegistryManager.check_registry_signaling(db)
|
||||
fn = getattr(self, 'exp_' + method)
|
||||
res = fn(db, uid, *params)
|
||||
openerp.modules.registry.RegistryManager.signal_caches_change(db)
|
||||
return res
|
||||
|
||||
def exp_render_report(self, db, uid, object, ids, datas=None, context=None):
|
||||
if not datas:
|
||||
datas={}
|
||||
if not context:
|
||||
context={}
|
||||
|
||||
self.id_protect.acquire()
|
||||
self.id += 1
|
||||
id = self.id
|
||||
self.id_protect.release()
|
||||
|
||||
self._reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
|
||||
|
||||
cr = pooler.get_db(db).cursor()
|
||||
try:
|
||||
obj = netsvc.LocalService('report.'+object)
|
||||
(result, format) = obj.create(cr, uid, ids, datas, context)
|
||||
if not result:
|
||||
tb = sys.exc_info()
|
||||
self._reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
|
||||
self._reports[id]['result'] = result
|
||||
self._reports[id]['format'] = format
|
||||
self._reports[id]['state'] = True
|
||||
except Exception, exception:
|
||||
|
||||
_logger.exception('Exception: %s\n', exception)
|
||||
if hasattr(exception, 'name') and hasattr(exception, 'value'):
|
||||
self._reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
|
||||
else:
|
||||
tb = sys.exc_info()
|
||||
self._reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
|
||||
self._reports[id]['state'] = True
|
||||
cr.commit()
|
||||
cr.close()
|
||||
|
||||
return self._check_report(id)
|
||||
|
||||
def exp_report(self, db, uid, object, ids, datas=None, context=None):
|
||||
if not datas:
|
||||
datas={}
|
||||
if not context:
|
||||
context={}
|
||||
|
||||
self.id_protect.acquire()
|
||||
self.id += 1
|
||||
id = self.id
|
||||
self.id_protect.release()
|
||||
|
||||
self._reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
|
||||
|
||||
def go(id, uid, ids, datas, context):
|
||||
cr = pooler.get_db(db).cursor()
|
||||
try:
|
||||
obj = netsvc.LocalService('report.'+object)
|
||||
(result, format) = obj.create(cr, uid, ids, datas, context)
|
||||
if not result:
|
||||
tb = sys.exc_info()
|
||||
self._reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
|
||||
self._reports[id]['result'] = result
|
||||
self._reports[id]['format'] = format
|
||||
self._reports[id]['state'] = True
|
||||
except Exception, exception:
|
||||
_logger.exception('Exception: %s\n', exception)
|
||||
if hasattr(exception, 'name') and hasattr(exception, 'value'):
|
||||
self._reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
|
||||
else:
|
||||
tb = sys.exc_info()
|
||||
self._reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
|
||||
self._reports[id]['state'] = True
|
||||
cr.commit()
|
||||
cr.close()
|
||||
return True
|
||||
|
||||
thread.start_new_thread(go, (id, uid, ids, datas, context))
|
||||
return id
|
||||
|
||||
def _check_report(self, report_id):
|
||||
result = self._reports[report_id]
|
||||
exc = result['exception']
|
||||
if exc:
|
||||
netsvc.abort_response(exc, exc.message, 'warning', exc.traceback)
|
||||
res = {'state': result['state']}
|
||||
if res['state']:
|
||||
if tools.config['reportgz']:
|
||||
import zlib
|
||||
res2 = zlib.compress(result['result'])
|
||||
res['code'] = 'zlib'
|
||||
else:
|
||||
#CHECKME: why is this needed???
|
||||
if isinstance(result['result'], unicode):
|
||||
res2 = result['result'].encode('latin1', 'replace')
|
||||
else:
|
||||
res2 = result['result']
|
||||
if res2:
|
||||
res['result'] = base64.encodestring(res2)
|
||||
res['format'] = result['format']
|
||||
del self._reports[report_id]
|
||||
return res
|
||||
|
||||
def exp_report_get(self, db, uid, report_id):
|
||||
if report_id in self._reports:
|
||||
if self._reports[report_id]['uid'] == uid:
|
||||
return self._check_report(report_id)
|
||||
else:
|
||||
raise Exception, 'AccessDenied'
|
||||
else:
|
||||
raise Exception, 'ReportNotFound'
|
||||
|
||||
|
||||
def start_service():
|
||||
db()
|
||||
common()
|
||||
objects_proxy()
|
||||
report_spool()
|
||||
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
@ -24,7 +24,6 @@
|
|||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
###############################################################################
|
||||
|
||||
#.apidoc title: HTTP Layer library (websrv_lib)
|
||||
|
||||
""" Framework for generic http servers
|
||||
|
||||
|
|
|
@ -367,7 +367,7 @@ class WorkerCron(Worker):
|
|||
if config['db_name']:
|
||||
db_names = config['db_name'].split(',')
|
||||
else:
|
||||
db_names = openerp.netsvc.ExportService._services['db'].exp_list(True)
|
||||
db_names = openerp.service.db.exp_list(True)
|
||||
for db_name in db_names:
|
||||
if rpc_request_flag:
|
||||
start_time = time.time()
|
||||
|
|
|
@ -34,6 +34,7 @@ import errno
|
|||
import logging
|
||||
import os
|
||||
import signal
|
||||
import socket
|
||||
import sys
|
||||
import threading
|
||||
import traceback
|
||||
|
@ -90,7 +91,7 @@ def xmlrpc_return(start_response, service, method, params, legacy_exceptions=Fal
|
|||
return [response]
|
||||
|
||||
def xmlrpc_handle_exception(e):
|
||||
if isinstance(e, openerp.osv.osv.except_osv): # legacy
|
||||
if isinstance(e, openerp.osv.orm.except_orm): # legacy
|
||||
fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, openerp.tools.ustr(e.value))
|
||||
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
|
||||
elif isinstance(e, openerp.exceptions.Warning):
|
||||
|
@ -122,7 +123,7 @@ def xmlrpc_handle_exception(e):
|
|||
return response
|
||||
|
||||
def xmlrpc_handle_exception_legacy(e):
|
||||
if isinstance(e, openerp.osv.osv.except_osv):
|
||||
if isinstance(e, openerp.osv.orm.except_orm):
|
||||
fault = xmlrpclib.Fault('warning -- ' + e.name + '\n\n' + e.value, '')
|
||||
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
|
||||
elif isinstance(e, openerp.exceptions.Warning):
|
||||
|
@ -372,6 +373,8 @@ def parse_http_response(s):
|
|||
|
||||
# WSGI handlers registered through the register_wsgi_handler() function below.
|
||||
module_handlers = []
|
||||
# RPC endpoints registered through the register_rpc_endpoint() function below.
|
||||
rpc_handlers = {}
|
||||
|
||||
def register_wsgi_handler(handler):
|
||||
""" Register a WSGI handler.
|
||||
|
@ -381,6 +384,11 @@ def register_wsgi_handler(handler):
|
|||
"""
|
||||
module_handlers.append(handler)
|
||||
|
||||
def register_rpc_endpoint(endpoint, handler):
|
||||
""" Register a handler for a given RPC enpoint.
|
||||
"""
|
||||
rpc_handlers[endpoint] = handler
|
||||
|
||||
def application_unproxied(environ, start_response):
|
||||
""" WSGI entry point."""
|
||||
openerp.service.start_internal()
|
||||
|
@ -438,6 +446,25 @@ def stop_service():
|
|||
"""
|
||||
if httpd:
|
||||
httpd.shutdown()
|
||||
openerp.netsvc.close_socket(httpd.socket)
|
||||
close_socket(httpd.socket)
|
||||
|
||||
def close_socket(sock):
|
||||
""" Closes a socket instance cleanly
|
||||
|
||||
:param sock: the network socket to close
|
||||
:type sock: socket.socket
|
||||
"""
|
||||
try:
|
||||
sock.shutdown(socket.SHUT_RDWR)
|
||||
except socket.error, e:
|
||||
# On OSX, socket shutdowns both sides if any side closes it
|
||||
# causing an error 57 'Socket is not connected' on shutdown
|
||||
# of the other side (or something), see
|
||||
# http://bugs.python.org/issue4397
|
||||
# note: stdlib fixed test, not behavior
|
||||
if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
|
||||
raise
|
||||
sock.close()
|
||||
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
#
|
||||
##############################################################################
|
||||
|
||||
#.apidoc title: PostgreSQL interface
|
||||
|
||||
"""
|
||||
The PostgreSQL connector is a connectivity layer between the OpenERP code and
|
||||
|
@ -30,8 +29,6 @@ the ORM does, in fact.
|
|||
See also: the `pooler` module
|
||||
"""
|
||||
|
||||
#.apidoc add-functions: print_stats
|
||||
#.apidoc add-classes: Cursor Connection ConnectionPool
|
||||
|
||||
__all__ = ['db_connect', 'close_db']
|
||||
|
||||
|
|
|
@ -22,6 +22,8 @@ import test_osv
|
|||
import test_translate
|
||||
import test_uninstall
|
||||
import test_view_validation
|
||||
# This need a change in `oe run-tests` to only run fast_suite + checks by default.
|
||||
# import test_xmlrpc
|
||||
|
||||
fast_suite = [
|
||||
test_ir_sequence,
|
||||
|
|
|
@ -133,6 +133,7 @@ class RpcCase(unittest2.TestCase):
|
|||
self.proxy.common_60 = xmlrpclib.ServerProxy(url_60 + 'common')
|
||||
self.proxy.db_60 = xmlrpclib.ServerProxy(url_60 + 'db')
|
||||
self.proxy.object_60 = xmlrpclib.ServerProxy(url_60 + 'object')
|
||||
#self.proxy.edi_60 = xmlrpclib.ServerProxy(url_60 + 'edi')
|
||||
|
||||
# Use the new (6.1) API.
|
||||
self.proxy.url_61 = url_61 = 'http://%s:%d/openerp/xmlrpc/1/' % (HOST, PORT)
|
||||
|
|
|
@ -64,6 +64,14 @@ class test_xmlrpc(common.RpcCase):
|
|||
ids = proxy.execute(ADMIN_USER_ID, ADMIN_PASSWORD, 'search', [], {})
|
||||
assert ids
|
||||
|
||||
# This test was written to test the creation of a new RPC endpoint, not
|
||||
# really for the EDI itself.
|
||||
#def test_xmlrpc_import_edi_document(self):
|
||||
# """ Try to call an EDI method. """
|
||||
# msg_re = 'EDI Document is empty!'
|
||||
# with self.assertRaisesRegexp(Exception, msg_re):
|
||||
# self.proxy.edi_60.import_edi_document(DB, ADMIN_USER_ID, ADMIN_PASSWORD, {})
|
||||
|
||||
def test_zz_xmlrpc_drop_database(self):
|
||||
"""
|
||||
Simulate a OpenERP client requesting the deletion of a database.
|
||||
|
|
|
@ -35,7 +35,6 @@ from sql import *
|
|||
from float_utils import *
|
||||
from mail import *
|
||||
|
||||
#.apidoc title: Tools
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
||||
|
|
|
@ -47,7 +47,6 @@ class MyOption (optparse.Option, object):
|
|||
self.my_default = attrs.pop('my_default', None)
|
||||
super(MyOption, self).__init__(*opts, **attrs)
|
||||
|
||||
#.apidoc title: Server Configuration Loader
|
||||
|
||||
def check_ssl():
|
||||
try:
|
||||
|
|
|
@ -833,12 +833,8 @@ form: module.record_id""" % (xml_id,)
|
|||
return model_data_obj.get_object_reference(cr, self.uid, mod, id_str)
|
||||
|
||||
def parse(self, de):
|
||||
if not de.tag in ['terp', 'openerp']:
|
||||
_logger.error("Mismatch xml format")
|
||||
raise Exception( "Mismatch xml format: only terp or openerp as root tag" )
|
||||
|
||||
if de.tag == 'terp':
|
||||
_logger.warning("The tag <terp/> is deprecated, use <openerp/>")
|
||||
if de.tag != 'openerp':
|
||||
raise Exception("Mismatch xml format: root tag must be `openerp`.")
|
||||
|
||||
for n in de.findall('./data'):
|
||||
for rec in n:
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
#
|
||||
##############################################################################
|
||||
|
||||
#.apidoc title: Utilities: tools.misc
|
||||
|
||||
"""
|
||||
Miscellaneous tools used by OpenERP.
|
||||
|
|
|
@ -674,49 +674,7 @@ def trans_generate(lang, modules, cr):
|
|||
for t in trans_parse_view(d):
|
||||
push_translation(module, 'view', encode(obj.model), 0, t)
|
||||
elif model=='ir.actions.wizard':
|
||||
service_name = 'wizard.'+encode(obj.wiz_name)
|
||||
import openerp.netsvc as netsvc
|
||||
if netsvc.Service._services.get(service_name):
|
||||
obj2 = netsvc.Service._services[service_name]
|
||||
for state_name, state_def in obj2.states.iteritems():
|
||||
if 'result' in state_def:
|
||||
result = state_def['result']
|
||||
if result['type'] != 'form':
|
||||
continue
|
||||
name = "%s,%s" % (encode(obj.wiz_name), state_name)
|
||||
|
||||
def_params = {
|
||||
'string': ('wizard_field', lambda s: [encode(s)]),
|
||||
'selection': ('selection', lambda s: [encode(e[1]) for e in ((not callable(s)) and s or [])]),
|
||||
'help': ('help', lambda s: [encode(s)]),
|
||||
}
|
||||
|
||||
# export fields
|
||||
if not result.has_key('fields'):
|
||||
_logger.warning("res has no fields: %r", result)
|
||||
continue
|
||||
for field_name, field_def in result['fields'].iteritems():
|
||||
res_name = name + ',' + field_name
|
||||
|
||||
for fn in def_params:
|
||||
if fn in field_def:
|
||||
transtype, modifier = def_params[fn]
|
||||
for val in modifier(field_def[fn]):
|
||||
push_translation(module, transtype, res_name, 0, val)
|
||||
|
||||
# export arch
|
||||
arch = result['arch']
|
||||
if arch and not isinstance(arch, UpdateableStr):
|
||||
d = etree.XML(arch)
|
||||
for t in trans_parse_view(d):
|
||||
push_translation(module, 'wizard_view', name, 0, t)
|
||||
|
||||
# export button labels
|
||||
for but_args in result['state']:
|
||||
button_name = but_args[0]
|
||||
button_label = but_args[1]
|
||||
res_name = name + ',' + button_name
|
||||
push_translation(module, 'wizard_button', res_name, 0, button_label)
|
||||
pass # TODO Can model really be 'ir.actions.wizard' ?
|
||||
|
||||
elif model=='ir.model.fields':
|
||||
try:
|
||||
|
|
|
@ -19,10 +19,123 @@
|
|||
#
|
||||
##############################################################################
|
||||
|
||||
import wkf_service
|
||||
import instance
|
||||
|
||||
#.apidoc title: Workflow objects
|
||||
wkf_on_create_cache = {}
|
||||
|
||||
def clear_cache(cr, uid):
|
||||
wkf_on_create_cache[cr.dbname]={}
|
||||
|
||||
def trg_write(uid, res_type, res_id, cr):
|
||||
"""
|
||||
Reevaluates the specified workflow instance. Thus if any condition for
|
||||
a transition have been changed in the backend, then running ``trg_write``
|
||||
will move the workflow over that transition.
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
ident = (uid,res_type,res_id)
|
||||
cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (res_id or None,res_type or None, 'active'))
|
||||
for (id,) in cr.fetchall():
|
||||
instance.update(cr, id, ident)
|
||||
|
||||
def trg_trigger(uid, res_type, res_id, cr):
|
||||
"""
|
||||
Activate a trigger.
|
||||
|
||||
If a workflow instance is waiting for a trigger from another model, then this
|
||||
trigger can be activated if its conditions are met.
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
cr.execute('select instance_id from wkf_triggers where res_id=%s and model=%s', (res_id,res_type))
|
||||
res = cr.fetchall()
|
||||
for (instance_id,) in res:
|
||||
cr.execute('select %s,res_type,res_id from wkf_instance where id=%s', (uid, instance_id,))
|
||||
ident = cr.fetchone()
|
||||
instance.update(cr, instance_id, ident)
|
||||
|
||||
def trg_delete(uid, res_type, res_id, cr):
|
||||
"""
|
||||
Delete a workflow instance
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
ident = (uid,res_type,res_id)
|
||||
instance.delete(cr, ident)
|
||||
|
||||
def trg_create(uid, res_type, res_id, cr):
|
||||
"""
|
||||
Create a new workflow instance
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id to own the created worfklow instance
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
ident = (uid,res_type,res_id)
|
||||
wkf_on_create_cache.setdefault(cr.dbname, {})
|
||||
if res_type in wkf_on_create_cache[cr.dbname]:
|
||||
wkf_ids = wkf_on_create_cache[cr.dbname][res_type]
|
||||
else:
|
||||
cr.execute('select id from wkf where osv=%s and on_create=True', (res_type,))
|
||||
wkf_ids = cr.fetchall()
|
||||
wkf_on_create_cache[cr.dbname][res_type] = wkf_ids
|
||||
for (wkf_id,) in wkf_ids:
|
||||
instance.create(cr, ident, wkf_id)
|
||||
|
||||
def trg_validate(uid, res_type, res_id, signal, cr):
|
||||
"""
|
||||
Fire a signal on a given workflow instance
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:signal: the signal name to be fired
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
result = False
|
||||
ident = (uid,res_type,res_id)
|
||||
# ids of all active workflow instances for a corresponding resource (id, model_nam)
|
||||
cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (res_id, res_type, 'active'))
|
||||
for (id,) in cr.fetchall():
|
||||
res2 = instance.validate(cr, id, ident, signal)
|
||||
result = result or res2
|
||||
return result
|
||||
|
||||
def trg_redirect(uid, res_type, res_id, new_rid, cr):
|
||||
"""
|
||||
Re-bind a workflow instance to another instance of the same model.
|
||||
|
||||
Make all workitems which are waiting for a (subflow) workflow instance
|
||||
for the old resource point to the (first active) workflow instance for
|
||||
the new resource.
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:param new_rid: the model instance id to own the worfklow instance
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
# get ids of wkf instances for the old resource (res_id)
|
||||
#CHECKME: shouldn't we get only active instances?
|
||||
cr.execute('select id, wkf_id from wkf_instance where res_id=%s and res_type=%s', (res_id, res_type))
|
||||
for old_inst_id, wkf_id in cr.fetchall():
|
||||
# first active instance for new resource (new_rid), using same wkf
|
||||
cr.execute(
|
||||
'SELECT id '\
|
||||
'FROM wkf_instance '\
|
||||
'WHERE res_id=%s AND res_type=%s AND wkf_id=%s AND state=%s',
|
||||
(new_rid, res_type, wkf_id, 'active'))
|
||||
new_id = cr.fetchone()
|
||||
if new_id:
|
||||
# select all workitems which "wait" for the old instance
|
||||
cr.execute('select id from wkf_workitem where subflow_id=%s', (old_inst_id,))
|
||||
for (item_id,) in cr.fetchall():
|
||||
# redirect all those workitems to the wkf instance of the new resource
|
||||
cr.execute('update wkf_workitem set subflow_id=%s where id=%s', (new_id[0], item_id))
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
||||
|
|
|
@ -1,156 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
##############################################################################
|
||||
#
|
||||
# OpenERP, Open Source Management Solution
|
||||
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as
|
||||
# published by the Free Software Foundation, either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
##############################################################################
|
||||
import instance
|
||||
import openerp.netsvc as netsvc
|
||||
|
||||
class workflow_service(netsvc.Service):
|
||||
"""
|
||||
Sometimes you might want to fire a signal or re-evaluate the current state
|
||||
of a workflow using the service's API. You can access the workflow services
|
||||
using:
|
||||
|
||||
>>> import netsvc
|
||||
>>> wf_service = netsvc.LocalService("workflow")
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, name='workflow'):
|
||||
netsvc.Service.__init__(self, name)
|
||||
self.wkf_on_create_cache={}
|
||||
|
||||
def clear_cache(self, cr, uid):
|
||||
self.wkf_on_create_cache[cr.dbname]={}
|
||||
|
||||
def trg_write(self, uid, res_type, res_id, cr):
|
||||
"""
|
||||
Reevaluates the specified workflow instance. Thus if any condition for
|
||||
a transition have been changed in the backend, then running ``trg_write``
|
||||
will move the workflow over that transition.
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
ident = (uid,res_type,res_id)
|
||||
cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (res_id or None,res_type or None, 'active'))
|
||||
for (id,) in cr.fetchall():
|
||||
instance.update(cr, id, ident)
|
||||
|
||||
def trg_trigger(self, uid, res_type, res_id, cr):
|
||||
"""
|
||||
Activate a trigger.
|
||||
|
||||
If a workflow instance is waiting for a trigger from another model, then this
|
||||
trigger can be activated if its conditions are met.
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
cr.execute('select instance_id from wkf_triggers where res_id=%s and model=%s', (res_id,res_type))
|
||||
res = cr.fetchall()
|
||||
for (instance_id,) in res:
|
||||
cr.execute('select %s,res_type,res_id from wkf_instance where id=%s', (uid, instance_id,))
|
||||
ident = cr.fetchone()
|
||||
instance.update(cr, instance_id, ident)
|
||||
|
||||
def trg_delete(self, uid, res_type, res_id, cr):
|
||||
"""
|
||||
Delete a workflow instance
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
ident = (uid,res_type,res_id)
|
||||
instance.delete(cr, ident)
|
||||
|
||||
def trg_create(self, uid, res_type, res_id, cr):
|
||||
"""
|
||||
Create a new workflow instance
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id to own the created worfklow instance
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
ident = (uid,res_type,res_id)
|
||||
self.wkf_on_create_cache.setdefault(cr.dbname, {})
|
||||
if res_type in self.wkf_on_create_cache[cr.dbname]:
|
||||
wkf_ids = self.wkf_on_create_cache[cr.dbname][res_type]
|
||||
else:
|
||||
cr.execute('select id from wkf where osv=%s and on_create=True', (res_type,))
|
||||
wkf_ids = cr.fetchall()
|
||||
self.wkf_on_create_cache[cr.dbname][res_type] = wkf_ids
|
||||
for (wkf_id,) in wkf_ids:
|
||||
instance.create(cr, ident, wkf_id)
|
||||
|
||||
def trg_validate(self, uid, res_type, res_id, signal, cr):
|
||||
"""
|
||||
Fire a signal on a given workflow instance
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:signal: the signal name to be fired
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
result = False
|
||||
ident = (uid,res_type,res_id)
|
||||
# ids of all active workflow instances for a corresponding resource (id, model_nam)
|
||||
cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (res_id, res_type, 'active'))
|
||||
for (id,) in cr.fetchall():
|
||||
res2 = instance.validate(cr, id, ident, signal)
|
||||
result = result or res2
|
||||
return result
|
||||
|
||||
def trg_redirect(self, uid, res_type, res_id, new_rid, cr):
|
||||
"""
|
||||
Re-bind a workflow instance to another instance of the same model.
|
||||
|
||||
Make all workitems which are waiting for a (subflow) workflow instance
|
||||
for the old resource point to the (first active) workflow instance for
|
||||
the new resource.
|
||||
|
||||
:param res_type: the model name
|
||||
:param res_id: the model instance id the workflow belongs to
|
||||
:param new_rid: the model instance id to own the worfklow instance
|
||||
:param cr: a database cursor
|
||||
"""
|
||||
# get ids of wkf instances for the old resource (res_id)
|
||||
#CHECKME: shouldn't we get only active instances?
|
||||
cr.execute('select id, wkf_id from wkf_instance where res_id=%s and res_type=%s', (res_id, res_type))
|
||||
for old_inst_id, wkf_id in cr.fetchall():
|
||||
# first active instance for new resource (new_rid), using same wkf
|
||||
cr.execute(
|
||||
'SELECT id '\
|
||||
'FROM wkf_instance '\
|
||||
'WHERE res_id=%s AND res_type=%s AND wkf_id=%s AND state=%s',
|
||||
(new_rid, res_type, wkf_id, 'active'))
|
||||
new_id = cr.fetchone()
|
||||
if new_id:
|
||||
# select all workitems which "wait" for the old instance
|
||||
cr.execute('select id from wkf_workitem where subflow_id=%s', (old_inst_id,))
|
||||
for (item_id,) in cr.fetchall():
|
||||
# redirect all those workitems to the wkf instance of the new resource
|
||||
cr.execute('update wkf_workitem set subflow_id=%s where id=%s', (new_id[0], item_id))
|
||||
workflow_service()
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
Loading…
Reference in New Issue