diff --git a/openerp/netsvc.py b/openerp/netsvc.py
index 856faa8f61d..68375d1953c 100644
--- a/openerp/netsvc.py
+++ b/openerp/netsvc.py
@@ -80,7 +80,7 @@ class PostgreSQLHandler(logging.Handler):
         dbname = tools.config['log_db'] or ct_db
         if not dbname:
             return
-        with tools.ignore(Exception), tools.mute_logger('openerp.sql_db'), sql_db.db_connect(dbname).cursor() as cr:
+        with tools.ignore(Exception), tools.mute_logger('openerp.sql_db'), sql_db.db_connect(dbname, allow_uri=True).cursor() as cr:
             msg = tools.ustr(record.msg)
             if record.args:
                 msg = msg % record.args
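The `allow_uri=True` flag above opts the log handler into the URI support introduced in `openerp/sql_db.py` below, so `--log-db` can designate a log database on another server by full connection URI instead of a bare database name. A minimal usage sketch, assuming a running OpenERP configuration (host and credentials are made-up examples):

    import openerp.sql_db as sql_db

    # hypothetical log database given as a postgres URI; without
    # allow_uri=True, db_connect() rejects this value with a ValueError
    cr = sql_db.db_connect('postgres://odoo:secret@loghost:5432/logs',
                           allow_uri=True).cursor()
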
Use only %s") if params and not isinstance(params, (tuple, list, dict)): _logger.error("SQL query parameters should be a tuple, list or dict; got %r", params) raise ValueError("SQL query parameters should be a tuple, list or dict; got %r" % (params,)) @@ -258,7 +262,6 @@ class Cursor(object): self.sql_into_log[res_into.group(1)][1] += delay return res - def split_for_in_conditions(self, ids): """Split a list of identifiers into one or more smaller tuples safe for IN conditions, after uniquifying them.""" @@ -270,22 +273,20 @@ class Cursor(object): if not self.sql_log: return def process(type): - sqllogs = {'from':self.sql_from_log, 'into':self.sql_into_log} + sqllogs = {'from': self.sql_from_log, 'into': self.sql_into_log} sum = 0 if sqllogs[type]: sqllogitems = sqllogs[type].items() sqllogitems.sort(key=lambda k: k[1][1]) _logger.debug("SQL LOG %s:", type) - sqllogitems.sort(lambda x,y: cmp(x[1][0], y[1][0])) + sqllogitems.sort(lambda x, y: cmp(x[1][0], y[1][0])) for r in sqllogitems: delay = timedelta(microseconds=r[1][1]) - _logger.debug("table: %s: %s/%s", - r[0], delay, r[1][0]) - sum+= r[1][1] + _logger.debug("table: %s: %s/%s", r[0], delay, r[1][0]) + sum += r[1][1] sqllogs[type].clear() sum = timedelta(microseconds=sum) - _logger.debug("SUM %s:%s/%d [%d]", - type, sum, self.sql_log_count, sql_counter) + _logger.debug("SUM %s:%s/%d [%d]", type, sum, self.sql_log_count, sql_counter) sqllogs[type].clear() process('from') process('into') @@ -305,7 +306,7 @@ class Cursor(object): del self.cache if self.sql_log: - self.__closer = frame_codeinfo(currentframe(),3) + self.__closer = frame_codeinfo(currentframe(), 3) # simple query count is always computed sql_counter += self.sql_log_count @@ -349,9 +350,10 @@ class Cursor(object): # is remapped to serializable before being # sent to the database, so it is in fact # unavailable for use with pg 9.1. - isolation_level = ISOLATION_LEVEL_REPEATABLE_READ \ - if self._serialized \ - else ISOLATION_LEVEL_READ_COMMITTED + isolation_level = \ + ISOLATION_LEVEL_REPEATABLE_READ \ + if self._serialized \ + else ISOLATION_LEVEL_READ_COMMITTED self._cnx.set_isolation_level(isolation_level) @check @@ -442,10 +444,10 @@ class PsycoConnection(psycopg2.extensions.connection): class ConnectionPool(object): """ The pool of connections to database(s) - + Keep a set of connections to pg databases open, and reuse them to open cursors for all transactions. - + The connections are *not* automatically closed. Only a close_db() can trigger that. 
""" @@ -460,7 +462,6 @@ class ConnectionPool(object): self._lock.release() return _locked - def __init__(self, maxconn=64): self._connections = [] self._maxconn = max(maxconn, 1) @@ -491,7 +492,7 @@ class ConnectionPool(object): _logger.warning('%r: Free leaked connection to %r', self, cnx.dsn) for i, (cnx, used) in enumerate(self._connections): - if not used and dsn_are_equals(cnx.dsn, dsn): + if not used and cnx._original_dsn == dsn: try: cnx.reset() except psycopg2.OperationalError: @@ -522,6 +523,7 @@ class ConnectionPool(object): except psycopg2.Error: _logger.exception('Connection to the database failed') raise + result._original_dsn = dsn self._connections.append((result, True)) self._debug('Create new connection') return result @@ -546,7 +548,7 @@ class ConnectionPool(object): def close_all(self, dsn=None): _logger.info('%r: Close all connections to %r', self, dsn) for i, (cnx, used) in tools.reverse_enumerate(self._connections): - if dsn is None or dsn_are_equals(cnx.dsn, dsn): + if dsn is None or cnx._original_dsn == dsn: cnx.close() self._connections.pop(i) @@ -554,20 +556,20 @@ class ConnectionPool(object): class Connection(object): """ A lightweight instance of a connection to postgres """ - - def __init__(self, pool, dbname): + def __init__(self, pool, dbname, dsn): self.dbname = dbname + self.dsn = dsn self.__pool = pool def cursor(self, serialized=True): cursor_type = serialized and 'serialized ' or '' - _logger.debug('create %scursor to %r', cursor_type, self.dbname) - return Cursor(self.__pool, self.dbname, serialized=serialized) + _logger.debug('create %scursor to %r', cursor_type, self.dsn) + return Cursor(self.__pool, self.dbname, self.dsn, serialized=serialized) def test_cursor(self, serialized=True): cursor_type = serialized and 'serialized ' or '' - _logger.debug('create test %scursor to %r', cursor_type, self.dbname) - return TestCursor(self.__pool, self.dbname, serialized=serialized) + _logger.debug('create test %scursor to %r', cursor_type, self.dsn) + return TestCursor(self.__pool, self.dbname, self.dsn, serialized=serialized) # serialized_cursor is deprecated - cursors are serialized by default serialized_cursor = cursor @@ -582,42 +584,48 @@ class Connection(object): except Exception: return False -def dsn(db_name): +def dsn(db_or_uri): + """parse the given `db_or_uri` and return a 2-tuple (dbname, uri)""" + if db_or_uri.startswith(('postgresql://', 'postgres://')): + # extract db from uri + us = urlparse.urlsplit(db_or_uri) + if len(us.path) > 1: + db_name = us.path[1:] + elif us.username: + db_name = us.username + else: + db_name = us.hostname + return db_name, db_or_uri + _dsn = '' for p in ('host', 'port', 'user', 'password'): cfg = tools.config['db_' + p] if cfg: _dsn += '%s=%s ' % (p, cfg) - return '%sdbname=%s' % (_dsn, db_name) - -def dsn_are_equals(first, second): - def key(dsn): - k = dict(x.split('=', 1) for x in dsn.strip().split()) - k.pop('password', None) # password is not relevant - return k - return key(first) == key(second) - + return db_or_uri, '%sdbname=%s' % (_dsn, db_or_uri) _Pool = None -def db_connect(db_name): +def db_connect(to, allow_uri=False): global _Pool if _Pool is None: _Pool = ConnectionPool(int(tools.config['db_maxconn'])) - return Connection(_Pool, db_name) + + db, uri = dsn(to) + if not allow_uri and db != to: + raise ValueError('URI connections not allowed') + return Connection(_Pool, db, uri) def close_db(db_name): """ You might want to call openerp.modules.registry.RegistryManager.delete(db_name) along this function.""" 
     global _Pool
     if _Pool:
-        _Pool.close_all(dsn(db_name))
+        _Pool.close_all(dsn(db_name)[1])
 
 def close_all():
     global _Pool
     if _Pool:
         _Pool.close_all()
 
-# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
-
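The reworked `dsn()` returns a `(dbname, dsn)` pair: for `postgresql://`/`postgres://` URIs the database name is taken from the URI path, falling back to the username, then the hostname; a plain name still builds the classic keyword dsn from the `db_*` config entries. `db_connect()` accepts a URI only when the caller passes `allow_uri=True`, and the pool now matches connections on the verbatim dsn string stored in `_original_dsn` rather than the removed `dsn_are_equals()` comparison, since a URI cannot be split into `key=value` fields the way a keyword dsn can. A minimal sketch of the resulting behaviour, assuming a running OpenERP configuration (all hosts and credentials are made-up examples):

    from openerp.sql_db import dsn, db_connect

    dsn('postgres://user:pw@host:5432/mydb')
    # -> ('mydb', 'postgres://user:pw@host:5432/mydb')
    dsn('postgres://mydb')
    # no path and no username, so the hostname is the db -> ('mydb', 'postgres://mydb')
    dsn('somedb')
    # plain name -> ('somedb', '<db_* config pairs>dbname=somedb')

    db_connect('somedb')                                  # unchanged behaviour
    db_connect('postgres://host/somedb', allow_uri=True)  # explicit opt-in to URIs
    db_connect('postgres://host/somedb')                  # raises ValueError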