 Here is the exported translation file:
 This file was generated using the universal Unicode/UTF-8 file encoding, please be sure to view and edit
+using the same encoding.
+The next step depends on the file format:
+For more details about translating OpenERP in your language, please refer to the
+documentation.
+
@@ -298,7 +298,7 @@
 --\nYours truly\n'),
@@ -15,7 +15,37 @@ class test_misc(unittest2.TestCase):
             'some content\n\n\n -- \nYours truly \n\n\n'),
         ]
         for html, content, flag, expected in test_samples:
-            self.assertEqual(append_content_to_html(html,content,flag), expected, 'append_content_to_html is broken')
+            self.assertEqual(misc.append_content_to_html(html,content,flag), expected, 'append_content_to_html is broken')
+
+class test_countingstream(unittest2.TestCase):
+    def test_empty_stream(self):
+        s = misc.CountingStream(iter([]))
+        self.assertEqual(s.index, -1)
+        self.assertIsNone(next(s, None))
+        self.assertEqual(s.index, 0)
+
+    def test_single(self):
+        s = misc.CountingStream(xrange(1))
+        self.assertEqual(s.index, -1)
+        self.assertEqual(next(s, None), 0)
+        self.assertIsNone(next(s, None))
+        self.assertEqual(s.index, 1)
+
+    def test_full(self):
+        s = misc.CountingStream(xrange(42))
+        for _ in s:
+            pass
+        self.assertEqual(s.index, 42)
+
+    def test_repeated(self):
+        """ Once the CountingStream has stopped iterating, the index should not
+        increase anymore (the internal state should not be allowed to change)
+        """
+        s = misc.CountingStream(iter([]))
+        self.assertIsNone(next(s, None))
+        self.assertEqual(s.index, 0)
+        self.assertIsNone(next(s, None))
+        self.assertEqual(s.index, 0)
 
 if __name__ == '__main__':
-    unittest2.main()
\ No newline at end of file
+    unittest2.main()
diff --git a/openerp/tests/test_orm.py b/openerp/tests/test_orm.py
index bce5bc3903a..ef8c0cf19aa 100644
--- a/openerp/tests/test_orm.py
+++ b/openerp/tests/test_orm.py
@@ -6,6 +6,85 @@ import common
 UID = common.ADMIN_USER_ID
 DB = common.DB
 
+class TestInherits(common.TransactionCase):
+    """ test the behavior of the orm for models that use _inherits;
+        specifically: res.users, that inherits from res.partner
+    """
+
+    def setUp(self):
+        super(TestInherits, self).setUp()
+        self.partner = self.registry('res.partner')
+        self.user = self.registry('res.users')
+
+    def test_create(self):
+        """ creating a user should automatically create a new partner """
+        partners_before = self.partner.search(self.cr, UID, [])
+        foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
+        foo = self.user.browse(self.cr, UID, foo_id)
+
+        self.assertNotIn(foo.partner_id.id, partners_before)
+
+    def test_create_with_ancestor(self):
+        """ creating a user with a specific 'partner_id' should not create a new partner """
+        par_id = self.partner.create(self.cr, UID, {'name': 'Foo'})
+        partners_before = self.partner.search(self.cr, UID, [])
+        foo_id = self.user.create(self.cr, UID, {'partner_id': par_id, 'login': 'foo', 'password': 'foo'})
+        partners_after = self.partner.search(self.cr, UID, [])
+
+        self.assertEqual(set(partners_before), set(partners_after))
+
+        foo = self.user.browse(self.cr, UID, foo_id)
+        self.assertEqual(foo.name, 'Foo')
+        self.assertEqual(foo.partner_id.id, par_id)
+
+    def test_read(self):
+        """ inherited fields should be read without any indirection """
+        foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
+        foo_values, = self.user.read(self.cr, UID, [foo_id])
+        partner_id = foo_values['partner_id'][0]
+        partner_values, = self.partner.read(self.cr, UID, [partner_id])
+        self.assertEqual(foo_values['name'], partner_values['name'])
+
+        foo = self.user.browse(self.cr, UID, foo_id)
+        self.assertEqual(foo.name, foo.partner_id.name)
+
+    def test_copy(self):
+        """ copying a user should automatically copy its partner, too """
+        foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
+        foo_before, = self.user.read(self.cr, UID, [foo_id])
+        bar_id = self.user.copy(self.cr, UID, foo_id, {'login': 'bar', 'password': 'bar'})
+        foo_after, = self.user.read(self.cr, UID, [foo_id])
+
+        self.assertEqual(foo_before, foo_after)
+
+        foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
+        self.assertEqual(bar.login, 'bar')
+        self.assertNotEqual(foo.id, bar.id)
+        self.assertNotEqual(foo.partner_id.id, bar.partner_id.id)
+
+    def test_copy_with_ancestor(self):
+        """ copying a user with 'parent_id' in defaults should not duplicate the partner """
+        foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
+        par_id = self.partner.create(self.cr, UID, {'name': 'Bar'})
+
+        foo_before, = self.user.read(self.cr, UID, [foo_id])
+        partners_before = self.partner.search(self.cr, UID, [])
+        bar_id = self.user.copy(self.cr, UID, foo_id, {'partner_id': par_id, 'login': 'bar'})
+        foo_after, = self.user.read(self.cr, UID, [foo_id])
+        partners_after = self.partner.search(self.cr, UID, [])
+
+        self.assertEqual(foo_before, foo_after)
+        self.assertEqual(set(partners_before), set(partners_after))
+
+        foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
+        self.assertNotEqual(foo.id, bar.id)
+        self.assertEqual(bar.partner_id.id, par_id)
+        self.assertEqual(bar.login, 'bar', "login is given from copy parameters")
+        self.assertEqual(bar.password, foo.password, "password is given from original record")
+        self.assertEqual(bar.name, 'Bar', "name is given from specific partner")
+
+
+
 CREATE = lambda values: (0, False, values)
 UPDATE = lambda id, values: (1, id, values)
 DELETE = lambda id: (2, id, False)
@@ -19,6 +98,7 @@ def sorted_by_id(list_of_dicts):
     return sorted(list_of_dicts, key=lambda d: d.get('id'))
 
 class TestO2MSerialization(common.TransactionCase):
+    """ test the orm method 'write' on one2many fields """
 
     def setUp(self):
         super(TestO2MSerialization, self).setUp()
diff --git a/openerp/tools/cache.py b/openerp/tools/cache.py
index 2c5c4a46e0d..6e18007c340 100644
--- a/openerp/tools/cache.py
+++ b/openerp/tools/cache.py
@@ -43,7 +43,7 @@ class ormcache(object):
             return r
         except KeyError:
             self.stat_miss += 1
-            value = d[args] = self.method(self2, cr, *args)
+            value = d[key] = self.method(self2, cr, *args)
             return value
         except TypeError:
             self.stat_err += 1
diff --git a/openerp/tools/config.py b/openerp/tools/config.py
index 18b442221c9..56f1e5c7b83 100644
--- a/openerp/tools/config.py
+++ b/openerp/tools/config.py
@@ -182,7 +182,7 @@ class configmanager(object):
         group.add_option('--log-handler', action="append", default=[':INFO'], my_default=[':INFO'], metavar="PREFIX:LEVEL", help='setup a handler at LEVEL for a given PREFIX. An empty PREFIX indicates the root logger. This option can be repeated. Example: "openerp.orm:DEBUG" or "werkzeug:CRITICAL" (default: ":INFO")')
         group.add_option('--log-request', action="append_const", dest="log_handler", const="openerp.netsvc.rpc.request:DEBUG", help='shortcut for --log-handler=openerp.netsvc.rpc.request:DEBUG')
         group.add_option('--log-response', action="append_const", dest="log_handler", const="openerp.netsvc.rpc.response:DEBUG", help='shortcut for --log-handler=openerp.netsvc.rpc.response:DEBUG')
-        group.add_option('--log-web', action="append_const", dest="log_handler", const="openerp.addons.web.common.http:DEBUG", help='shortcut for --log-handler=openerp.addons.web.common.http:DEBUG')
+        group.add_option('--log-web', action="append_const", dest="log_handler", const="openerp.addons.web.http:DEBUG", help='shortcut for --log-handler=openerp.addons.web.http:DEBUG')
         group.add_option('--log-sql', action="append_const", dest="log_handler", const="openerp.sql_db:DEBUG", help='shortcut for --log-handler=openerp.sql_db:DEBUG')
         # For backward-compatibility, map the old log levels to something
         # quite close.
diff --git a/openerp/tools/convert.py b/openerp/tools/convert.py
index c9836d7f0c6..12ed269db8b 100644
--- a/openerp/tools/convert.py
+++ b/openerp/tools/convert.py
@@ -386,18 +386,8 @@ form: module.record_id""" % (xml_id,)
 
         res = {'name': name, 'url': url, 'target':target}
 
-        id = self.pool.get('ir.model.data')._update(cr, self.uid, "ir.actions.url", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
+        id = self.pool.get('ir.model.data')._update(cr, self.uid, "ir.actions.act_url", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
         self.idref[xml_id] = int(id)
-        # ir_set
-        if (not rec.get('menu') or eval(rec.get('menu','False'))) and id:
-            keyword = str(rec.get('keyword','') or 'client_action_multi')
-            value = 'ir.actions.url,'+str(id)
-            replace = rec.get("replace",'') or True
-            self.pool.get('ir.model.data').ir_set(cr, self.uid, 'action', keyword, url, ["ir.actions.url"], value, replace=replace, isobject=True, xml_id=xml_id)
-        elif self.mode=='update' and (rec.get('menu') and eval(rec.get('menu','False'))==False):
-            # Special check for URL having attribute menu=False on update
-            value = 'ir.actions.url,'+str(id)
-            self._remove_ir_values(cr, url, value, "ir.actions.url")
 
     def _tag_act_window(self, cr, rec, data_node=None):
         name = rec.get('name','').encode('utf-8')
diff --git a/openerp/tools/misc.py b/openerp/tools/misc.py
index 27b81489b55..c9283a839d4 100644
--- a/openerp/tools/misc.py
+++ b/openerp/tools/misc.py
@@ -312,16 +312,8 @@ def html2plaintext(html, body_id=None, encoding='utf-8'):
 
     html = ustr(html)
 
-    from lxml.etree import tostring
-    try:
-        from lxml.html.soupparser import fromstring
-        kwargs = {}
-    except ImportError:
-        _logger.debug('tools.misc.html2plaintext: cannot use BeautifulSoup, fallback to lxml.etree.HTMLParser')
-        from lxml.etree import fromstring, HTMLParser
-        kwargs = dict(parser=HTMLParser())
-
-    tree = fromstring(html, **kwargs)
+    from lxml.etree import tostring, fromstring, HTMLParser
+    tree = fromstring(html, parser=HTMLParser())
 
     if body_id is not None:
         source = tree.xpath('//*[@id=%s]'%(body_id,))
@@ -1221,4 +1213,38 @@ class mute_logger(object):
             with self:
                 return func(*args, **kwargs)
         return deco
+
+_ph = object()
+class CountingStream(object):
+    """ Stream wrapper counting the number of element it has yielded. Similar
+    role to ``enumerate``, but for use when the iteration process of the stream
+    isn't fully under caller control (the stream can be iterated from multiple
+    points including within a library)
+
+    ``start`` allows overriding the starting index (the index before the first
+    item is returned).
+
+    On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
+    by one.
+
+    .. attribute:: index
+
+        ``int``, index of the last yielded element in the stream. If the stream
+        has ended, will give an index 1-past the stream
+    """
+    def __init__(self, stream, start=-1):
+        self.stream = iter(stream)
+        self.index = start
+        self.stopped = False
+    def __iter__(self):
+        return self
+    def next(self):
+        if self.stopped: raise StopIteration()
+        self.index += 1
+        val = next(self.stream, _ph)
+        if val is _ph:
+            self.stopped = True
+            raise StopIteration()
+        return val
+
 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
diff --git a/openerp/tools/safe_eval.py b/openerp/tools/safe_eval.py
index 601fd95200a..1c3870004a7 100644
--- a/openerp/tools/safe_eval.py
+++ b/openerp/tools/safe_eval.py
@@ -46,7 +46,7 @@ _ALLOWED_MODULES = ['_strptime', 'time']
 _CONST_OPCODES = set(opmap[x] for x in [
     'POP_TOP', 'ROT_TWO', 'ROT_THREE', 'ROT_FOUR', 'DUP_TOP', 'DUP_TOPX',
     'POP_BLOCK','SETUP_LOOP', 'BUILD_LIST', 'BUILD_MAP', 'BUILD_TUPLE',
-    'LOAD_CONST', 'RETURN_VALUE', 'STORE_SUBSCR'] if x in opmap)
+    'LOAD_CONST', 'RETURN_VALUE', 'STORE_SUBSCR', 'STORE_MAP'] if x in opmap)
 
 _EXPR_OPCODES = _CONST_OPCODES.union(set(opmap[x] for x in [
     'UNARY_POSITIVE', 'UNARY_NEGATIVE', 'UNARY_NOT',
@@ -61,7 +61,7 @@ _EXPR_OPCODES = _CONST_OPCODES.union(set(opmap[x] for x in [
     ] if x in opmap))
 
 _SAFE_OPCODES = _EXPR_OPCODES.union(set(opmap[x] for x in [
-    'STORE_MAP', 'LOAD_NAME', 'CALL_FUNCTION', 'COMPARE_OP', 'LOAD_ATTR',
+    'LOAD_NAME', 'CALL_FUNCTION', 'COMPARE_OP', 'LOAD_ATTR',
     'STORE_NAME', 'GET_ITER', 'FOR_ITER', 'LIST_APPEND', 'DELETE_NAME',
     'JUMP_FORWARD', 'JUMP_IF_TRUE', 'JUMP_IF_FALSE', 'JUMP_ABSOLUTE',
     'MAKE_FUNCTION', 'SLICE+0', 'SLICE+1', 'SLICE+2', 'SLICE+3',
diff --git a/openerp/tools/translate.py b/openerp/tools/translate.py
index 5239b37bd2d..e7476827093 100644
--- a/openerp/tools/translate.py
+++ b/openerp/tools/translate.py
@@ -33,6 +33,7 @@ import logging
 import tarfile
 import tempfile
 import threading
+from babel.messages import extract
 from os.path import join
 
 from datetime import datetime
@@ -47,6 +48,9 @@ from openerp import SUPERUSER_ID
 
 _logger = logging.getLogger(__name__)
 
+# used to notify web client that these translations should be loaded in the UI
+WEB_TRANSLATION_COMMENT = "openerp-web"
+
 _LOCALE2WIN32 = {
     'af_ZA': 'Afrikaans_South Africa',
     'sq_AL': 'Albanian_Albania',
@@ -262,7 +266,7 @@ class TinyPoFile(object):
         self.lines_count = len(self.lines);
 
         self.first = True
-        self.tnrs= []
+        self.extra_lines= []
         return self
 
     def _get_lines(self):
@@ -278,14 +282,14 @@ class TinyPoFile(object):
         return (self.lines_count - len(self.lines))
 
     def next(self):
-        type = name = res_id = source = trad = None
-
-        if self.tnrs:
-            type, name, res_id, source, trad = self.tnrs.pop(0)
+        trans_type = name = res_id = source = trad = None
+        if self.extra_lines:
+            trans_type, name, res_id, source, trad, comments = self.extra_lines.pop(0)
             if not res_id:
                 res_id = '0'
         else:
-            tmp_tnrs = []
+            comments = []
+            targets = []
             line = None
             fuzzy = False
             while (not line):
@@ -295,15 +299,20 @@ class TinyPoFile(object):
             while line.startswith('#'):
                 if line.startswith('#~ '):
                     break
-                if line.startswith('#:'):
+                if line.startswith('#.'):
+                    line = line[2:].strip()
+                    if not line.startswith('module:'):
+                        comments.append(line)
+                elif line.startswith('#:'):
                     for lpart in line[2:].strip().split(' '):
                         trans_info = lpart.strip().split(':',2)
                         if trans_info and len(trans_info) == 2:
-                            # looks like the translation type is missing, which is not
+                            # looks like the translation trans_type is missing, which is not
                             # unexpected because it is not a GetText standard. Default: 'code'
                             trans_info[:0] = ['code']
                         if trans_info and len(trans_info) == 3:
-                            tmp_tnrs.append(trans_info)
+                            # this is a ref line holding the destination info (model, field, record)
+                            targets.append(trans_info)
                 elif line.startswith('#,') and (line[2:].strip() == 'fuzzy'):
                     fuzzy = True
                 line = self.lines.pop(0).strip()
@@ -326,7 +335,7 @@ class TinyPoFile(object):
                 # if the source is "" and it's the first msgid, it's the special
                 # msgstr with the informations about the traduction and the
                 # traductor; we skip it
-                self.tnrs = []
+                self.extra_lines = []
                 while line:
                     line = self.lines.pop(0).strip()
                 return self.next()
@@ -343,10 +352,11 @@ class TinyPoFile(object):
                 trad += unquote(line)
                 line = self.lines.pop(0).strip()
 
-            if tmp_tnrs and not fuzzy:
-                type, name, res_id = tmp_tnrs.pop(0)
-                for t, n, r in tmp_tnrs:
-                    self.tnrs.append((t, n, r, source, trad))
+            if targets and not fuzzy:
+                trans_type, name, res_id = targets.pop(0)
+                for t, n, r in targets:
+                    if t == trans_type == 'code': continue
+                    self.extra_lines.append((t, n, r, source, trad, comments))
 
         self.first = False
 
@@ -355,7 +365,7 @@ class TinyPoFile(object):
             self.warn('Missing "#:" formated comment at line %d for the following source:\n\t%s',
                       self.cur_line(), source[:30])
             return self.next()
-        return type, name, res_id, source, trad
+        return trans_type, name, res_id, source, trad, '\n'.join(comments)
 
     def write_infos(self, modules):
         import openerp.release as release
@@ -384,11 +394,13 @@ class TinyPoFile(object):
                           }
                           )
 
-    def write(self, modules, tnrs, source, trad):
+    def write(self, modules, tnrs, source, trad, comments=None):
 
         plurial = len(modules) > 1 and 's' or ''
         self.buffer.write("#. module%s: %s\n" % (plurial, ', '.join(modules)))
 
+        if comments:
+            self.buffer.write(''.join(('#. %s\n' % c for c in comments)))
 
         code = False
         for typy, name, res_id in tnrs:
@@ -415,44 +427,43 @@ class TinyPoFile(object):
 
 def trans_export(lang, modules, buffer, format, cr):
 
-    def _process(format, modules, rows, buffer, lang, newlang):
+    def _process(format, modules, rows, buffer, lang):
         if format == 'csv':
-            writer=csv.writer(buffer, 'UNIX')
-            for row in rows:
-                writer.writerow(row)
+            writer = csv.writer(buffer, 'UNIX')
+            # write header first
+            writer.writerow(("module","type","name","res_id","src","value"))
+            for module, type, name, res_id, src, trad, comments in rows:
+                # Comments are ignored by the CSV writer
+                writer.writerow((module, type, name, res_id, src, trad))
         elif format == 'po':
-            rows.pop(0)
             writer = TinyPoFile(buffer)
             writer.write_infos(modules)
 
            # we now group the translations by source. That means one translation per source.
            grouped_rows = {}
-            for module, type, name, res_id, src, trad in rows:
+            for module, type, name, res_id, src, trad, comments in rows:
                row = grouped_rows.setdefault(src, {})
                row.setdefault('modules', set()).add(module)
                if ('translation' not in row) or (not row['translation']):
                    row['translation'] = trad
                row.setdefault('tnrs', []).append((type, name, res_id))
+               row.setdefault('comments', set()).update(comments)
 
            for src, row in grouped_rows.items():
-               writer.write(row['modules'], row['tnrs'], src, row['translation'])
+               writer.write(row['modules'], row['tnrs'], src, row['translation'], row['comments'])
 
        elif format == 'tgz':
-           rows.pop(0)
            rows_by_module = {}
            for row in rows:
                module = row[0]
-               # first row is the "header", as in csv, it will be popped
-               rows_by_module.setdefault(module, [['module', 'type', 'name', 'res_id', 'src', ''],])
-               rows_by_module[module].append(row)
-
+               rows_by_module.setdefault(module, []).append(row)
            tmpdir = tempfile.mkdtemp()
            for mod, modrows in rows_by_module.items():
                tmpmoddir = join(tmpdir, mod, 'i18n')
                os.makedirs(tmpmoddir)
-               pofilename = (newlang and mod or lang) + ".po" + (newlang and 't' or '')
+               pofilename = (lang if lang else mod) + ".po" + ('t' if not lang else '')
                buf = file(join(tmpmoddir, pofilename), 'w')
-               _process('po', [mod], modrows, buf, lang, newlang)
+               _process('po', [mod], modrows, buf, lang)
               buf.close()
 
            tar = tarfile.open(fileobj=buffer, mode='w|gz')
@@ -463,16 +474,15 @@ def trans_export(lang, modules, buffer, format, cr):
            raise Exception(_('Unrecognized extension: must be one of '
                '.csv, .po, or .tgz (received .%s).' % format))
 
-    newlang = not bool(lang)
-    if newlang:
-        lang = 'en_US'
-    trans = trans_generate(lang, modules, cr)
-    if newlang and format!='csv':
-        for trx in trans:
-            trx[-1] = ''
-    modules = set([t[0] for t in trans[1:]])
-    _process(format, modules, trans, buffer, lang, newlang)
-    del trans
+    trans_lang = lang
+    if not trans_lang and format == 'csv':
+        # CSV files are meant for translators and they need a starting point,
+        # so we at least put the original term in the translation column
+        trans_lang = 'en_US'
+    translations = trans_generate(lang, modules, cr)
+    modules = set([t[0] for t in translations[1:]])
+    _process(format, modules, translations, buffer, lang)
+    del translations
 
 def trans_parse_xsl(de):
     res = []
@@ -535,6 +545,46 @@ def in_modules(object_name, modules):
         module = module_dict.get(module, module)
     return module in modules
 
+
+def babel_extract_qweb(fileobj, keywords, comment_tags, options):
+    """Babel message extractor for qweb template files.
+    :param fileobj: the file-like object the messages should be extracted from
+    :param keywords: a list of keywords (i.e. function names) that should
+                     be recognized as translation functions
+    :param comment_tags: a list of translator tags to search for and
+                         include in the results
+    :param options: a dictionary of additional options (optional)
+    :return: an iterator over ``(lineno, funcname, message, comments)``
+             tuples
+    :rtype: ``iterator``
+    """
+    result = []
+    def handle_text(text, lineno):
+        text = (text or "").strip()
+        if len(text) > 1: # Avoid mono-char tokens like ':' ',' etc.
+            result.append((lineno, None, text, []))
+
+    # not using elementTree.iterparse because we need to skip sub-trees in case
+    # the ancestor element had a reason to be skipped
+    def iter_elements(current_element):
+        for el in current_element:
+            if isinstance(el, SKIPPED_ELEMENT_TYPES): continue
+            if "t-js" not in el.attrib and \
+                    not ("t-jquery" in el.attrib and "t-operation" not in el.attrib) and \
+                    not ("t-translation" in el.attrib and el.attrib["t-translation"].strip() == "off"):
+                handle_text(el.text, el.sourceline)
+                for att in ('title', 'alt', 'label', 'placeholder'):
+                    if att in el.attrib:
+                        handle_text(el.attrib[att], el.sourceline)
+                iter_elements(el)
+            handle_text(el.tail, el.sourceline)
+
+    tree = etree.parse(fileobj)
+    iter_elements(tree.getroot())
+
+    return result
+
+
 def trans_generate(lang, modules, cr):
     dbname = cr.dbname
 
@@ -566,8 +616,8 @@ def trans_generate(lang, modules, cr):
     cr.execute(query, query_param)
 
     _to_translate = []
-    def push_translation(module, type, name, id, source):
-        tuple = (module, source, name, id, type)
+    def push_translation(module, type, name, id, source, comments=None):
+        tuple = (module, source, name, id, type, comments or [])
         if source and tuple not in _to_translate:
             _to_translate.append(tuple)
 
@@ -717,7 +767,7 @@ def trans_generate(lang, modules, cr):
            if not hasattr(msg, '__call__'):
                push_translation(module, term_type, model, 0, encode(msg))
 
-    for (model_id, model, module) in cr.fetchall():
+    for (_, model, module) in cr.fetchall():
        module = encode(module)
        model = encode(model)
 
@@ -733,7 +783,6 @@ def trans_generate(lang, modules, cr):
        for constraint in getattr(model_obj, '_sql_constraints', []):
            push_constraint_msg(module, 'sql_constraint', model, constraint[2])
 
-    # parse source code for _() calls
    def get_module_from_path(path, mod_paths=None):
        if not mod_paths:
            # First, construct a list of possible paths
@@ -771,92 +820,71 @@ def trans_generate(lang, modules, cr):
    _logger.debug("Scanning modules at paths: ", path_list)
 
    mod_paths = []
-    join_dquotes = re.compile(r'([^\\])"[\s\\]*"', re.DOTALL)
-    join_quotes = re.compile(r'([^\\])\'[\s\\]*\'', re.DOTALL)
-    re_dquotes = re.compile(r'[^a-zA-Z0-9_]_\([\s]*"(.+?)"[\s]*?\)', re.DOTALL)
-    re_quotes = re.compile(r'[^a-zA-Z0-9_]_\([\s]*\'(.+?)\'[\s]*?\)', re.DOTALL)
 
-    def export_code_terms_from_file(fname, path, root, terms_type):
+    def verified_module_filepaths(fname, path, root):
        fabsolutepath = join(root, fname)
        frelativepath = fabsolutepath[len(path):]
+        display_path = "addons%s" % frelativepath
        module = get_module_from_path(fabsolutepath, mod_paths=mod_paths)
-        is_mod_installed = module in installed_modules
-        if (('all' in modules) or (module in modules)) and is_mod_installed:
-            _logger.debug("Scanning code of %s at module: %s", frelativepath, module)
-            src_file = misc.file_open(fabsolutepath, subdir='')
+        if (('all' in modules) or (module in modules)) and module in installed_modules:
+            return module, fabsolutepath, frelativepath, display_path
+        return None, None, None, None
+
+    def babel_extract_terms(fname, path, root, extract_method="python", trans_type='code',
+                            extra_comments=None, extract_keywords={'_': None}):
+        module, fabsolutepath, _, display_path = verified_module_filepaths(fname, path, root)
+        extra_comments = extra_comments or []
+        if module:
+            src_file = open(fabsolutepath, 'r')
+            try:
-                code_string = src_file.read()
+                for lineno, message, comments in extract.extract(extract_method, src_file,
+                                                                 keywords=extract_keywords):
+                    push_translation(module, trans_type, display_path, lineno,
+                                     encode(message), comments + extra_comments)
            finally:
                src_file.close()
-            if module in installed_modules:
-                frelativepath = str("addons" + frelativepath)
-            ite = re_dquotes.finditer(code_string)
-            code_offset = 0
-            code_line = 1
-            for i in ite:
-                src = i.group(1)
-                if src.startswith('""'):
-                    assert src.endswith('""'), "Incorrect usage of _(..) function (should contain only literal strings!) in file %s near: %s" % (frelativepath, src[:30])
-                    src = src[2:-2]
-                else:
-                    src = join_dquotes.sub(r'\1', src)
-                # try to count the lines from the last pos to our place:
-                code_line += code_string[code_offset:i.start(1)].count('\n')
-                # now, since we did a binary read of a python source file, we
-                # have to expand pythonic escapes like the interpreter does.
-                src = src.decode('string_escape')
-                push_translation(module, terms_type, frelativepath, code_line, encode(src))
-                code_line += i.group(1).count('\n')
-                code_offset = i.end() # we have counted newlines up to the match end
-
-            ite = re_quotes.finditer(code_string)
-            code_offset = 0 #reset counters
-            code_line = 1
-            for i in ite:
-                src = i.group(1)
-                if src.startswith("''"):
-                    assert src.endswith("''"), "Incorrect usage of _(..) function (should contain only literal strings!) in file %s near: %s" % (frelativepath, src[:30])
-                    src = src[2:-2]
-                else:
-                    src = join_quotes.sub(r'\1', src)
-                code_line += code_string[code_offset:i.start(1)].count('\n')
-                src = src.decode('string_escape')
-                push_translation(module, terms_type, frelativepath, code_line, encode(src))
-                code_line += i.group(1).count('\n')
-                code_offset = i.end() # we have counted newlines up to the match end
 
    for path in path_list:
        _logger.debug("Scanning files of modules at %s", path)
        for root, dummy, files in osutil.walksymlinks(path):
-            for fname in itertools.chain(fnmatch.filter(files, '*.py')):
-                export_code_terms_from_file(fname, path, root, 'code')
-            for fname in itertools.chain(fnmatch.filter(files, '*.mako')):
-                export_code_terms_from_file(fname, path, root, 'report')
+            for fname in fnmatch.filter(files, '*.py'):
+                babel_extract_terms(fname, path, root)
+            for fname in fnmatch.filter(files, '*.mako'):
+                babel_extract_terms(fname, path, root, trans_type='report')
+            # Javascript source files in the static/src/js directory, rest is ignored (libs)
+            if fnmatch.fnmatch(root, '*/static/src/js*'):
+                for fname in fnmatch.filter(files, '*.js'):
+                    babel_extract_terms(fname, path, root, 'javascript',
+                                        extra_comments=[WEB_TRANSLATION_COMMENT],
+                                        extract_keywords={'_t': None})
+            # QWeb template files
+            if fnmatch.fnmatch(root, '*/static/src/xml*'):
+                for fname in fnmatch.filter(files, '*.xml'):
+                    babel_extract_terms(fname, path, root, 'openerp.tools.translate:babel_extract_qweb',
+                                        extra_comments=[WEB_TRANSLATION_COMMENT])
 
-
-    out = [["module","type","name","res_id","src","value"]] # header
+    out = []
    _to_translate.sort()
    # translate strings marked as to be translated
-    for module, source, name, id, type in _to_translate:
-        trans = trans_obj._get_source(cr, uid, name, type, lang, source)
-        out.append([module, type, name, id, source, encode(trans) or ''])
-
+    for module, source, name, id, type, comments in _to_translate:
+        trans = '' if not lang else trans_obj._get_source(cr, uid, name, type, lang, source)
+        out.append([module, type, name, id, source, encode(trans) or '', comments])
    return out
 
-def trans_load(cr, filename, lang, verbose=True, context=None):
+def trans_load(cr, filename, lang, verbose=True, module_name=None, context=None):
    try:
        fileobj = misc.file_open(filename)
        _logger.info("loading %s", filename)
        fileformat = os.path.splitext(filename)[-1][1:].lower()
-        r = trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, context=context)
+        result = trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, module_name=module_name, context=context)
        fileobj.close()
-        return r
+        return result
    except IOError:
        if verbose:
            _logger.error("couldn't read translation file %s", filename)
        return None
 
-def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, context=None):
+def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, module_name=None, context=None):
    """Populates the ir_translation table."""
    if verbose:
        _logger.info('loading translation file for language %s', lang)
@@ -868,8 +896,7 @@ def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True,
    trans_obj = pool.get('ir.translation')
    iso_lang = misc.get_iso_codes(lang)
    try:
-        uid = 1
-        ids = lang_obj.search(cr, uid, [('code','=', lang)])
+        ids = lang_obj.search(cr, SUPERUSER_ID, [('code','=', lang)])
 
        if not ids:
            # lets create the language with locale information
@@ -886,14 +913,14 @@
                break
        elif fileformat == 'po':
            reader = TinyPoFile(fileobj)
-            f = ['type', 'name', 'res_id', 'src', 'value']
+            f = ['type', 'name', 'res_id', 'src', 'value', 'comments']
        else:
            _logger.error('Bad file format: %s', fileformat)
            raise Exception(_('Bad file format'))
 
        # read the rest of the file
        line = 1
-        irt_cursor = trans_obj._get_import_cursor(cr, uid, context=context)
+        irt_cursor = trans_obj._get_import_cursor(cr, SUPERUSER_ID, context=context)
 
        for row in reader:
            line += 1
@@ -903,39 +930,32 @@ def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True,
 
            # dictionary which holds values for this line of the csv file
            # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
-            #  'src': ..., 'value': ...}
-            dic = {'lang': lang}
-            dic_module = False
-            for i in range(len(f)):
-                if f[i] in ('module',):
-                    continue
-                dic[f[i]] = row[i]
+            #  'src': ..., 'value': ..., 'module':...}
+            dic = dict.fromkeys(('name', 'res_id', 'src', 'type', 'imd_model', 'imd_name', 'module', 'value', 'comments'))
+            dic['lang'] = lang
+            for i, field in enumerate(f):
+                dic[field] = row[i]
 
            # This would skip terms that fail to specify a res_id
-            if not dic.get('res_id', False):
+            if not dic.get('res_id'):
                continue
 
            res_id = dic.pop('res_id')
            if res_id and isinstance(res_id, (int, long)) \
                or (isinstance(res_id, basestring) and res_id.isdigit()):
                    dic['res_id'] = int(res_id)
+                    dic['module'] = module_name
            else:
-                try:
-                    tmodel = dic['name'].split(',')[0]
-                    if '.' in res_id:
-                        tmodule, tname = res_id.split('.', 1)
-                    else:
-                        tmodule = dic_module
-                        tname = res_id
-                    dic['imd_model'] = tmodel
-                    dic['imd_module'] = tmodule
-                    dic['imd_name'] = tname
-
-                    dic['res_id'] = None
-                except Exception:
-                    _logger.warning("Could not decode resource for %s, please fix the po file.",
-                                    dic['res_id'], exc_info=True)
-                    dic['res_id'] = None
+                tmodel = dic['name'].split(',')[0]
+                if '.' in res_id:
+                    tmodule, tname = res_id.split('.', 1)
+                else:
+                    tmodule = False
+                    tname = res_id
+                dic['imd_model'] = tmodel
+                dic['imd_name'] = tname
+                dic['module'] = tmodule
+                dic['res_id'] = None
 
            irt_cursor.push(dic)
diff --git a/openerp/tools/yaml_import.py b/openerp/tools/yaml_import.py
index 14b857cd658..7e1c7152b0f 100644
--- a/openerp/tools/yaml_import.py
+++ b/openerp/tools/yaml_import.py
@@ -313,13 +313,13 @@ class YamlInterpreter(object):
            #context = self.get_context(record, self.eval_context)
            #TOFIX: record.context like {'withoutemployee':True} should pass from self.eval_context. example: test_project.yml in project module
            context = record.context
+            view_info = False
            if view_id:
                varg = view_id
                if view_id is True: varg = False
-                view = model.fields_view_get(self.cr, SUPERUSER_ID, varg, 'form', context)
-                view_id = etree.fromstring(view['arch'].encode('utf-8'))
+                view_info = model.fields_view_get(self.cr, SUPERUSER_ID, varg, 'form', context)
 
-            record_dict = self._create_record(model, fields, view_id, default=default)
+            record_dict = self._create_record(model, fields, view_info, default=default)
            _logger.debug("RECORD_DICT %s" % record_dict)
            id = self.pool.get('ir.model.data')._update(self.cr, SUPERUSER_ID, record.model, \
                    self.module, record_dict, record.id, noupdate=self.isnoupdate(record), mode=self.mode, context=context)
@@ -327,16 +327,18 @@ class YamlInterpreter(object):
            if config.get('import_partial'):
                self.cr.commit()
 
-    def _create_record(self, model, fields, view=False, parent={}, default=True):
-        if view is not False:
-            defaults = default and model._add_missing_default_values(self.cr, SUPERUSER_ID, {}, context=self.context) or {}
-            fg = model.fields_get(self.cr, SUPERUSER_ID, context=self.context)
-        else:
-            defaults = {}
-            fg = {}
-        record_dict = {}
-        fields = fields or {}
-
+    def _create_record(self, model, fields, view_info=False, parent={}, default=True):
+        """This function processes the !record tag in yalm files. It simulates the record creation through an xml
+           view (either specified on the !record tag or the default one for this object), including the calls to
+           on_change() functions.
+           :param model: model instance
+           :param fields: dictonary mapping the field names and their values
+           :param view_info: result of fields_view_get() called on the object
+           :param parent: dictionary containing the values already computed for the parent, in case of one2many fields
+           :param default: if True, the default values must be processed too or not
+           :return: dictionary mapping the field names and their values, ready to use when calling the create() function
+           :rtype: dict
+        """
        def process_val(key, val):
            if fg[key]['type']=='many2one':
                if type(val) in (tuple,list):
@@ -348,63 +350,75 @@ class YamlInterpreter(object):
                val = map(lambda x: (0,0,x), val)
            return val
 
-        # Process all on_change calls
-        nodes = (view is not False) and [view] or []
-        while nodes:
-            el = nodes.pop(0)
-            if el.tag=='field':
-                field_name = el.attrib['name']
-                assert field_name in fg, "The field '%s' is defined in the form view but not on the object '%s'!" % (field_name, model._name)
-                if field_name in fields:
-                    view2 = None
-                    # if the form view is not inline, we call fields_view_get
-                    if (view is not False) and (fg[field_name]['type']=='one2many'):
-                        view2 = view.find("field[@name='%s']/form"%(field_name,))
-                        if not view2:
-                            view2 = self.pool.get(fg[field_name]['relation']).fields_view_get(self.cr, SUPERUSER_ID, False, 'form', self.context)
-                            view2 = etree.fromstring(view2['arch'].encode('utf-8'))
+        view = view_info and etree.fromstring(view_info['arch'].encode('utf-8')) or False
+        fields = fields or {}
+        if view is not False:
+            fg = view_info['fields']
+            # gather the default values on the object. (Can't use `fields´ as parameter instead of {} because we may
+            # have references like `base.main_company´ in the yaml file and it's not compatible with the function)
+            defaults = default and model._add_missing_default_values(self.cr, SUPERUSER_ID, {}, context=self.context) or {}
 
-                    field_value = self._eval_field(model, field_name, fields[field_name], view2, parent=record_dict, default=default)
-                    record_dict[field_name] = field_value
-                    #if (field_name in defaults) and defaults[field_name] == field_value:
-                    #    print '*** You can remove these lines:', field_name, field_value
-                elif (field_name in defaults):
-                    if (field_name not in record_dict):
-                        record_dict[field_name] = process_val(field_name, defaults[field_name])
-                else:
-                    continue
+            # copy the default values in record_dict, only if they are in the view (because that's what the client does)
+            # the other default values will be added later on by the create().
+            record_dict = dict([(key, val) for key, val in defaults.items() if key in fg])
 
-                if not el.attrib.get('on_change', False):
-                    continue
-                match = re.match("([a-z_1-9A-Z]+)\((.*)\)", el.attrib['on_change'])
-                assert match, "Unable to parse the on_change '%s'!" % (el.attrib['on_change'], )
+            # Process all on_change calls
+            nodes = [view]
+            while nodes:
+                el = nodes.pop(0)
+                if el.tag=='field':
+                    field_name = el.attrib['name']
+                    assert field_name in fg, "The field '%s' is defined in the form view but not on the object '%s'!" % (field_name, model._name)
+                    if field_name in fields:
+                        one2many_form_view = None
+                        if (view is not False) and (fg[field_name]['type']=='one2many'):
+                            # for one2many fields, we want to eval them using the inline form view defined on the parent
+                            one2many_form_view = view_info['fields'][field_name]['views'].get('form')
+                            # if the form view is not defined inline, we call fields_view_get()
+                            if not one2many_form_view:
+                                one2many_form_view = self.pool.get(fg[field_name]['relation']).fields_view_get(self.cr, SUPERUSER_ID, False, 'form', self.context)
 
-                # creating the context
-                class parent2(object):
-                    def __init__(self, d):
-                        self.d = d
-                    def __getattr__(self, name):
-                        return self.d.get(name, False)
+                        field_value = self._eval_field(model, field_name, fields[field_name], one2many_form_view or view_info, parent=record_dict, default=default)
+                        record_dict[field_name] = field_value
+                        #if (field_name in defaults) and defaults[field_name] == field_value:
+                        #    print '*** You can remove these lines:', field_name, field_value
 
-                ctx = record_dict.copy()
-                ctx['context'] = self.context
-                ctx['uid'] = 1
-                ctx['parent'] = parent2(parent)
-                for a in fg:
-                    if a not in ctx:
-                        ctx[a]=process_val(a, defaults.get(a, False))
+                    #if field_name has a default value or a value is given in the yaml file, we must call its on_change()
+                    elif field_name not in defaults:
+                        continue
 
-                # Evaluation args
-                args = map(lambda x: eval(x, ctx), match.group(2).split(','))
-                result = getattr(model, match.group(1))(self.cr, SUPERUSER_ID, [], *args)
-                for key, val in (result or {}).get('value', {}).items():
-                    if key not in fields:
-                        assert key in fg, "The returning field '%s' from your on_change call '%s' does not exist on the object '%s'" % (key, match.group(1), model._name)
+                    if not el.attrib.get('on_change', False):
+                        continue
+                    match = re.match("([a-z_1-9A-Z]+)\((.*)\)", el.attrib['on_change'])
+                    assert match, "Unable to parse the on_change '%s'!" % (el.attrib['on_change'], )
+
+                    # creating the context
+                    class parent2(object):
+                        def __init__(self, d):
+                            self.d = d
+                        def __getattr__(self, name):
+                            return self.d.get(name, False)
+
+                    ctx = record_dict.copy()
+                    ctx['context'] = self.context
+                    ctx['uid'] = SUPERUSER_ID
+                    ctx['parent'] = parent2(parent)
+                    for a in fg:
+                        if a not in ctx:
+                            ctx[a]=process_val(a, defaults.get(a, False))
+
+                    # Evaluation args
+                    args = map(lambda x: eval(x, ctx), match.group(2).split(','))
+                    result = getattr(model, match.group(1))(self.cr, SUPERUSER_ID, [], *args)
+                    for key, val in (result or {}).get('value', {}).items():
+                        assert key in fg, "The returning field '%s' from your on_change call '%s' does not exist either on the object '%s', either in the view '%s' used for the creation" % (key, match.group(1), model._name, view_info['name'])
                        record_dict[key] = process_val(key, val)
                        #if (key in fields) and record_dict[key] == process_val(key, val):
                        #    print '*** You can remove these lines:', key, val
-            else:
-                nodes = list(el) + nodes
+                else:
+                    nodes = list(el) + nodes
+        else:
+            record_dict = {}
 
        for field_name, expression in fields.items():
            if field_name in record_dict:
@@ -440,7 +454,7 @@ class YamlInterpreter(object):
    def process_eval(self, node):
        return eval(node.expression, self.eval_context)
 
-    def _eval_field(self, model, field_name, expression, view=False, parent={}, default=True):
+    def _eval_field(self, model, field_name, expression, view_info=False, parent={}, default=True):
        # TODO this should be refactored as something like model.get_field() in bin/osv
        if field_name in model._columns:
            column = model._columns[field_name]
@@ -461,7 +475,7 @@ class YamlInterpreter(object):
            value = self.get_id(expression)
        elif column._type == "one2many":
            other_model = self.get_model(column._obj)
-            value = [(0, 0, self._create_record(other_model, fields, view, parent, default=default)) for fields in expression]
+            value = [(0, 0, self._create_record(other_model, fields, view_info, parent, default=default)) for fields in expression]
        elif column._type == "many2many":
            ids = [self.get_id(xml_id) for xml_id in expression]
            value = [(6, 0, ids)]
@@ -731,15 +745,15 @@ class YamlInterpreter(object):
        res = {'name': node.name, 'url': node.url, 'target': node.target}
 
        id = self.pool.get('ir.model.data')._update(self.cr, SUPERUSER_ID, \
-                "ir.actions.url", self.module, res, node.id, mode=self.mode)
+                "ir.actions.act_url", self.module, res, node.id, mode=self.mode)
        self.id_map[node.id] = int(id)
        # ir_set
        if (not node.menu or eval(node.menu)) and id:
            keyword = node.keyword or 'client_action_multi'
-            value = 'ir.actions.url,%s' % id
+            value = 'ir.actions.act_url,%s' % id
            replace = node.replace or True
            self.pool.get('ir.model.data').ir_set(self.cr, SUPERUSER_ID, 'action', \
-                    keyword, node.url, ["ir.actions.url"], value, replace=replace, \
+                    keyword, node.url, ["ir.actions.act_url"], value, replace=replace, \
                    noupdate=self.isnoupdate(node), isobject=True, xml_id=node.id)
 
    def process_ir_set(self, node):
diff --git a/setup.py b/setup.py
index 18d341039ea..6550392e106 100755
--- a/setup.py
+++ b/setup.py
@@ -117,6 +117,7 @@ setuptools.setup(
      extras_require = {
          'SSL' : ['pyopenssl'],
      },
+      tests_require = ['unittest2'],
      **py2exe_options()
 )
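
For reference, here is a minimal sketch (not part of the patch) of how the new QWeb extractor added to openerp/tools/translate.py could be exercised by hand through Babel's generic extract() entry point, mirroring how trans_generate() now invokes it for *.xml files under static/src/xml. It assumes an importable OpenERP source tree; the template path below is purely a hypothetical example.

# -*- coding: utf-8 -*-
# Sketch only: feed one QWeb template through babel_extract_qweb via Babel.
# Assumes openerp.tools.translate is importable; TEMPLATE is a hypothetical path.
from babel.messages import extract

TEMPLATE = 'addons/web/static/src/xml/base.xml'  # hypothetical example path

fileobj = open(TEMPLATE, 'rb')
try:
    # Babel resolves 'module:function' strings itself, so the same method
    # string used by trans_generate() can be passed directly.
    for extracted in extract.extract('openerp.tools.translate:babel_extract_qweb',
                                     fileobj, keywords={}):
        # older Babel yields (lineno, message, comments); Babel 2.x appends a
        # context element, so only the first three items are unpacked here
        lineno, message, comments = extracted[:3]
        print '%s:%d\t%r' % (TEMPLATE, lineno, message)
finally:
    fileobj.close()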