2009-10-20 10:52:23 +00:00
|
|
|
# -*- coding: utf-8 -*-
|
2006-12-07 13:41:40 +00:00
|
|
|
##############################################################################
|
2010-05-11 14:01:11 +00:00
|
|
|
#
|
2009-11-20 11:57:21 +00:00
|
|
|
# OpenERP, Open Source Management Solution
|
2009-11-27 07:23:48 +00:00
|
|
|
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
|
2008-06-16 07:24:04 +00:00
|
|
|
#
|
2008-11-03 18:27:16 +00:00
|
|
|
# This program is free software: you can redistribute it and/or modify
|
2009-11-27 07:23:48 +00:00
|
|
|
# it under the terms of the GNU Affero General Public License as
|
|
|
|
# published by the Free Software Foundation, either version 3 of the
|
|
|
|
# License, or (at your option) any later version.
|
2006-12-07 13:41:40 +00:00
|
|
|
#
|
2008-11-03 18:27:16 +00:00
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2009-11-27 07:23:48 +00:00
|
|
|
# GNU Affero General Public License for more details.
|
2006-12-07 13:41:40 +00:00
|
|
|
#
|
2009-11-27 07:23:48 +00:00
|
|
|
# You should have received a copy of the GNU Affero General Public License
|
2010-05-11 14:01:11 +00:00
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2006-12-07 13:41:40 +00:00
|
|
|
#
|
2008-11-03 18:27:16 +00:00
|
|
|
##############################################################################
|
2006-12-07 13:41:40 +00:00
|
|
|
|
2010-02-02 07:39:28 +00:00
|
|
|
import codecs
|
|
|
|
import csv
|
|
|
|
import fnmatch
|
|
|
|
import inspect
|
|
|
|
import locale
|
2006-12-07 13:41:40 +00:00
|
|
|
import os
|
2011-04-20 14:14:07 +00:00
|
|
|
import openerp.sql_db as sql_db
|
2010-02-02 07:39:28 +00:00
|
|
|
import re
|
2010-11-23 13:58:35 +00:00
|
|
|
import logging
|
2010-02-02 07:39:28 +00:00
|
|
|
import tarfile
|
|
|
|
import tempfile
|
2010-12-15 11:40:19 +00:00
|
|
|
import threading
|
2012-10-03 13:26:59 +00:00
|
|
|
from babel.messages import extract
|
2014-11-26 16:36:42 +00:00
|
|
|
from collections import defaultdict
|
2010-02-02 07:39:28 +00:00
|
|
|
from datetime import datetime
|
2009-09-17 07:27:12 +00:00
|
|
|
from lxml import etree
|
2014-11-26 16:36:42 +00:00
|
|
|
from os.path import join
|
2010-02-02 07:39:28 +00:00
|
|
|
|
2011-02-09 09:02:31 +00:00
|
|
|
import config
|
2011-02-07 12:57:23 +00:00
|
|
|
import misc
|
|
|
|
from misc import SKIPPED_ELEMENT_TYPES
|
2011-02-09 09:02:31 +00:00
|
|
|
import osutil
|
2013-03-27 11:10:14 +00:00
|
|
|
import openerp
|
2012-08-31 13:53:09 +00:00
|
|
|
from openerp import SUPERUSER_ID
|
2008-10-08 15:28:05 +00:00
|
|
|
|
2012-01-24 14:00:56 +00:00
|
|
|
_logger = logging.getLogger(__name__)

# used to notify web client that these translations should be loaded in the UI
WEB_TRANSLATION_COMMENT = "openerp-web"

# Element tags whose content is never extracted as translatable terms
# (see trans_parse_view and _extract_translatable_qweb_terms below).
SKIPPED_ELEMENTS = ('script', 'style')
|
|
|
|
|
2009-04-07 07:18:45 +00:00
|
|
|
# Mapping of POSIX-style locale codes to the corresponding Windows locale
# names. NOTE(review): no consumer is visible in this chunk -- presumably
# used to translate locale codes before calling locale.setlocale on
# Windows; confirm against the rest of the file before relying on this.
_LOCALE2WIN32 = {
    'af_ZA': 'Afrikaans_South Africa',
    'sq_AL': 'Albanian_Albania',
    'ar_SA': 'Arabic_Saudi Arabia',
    'eu_ES': 'Basque_Spain',
    'be_BY': 'Belarusian_Belarus',
    'bs_BA': 'Bosnian_Bosnia and Herzegovina',
    'bg_BG': 'Bulgarian_Bulgaria',
    'ca_ES': 'Catalan_Spain',
    'hr_HR': 'Croatian_Croatia',
    'zh_CN': 'Chinese_China',
    'zh_TW': 'Chinese_Taiwan',
    'cs_CZ': 'Czech_Czech Republic',
    'da_DK': 'Danish_Denmark',
    'nl_NL': 'Dutch_Netherlands',
    'et_EE': 'Estonian_Estonia',
    'fa_IR': 'Farsi_Iran',
    'ph_PH': 'Filipino_Philippines',
    'fi_FI': 'Finnish_Finland',
    'fr_FR': 'French_France',
    'fr_BE': 'French_France',
    'fr_CH': 'French_France',
    'fr_CA': 'French_France',
    'ga': 'Scottish Gaelic',
    'gl_ES': 'Galician_Spain',
    'ka_GE': 'Georgian_Georgia',
    'de_DE': 'German_Germany',
    'el_GR': 'Greek_Greece',
    'gu': 'Gujarati_India',
    'he_IL': 'Hebrew_Israel',
    'hi_IN': 'Hindi',
    'hu': 'Hungarian_Hungary',
    'is_IS': 'Icelandic_Iceland',
    'id_ID': 'Indonesian_indonesia',
    'it_IT': 'Italian_Italy',
    'ja_JP': 'Japanese_Japan',
    'kn_IN': 'Kannada',
    'km_KH': 'Khmer',
    'ko_KR': 'Korean_Korea',
    'lo_LA': 'Lao_Laos',
    'lt_LT': 'Lithuanian_Lithuania',
    'lat': 'Latvian_Latvia',
    'ml_IN': 'Malayalam_India',
    'mi_NZ': 'Maori',
    'mn': 'Cyrillic_Mongolian',
    'no_NO': 'Norwegian_Norway',
    'nn_NO': 'Norwegian-Nynorsk_Norway',
    'pl': 'Polish_Poland',
    'pt_PT': 'Portuguese_Portugal',
    'pt_BR': 'Portuguese_Brazil',
    'ro_RO': 'Romanian_Romania',
    'ru_RU': 'Russian_Russia',
    'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
    'sk_SK': 'Slovak_Slovakia',
    'sl_SI': 'Slovenian_Slovenia',
    #should find more specific locales for spanish countries,
    #but better than nothing
    'es_AR': 'Spanish_Spain',
    'es_BO': 'Spanish_Spain',
    'es_CL': 'Spanish_Spain',
    'es_CO': 'Spanish_Spain',
    'es_CR': 'Spanish_Spain',
    'es_DO': 'Spanish_Spain',
    'es_EC': 'Spanish_Spain',
    'es_ES': 'Spanish_Spain',
    'es_GT': 'Spanish_Spain',
    'es_HN': 'Spanish_Spain',
    'es_MX': 'Spanish_Spain',
    'es_NI': 'Spanish_Spain',
    'es_PA': 'Spanish_Spain',
    'es_PE': 'Spanish_Spain',
    'es_PR': 'Spanish_Spain',
    'es_PY': 'Spanish_Spain',
    'es_SV': 'Spanish_Spain',
    'es_UY': 'Spanish_Spain',
    'es_VE': 'Spanish_Spain',
    'sv_SE': 'Swedish_Sweden',
    # NOTE(review): Tamil deliberately(?) maps to an English locale here --
    # confirm this is the intended fallback, it looks like a workaround.
    'ta_IN': 'English_Australia',
    'th_TH': 'Thai_Thailand',
    'tr_TR': 'Turkish_Turkey',
    'uk_UA': 'Ukrainian_Ukraine',
    'vi_VN': 'Vietnamese_Viet Nam',
    'tlh_TLH': 'Klingon',

}
|
|
|
|
|
2014-12-04 12:09:15 +00:00
|
|
|
# These are not all english small words, just those that could potentially be isolated within views
# NOTE(review): no consumer visible in this chunk -- presumably used to
# filter out untranslatable one-word terms during extraction; confirm.
ENGLISH_SMALL_WORDS = set("as at by do go if in me no of ok on or to up us we".split())
|
|
|
|
|
2007-10-24 06:14:05 +00:00
|
|
|
|
2008-07-08 08:13:12 +00:00
|
|
|
class UNIX_LINE_TERMINATOR(csv.excel):
    """csv dialect identical to ``excel`` but forcing Unix line endings."""
    lineterminator = '\n'

# register so writers can be created with csv.writer(buffer, 'UNIX')
csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
|
2007-10-24 06:14:05 +00:00
|
|
|
|
2006-12-07 13:41:40 +00:00
|
|
|
#
# Warning: better use self.pool.get('ir.translation')._get_source if you can
#
def translate(cr, name, source_type, lang, source=None):
    """Fetch one translation value straight from ``ir_translation``.

    :param cr: database cursor (raw SQL, bypasses the ORM cache)
    :param name: translation record name (e.g. xml id / field path); cast
                 to ``str`` before being passed to the query
    :param source_type: value of the ``type`` column to match
    :param lang: language code to match
    :param source: optional source term; when given together with ``name``
                   both must match
    :return: the translated value, or False when no row matches
    """
    if source and name:
        cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s and src=%s', (lang, source_type, str(name), source))
    elif name:
        cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s', (lang, source_type, str(name)))
    elif source:
        cr.execute('select value from ir_translation where lang=%s and type=%s and src=%s', (lang, source_type, source))
    # NOTE(review): if both name and source are falsy, no query is executed
    # and fetchone() reads from whatever the cursor last executed -- looks
    # unintended; callers apparently always pass at least one of them.
    res_trans = cr.fetchone()
    res = res_trans and res_trans[0] or False
    return res
|
2008-06-17 20:17:23 +00:00
|
|
|
|
2008-07-15 08:40:23 +00:00
|
|
|
class GettextAlias(object):
    """Callable translation helper, used module-wide as ``_('term')``.

    The database cursor, user id and language are discovered by inspecting
    the *caller's* stack frame.  If any piece of context is missing or an
    error occurs, the source term is returned untranslated -- translation
    is strictly best-effort here.
    """

    def _get_db(self):
        """Return a db connection for the current thread, or None."""
        # find current DB based on thread/worker db name (see netsvc)
        db_name = getattr(threading.currentThread(), 'dbname', None)
        if db_name:
            return sql_db.db_connect(db_name)

    def _get_cr(self, frame, allow_create=True):
        """Return ``(cursor, is_new_cursor)`` extracted from ``frame``.

        ``is_new_cursor`` is True only when a fresh cursor was opened here;
        the caller is then responsible for closing it.
        """
        # try, in order: cr, cursor, self.env.cr, self.cr
        if 'cr' in frame.f_locals:
            return frame.f_locals['cr'], False
        if 'cursor' in frame.f_locals:
            return frame.f_locals['cursor'], False
        s = frame.f_locals.get('self')
        if hasattr(s, 'env'):
            return s.env.cr, False
        if hasattr(s, 'cr'):
            return s.cr, False
        if allow_create:
            # create a new cursor
            db = self._get_db()
            if db is not None:
                return db.cursor(), True
        return None, False

    def _get_uid(self, frame):
        """Return the user id found in the caller's frame locals."""
        # try, in order: uid, user, self.env.uid
        if 'uid' in frame.f_locals:
            return frame.f_locals['uid']
        if 'user' in frame.f_locals:
            return int(frame.f_locals['user'])  # user may be a record
        s = frame.f_locals.get('self')
        # NOTE(review): raises AttributeError when none of the above is
        # present; __call__ swallows the exception and skips translation.
        return s.env.uid

    def _get_lang(self, frame):
        """Best-effort detection of the caller's language code (or None)."""
        # try, in order: context.get('lang'), kwargs['context'].get('lang'),
        # self.env.lang, self.localcontext.get('lang')
        lang = None
        if frame.f_locals.get('context'):
            lang = frame.f_locals['context'].get('lang')
        if not lang:
            kwargs = frame.f_locals.get('kwargs', {})
            if kwargs.get('context'):
                lang = kwargs['context'].get('lang')
        if not lang:
            s = frame.f_locals.get('self')
            if hasattr(s, 'env'):
                lang = s.env.lang
            if not lang:
                if hasattr(s, 'localcontext'):
                    lang = s.localcontext.get('lang')
            if not lang:
                # Last resort: attempt to guess the language of the user
                # Pitfall: some operations are performed in sudo mode, and we
                #          don't know the originial uid, so the language may
                #          be wrong when the admin language differs.
                pool = getattr(s, 'pool', None)
                (cr, dummy) = self._get_cr(frame, allow_create=False)
                uid = self._get_uid(frame)
                if pool and cr and uid:
                    lang = pool['res.users'].context_get(cr, uid)['lang']
        return lang

    def __call__(self, source):
        """Translate ``source`` using the caller's context; never raises."""
        res = source
        cr = None
        is_new_cr = False
        try:
            frame = inspect.currentframe()
            if frame is None:
                return source
            # inspect the *caller's* frame, not our own
            frame = frame.f_back
            if not frame:
                return source
            lang = self._get_lang(frame)
            if lang:
                cr, is_new_cr = self._get_cr(frame)
                if cr:
                    # Try to use ir.translation to benefit from global cache if possible
                    registry = openerp.registry(cr.dbname)
                    res = registry['ir.translation']._get_source(cr, SUPERUSER_ID, None, ('code','sql_constraint'), lang, source)
                else:
                    _logger.debug('no context cursor detected, skipping translation for "%r"', source)
            else:
                _logger.debug('no translation language detected, skipping translation for "%r" ', source)
        except Exception:
            # deliberately broad: a failed lookup must never break the caller
            _logger.debug('translation went wrong for "%r", skipped', source)
            # if so, double-check the root/base translations filenames
        finally:
            # only close cursors we opened ourselves
            if cr and is_new_cr:
                cr.close()
        return res
|
2008-07-15 08:40:23 +00:00
|
|
|
|
|
|
|
# module-level translation marker: code throughout the server does _("...")
_ = GettextAlias()
|
2006-12-07 13:41:40 +00:00
|
|
|
|
2008-07-17 10:22:36 +00:00
|
|
|
|
2010-11-16 12:38:44 +00:00
|
|
|
def quote(s):
    """Returns quoted PO term string, with special PO characters escaped"""
    assert r"\n" not in s, "Translation terms may not include escaped newlines ('\\n'), please use only literal newlines! (in '%s')" % s
    # escape backslashes first, then quotes, then split literal newlines
    # across PO continuation lines
    escaped = s.replace('\\', '\\\\')
    escaped = escaped.replace('"', '\\"')
    escaped = escaped.replace('\n', '\\n"\n"')
    return '"%s"' % escaped
|
|
|
|
|
|
|
|
# matches one backslash-escaped character in a PO string, e.g. \n or \"
re_escaped_char = re.compile(r"(\\.)")
# escape letters with a special meaning; anything else unescapes to itself
re_escaped_replacements = {'n': '\n', }

def _sub_replacement(match_obj):
    """Map one escape sequence (e.g. ``\\n``) to its literal character."""
    ch = match_obj.group(1)[1]
    return re_escaped_replacements.get(ch, ch)

def unquote(text):
    """Returns unquoted PO term string, with special PO characters unescaped

    :param text: a quoted PO string, including its surrounding double quotes
    """
    # fix: parameter renamed from `str`, which shadowed the builtin
    return re_escaped_char.sub(_sub_replacement, text[1:-1])
|
|
|
|
|
2008-07-17 10:22:36 +00:00
|
|
|
# class to handle po files
class TinyPoFile(object):
    """Reader/writer for gettext PO files over a file-like ``buffer``.

    Reading: iterate the instance to get one tuple per translation entry:
    ``(type, name, res_id, source, translation, comments)``.  A single PO
    entry with several ``#:`` references yields several tuples.
    Writing: call :meth:`write_infos` once, then :meth:`write` per entry.
    """
    def __init__(self, buffer):
        # file-like object; must support seek/readlines for reading,
        # write for writing
        self.buffer = buffer

    def warn(self, msg, *args):
        """Log a warning about a malformed or incomplete entry."""
        _logger.warning(msg, *args)

    def __iter__(self):
        self.buffer.seek(0)
        self.lines = self._get_lines()
        self.lines_count = len(self.lines)

        # `first` marks the special leading msgid ""/msgstr header entry
        self.first = True
        # queue of extra entries generated by multi-target "#:" comments
        self.extra_lines= []
        return self

    def _get_lines(self):
        lines = self.buffer.readlines()
        # remove the BOM (Byte Order Mark):
        if len(lines):
            lines[0] = unicode(lines[0], 'utf8').lstrip(unicode( codecs.BOM_UTF8, "utf8"))

        lines.append('') # ensure that the file ends with at least an empty line
        return lines

    def cur_line(self):
        # current 0-based position = consumed lines so far
        return self.lines_count - len(self.lines)

    def next(self):
        """Return the next translation tuple (Python 2 iterator protocol).

        NOTE(review): a Python 3 port would also need ``__next__``.
        """
        trans_type = name = res_id = source = trad = None
        if self.extra_lines:
            # flush entries queued from a previous multi-target PO entry
            trans_type, name, res_id, source, trad, comments = self.extra_lines.pop(0)
            if not res_id:
                res_id = '0'
        else:
            comments = []
            targets = []
            line = None
            fuzzy = False
            # skip blank lines until the next entry starts
            while not line:
                if 0 == len(self.lines):
                    raise StopIteration()
                line = self.lines.pop(0).strip()
            # consume the comment header of the entry
            while line.startswith('#'):
                if line.startswith('#~ '):
                    # deprecated entry marker, handled below
                    break
                if line.startswith('#.'):
                    line = line[2:].strip()
                    # "#. module:" lines are bookkeeping, not comments
                    if not line.startswith('module:'):
                        comments.append(line)
                elif line.startswith('#:'):
                    # Process the `reference` comments. Each line can specify
                    # multiple targets (e.g. model, view, code, selection,
                    # ...). For each target, we will return an additional
                    # entry.
                    for lpart in line[2:].strip().split(' '):
                        trans_info = lpart.strip().split(':',2)
                        if trans_info and len(trans_info) == 2:
                            # looks like the translation trans_type is missing, which is not
                            # unexpected because it is not a GetText standard. Default: 'code'
                            trans_info[:0] = ['code']
                        if trans_info and len(trans_info) == 3:
                            # this is a ref line holding the destination info (model, field, record)
                            targets.append(trans_info)
                elif line.startswith('#,') and (line[2:].strip() == 'fuzzy'):
                    fuzzy = True
                line = self.lines.pop(0).strip()
            while not line:
                # allow empty lines between comments and msgid
                line = self.lines.pop(0).strip()
            if line.startswith('#~ '):
                # skip the whole deprecated ("obsolete") entry
                while line.startswith('#~ ') or not line.strip():
                    if 0 == len(self.lines):
                        raise StopIteration()
                    line = self.lines.pop(0)
                # This has been a deprecated entry, don't return anything
                return self.next()

            if not line.startswith('msgid'):
                raise Exception("malformed file: bad line: %s" % line)
            source = unquote(line[6:])
            line = self.lines.pop(0).strip()
            if not source and self.first:
                self.first = False
                # if the source is "" and it's the first msgid, it's the special
                # msgstr with the informations about the traduction and the
                # traductor; we skip it
                self.extra_lines = []
                while line:
                    line = self.lines.pop(0).strip()
                return self.next()

            # accumulate multi-line msgid continuation strings
            while not line.startswith('msgstr'):
                if not line:
                    raise Exception('malformed file at %d'% self.cur_line())
                source += unquote(line)
                line = self.lines.pop(0).strip()

            trad = unquote(line[7:])
            line = self.lines.pop(0).strip()
            # accumulate multi-line msgstr continuation strings
            while line:
                trad += unquote(line)
                line = self.lines.pop(0).strip()

            if targets and not fuzzy:
                # Use the first target for the current entry (returned at the
                # end of this next() call), and keep the others to generate
                # additional entries (returned the next next() calls).
                trans_type, name, res_id = targets.pop(0)
                for t, n, r in targets:
                    # duplicate 'code' refs for the same term carry no info
                    if t == trans_type == 'code': continue
                    self.extra_lines.append((t, n, r, source, trad, comments))

        if name is None:
            # entry without any usable "#:" reference: warn and skip it
            if not fuzzy:
                self.warn('Missing "#:" formated comment at line %d for the following source:\n\t%s',
                        self.cur_line(), source[:30])
            return self.next()
        return trans_type, name, res_id, source, trad, '\n'.join(comments)

    def write_infos(self, modules):
        """Write the standard PO header (the empty-msgid metadata entry)."""
        import openerp.release as release
        self.buffer.write("# Translation of %(project)s.\n" \
                          "# This file contains the translation of the following modules:\n" \
                          "%(modules)s" \
                          "#\n" \
                          "msgid \"\"\n" \
                          "msgstr \"\"\n" \
                          '''"Project-Id-Version: %(project)s %(version)s\\n"\n''' \
                          '''"Report-Msgid-Bugs-To: \\n"\n''' \
                          '''"POT-Creation-Date: %(now)s\\n"\n''' \
                          '''"PO-Revision-Date: %(now)s\\n"\n''' \
                          '''"Last-Translator: <>\\n"\n''' \
                          '''"Language-Team: \\n"\n''' \
                          '''"MIME-Version: 1.0\\n"\n''' \
                          '''"Content-Type: text/plain; charset=UTF-8\\n"\n''' \
                          '''"Content-Transfer-Encoding: \\n"\n''' \
                          '''"Plural-Forms: \\n"\n''' \
                          "\n"

                          % { 'project': release.description,
                              'version': release.version,
                              'modules': reduce(lambda s, m: s + "#\t* %s\n" % m, modules, ""),
                              'now': datetime.utcnow().strftime('%Y-%m-%d %H:%M')+"+0000",
                            }
                          )

    def write(self, modules, tnrs, source, trad, comments=None):
        """Write one PO entry.

        :param modules: module names sharing this term
        :param tnrs: iterable of ``(type, name, res_id)`` references
        :param source: source term (str or unicode)
        :param trad: translated term (str or unicode)
        :param comments: optional extractor comments ("#." lines)
        """

        plurial = len(modules) > 1 and 's' or ''
        self.buffer.write("#. module%s: %s\n" % (plurial, ', '.join(modules)))

        if comments:
            self.buffer.write(''.join(('#. %s\n' % c for c in comments)))

        code = False
        for typy, name, res_id in tnrs:
            self.buffer.write("#: %s:%s:%s\n" % (typy, name, res_id))
            if typy == 'code':
                code = True

        if code:
            # only strings in python code are python formated
            self.buffer.write("#, python-format\n")

        if not isinstance(trad, unicode):
            trad = unicode(trad, 'utf8')
        if not isinstance(source, unicode):
            source = unicode(source, 'utf8')

        msg = "msgid %s\n" \
              "msgstr %s\n\n" \
              % (quote(source), quote(trad))
        self.buffer.write(msg.encode('utf8'))
|
2008-07-17 10:22:36 +00:00
|
|
|
|
|
|
|
|
2006-12-07 13:41:40 +00:00
|
|
|
# Methods to export the translation file

def trans_export(lang, modules, buffer, format, cr):
    """Export the translations of ``modules`` into ``buffer``.

    :param lang: language code to export, or falsy for a template (.pot)
    :param modules: module names (may contain 'all' / 'all_installed',
                    handled downstream by trans_generate)
    :param buffer: writable file-like object receiving the output
    :param format: one of 'csv', 'po', 'tgz'
    :param cr: database cursor, passed to trans_generate
    """

    def _process(format, modules, rows, buffer, lang):
        # Serialize `rows` into `buffer` in the requested format.
        if format == 'csv':
            writer = csv.writer(buffer, 'UNIX')
            # write header first
            writer.writerow(("module","type","name","res_id","src","value"))
            for module, type, name, res_id, src, trad, comments in rows:
                # Comments are ignored by the CSV writer
                writer.writerow((module, type, name, res_id, src, trad))

        elif format == 'po':
            writer = TinyPoFile(buffer)
            writer.write_infos(modules)

            # we now group the translations by source. That means one translation per source.
            grouped_rows = {}
            for module, type, name, res_id, src, trad, comments in rows:
                row = grouped_rows.setdefault(src, {})
                row.setdefault('modules', set()).add(module)
                # keep the first non-trivial translation for this source
                if not row.get('translation') and trad != src:
                    row['translation'] = trad
                row.setdefault('tnrs', []).append((type, name, res_id))
                row.setdefault('comments', set()).update(comments)

            # sorted for a stable, diffable output order
            for src, row in sorted(grouped_rows.items()):
                if not lang:
                    # translation template, so no translation value
                    row['translation'] = ''
                elif not row.get('translation'):
                    # untranslated term: fall back to the source itself
                    row['translation'] = src
                writer.write(row['modules'], row['tnrs'], src, row['translation'], row['comments'])

        elif format == 'tgz':
            # one PO(T) file per module, laid out as <module>/i18n/<name>,
            # then tar-gzipped into `buffer`
            rows_by_module = {}
            for row in rows:
                module = row[0]
                rows_by_module.setdefault(module, []).append(row)
            # NOTE(review): tmpdir is never removed -- temp files leak on
            # every tgz export; consider shutil.rmtree in a finally block.
            tmpdir = tempfile.mkdtemp()
            for mod, modrows in rows_by_module.items():
                tmpmoddir = join(tmpdir, mod, 'i18n')
                os.makedirs(tmpmoddir)
                # templates get a .pot extension and are named after the module
                pofilename = (lang if lang else mod) + ".po" + ('t' if not lang else '')
                buf = file(join(tmpmoddir, pofilename), 'w')
                _process('po', [mod], modrows, buf, lang)
                buf.close()

            tar = tarfile.open(fileobj=buffer, mode='w|gz')
            tar.add(tmpdir, '')
            tar.close()

        else:
            raise Exception(_('Unrecognized extension: must be one of '
                '.csv, .po, or .tgz (received .%s).' % format))

    translations = trans_generate(lang, modules, cr)
    # narrow the module list to those that actually produced terms
    modules = set(t[0] for t in translations)
    _process(format, modules, translations, buffer, lang)
    del translations
|
2008-10-08 15:28:05 +00:00
|
|
|
|
2006-12-07 13:41:40 +00:00
|
|
|
def trans_parse_xsl(de):
    """Return the unique translatable terms found in an XSL document."""
    terms = trans_parse_xsl_aux(de, False)
    return list(set(terms))
|
|
|
|
|
2012-10-08 11:09:46 +00:00
|
|
|
def trans_parse_xsl_aux(de, t):
    """Recursive worker for trans_parse_xsl.

    Collects text/tail of elements once a ``t`` attribute has been seen on
    the path from the root (the flag is sticky for the rest of a sibling
    run), skipping comment-like nodes and XSL-namespace elements.
    """
    terms = []

    for node in de:
        # sticky flag: once a node carries t="...", following content counts
        t = t or node.get("t")
        if t:
            if isinstance(node, SKIPPED_ELEMENT_TYPES) or node.tag.startswith('{http://www.w3.org/1999/XSL/Transform}'):
                continue
            # harvest both the element text and the trailing tail text
            for chunk in (node.text, node.tail):
                if chunk:
                    cleaned = chunk.strip().replace('\n', ' ')
                    if cleaned:
                        terms.append(cleaned.encode("utf8"))
        terms.extend(trans_parse_xsl_aux(node, t))
    return terms
|
2006-12-07 13:41:40 +00:00
|
|
|
|
|
|
|
def trans_parse_rml(de):
    """Collect translatable strings from an RML report document.

    Text is read from grandchild nodes; ``[[...]]`` expression chunks are
    treated as separators, not content.
    """
    terms = []
    for node in de:
        for child in node:
            if isinstance(child, SKIPPED_ELEMENT_TYPES) or not child.text:
                continue
            # split around [[...]] placeholders and keep the literal parts
            for part in re.split('\[\[.+?\]\]', child.text):
                cleaned = part.replace('\n', ' ').strip()
                if cleaned:
                    terms.append(cleaned.encode("utf8"))
        terms.extend(trans_parse_rml(node))
    return terms
|
2006-12-07 13:41:40 +00:00
|
|
|
|
2014-08-13 09:08:02 +00:00
|
|
|
def _push(callback, term, source_line):
|
|
|
|
""" Sanity check before pushing translation terms """
|
|
|
|
term = (term or "").strip().encode('utf8')
|
|
|
|
# Avoid non-char tokens like ':' '...' '.00' etc.
|
|
|
|
if len(term) > 8 or any(x.isalpha() for x in term):
|
|
|
|
callback(term, source_line)
|
|
|
|
|
|
|
|
def trans_parse_view(element, callback):
    """Walk an etree view document and report every translatable term.

    Invokes ``callback(term, source_line)`` for each element text, tail
    and translatable attribute found anywhere under ``element``.

    :param ElementTree element: root of etree document to extract terms from
    :param callable callback: a callable in the form ``f(term, source_line)``,
        that will be called for each extracted term.
    """
    for node in element.iter():
        has_translatable_text = (not isinstance(node, SKIPPED_ELEMENT_TYPES)
                                 and node.tag.lower() not in SKIPPED_ELEMENTS
                                 and node.text)
        if has_translatable_text:
            _push(callback, node.text, node.sourceline)
        if node.tail:
            _push(callback, node.tail, node.sourceline)
        for attr_name in ('string', 'help', 'sum', 'confirm', 'placeholder'):
            attr_value = node.get(attr_name)
            if attr_value:
                _push(callback, attr_value, node.sourceline)
|
2006-12-07 13:41:40 +00:00
|
|
|
|
|
|
|
# tests whether an object is in a list of modules
def in_modules(object_name, modules):
    """Return True when ``object_name`` belongs to one of ``modules``.

    The module is taken as the first dotted component of the object name;
    a few technical prefixes ('ir', 'res', 'workflow') alias to 'base'.
    The special module list entry 'all' matches everything.
    """
    if 'all' in modules:
        return True

    aliases = {
        'ir': 'base',
        'res': 'base',
        'workflow': 'base',
    }
    prefix = object_name.split('.')[0]
    return aliases.get(prefix, prefix) in modules
|
2006-12-07 13:41:40 +00:00
|
|
|
|
2014-08-13 09:08:02 +00:00
|
|
|
def _extract_translatable_qweb_terms(element, callback):
    """Walk a QWeb template and report every translatable term.

    Invokes ``callback(term, source_line)`` for the text, selected
    attributes and tail of each eligible element, recursing into children.

    :param etree._Element element: root of etree document to extract terms from
    :param Callable callback: a callable in the form ``f(term, source_line)``,
        that will be called for each extracted term.
    """
    # not using elementTree.iterparse because we need to skip sub-trees in case
    # the ancestor element had a reason to be skipped
    for el in element:
        if isinstance(el, SKIPPED_ELEMENT_TYPES):
            continue
        eligible = (el.tag.lower() not in SKIPPED_ELEMENTS
                    and "t-js" not in el.attrib
                    and not ("t-jquery" in el.attrib and "t-operation" not in el.attrib)
                    and el.get("t-translation", '').strip() != "off")
        if eligible:
            _push(callback, el.text, el.sourceline)
            for attr_name in ('title', 'alt', 'label', 'placeholder'):
                if attr_name in el.attrib:
                    _push(callback, el.attrib[attr_name], el.sourceline)
            # recurse only into eligible subtrees
            _extract_translatable_qweb_terms(el, callback)
        # tail text belongs to the parent's flow and is always considered
        _push(callback, el.tail, el.sourceline)
|
2012-10-03 13:26:59 +00:00
|
|
|
|
|
|
|
def babel_extract_qweb(fileobj, keywords, comment_tags, options):
    """Babel message extractor for qweb template files.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should
                     be recognized as translation functions
    :param comment_tags: a list of translator tags to search for and
                         include in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)``
             tuples
    :rtype: Iterable
    """
    messages = []

    def collect(term, lineno):
        # babel expects (lineno, funcname, message, comments)
        messages.append((lineno, None, term, []))

    document = etree.parse(fileobj)
    _extract_translatable_qweb_terms(document.getroot(), collect)
    return messages
|
|
|
|
|
2011-01-05 14:07:58 +00:00
|
|
|
def trans_generate(lang, modules, cr):
    """Collect every translatable term for ``modules`` and return them.

    Terms are harvested from several places: ``ir_model_data`` records
    (views, field definitions, reports, translatable model fields),
    model constraint messages, and the modules' source files (Python,
    mako, javascript and QWeb XML) through Babel extractors.

    :param lang: language code to translate to; when falsy, the returned
                 translation values are empty strings
    :param modules: list of module names; may contain 'all' or
                    'all_installed' to select every (installed) module
    :param cr: database cursor
    :return: list of ``(module, type, name, res_id, source, translation,
             comments)`` tuples, sorted by module then source
    """
    dbname = cr.dbname

    registry = openerp.registry(dbname)
    trans_obj = registry['ir.translation']
    model_data_obj = registry['ir.model.data']
    # all ORM access below is done as the superuser
    uid = 1

    # records to scan for translatable content
    query = 'SELECT name, model, res_id, module' \
            ' FROM ir_model_data'

    # models to scan for constraint messages
    query_models = """SELECT m.id, m.model, imd.module
            FROM ir_model AS m, ir_model_data AS imd
            WHERE m.id = imd.res_id AND imd.model = 'ir.model' """

    if 'all_installed' in modules:
        query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') '
        query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') "
    query_param = None
    if 'all' not in modules:
        query += ' WHERE module IN %s'
        query_models += ' AND imd.module in %s'
        query_param = (tuple(modules),)
    query += ' ORDER BY module, model, name'
    query_models += ' ORDER BY module, model'

    cr.execute(query, query_param)

    # set of (module, source, name, res_id, type, comments) tuples;
    # a set is used so terms found through several paths are de-duplicated
    _to_translate = set()
    def push_translation(module, type, name, id, source, comments=None):
        # empty and one-letter terms are ignored, they probably are not meant to be
        # translated, and would be very hard to translate anyway.
        if not source or len(source.strip()) <= 1:
            return

        tnx = (module, source, name, id, type, tuple(comments or ()))
        _to_translate.add(tnx)

    def encode(s):
        # terms are accumulated utf-8 encoded (this is Python 2 code)
        if isinstance(s, unicode):
            return s.encode('utf8')
        return s

    def push(mod, type, name, res_id, term):
        # like push_translation() but also drops 1-2 character terms,
        # unless they are whitelisted English words
        term = (term or '').strip()
        if len(term) > 2 or term in ENGLISH_SMALL_WORDS:
            push_translation(mod, type, name, res_id, term)

    def get_root_view(xml_id):
        # climb the view inheritance chain up to the primary (root) view
        # and return its fully-qualified xml id
        view = model_data_obj.xmlid_to_object(cr, uid, xml_id)
        if view:
            while view.mode != 'primary':
                view = view.inherit_id
        xml_id = view.get_external_id(cr, uid).get(view.id, xml_id)
        return xml_id

    for (xml_name,model,res_id,module) in cr.fetchall():
        module = encode(module)
        model = encode(model)
        xml_name = "%s.%s" % (module, encode(xml_name))

        if model not in registry:
            _logger.error("Unable to find object %r", model)
            continue

        Model = registry[model]
        if not Model._translate:
            # explicitly disabled
            continue

        obj = Model.browse(cr, uid, res_id)
        if not obj.exists():
            _logger.warning("Unable to find object %r with id %d", model, res_id)
            continue

        if model=='ir.ui.view':
            d = etree.XML(encode(obj.arch))
            if obj.type == 'qweb':
                # qweb terms are attached to the root view of the
                # inheritance chain, under the 'website' pseudo-name
                view_id = get_root_view(xml_name)
                push_qweb = lambda t,l: push(module, 'view', 'website', view_id, t)
                _extract_translatable_qweb_terms(d, push_qweb)
            else:
                push_view = lambda t,l: push(module, 'view', obj.model, xml_name, t)
                trans_parse_view(d, push_view)
        elif model=='ir.actions.wizard':
            pass # TODO Can model really be 'ir.actions.wizard' ?

        elif model=='ir.model.fields':
            try:
                field_name = encode(obj.name)
            except AttributeError, exc:
                _logger.error("name error in %s: %s", xml_name, str(exc))
                continue
            objmodel = registry.get(obj.model)
            if (objmodel is None or field_name not in objmodel._columns
                    or not objmodel._translate):
                continue
            field_def = objmodel._columns[field_name]

            # field label and tooltip
            name = "%s,%s" % (encode(obj.model), field_name)
            push_translation(module, 'field', name, 0, encode(field_def.string))

            if field_def.help:
                push_translation(module, 'help', name, 0, encode(field_def.help))

            if field_def.translate:
                # translatable field: export the current value of every
                # record that does not have an xml id of its own
                ids = objmodel.search(cr, uid, [])
                obj_values = objmodel.read(cr, uid, ids, [field_name])
                for obj_value in obj_values:
                    res_id = obj_value['id']
                    if obj.name in ('ir.model', 'ir.ui.menu'):
                        res_id = 0
                    model_data_ids = model_data_obj.search(cr, uid, [
                        ('model', '=', model),
                        ('res_id', '=', res_id),
                        ])
                    if not model_data_ids:
                        push_translation(module, 'model', name, 0, encode(obj_value[field_name]))

            if hasattr(field_def, 'selection') and isinstance(field_def.selection, (list, tuple)):
                for dummy, val in field_def.selection:
                    push_translation(module, 'selection', name, 0, encode(val))

        elif model=='ir.actions.report.xml':
            name = encode(obj.report_name)
            fname = ""
            if obj.report_rml:
                fname = obj.report_rml
                parse_func = trans_parse_rml
                report_type = "report"
            elif obj.report_xsl:
                fname = obj.report_xsl
                parse_func = trans_parse_xsl
                report_type = "xsl"
            if fname and obj.report_type in ('pdf', 'xsl'):
                try:
                    report_file = misc.file_open(fname)
                    try:
                        d = etree.parse(report_file)
                        for t in parse_func(d.iter()):
                            push_translation(module, report_type, name, 0, t)
                    finally:
                        report_file.close()
                except (IOError, etree.XMLSyntaxError):
                    _logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)

        # finally, export the values of the record's own translatable fields
        for field_name, field_def in obj._columns.items():
            if model == 'ir.model' and field_name == 'name' and obj.name == obj.model:
                # ignore model name if it is the technical one, nothing to translate
                continue
            if field_def.translate:
                name = model + "," + field_name
                try:
                    term = obj[field_name] or ''
                except:
                    term = ''
                push_translation(module, 'model', name, xml_name, encode(term))

        # End of data for ir.model.data query results

    cr.execute(query_models, query_param)

    def push_constraint_msg(module, term_type, model, msg):
        # callable messages cannot be exported as static terms
        if not hasattr(msg, '__call__'):
            push_translation(encode(module), term_type, encode(model), 0, encode(msg))

    def push_local_constraints(module, model, cons_type='sql_constraints'):
        """Climb up the class hierarchy and ignore inherited constraints
        from other modules"""
        term_type = 'sql_constraint' if cons_type == 'sql_constraints' else 'constraint'
        # position of the message inside the constraint tuple differs per kind
        msg_pos = 2 if cons_type == 'sql_constraints' else 1
        for cls in model.__class__.__mro__:
            if getattr(cls, '_module', None) != module:
                continue
            constraints = getattr(cls, '_local_' + cons_type, [])
            for constraint in constraints:
                push_constraint_msg(module, term_type, model._name, constraint[msg_pos])

    for (_, model, module) in cr.fetchall():
        if model not in registry:
            _logger.error("Unable to find object %r", model)
            continue

        model_obj = registry[model]

        if model_obj._constraints:
            push_local_constraints(module, model_obj, 'constraints')

        if model_obj._sql_constraints:
            push_local_constraints(module, model_obj, 'sql_constraints')

    installed_modules = map(
        lambda m: m['name'],
        registry['ir.module.module'].search_read(cr, uid, [('state', '=', 'installed')], fields=['name']))

    path_list = list(openerp.modules.module.ad_paths)
    # Also scan these non-addon paths
    for bin_path in ['osv', 'report' ]:
        path_list.append(os.path.join(config.config['root_path'], bin_path))

    _logger.debug("Scanning modules at paths: %s", path_list)

    def get_module_from_path(path):
        # deduce the module name from the first path component below an
        # addons path; anything else is attributed to 'base'
        for mp in path_list:
            if path.startswith(mp) and os.path.dirname(path) != mp:
                path = path[len(mp)+1:]
                return path.split(os.path.sep)[0]
        return 'base' # files that are not in a module are considered as being in 'base' module

    def verified_module_filepaths(fname, path, root):
        # return (module, absolute, relative, display) when the file
        # belongs to a selected, installed module; four Nones otherwise
        fabsolutepath = join(root, fname)
        frelativepath = fabsolutepath[len(path):]
        display_path = "addons%s" % frelativepath
        module = get_module_from_path(fabsolutepath)
        if ('all' in modules or module in modules) and module in installed_modules:
            return module, fabsolutepath, frelativepath, display_path
        return None, None, None, None

    def babel_extract_terms(fname, path, root, extract_method="python", trans_type='code',
                            extra_comments=None, extract_keywords={'_': None}):
        # run a Babel extractor over one source file and push each term
        module, fabsolutepath, _, display_path = verified_module_filepaths(fname, path, root)
        extra_comments = extra_comments or []
        if not module: return
        src_file = open(fabsolutepath, 'r')
        try:
            for extracted in extract.extract(extract_method, src_file,
                                             keywords=extract_keywords):
                # Babel 0.9.6 yields lineno, message, comments
                # Babel 1.3 yields lineno, message, comments, context
                lineno, message, comments = extracted[:3]
                push_translation(module, trans_type, display_path, lineno,
                                 encode(message), comments + extra_comments)
        except Exception:
            _logger.exception("Failed to extract terms from %s", fabsolutepath)
        finally:
            src_file.close()

    for path in path_list:
        _logger.debug("Scanning files of modules at %s", path)
        for root, dummy, files in osutil.walksymlinks(path):
            for fname in fnmatch.filter(files, '*.py'):
                babel_extract_terms(fname, path, root)
            # mako provides a babel extractor: http://docs.makotemplates.org/en/latest/usage.html#babel
            for fname in fnmatch.filter(files, '*.mako'):
                babel_extract_terms(fname, path, root, 'mako', trans_type='report')
            # Javascript source files in the static/src/js directory, rest is ignored (libs)
            if fnmatch.fnmatch(root, '*/static/src/js*'):
                for fname in fnmatch.filter(files, '*.js'):
                    babel_extract_terms(fname, path, root, 'javascript',
                                        extra_comments=[WEB_TRANSLATION_COMMENT],
                                        extract_keywords={'_t': None, '_lt': None})
            # QWeb template files
            if fnmatch.fnmatch(root, '*/static/src/xml*'):
                for fname in fnmatch.filter(files, '*.xml'):
                    babel_extract_terms(fname, path, root, 'openerp.tools.translate:babel_extract_qweb',
                                        extra_comments=[WEB_TRANSLATION_COMMENT])

    out = []
    # translate strings marked as to be translated
    for module, source, name, id, type, comments in sorted(_to_translate):
        trans = '' if not lang else trans_obj._get_source(cr, uid, name, type, lang, source)
        out.append((module, type, name, id, source, encode(trans) or '', comments))
    return out
|
2006-12-07 13:41:40 +00:00
|
|
|
|
2012-09-13 14:29:20 +00:00
|
|
|
def trans_load(cr, filename, lang, verbose=True, module_name=None, context=None):
    """Load the translation file ``filename`` into the ir_translation table.

    The file format is deduced from the extension ('.po' or '.csv').

    :param cr: database cursor
    :param filename: path of the translation file, resolvable through
                     ``misc.file_open``
    :param lang: code of the language being loaded
    :param verbose: when True, log failures to read the file
    :param module_name: forwarded to trans_load_data
    :param context: forwarded to trans_load_data
    :return: the result of trans_load_data, or None when the file could
             not be read
    """
    try:
        fileobj = misc.file_open(filename)
        _logger.info("loading %s", filename)
        fileformat = os.path.splitext(filename)[-1][1:].lower()
        try:
            return trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, module_name=module_name, context=context)
        finally:
            # Close in a finally block: the original code leaked the file
            # handle whenever trans_load_data raised an exception.
            fileobj.close()
    except IOError:
        if verbose:
            _logger.error("couldn't read translation file %s", filename)
        return None
|
2006-12-07 13:41:40 +00:00
|
|
|
|
2012-07-12 05:21:11 +00:00
|
|
|
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, module_name=None, context=None):
    """Populates the ir_translation table.

    :param cr: database cursor
    :param fileobj: seekable file-like object holding the translations
                    (the function rewinds it with ``seek(0)``)
    :param fileformat: either 'csv' or 'po'; anything else raises
    :param lang: code of the target language; the language record is
                 created (with locale data) when it does not exist yet
    :param lang_name: optional display name used when creating the language
    :param verbose: when True, log progress messages
    :param module_name: module attributed to terms with a numeric res_id
    :param context: optional context dict, forwarded to the import cursor
    """
    if verbose:
        _logger.info('loading translation file for language %s', lang)
    if context is None:
        context = {}
    db_name = cr.dbname
    registry = openerp.registry(db_name)
    lang_obj = registry.get('res.lang')
    trans_obj = registry.get('ir.translation')
    iso_lang = misc.get_iso_codes(lang)
    try:
        ids = lang_obj.search(cr, SUPERUSER_ID, [('code','=', lang)])

        if not ids:
            # lets create the language with locale information
            lang_obj.load_lang(cr, SUPERUSER_ID, lang=lang, lang_name=lang_name)

        # Parse also the POT: it will possibly provide additional targets.
        # (Because the POT comments are correct on Launchpad but not the
        # PO comments due to a Launchpad limitation. See LP bug 933496.)
        pot_reader = []

        # now, the serious things: we read the language file
        fileobj.seek(0)
        if fileformat == 'csv':
            reader = csv.reader(fileobj, quotechar='"', delimiter=',')
            # read the first line of the file (it contains columns titles)
            for row in reader:
                fields = row
                break
        elif fileformat == 'po':
            reader = TinyPoFile(fileobj)
            fields = ['type', 'name', 'res_id', 'src', 'value', 'comments']

            # Make a reader for the POT file and be somewhat defensive for the
            # stable branch.
            if fileobj.name.endswith('.po'):
                try:
                    # Normally the path looks like /path/to/xxx/i18n/lang.po
                    # and we try to find the corresponding
                    # /path/to/xxx/i18n/xxx.pot file.
                    # (Sometimes we have 'i18n_extra' instead of just 'i18n')
                    addons_module_i18n, _ = os.path.split(fileobj.name)
                    addons_module, i18n_dir = os.path.split(addons_module_i18n)
                    addons, module = os.path.split(addons_module)
                    pot_handle = misc.file_open(os.path.join(
                        addons, module, i18n_dir, module + '.pot'))
                    pot_reader = TinyPoFile(pot_handle)
                except:
                    # best effort: the POT file is optional
                    pass

        else:
            _logger.error('Bad file format: %s', fileformat)
            raise Exception(_('Bad file format'))

        # Read the POT references, and keep them indexed by source string.
        class Target(object):
            # value: translation found in the PO file for this source term
            # targets: remaining (type, name, res_id) triples from the POT
            # comments: comments attached to the POT entry
            def __init__(self):
                self.value = None
                self.targets = set() # set of (type, name, res_id)
                self.comments = None

        pot_targets = defaultdict(Target)
        for type, name, res_id, src, _, comments in pot_reader:
            if type is not None:
                target = pot_targets[src]
                target.targets.add((type, name, res_id))
                target.comments = comments

        # read the rest of the file
        irt_cursor = trans_obj._get_import_cursor(cr, SUPERUSER_ID, context=context)

        def process_row(row):
            """Process a single PO (or POT) entry."""
            # dictionary which holds values for this line of the csv file
            # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
            #  'src': ..., 'value': ..., 'module':...}
            dic = dict.fromkeys(('type', 'name', 'res_id', 'src', 'value',
                                 'comments', 'imd_model', 'imd_name', 'module'))
            dic['lang'] = lang
            dic.update(zip(fields, row))

            # discard the target from the POT targets.
            src = dic['src']
            if src in pot_targets:
                target = pot_targets[src]
                target.value = dic['value']
                target.targets.discard((dic['type'], dic['name'], dic['res_id']))

            # This would skip terms that fail to specify a res_id
            res_id = dic['res_id']
            if not res_id:
                return

            if isinstance(res_id, (int, long)) or \
                    (isinstance(res_id, basestring) and res_id.isdigit()):
                dic['res_id'] = int(res_id)
                dic['module'] = module_name
            else:
                # res_id is an xml id
                dic['res_id'] = None
                dic['imd_model'] = dic['name'].split(',')[0]
                if '.' in res_id:
                    dic['module'], dic['imd_name'] = res_id.split('.', 1)
                else:
                    dic['module'], dic['imd_name'] = False, res_id

            irt_cursor.push(dic)

        # First process the entries from the PO file (doing so also fills/removes
        # the entries from the POT file).
        for row in reader:
            process_row(row)

        # Then process the entries implied by the POT file (which is more
        # correct w.r.t. the targets) if some of them remain.
        pot_rows = []
        for src, target in pot_targets.iteritems():
            if target.value:
                for type, name, res_id in target.targets:
                    pot_rows.append((type, name, res_id, src, target.value, target.comments))
        pot_targets.clear()
        for row in pot_rows:
            process_row(row)

        irt_cursor.finish()
        trans_obj.clear_caches()
        if verbose:
            _logger.info("translation file loaded succesfully")

    except IOError:
        filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
        _logger.exception("couldn't read translation file %s", filename)
|
2006-12-07 13:41:40 +00:00
|
|
|
|
2009-02-05 12:51:17 +00:00
|
|
|
def get_locales(lang=None):
    """Yield candidate locale names for *lang*, most preferred first.

    Starts from the UTF-8 variant, then the platform's preferred
    encoding (plus a richer substitute for a few legacy encodings),
    and ends with the bare language code.
    """
    if lang is None:
        lang = locale.getdefaultlocale()[0]

    if os.name == 'nt':
        lang = _LOCALE2WIN32.get(lang, lang)

    def candidates(encoding):
        # Build "lang.encoding", then its normalized spelling when different.
        name = locale._build_localename((lang, encoding))
        yield name
        normalized = locale.normalize(name)
        if normalized != name:
            yield normalized

    # UTF-8 first.
    for name in candidates('utf8'):
        yield name

    preferred = locale.getpreferredencoding()
    if preferred:
        for name in candidates(preferred):
            yield name

        # A few encodings have a superset alternative worth trying too.
        substitute = {
            'latin1': 'latin9',
            'iso-8859-1': 'iso8859-15',
            'cp1252': '1252',
        }.get(preferred.lower())
        if substitute:
            for name in candidates(substitute):
                yield name

    # Last resort: the bare language code.
    yield lang
|
|
|
|
|
|
|
|
|
|
|
|
|
2009-02-04 17:27:00 +00:00
|
|
|
def resetlocale():
    """Reset the process locale, probing candidates from get_locales().

    locale.resetlocale() is bugged with some locales, hence this manual
    probing.  Returns the locale string on success, None when no
    candidate could be applied.
    """
    for candidate in get_locales():
        try:
            result = locale.setlocale(locale.LC_ALL, candidate)
        except locale.Error:
            continue
        return result
|
2009-02-04 17:27:00 +00:00
|
|
|
|
2010-10-20 14:42:48 +00:00
|
|
|
def load_language(cr, lang):
    """Install the translation terms for a language.

    Mainly used to automate language loading at database initialization.

    :param lang: language ISO code with optional _underscore_ and l10n
                 flavor (ex: 'fr', 'fr_BE', but not 'fr-BE')
    :type lang: str
    """
    installer = openerp.registry(cr.dbname)['base.language.install']
    wizard_id = installer.create(cr, SUPERUSER_ID, {'lang': lang})
    installer.lang_install(cr, SUPERUSER_ID, [wizard_id], context=None)
|
2010-10-20 14:42:48 +00:00
|
|
|
|
2008-07-23 15:01:27 +00:00
|
|
|
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
|
|
|