[IMP] tests:
- pass around the assertion_report to the YAML importer - removed TestReport, which was identical to assertion_report - assertion_report is simpler (no more severity level) - use the report to log a greppable sentence when some test failed. Previously the runbot had to grep for a Traceback which was an unreliable technique (e.g. an exception can be purposefully generated as part of a test and the associated traceback visible in the logs). Now it can grep "At least one test failed when loading the modules". bzr revid: vmt@openerp.com-20120302110227-nqrl7i46ju28ntdr
This commit is contained in:
parent
6323111950
commit
dc34d73695
|
@ -50,6 +50,7 @@ import openerp.pooler as pooler
|
|||
import openerp.release as release
|
||||
import openerp.tools as tools
|
||||
import openerp.tools.osutil as osutil
|
||||
import openerp.tools.assertion_report as assertion_report
|
||||
|
||||
from openerp.tools.safe_eval import safe_eval as eval
|
||||
from openerp.tools.translate import _
|
||||
|
@ -97,9 +98,11 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=
|
|||
try:
|
||||
threading.currentThread().testing = True
|
||||
_load_data(cr, module_name, idref, mode, 'test')
|
||||
return True
|
||||
except Exception, e:
|
||||
_logger.exception(
|
||||
'Tests failed to execute in module %s', module_name)
|
||||
_logger.error(
|
||||
'module %s: an exception occured in a test', module_name)
|
||||
return False
|
||||
finally:
|
||||
threading.currentThread().testing = False
|
||||
if tools.config.options['test_commit']:
|
||||
|
@ -132,7 +135,7 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=
|
|||
elif ext == '.sql':
|
||||
process_sql_file(cr, fp)
|
||||
elif ext == '.yml':
|
||||
tools.convert_yaml_import(cr, module_name, fp, idref, mode, noupdate)
|
||||
tools.convert_yaml_import(cr, module_name, fp, idref, mode, noupdate, report)
|
||||
else:
|
||||
tools.convert_xml_import(cr, module_name, fp, idref, mode, noupdate, report)
|
||||
finally:
|
||||
|
@ -201,13 +204,13 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=
|
|||
# 'data' section, but should probably not alter the data,
|
||||
# as there is no rollback.
|
||||
if tools.config.options['test_enable']:
|
||||
load_test(module_name, idref, mode)
|
||||
report.record_result(load_test(module_name, idref, mode))
|
||||
|
||||
# Run the `fast_suite` and `checks` tests given by the module.
|
||||
if module_name == 'base':
|
||||
# Also run the core tests after the dabase is created.
|
||||
openerp.modules.module.run_unit_tests('openerp')
|
||||
openerp.modules.module.run_unit_tests(module_name)
|
||||
# Also run the core tests after the database is created.
|
||||
report.record_result(openerp.modules.module.run_unit_tests('openerp'))
|
||||
report.record_result(openerp.modules.module.run_unit_tests(module_name))
|
||||
|
||||
processed_modules.append(package.name)
|
||||
|
||||
|
@ -288,7 +291,6 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
|
|||
# This is a brand new pool, just created in pooler.get_db_and_pool()
|
||||
pool = pooler.get_pool(cr.dbname)
|
||||
|
||||
report = tools.assertion_report()
|
||||
if 'base' in tools.config['update'] or 'all' in tools.config['update']:
|
||||
cr.execute("update ir_module_module set state=%s where name=%s and state=%s", ('to upgrade', 'base', 'installed'))
|
||||
|
||||
|
@ -301,6 +303,7 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
|
|||
|
||||
# processed_modules: for cleanup step after install
|
||||
# loaded_modules: to avoid double loading
|
||||
report = assertion_report.assertion_report()
|
||||
loaded_modules, processed_modules = load_module_graph(cr, graph, status, perform_checks=(not update_module), report=report)
|
||||
|
||||
if tools.config['load_language']:
|
||||
|
@ -420,7 +423,10 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
|
|||
cr.execute("update ir_module_module set state=%s where state=%s", ('uninstalled', 'to remove',))
|
||||
cr.commit()
|
||||
|
||||
_logger.info('Modules loaded.')
|
||||
if report.failures:
|
||||
_logger.error('At least one test failed when loading the modules.')
|
||||
else:
|
||||
_logger.info('Modules loaded.')
|
||||
finally:
|
||||
cr.close()
|
||||
|
||||
|
|
|
@ -549,6 +549,10 @@ def get_test_modules(module, submodule, explode):
|
|||
return ms
|
||||
|
||||
def run_unit_tests(module_name):
|
||||
"""
|
||||
Return True or False if some tests were found and succeeded or failed.
|
||||
Return None if no test was found.
|
||||
"""
|
||||
import unittest2
|
||||
ms = get_test_modules(module_name, '__fast_suite__', explode=False)
|
||||
ms.extend(get_test_modules(module_name, '__sanity_checks__', explode=False))
|
||||
|
@ -573,7 +577,10 @@ def run_unit_tests(module_name):
|
|||
first = False
|
||||
_logger.log(logging.TEST, c)
|
||||
result = unittest2.TextTestRunner(verbosity=2, stream=MyStream()).run(suite)
|
||||
if not result.wasSuccessful():
|
||||
if result.wasSuccessful():
|
||||
return True
|
||||
else:
|
||||
_logger.error('module %s: at least one error occured in a test', module_name)
|
||||
return False
|
||||
|
||||
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
||||
|
|
|
@ -0,0 +1,29 @@
|
|||
|
||||
class assertion_report(object):
    """Tally of test outcomes as a pair of counters.

    Simple pair of success and failures counts (used to record YAML and XML
    `assert` tags as well as unittest2 tests outcome (in this case, not
    individual `assert`)).
    """
    def __init__(self):
        # Both counters start at zero and only ever increase.
        self.successes = 0
        self.failures = 0

    def record_success(self):
        """Count one more successful test/assertion."""
        self.successes += 1

    def record_failure(self):
        """Count one more failed test/assertion."""
        self.failures += 1

    def record_result(self, result):
        """Dispatch a tri-state outcome.

        ``True`` counts a success, ``False`` a failure, and ``None``
        (no test was run) records nothing.
        """
        if result is True:
            self.record_success()
        elif result is False:
            self.record_failure()
        # result is None (or anything else): nothing to record.

    def __str__(self):
        return 'Assertions report: %s successes, %s failures' % (
            self.successes, self.failures)
|
||||
|
|
@ -30,6 +30,8 @@ import re
|
|||
import time
|
||||
import openerp.release as release
|
||||
|
||||
import assertion_report
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
|
@ -200,35 +202,6 @@ escape_re = re.compile(r'(?<!\\)/')
|
|||
def escape(x):
|
||||
return x.replace('\\/', '/')
|
||||
|
||||
class assertion_report(object):
    """Per-severity tally of assertion outcomes (success/failure counts)."""
    def __init__(self):
        # Maps severity -> {True: success count, False: failure count}.
        self._report = {}

    def record_assertion(self, success, severity):
        """
        Records the result of an assertion for the failed/success count
        returns success
        """
        counts = self._report.setdefault(severity, {True: 0, False: 0})
        counts[success] += 1
        return success

    def get_report(self):
        """Return the raw severity -> counts mapping."""
        return self._report

    def __str__(self):
        success = failed = 0
        parts = ['\nAssertions report:\nLevel\tsuccess\tfailed\n']
        for sev, counts in self._report.items():
            parts.append('%s\t%s\t%s\n' % (sev, counts[True], counts[False]))
            success += counts[True]
            failed += counts[False]
        parts.append('total\t%s\t%s\n' % (success, failed))
        parts.append('end of report (%s assertion(s) checked)' % (success + failed))
        return ''.join(parts)
|
||||
|
||||
class xml_import(object):
|
||||
@staticmethod
|
||||
def nodeattr2bool(node, attr, default=False):
|
||||
|
@ -727,7 +700,7 @@ form: module.record_id""" % (xml_id,)
|
|||
if rec_src_count:
|
||||
count = int(rec_src_count)
|
||||
if len(ids) != count:
|
||||
self.assert_report.record_assertion(False, severity)
|
||||
self.assertion_report.record_failure()
|
||||
msg = 'assertion "%s" failed!\n' \
|
||||
' Incorrect search count:\n' \
|
||||
' expected count: %d\n' \
|
||||
|
@ -759,7 +732,7 @@ form: module.record_id""" % (xml_id,)
|
|||
expected_value = _eval_xml(self, test, self.pool, cr, uid, self.idref, context=context) or True
|
||||
expression_value = unsafe_eval(f_expr, globals_dict)
|
||||
if expression_value != expected_value: # assertion failed
|
||||
self.assert_report.record_assertion(False, severity)
|
||||
self.assertion_report.record_failure()
|
||||
msg = 'assertion "%s" failed!\n' \
|
||||
' xmltag: %s\n' \
|
||||
' expected value: %r\n' \
|
||||
|
@ -772,7 +745,7 @@ form: module.record_id""" % (xml_id,)
|
|||
raise Exception('Severe assertion failure')
|
||||
return
|
||||
else: # all tests were successful for this assertion tag (no break)
|
||||
self.assert_report.record_assertion(True, severity)
|
||||
self.assertion_report.record_success()
|
||||
|
||||
def _tag_record(self, cr, rec, data_node=None):
|
||||
rec_model = rec.get("model").encode('ascii')
|
||||
|
@ -906,8 +879,8 @@ form: module.record_id""" % (xml_id,)
|
|||
self.pool = pooler.get_pool(cr.dbname)
|
||||
self.uid = 1
|
||||
if report is None:
|
||||
report = assertion_report()
|
||||
self.assert_report = report
|
||||
report = assertion_report.assertion_report()
|
||||
self.assertion_report = report
|
||||
self.noupdate = noupdate
|
||||
self._tags = {
|
||||
'menuitem': self._tag_menuitem,
|
||||
|
|
|
@ -19,6 +19,8 @@ from lxml import etree
|
|||
unsafe_eval = eval
|
||||
from safe_eval import safe_eval as eval
|
||||
|
||||
import assertion_report
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
class YamlImportException(Exception):
|
||||
|
@ -85,33 +87,6 @@ def is_ir_set(node):
|
|||
def is_string(node):
|
||||
return isinstance(node, basestring)
|
||||
|
||||
class TestReport(object):
    """Per-severity tally of YAML test assertion outcomes."""
    def __init__(self):
        # severity -> {True: success count, False: failure count}
        self._report = {}

    def record(self, success, severity):
        """
        Records the result of an assertion for the failed/success count.
        Returns success.
        """
        if severity not in self._report:
            self._report[severity] = {True: 0, False: 0}
        self._report[severity][success] += 1
        return success

    def __str__(self):
        success = failure = 0
        out = '\nAssertions report:\nLevel\tsuccess\tfailure'
        for severity, counts in self._report.items():
            out += "\n%s\t%s\t%s" % (severity, counts[True], counts[False])
            success += counts[True]
            failure += counts[False]
        out += "\ntotal\t%s\t%s" % (success, failure)
        out += "\nend of report (%s assertion(s) checked)" % (success + failure)
        return out
|
||||
|
||||
class RecordDictWrapper(dict):
|
||||
"""
|
||||
Used to pass a record as locals in eval:
|
||||
|
@ -125,13 +100,15 @@ class RecordDictWrapper(dict):
|
|||
return dict.__getitem__(self, key)
|
||||
|
||||
class YamlInterpreter(object):
|
||||
def __init__(self, cr, module, id_map, mode, filename, noupdate=False):
|
||||
def __init__(self, cr, module, id_map, mode, filename, report=None, noupdate=False):
|
||||
self.cr = cr
|
||||
self.module = module
|
||||
self.id_map = id_map
|
||||
self.mode = mode
|
||||
self.filename = filename
|
||||
self.assert_report = TestReport()
|
||||
if report is None:
|
||||
report = assertion_report.assertion_report()
|
||||
self.assertion_report = report
|
||||
self.noupdate = noupdate
|
||||
self.pool = pooler.get_pool(cr.dbname)
|
||||
self.uid = 1
|
||||
|
@ -217,7 +194,7 @@ class YamlInterpreter(object):
|
|||
else:
|
||||
level = severity
|
||||
levelname = logging.getLevelName(level)
|
||||
self.assert_report.record(False, levelname)
|
||||
self.assertion_report.record_failure()
|
||||
_logger.log(level, msg, *args)
|
||||
if level >= config['assert_exit_level']:
|
||||
raise YamlImportAbortion('Severe assertion failure (%s), aborting.' % levelname)
|
||||
|
@ -286,7 +263,7 @@ class YamlInterpreter(object):
|
|||
self._log_assert_failure(assertion.severity, msg, *args)
|
||||
return
|
||||
else: # all tests were successful for this assertion tag (no break)
|
||||
self.assert_report.record(True, assertion.severity)
|
||||
self.assertion_report.record_success()
|
||||
|
||||
def _coerce_bool(self, value, default=False):
|
||||
if isinstance(value, types.BooleanType):
|
||||
|
@ -534,7 +511,7 @@ class YamlInterpreter(object):
|
|||
_logger.debug('Exception during evaluation of !python block in yaml_file %s.', self.filename, exc_info=True)
|
||||
raise
|
||||
else:
|
||||
self.assert_report.record(True, python.severity)
|
||||
self.assertion_report.record_success()
|
||||
|
||||
def process_workflow(self, node):
|
||||
workflow, values = node.items()[0]
|
||||
|
@ -900,11 +877,11 @@ class YamlInterpreter(object):
|
|||
is_preceded_by_comment = False
|
||||
return is_preceded_by_comment
|
||||
|
||||
def yaml_import(cr, module, yamlfile, idref=None, mode='init', noupdate=False):
|
||||
def yaml_import(cr, module, yamlfile, idref=None, mode='init', noupdate=False, report=None):
|
||||
if idref is None:
|
||||
idref = {}
|
||||
yaml_string = yamlfile.read()
|
||||
yaml_interpreter = YamlInterpreter(cr, module, idref, mode, filename=yamlfile.name, noupdate=noupdate)
|
||||
yaml_interpreter = YamlInterpreter(cr, module, idref, mode, filename=yamlfile.name, report=report, noupdate=noupdate)
|
||||
yaml_interpreter.process(yaml_string)
|
||||
|
||||
# keeps convention of convert.py
|
||||
|
|
Loading…
Reference in New Issue