[FIX] module_quality: trivial logging, multi-addons paths, fix exceptions, use separate db cursors.

Log some of the exceptions that occur, so that we can understand why our
tests fail.
Run each test on its own cursor, so that an SQL error in one test does not break the whole run.

bzr revid: hmo@tinyerp.com-20100907095055-9sa11rnvir8y7npw
P. Christeas 2010-09-07 15:20:55 +05:30 committed by Harry (OpenERP)
parent 389ce144e2
commit 49f8d24d4a
4 changed files with 47 additions and 33 deletions
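
The heart of the patch is the cursor isolation visible in the first file below: every quality step now runs on a cursor of its own, obtained from the pooled database, and is rolled back and closed when the step finishes, so one failing SQL statement can no longer abort the shared transaction used by the remaining steps. A minimal sketch of that pattern, assuming an OpenERP 6.0 server where the pooler module is importable (run_steps and the step callables are illustrative names, not part of the patch):

    import logging
    import pooler

    log = logging.getLogger('module.quality')

    def run_steps(cr, uid, steps):
        # One fresh cursor per step: a failure only rolls back that
        # step's own transaction, the caller's cursor stays usable.
        for step in steps:
            cr2 = pooler.get_db(cr.dbname).cursor()
            try:
                step(cr2, uid)          # illustrative per-step callable
                log.info('Finished step %s', getattr(step, '__name__', step))
            except Exception:
                log.exception('Could not finish step %s', getattr(step, '__name__', step))
            finally:
                # The patch rolls back unconditionally: a quality test
                # must never commit anything to the database.
                cr2.rollback()
                cr2.close()

Rolling back even when a step succeeds mirrors the patch below, where the finally block always calls rollback() before close().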

View File

@ -21,13 +21,10 @@
import os
import pooler
import osv
import tools
from tools import config
from tools.translate import _
from osv import osv, fields
import logging
import addons
class abstract_quality_check(object):
'''
This Class is abstract class for all test
@ -86,12 +83,10 @@ class abstract_quality_check(object):
#The tests have to subscribe itselfs in this list, that contains
#all the test that have to be performed.
self.tests = []
self.list_folders = os.listdir(config['addons_path'] +
'/base_module_quality/')
for item in self.list_folders:
self.item = item
path = config['addons_path']+'/base_module_quality/'+item
if os.path.exists(path + '/' + item + '.py') and item not in ['report', 'wizard', 'security']:
module_path = addons.get_module_path('base_module_quality')
for item in os.listdir(module_path):
path = module_path + '/' + item
if os.path.isdir(path) and os.path.exists(path + '/' + item + '.py') and item not in ['report', 'wizard', 'security']:
item2 = 'base_module_quality.' + item +'.' + item
x_module = __import__(item2)
x_file = getattr(x_module, item)
@ -216,6 +211,7 @@ class module_quality_check(osv.osv):
So here the detail result is in html format and summary will be in text_wiki format.
'''
pool = pooler.get_pool(cr.dbname)
log = logging.getLogger('module.quality')
obj_module = pool.get('ir.module.module')
if not module_state:
module_id = obj_module.search(cr, uid, [('name', '=', module_name)])
@ -226,15 +222,22 @@ class module_quality_check(osv.osv):
score_sum = 0.0
ponderation_sum = 0.0
create_ids = []
module_path = addons.get_module_path(module_name)
log.info('Performing quality tests for %s', module_name)
for test in abstract_obj.tests:
ad = tools.config['addons_path']
if module_name == 'base':
ad = tools.config['root_path']+'/addons'
module_path = os.path.join(ad, module_name)
val = test.quality_test()
if val.active:
if not val.active:
log.info('Skipping inactive step %s for %s', val.name, module_name)
continue
log.info('Performing step %s for %s', val.name, module_name)
# Get a separate cursor per test, so that an SQL error in one
# will not block the others.
cr2 = pooler.get_db(cr.dbname).cursor()
try:
if not val.bool_installed_only or module_state == "installed":
val.run_test(cr, uid, str(module_path))
val.run_test(cr2, uid, str(module_path))
if not val.error:
data = {
'name': val.name,
@ -266,6 +269,12 @@ class module_quality_check(osv.osv):
'summary': _("The module has to be installed before running this test.")
}
create_ids.append((0, 0, data))
log.info('Finished quality test step')
except Exception, e:
log.exception("Could not finish test step %s due to %s", val.name, e)
finally:
cr2.rollback()
cr2.close()
final_score = ponderation_sum and '%.2f' % (score_sum / ponderation_sum * 100) or 0
data = {
'name': module_name,
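
The other change in this file, and in the pylint test further down, is path resolution: instead of gluing strings onto config['addons_path'], the code asks the addons loader for the module's real location, which keeps working when addons_path lists several directories. A rough sketch of the difference, assuming the same OpenERP 6.0 environment (the addons and tools modules are the ones the patch itself imports):

    import os
    import addons
    from tools import config

    # Old style: only correct when addons_path is a single directory.
    old_path = config['addons_path'] + '/base_module_quality'

    # New style: the loader searches every configured addons directory.
    module_path = addons.get_module_path('base_module_quality')
    config_file = addons.get_module_resource(
        'base_module_quality', 'pylint_test', 'pylint_test_config.txt')

    print module_path, os.path.exists(config_file)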

View File

@ -53,21 +53,21 @@ This test checks if the module classes are raising exception when calling basic
pool.get(obj).search(cr, uid, [])
temp.append(_('Ok'))
ok_count += 1
except:
except Exception:
temp.append(_('Exception'))
ex_count += 1
try:
pool.get(obj).fields_view_get(cr, uid,)
temp.append(_('Ok'))
ok_count += 1
except:
except Exception:
temp.append(_('Exception'))
ex_count += 1
try:
pool.get(obj).read(cr, uid, [])
temp.append(_('Ok'))
ok_count += 1
except:
except Exception:
temp.append(_('Exception'))
ex_count += 1
result_dict[obj] = temp
@ -78,6 +78,7 @@ This test checks if the module classes are raising exception when calling basic
return None
def get_result(self, dict_method):
# TODO: improve
header = ('{| border="1" cellspacing="0" cellpadding="5" align="left" \n! %-40s \n! %-16s \n! %-20s \n! %-16s ', [_('Object Name'), 'search()', 'fields_view_get()', 'read()'])
detail = ""
if not self.error:
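
The only behavioural point in this file is the switch from bare except: to except Exception:. Both count the failure, but the bare clause also swallows KeyboardInterrupt and SystemExit, which since Python 2.5 derive from BaseException rather than Exception, so a Ctrl-C during the test run could be silently recorded as an ORM failure. A tiny self-contained illustration (count_failures is just an example name):

    def count_failures(calls):
        failures = 0
        for call in calls:
            try:
                call()
            except Exception:
                # ZeroDivisionError etc. are counted; KeyboardInterrupt
                # and SystemExit still propagate and stop the run.
                failures += 1
        return failures

    print count_failures([lambda: 1 / 0, lambda: None])   # -> 1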

View File

@ -20,8 +20,7 @@
##############################################################################
import os
from tools import config
import addons
from tools.translate import _
from base_module_quality import base_module_quality
@ -36,7 +35,7 @@ class quality_test(base_module_quality.abstract_quality_check):
return None
def run_test(self, cr, uid, module_path):
config_file_path = config['addons_path']+'/base_module_quality/pylint_test/pylint_test_config.txt'
config_file_path = addons.get_module_resource('base_module_quality','pylint_test', 'pylint_test_config.txt')
list_files = os.listdir(module_path)
for i in list_files:
path = os.path.join(module_path, i)
@ -57,10 +56,9 @@ class quality_test(base_module_quality.abstract_quality_check):
try:
import pylint
res = os.popen('pylint --rcfile=' + config_file_path + ' ' + file_path).read()
except:
except Exception, e:
self.error = True
import netsvc
netsvc.Logger().notifyChannel('Pylint:', netsvc.LOG_WARNING, "Is pylint correctly installed? (http://pypi.python.org/pypi/pylint)")
self.log.exception("Cannot run pylint test for %s", file_path)
self.result += _("Error. Is pylint correctly installed? (http://pypi.python.org/pypi/pylint)")+"\n"
return None
count += 1
@ -68,7 +66,8 @@ class quality_test(base_module_quality.abstract_quality_check):
scr = res.split("Your code has been rated at")[1].split("</div>")[0].split("/")[0]
score += float(scr)
dict_py[file_py] = [file_py, scr]
except:
except Exception:
self.log.warning("Cannot parse pylint result", exc_info=True)
score += 0
dict_py[file_py] = [file_py, _("Unable to parse the result. Check the details.")]
replace_string = ''
@ -103,4 +102,4 @@ class quality_test(base_module_quality.abstract_quality_check):
return self.format_table(header, data_list=dict_py)
return ""
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
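
Besides the resource lookup, this file drops the ad-hoc netsvc.Logger channel in favour of the standard logging module, so the pylint failure is now recorded with a full traceback. A standalone sketch of the two idioms the patch uses, log.exception() and warning(..., exc_info=True), with an artificial error and a placeholder file path standing in for the real pylint run:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger('module.quality.pylint')

    try:
        raise RuntimeError('pylint is not correctly installed')
    except Exception:
        # ERROR-level record with the traceback appended automatically.
        log.exception('Cannot run pylint test for %s', '/tmp/some_file.py')
        # Same traceback, but at WARNING level.
        log.warning('Cannot parse pylint result', exc_info=True)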

View File

@ -32,10 +32,10 @@ class CounterCursor(object):
def reset(self):
self.count = 0
def execute(self, query, params=None):
def execute(self, query, *args, **kwargs):
if query.lower().startswith('select '):
self.count += 1
return self.cr.execute(query, params)
return self.cr.execute(query, *args, **kwargs)
def __getattr__(self, attr):
return getattr(self.cr, attr)
@ -63,7 +63,6 @@ This test checks the speed of the module. Note that at least 5 demo data is need
if obj_list:
cr.execute("select w.res_model from ir_actions_todo as t left join ir_act_window as w on t.action_id=w.id where w.res_model IN %s",(tuple(obj_list),))
res = cr.fetchall()
print res
for remove_obj in res:
if remove_obj and (remove_obj[0] in obj_list):
obj_list.remove(remove_obj[0])
@ -75,10 +74,15 @@ This test checks the speed of the module. Note that at least 5 demo data is need
return None
obj_counter = 0
score = 0.0
obj_ids = self.get_ids(cr, uid, obj_list)
try:
obj_ids = self.get_ids(cr, uid, obj_list)
except Exception,e:
self.log.warning("Cannot get ids:", exc_info=True)
obj_ids= {}
self.result_details += e.message
result_dict = {}
result_dict2 = {}
self.result_details += _("<html>O(1) means that the number of SQL requests to read the object does not depand on the number of objects we are reading. This feature is hardly wished.\n</html>")
self.result_details += _("<html>O(1) means that the number of SQL requests to read the object does not depand on the number of objects we are reading. This feature is mostly wished.\n</html>")
ccr = CounterCursor(cr)
for obj, ids in obj_ids.items():
code_base_complexity = 0
@ -107,6 +111,7 @@ This test checks the speed of the module. Note that at least 5 demo data is need
code_size_complexity = ccr.count
except Exception, e:
self.log.warning('Error in read method', exc_info=True)
list2 = [obj, _("Error in Read method")]
speed_list = [obj, size, code_base_complexity, code_half_complexity, code_size_complexity, _("Error in Read method:" + str(e))]
else:
@ -130,7 +135,7 @@ This test checks the speed of the module. Note that at least 5 demo data is need
result_dict2[obj] = list2
self.score = obj_counter and score / obj_counter or 0.0
if self.score*100 < self.min_score:
self.message = 'Score is below than minimal score(%s%%)' % self.min_score
self.message = _('Score is below than minimal score(%s%%)') % self.min_score
self.result_details += self.get_result_details(result_dict)
self.result += self.get_result(result_dict2)
return None
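
For reference, the O(1) heuristic rests on the CounterCursor wrapper shown at the top of this file: it forwards every attribute to the wrapped cursor but counts SELECT statements, and the widened execute(self, query, *args, **kwargs) signature lets it accept the same extra arguments a real cursor does. A rough standalone sketch of the counting idea, with a fake cursor in place of the database one (all names here are illustrative):

    class CountingCursor(object):
        # Same idea as the patch's CounterCursor: count SELECTs,
        # delegate everything else to the wrapped cursor.
        def __init__(self, cr):
            self.cr = cr
            self.count = 0

        def reset(self):
            self.count = 0

        def execute(self, query, *args, **kwargs):
            if query.lower().startswith('select '):
                self.count += 1
            return self.cr.execute(query, *args, **kwargs)

        def __getattr__(self, attr):
            return getattr(self.cr, attr)


    class FakeCursor(object):
        def execute(self, query, *args, **kwargs):
            return None


    ccr = CountingCursor(FakeCursor())
    for ids in ([1], [1, 2, 3], [1, 2, 3, 4, 5, 6]):
        ccr.reset()
        ccr.execute('select name from some_table where id in %s', (tuple(ids),))
        # If the count stays constant while len(ids) grows, reads are O(1).
        print len(ids), '->', ccr.count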