[REM] Disabling outdated tests because hr_evaluation will be totally refactored for v8

bzr revid: rim@openerp.com-20140319122453-lgx7up6idhticj4s
Richard Mathot (OpenERP) 2014-03-19 13:24:53 +01:00
parent 23cb33217b
commit 1e3e03b4e6
3 changed files with 113 additions and 113 deletions

View File

@@ -60,10 +60,10 @@ Key Features
         'hr_evaluation_installer.xml',
     ],
     "demo": ["hr_evaluation_demo.xml"],
-    'test': [
-        'test/test_hr_evaluation.yml',
-        'test/hr_evalution_demo.yml',
-    ],
+    # 'test': [
+    #     'test/test_hr_evaluation.yml',
+    #     'test/hr_evalution_demo.yml',
+    # ],
     'auto_install': False,
     'installable': True,
     'application': True,
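
For context: in OpenERP 7.x the server only runs the YAML files listed under a manifest's 'test' key (and only when tests are enabled at install or update time), so commenting the key out is enough to skip these scenarios without deleting the files. A minimal sketch of the disabled state follows; it presumably belongs in the module's __openerp__.py, and every value except 'demo' and the commented 'test' list is an illustrative assumption rather than the module's real setting:

    # Sketch only, not the full hr_evaluation manifest.
    {
        'name': 'Employee Appraisals',        # assumed label, for illustration
        'depends': ['hr', 'survey'],          # assumed dependencies
        'demo': ['hr_evaluation_demo.xml'],
        # With the 'test' key commented out, the YAML tests are skipped at
        # install/update time; the files stay in the tree for the v8 rework.
        # 'test': [
        #     'test/test_hr_evaluation.yml',
        #     'test/hr_evalution_demo.yml',
        # ],
        'installable': True,
        'application': True,
    }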

View File

@@ -1,6 +1,6 @@
--
-  !record {model: hr.employee, id: hr.employee_fp, view: False}:
-    evaluation_plan_id: hr_evaluation_plan_managersevaluationplan0
--
-  !record {model: hr_evaluation.evaluation, id: hr_evaluation_evaluation_0, view: False}:
-    plan_id: hr_evaluation.hr_evaluation_plan_managersevaluationplan0
+# -
+#   !record {model: hr.employee, id: hr.employee_fp, view: False}:
+#     evaluation_plan_id: hr_evaluation_plan_managersevaluationplan0
+# -
+#   !record {model: hr_evaluation.evaluation, id: hr_evaluation_evaluation_0, view: False}:
+#     plan_id: hr_evaluation.hr_evaluation_plan_managersevaluationplan0
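
For readers who have not seen this YAML test format: a !record stanza creates or updates the record behind the given XML id through the ORM, and view: False skips form-view defaults. The first stanza above is roughly equivalent to the old-API call below; this is a sketch only, assuming the demo data already defines hr.employee_fp and the managers' evaluation plan, with the registry pool, cursor and ref() supplied by the test harness:

    def assign_demo_plan(pool, cr, uid, ref, context=None):
        # Sketch: what the first !record stanza does, i.e. assign the managers'
        # evaluation plan to the demo employee hr.employee_fp.
        pool.get('hr.employee').write(
            cr, uid, [ref('hr.employee_fp')],
            {'evaluation_plan_id':
                ref('hr_evaluation.hr_evaluation_plan_managersevaluationplan0')},
            context=context)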

View File

@@ -1,103 +1,103 @@
--
-  I check that state of "Employee Evaluation" survey is Open.
--
-  !assert {model: survey.survey, id: appraisal_form, severity: error, string: Survey should be in 'open' state}:
-    - state == 'open'
--
-  I start the evaluation process by click on "Start Evaluation" button.
--
-  !python {model: hr_evaluation.evaluation}: |
-    self.button_plan_in_progress(cr, uid, [ref('hr_evaluation_evaluation_0')])
--
-  I check that state is "Plan in progress".
--
-  !assert {model: hr_evaluation.evaluation, id: hr_evaluation_evaluation_0, severity: error, string: Evaluation should be 'Plan in progress' state}:
-    - state == 'wait'
--
-  I find a mistake on evaluation form. So I cancel the evaluation and again start it.
--
-  !python {model: hr_evaluation.evaluation}: |
-    evaluation = self.browse(cr, uid, ref('hr_evaluation_evaluation_0') , context)
-    self.button_cancel(cr, uid, [ref('hr_evaluation_evaluation_0')])
-    assert evaluation.state == 'cancel', 'Evaluation should be in cancel state'
-    self.button_draft(cr, uid, [ref('hr_evaluation_evaluation_0')])
-    evaluation = self.browse(cr, uid, ref('hr_evaluation_evaluation_0') , context)
-    assert evaluation.state == 'draft', 'Evaluation should be in draft state'
-    self.button_plan_in_progress(cr, uid, [ref('hr_evaluation_evaluation_0')])
--
-  I check that state is "Plan in progress" and "Interview Request" record is created
--
-  !python {model: hr_evaluation.evaluation}: |
-    interview_obj = self.pool.get('hr.evaluation.interview')
-    evaluation = self.browse(cr, uid, ref('hr_evaluation_evaluation_0') , context)
-    assert evaluation.state == 'wait', "Evaluation should be 'Plan in progress' state"
-    interview_ids = interview_obj.search(cr, uid, [('evaluation_id','=', ref('hr_evaluation_evaluation_0'))])
-    assert len(interview_ids), "Interview evaluation survey not created"
--
-  Give answer of the first page in "Employee Evaluation" survey.
--
-  !python {model: survey.question.wiz}: |
-    name_wiz_obj=self.pool.get('survey.name.wiz')
-    interview_obj = self.pool.get('hr.evaluation.interview')
-    interview_ids = interview_obj.search(cr, uid, [('evaluation_id','=', ref('hr_evaluation_evaluation_0'))])
-    assert len(interview_ids), "Interview evaluation survey not created"
-    ctx = {'active_model':'hr.evaluation.interview', 'active_id': interview_ids[0], 'active_ids': [interview_ids], 'survey_id': ref("survey_2")}
-    name_id = name_wiz_obj.create(cr, uid, {'survey_id': ref("survey_2")})
-    ctx ["sur_name_id"] = name_id
-    self.create(cr, uid, {str(ref("survey_question_2")) +"_" +str(ref("survey_answer_1")) + "_multi" :'tpa',
-        str(ref("survey_question_2")) +"_" +str(ref("survey_answer_10")) + "_multi" :'application eng',
-        str(ref("survey_question_2")) +"_" +str(ref("survey_answer_20")) + "_multi" :'3',
-        str(ref("survey_question_2")) +"_" +str(ref("survey_answer_25")) + "_multi" :'2011-12-02 16:42:00',
-        str(ref("survey_question_2")) +"_" +str(ref("survey_answer_43")) + "_multi" :'HR',
-        }, context = ctx)
--
-  I close this Evaluation survey by giving answer of questions.
--
-  !python {model: hr_evaluation.evaluation}: |
-    interview_obj = self.pool.get('hr.evaluation.interview')
-    evaluation = self.browse(cr, uid, ref('hr_evaluation_evaluation_0'))
-    interview_obj.survey_req_done(cr, uid, [r.id for r in evaluation.survey_request_ids])
-    for survey in evaluation.survey_request_ids:
-        interview = interview_obj.browse(cr, uid, survey.id, context)
-        assert interview.state == "done", 'survey must be in done state'
--
-  I print the evaluation.
--
-  !python {model: hr_evaluation.evaluation}: |
-    evaluation = self.browse(cr, uid, ref('hr_evaluation_evaluation_0'))
-    self.pool.get('hr.evaluation.interview').action_print_survey(cr, uid, [r.id for r in evaluation.survey_request_ids])
--
-  I click on "Final Validation" button to finalise evaluation.
--
-  !python {model: hr_evaluation.evaluation}: |
-    self.button_final_validation(cr, uid, [ref("hr_evaluation_evaluation_0")])
--
-  I check that state is "Waiting Appreciation".
--
-  !assert {model: hr_evaluation.evaluation, id: hr_evaluation_evaluation_0}:
-    - state == 'progress'
--
-  Give Rating "Meet expectations" by selecting overall Rating.
--
-  !record {model: hr_evaluation.evaluation, id: hr_evaluation_evaluation_0}:
-    rating: '2'
--
-  I close this Evaluation by click on "Done" button of this wizard.
--
-  !python {model: hr_evaluation.evaluation}: |
-    self.button_done(cr, uid, [ref("hr_evaluation_evaluation_0")])
--
-  I check that state of Evaluation is done.
--
-  !assert {model: hr_evaluation.evaluation, id: hr_evaluation_evaluation_0, severity: error, string: Evaluation should be in done state}:
-    - state == 'done'
--
-  Print Evaluations Statistics Report
--
-  !python {model: hr.evaluation.report}: |
-    import os, time
-    from openerp import tools
-    ctx={}
-    data_dict={'state': 'done', 'rating': 2, 'employee_id': ref("hr.employee_fp")}
-    from openerp.tools import test_reports
-    test_reports.try_report_action(cr, uid, 'hr_evaluation_evaluation_0',wiz_data=data_dict, context=ctx, our_module='hr_evaluation')
+# -
+#   I check that state of "Employee Evaluation" survey is Open.
+# -
+#   !assert {model: survey.survey, id: appraisal_form, severity: error, string: Survey should be in 'open' state}:
+#     - state == 'open'
+# -
+#   I start the evaluation process by click on "Start Evaluation" button.
+# -
+#   !python {model: hr_evaluation.evaluation}: |
+#     self.button_plan_in_progress(cr, uid, [ref('hr_evaluation_evaluation_0')])
+# -
+#   I check that state is "Plan in progress".
+# -
+#   !assert {model: hr_evaluation.evaluation, id: hr_evaluation_evaluation_0, severity: error, string: Evaluation should be 'Plan in progress' state}:
+#     - state == 'wait'
+# -
+#   I find a mistake on evaluation form. So I cancel the evaluation and again start it.
+# -
+#   !python {model: hr_evaluation.evaluation}: |
+#     evaluation = self.browse(cr, uid, ref('hr_evaluation_evaluation_0') , context)
+#     self.button_cancel(cr, uid, [ref('hr_evaluation_evaluation_0')])
+#     assert evaluation.state == 'cancel', 'Evaluation should be in cancel state'
+#     self.button_draft(cr, uid, [ref('hr_evaluation_evaluation_0')])
+#     evaluation = self.browse(cr, uid, ref('hr_evaluation_evaluation_0') , context)
+#     assert evaluation.state == 'draft', 'Evaluation should be in draft state'
+#     self.button_plan_in_progress(cr, uid, [ref('hr_evaluation_evaluation_0')])
+# -
+#   I check that state is "Plan in progress" and "Interview Request" record is created
+# -
+#   !python {model: hr_evaluation.evaluation}: |
+#     interview_obj = self.pool.get('hr.evaluation.interview')
+#     evaluation = self.browse(cr, uid, ref('hr_evaluation_evaluation_0') , context)
+#     assert evaluation.state == 'wait', "Evaluation should be 'Plan in progress' state"
+#     interview_ids = interview_obj.search(cr, uid, [('evaluation_id','=', ref('hr_evaluation_evaluation_0'))])
+#     assert len(interview_ids), "Interview evaluation survey not created"
+# -
+#   Give answer of the first page in "Employee Evaluation" survey.
+# -
+#   !python {model: survey.question.wiz}: |
+#     name_wiz_obj=self.pool.get('survey.name.wiz')
+#     interview_obj = self.pool.get('hr.evaluation.interview')
+#     interview_ids = interview_obj.search(cr, uid, [('evaluation_id','=', ref('hr_evaluation_evaluation_0'))])
+#     assert len(interview_ids), "Interview evaluation survey not created"
+#     ctx = {'active_model':'hr.evaluation.interview', 'active_id': interview_ids[0], 'active_ids': [interview_ids], 'survey_id': ref("survey_2")}
+#     name_id = name_wiz_obj.create(cr, uid, {'survey_id': ref("survey_2")})
+#     ctx ["sur_name_id"] = name_id
+#     self.create(cr, uid, {str(ref("survey_question_2")) +"_" +str(ref("survey_answer_1")) + "_multi" :'tpa',
+#         str(ref("survey_question_2")) +"_" +str(ref("survey_answer_10")) + "_multi" :'application eng',
+#         str(ref("survey_question_2")) +"_" +str(ref("survey_answer_20")) + "_multi" :'3',
+#         str(ref("survey_question_2")) +"_" +str(ref("survey_answer_25")) + "_multi" :'2011-12-02 16:42:00',
+#         str(ref("survey_question_2")) +"_" +str(ref("survey_answer_43")) + "_multi" :'HR',
+#         }, context = ctx)
+# -
+#   I close this Evaluation survey by giving answer of questions.
+# -
+#   !python {model: hr_evaluation.evaluation}: |
+#     interview_obj = self.pool.get('hr.evaluation.interview')
+#     evaluation = self.browse(cr, uid, ref('hr_evaluation_evaluation_0'))
+#     interview_obj.survey_req_done(cr, uid, [r.id for r in evaluation.survey_request_ids])
+#     for survey in evaluation.survey_request_ids:
+#         interview = interview_obj.browse(cr, uid, survey.id, context)
+#         assert interview.state == "done", 'survey must be in done state'
+# -
+#   I print the evaluation.
+# -
+#   !python {model: hr_evaluation.evaluation}: |
+#     evaluation = self.browse(cr, uid, ref('hr_evaluation_evaluation_0'))
+#     self.pool.get('hr.evaluation.interview').action_print_survey(cr, uid, [r.id for r in evaluation.survey_request_ids])
+# -
+#   I click on "Final Validation" button to finalise evaluation.
+# -
+#   !python {model: hr_evaluation.evaluation}: |
+#     self.button_final_validation(cr, uid, [ref("hr_evaluation_evaluation_0")])
+# -
+#   I check that state is "Waiting Appreciation".
+# -
+#   !assert {model: hr_evaluation.evaluation, id: hr_evaluation_evaluation_0}:
+#     - state == 'progress'
+# -
+#   Give Rating "Meet expectations" by selecting overall Rating.
+# -
+#   !record {model: hr_evaluation.evaluation, id: hr_evaluation_evaluation_0}:
+#     rating: '2'
+# -
+#   I close this Evaluation by click on "Done" button of this wizard.
+# -
+#   !python {model: hr_evaluation.evaluation}: |
+#     self.button_done(cr, uid, [ref("hr_evaluation_evaluation_0")])
+# -
+#   I check that state of Evaluation is done.
+# -
+#   !assert {model: hr_evaluation.evaluation, id: hr_evaluation_evaluation_0, severity: error, string: Evaluation should be in done state}:
+#     - state == 'done'
+# -
+#   Print Evaluations Statistics Report
+# -
+#   !python {model: hr.evaluation.report}: |
+#     import os, time
+#     from openerp import tools
+#     ctx={}
+#     data_dict={'state': 'done', 'rating': 2, 'employee_id': ref("hr.employee_fp")}
+#     from openerp.tools import test_reports
+#     test_reports.try_report_action(cr, uid, 'hr_evaluation_evaluation_0',wiz_data=data_dict, context=ctx, our_module='hr_evaluation')
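
For context on how the commented scenario used to run: each !python stanza executes against the model named in its header, with self, cr, uid, ref and context injected by the YAML test runner, so the opening steps of the file translate roughly to the old-API Python below. This is a sketch only; it reuses the workflow methods already visible in the diff (button_plan_in_progress, button_cancel, button_draft) and assumes self is the hr_evaluation.evaluation model, as in the YAML headers:

    def run_opening_steps(self, cr, uid, ref, context=None):
        # Sketch: mirrors the first !python stanzas of the disabled test.
        eval_id = ref('hr_evaluation_evaluation_0')

        # "Start Evaluation": the appraisal moves to 'Plan in progress' ('wait').
        self.button_plan_in_progress(cr, uid, [eval_id])
        assert self.browse(cr, uid, eval_id, context).state == 'wait'

        # Cancel, reset to draft, then start again, as the scenario does.
        self.button_cancel(cr, uid, [eval_id])
        self.button_draft(cr, uid, [eval_id])
        self.button_plan_in_progress(cr, uid, [eval_id])

        # Starting the plan should have created the interview request(s).
        interview_obj = self.pool.get('hr.evaluation.interview')
        interview_ids = interview_obj.search(
            cr, uid, [('evaluation_id', '=', eval_id)])
        assert interview_ids, "Interview evaluation survey not created"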