[IMP] survey/test: improve yml code

bzr revid: rco@openerp.com-20111220160342-2i2m0gx9k2v0grk7
Raphael Collet 2011-12-20 17:03:42 +01:00
parent 7ecdf32616
commit ab64964840
4 changed files with 19 additions and 20 deletions
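
The changes below follow a common OpenERP yml-test cleanup: a !python block that merely writes a field is replaced by a declarative !record, and state checks are expressed as !assert conditions instead of ad-hoc python. A minimal sketch of the target style, reusing identifiers that appear in this diff (the narrative comment lines are illustrative, as each yml test step conventionally carries one):

    -
      I set the value in "Total start survey" field.
    -
      !record {model: survey, id: survey_Initial_partner_feedback}:
        tot_start_survey: 1
    -
      I check that the survey request is in draft state.
    -
      !assert {model: survey.request, id: survey_request_1, string: Survey Request should be in draft state}:
        - state == 'draft'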

View File

@@ -4,7 +4,9 @@
   !record {model: survey.request, id: survey_request_1}:
     survey_id: survey_Initial_partner_feedback
     user_id: base.user_demo
-    state: draft
 -
+  !assert {model: survey.request, id: survey_request_1, string: Survey Request should be in draft state}:
+    - state == 'draft'
+-
   I set Survey Request for the survey in waiting state.
 -
@@ -45,4 +47,4 @@
   I check that state of Survey request of the survey is done or not.
 -
   !assert {model: survey.request, id: survey_request_1, severity: error, string: Survey Request should be in done state}:
-    - state == 'done'
+    - state == 'done'

View File

@@ -45,10 +45,10 @@
     ctx = {'active_model':'survey', 'active_id': ref('survey_Initial_partner_feedback'), 'active_ids': [ref('survey_Initial_partner_feedback')]}
     self.fields_view_get(cr, uid, ref("survey.view_survey_question_message"),"form", context=ctx)
     values = self.default_get(cr, uid, ['name'], ctx)
-    ids = self.create(cr, uid, {str(ref("survey_initial_question_company_name")) +"_single" :'Tiny' , str(ref("survey_initial_question_company_size")) + "_selection" : int(ref("survey.survey_initial_question_company_size_51")), }, context)
-    self.action_next(cr, uid, [ids], context)
-    ids = self.create(cr, uid, {str(ref("survey_initial_question_contract_customers")) + "_selection" : int(ref("survey_initial_answer_sometimes")), str(ref("survey_initial_question_sell_to_your_customers")) + "_selection" : int(ref("survey_initial_answer_maintenance_contract")), }, context)
-    self.action_next(cr, uid, [ids], context)
+    id = self.create(cr, uid, {str(ref("survey_initial_question_company_name")) +"_single" :'Tiny' , str(ref("survey_initial_question_company_size")) + "_selection" : int(ref("survey.survey_initial_question_company_size_51")), }, context)
+    self.action_next(cr, uid, [id], context)
+    id = self.create(cr, uid, {str(ref("survey_initial_question_contract_customers")) + "_selection" : int(ref("survey_initial_answer_sometimes")), str(ref("survey_initial_question_sell_to_your_customers")) + "_selection" : int(ref("survey_initial_answer_maintenance_contract")), }, context)
+    self.action_next(cr, uid, [id], context)
 -
   I print the answers of the survey.
 -
@@ -87,13 +87,13 @@
 -
   I set the value in "Total start survey" field.
 -
-  !python {model: survey}: |
-    ids = self.write(cr, uid, ref("survey_Initial_partner_feedback"), {'tot_start_survey' : 1}, context)
+  !record {model: survey, id: survey_Initial_partner_feedback}:
+    tot_start_survey: 1
 -
   I set the survey in Cancel state.
 -
   !python {model: survey}: |
-    sur = self.survey_cancel(cr, uid, [ref("survey_Initial_partner_feedback")], context)
+    self.survey_cancel(cr, uid, [ref("survey_Initial_partner_feedback")], context)
 -
   I check state of survey is cancel or not.
 -
@@ -103,9 +103,9 @@
   I set the survey in close state.
 -
   !python {model: survey}: |
-    sur = self.survey_close(cr, uid, [ref("survey_Initial_partner_feedback")], context)
+    self.survey_close(cr, uid, [ref("survey_Initial_partner_feedback")], context)
 -
   I check state of Survey is close or not.
 -
   !assert {model: survey, id: survey_Initial_partner_feedback, severity: error, string: Survey should be in close state}:
-    - state == 'close'
+    - state == 'close'

View File

@@ -2,5 +2,5 @@
   I check the question type of the survey "Initial Partner Feedback".
 -
   !python {model: survey.question}: |
-    sur_question = self.on_change_type(cr, uid, [ref("survey_Initial_partner_feedback")], ['multiple_textboxes_diff_type'])
-    assert sur_question
+    sur_question = self.on_change_type(cr, uid, [ref("survey_Initial_partner_feedback")], 'multiple_textboxes_diff_type')
+    assert sur_question

View File

@@ -2,8 +2,7 @@
   I print the Survey Browse Response Report through the wizard.
 -
   !python {model: survey}: |
-    ctx={}
-    ctx.update({'model': 'survey','active_ids': [(6,0,[ref('survey_partner_feedback')])]})
+    ctx = {'model': 'survey','active_ids': [(6,0,[ref('survey_partner_feedback')])]}
     data_dict = {'response_ids' : [(6,0,[ref('survey.survey_partner_feedback')])], 'page_number' : True, 'without_pagebreak': True}
     from tools import test_reports
     test_reports.try_report_action(cr, uid, 'action_view_survey_print_answer',wiz_data=data_dict, context=ctx, our_module='survey')
@@ -12,8 +11,7 @@
   I print the Survey Analysis Report through the wizard.
 -
   !python {model: survey}: |
-    ctx={}
-    ctx.update({'model': 'survey','active_ids': [(6,0,[ref('survey_partner_feedback')])]})
+    ctx = {'model': 'survey','active_ids': [(6,0,[ref('survey_partner_feedback')])]}
     data_dict = {'survey_ids' : [(6,0,[ref('survey.survey_partner_feedback')])]}
     from tools import test_reports
     test_reports.try_report_action(cr, uid, 'action_view_survey_print_statistics',wiz_data=data_dict, context=ctx, our_module='survey')
@@ -22,8 +20,7 @@
   I print the Survey Form Report through the wizard.
 -
   !python {model: survey}: |
-    ctx={}
-    ctx.update({'model': 'survey','active_ids': [(6,0,[ref('survey_partner_feedback')])]})
+    ctx = {'model': 'survey','active_ids': [(6,0,[ref('survey_partner_feedback')])]}
     data_dict = {'survey_ids' : [(6,0,[ref('survey.survey_partner_feedback')])], 'page_number' : True, 'without_pagebreak': True}
     from tools import test_reports
-    test_reports.try_report_action(cr, uid, 'action_view_survey_print',wiz_data=data_dict, context=ctx, our_module='survey')
+    test_reports.try_report_action(cr, uid, 'action_view_survey_print',wiz_data=data_dict, context=ctx, our_module='survey')
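
The file names are not shown in this view, but yml tests like these only run when they are listed under the 'test' key of the module manifest. A hedged sketch of the corresponding survey/__openerp__.py entry, with placeholder file names since the real ones are not visible here:

    # survey/__openerp__.py (excerpt; the yml file names below are placeholders)
    'test': [
        'test/survey_question_type.yml',
        'test/survey_report.yml',
    ],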