Description:
Fixed a test run scoring bug: the run score is now the minimum of each test case's score. git-svn-id: http://theory.cpe.ku.ac.th/grader/judge/trunk/scripts@273 6386c4cd-e34a-4fa8-8920-d93eb39b512e
Commit status:
[Not Reviewed]
References:
Comments:
0 Commit Comments, 0 Inline Comments
Unresolved TODOs:
There are no unresolved TODOs
Add another comment

r66:c03ebf30fb10 - - 4 files changed: 17 inserted, 16 deleted

@@ -6,50 +6,52 @@
6 6 module Grader
7 7
8 8 #
9 9 # A TestRequestRoomMaker is a helper object for Engine
10 10 # - finds grading room: in user_result_dir/(user)/test_request/ ...
11 11 # - prepare problem configuration for grading --- basically it copies
12 12 # all config files, and copies the user's input into the testcase
13 13 # directory. First, it finds the template from problem template
14 14 # directory; if it can't find a template, it'll use the template
15 15 # from default template.
16 16 class TestRequestRoomMaker
17 17 def initialize
18 18 @config = Grader::Configuration.get_instance
19 19 end
20 20
21 21 def produce_grading_room(test_request)
22 22 grading_room = grading_room_dir(test_request)
23 23 FileUtils.mkdir_p(grading_room)
24 24
25 25 #
26 26 # Also copy additional submitted file to this directory as well.
27 27 # The program would see this file only if it is copied
28 28 # to the sandbox directory later. The run script should do it.
29 29 #
30 + if FileTest.exists?("#{test_request.input_file_name}.files")
30 31 cmd = "cp #{test_request.input_file_name}.files/* #{grading_room}"
31 32 system(cmd)
33 + end
32 34
33 35 grading_room
34 36 end
35 37
36 38 def find_problem_home(test_request)
37 39 problem_name = test_request.problem_name
38 40
39 41 template_dir = "#{@config.test_request_problem_templates_dir}/" + problem_name
40 42
41 43 raise "Test Request: error template not found" if !File.exists?(template_dir)
42 44
43 45 problem_home = problem_home_dir(test_request)
44 46 FileUtils.mkdir_p(problem_home)
45 47
46 48 copy_problem_template(template_dir,problem_home)
47 49 link_input_file(test_request,problem_home)
48 50
49 51 problem_home
50 52 end
51 53
52 54 def save_source(test_request,source_name)
53 55 dir = self.produce_grading_room(test_request)
54 56 submission = test_request.submission
55 57 f = File.open("#{dir}/#{source_name}","w")
@@ -25,82 +25,80 @@
25 25 elsif comment =~ /[Tt]ime/
26 26 TIMEOUT_MARK
27 27 elsif res = /^[Cc]omment:(.*)$/.match(comment)
28 28 res[1]
29 29 else
30 30 RUN_ERROR_MARK # these are run time errors
31 31 end
32 32 end
33 33
34 34 problem_home = ENV['PROBLEM_HOME']
35 35 require "#{problem_home}/script/test_dsl.rb"
36 36 load "#{problem_home}/test_cases/all_tests.cfg"
37 37 problem = Problem.get_instance
38 38
39 39 if problem.well_formed? == false
40 40 log "The problem specification is not well formed."
41 41 exit(127)
42 42 end
43 43
44 44 all_score = 0
45 45 all_comment = ''
46 46 (1..(problem.runs.length-1)).each do |k|
47 47 log "grade run #{k}"
48 48 run = problem.runs[k]
49 - run_score = 0
49 + run_score = nil
50 50 run_comment = ''
51 51 run_comment_short = ''
52 52 run.tests.each do |test_num|
53 53 result_file_name = "#{test_num}/result"
54 54 if not File.exists?(result_file_name)
55 55 run_comment += "result file for test #{test_num} not found\n"
56 56 run_comment_short += RUN_ERROR_MARK
57 57 log "Cannot find the file #{test_num}/result!"
58 58 else
59 59 result_file = File.new(result_file_name, "r")
60 60 result_file_lines = result_file.readlines
61 61 if result_file_lines.length>=2
62 - run_score = run_score + result_file_lines[1].to_i
62 + current_run_score = result_file_lines[1].to_i
63 63 run_comment += result_file_lines[0]
64 64 run_comment_short += char_comment(result_file_lines[0].chomp)
65 65 else
66 + current_run_score = 0
66 67 run_comment += "result file for test #{test_num} error\n"
67 68 run_comment_short += RUN_ERROR_MARK
68 69 log "Error in #{test_num}/result!"
69 70 end
71 +
72 + # the score of this run should be the minimum of the score for
73 + # each test case
74 + if (run_score==nil) or (run_score>current_run_score)
75 + run_score = current_run_score
76 + end
70 77 result_file.close
71 78 end
72 79 end
73 80
74 - # find total score for this run
75 - run_total_score = 0
76 - problem = Problem.get_instance
77 - run.tests.each { |test_num| run_total_score += problem.get_score(test_num) }
78 -
79 - if run_total_score!=run_score # fail in some test cases, fail the run
80 - run_score = 0
81 - end
82 -
83 81 run_result_file = File.new("result-#{k}", "w")
84 82 run_result_file.write run_score
85 83 run_result_file.write "\n"
86 84 run_result_file.close
87 85
88 86 run_comment_file = File.new("comment-#{k}", "w")
89 87 run_comment_file.write "#{run_comment}\n"
90 88 run_comment_file.close
91 89
92 90 all_score = all_score + run_score
93 91
94 92 # append comment for test run with many test cases
95 93 if run.tests.length > 1
96 94 run_comment_short = '[' + run_comment_short + ']'
97 95 end
98 96 all_comment += run_comment_short
99 97 end
100 98
101 99 result_file = File.new("result", "w")
102 100 result_file.write all_score
103 101 result_file.write "\n"
104 102 result_file.close
105 103
106 104 comment_file = File.new("comment", "w")
@@ -1,20 +1,20 @@
1 1 problem do
2 2 num_tests <%= num_testcases %>
3 3 full_score <%= num_testruns*10 %>
4 4 time_limit_each <%= options[:time_limit] %>
5 5 mem_limit_each <%= options[:mem_limit] %>
6 6 score_each 10
7 7
8 8 <% tr_num = 0 %>
9 9 <% testrun_info.each do |testrun| %>
10 10 <% tr_num += 1 %>
11 11 run <%= tr_num %> do
12 12 tests <%= (testrun.collect {|testcase| testcase[0]}).join(", ") %>
13 13 <% if testrun.length==1 %>
14 14 scores 10
15 15 <% else %>
16 - scores 10 <% (testrun.length-1).times do %>,0 <% end %>
16 + scores 10 <% (testrun.length-1).times do %>,10 <% end %>
17 17 <% end %>
18 18 end
19 19 <% end %>
20 20 end
@@ -1,39 +1,40 @@
1 1 problem do
2 2 num_tests 10
3 3 full_score 135
4 4 time_limit_each 1
5 5 mem_limit_each 11
6 6 score_each 10
7 7
8 8 run 1 do
9 9 tests 1, 2
10 - scores 10, 20
10 + scores 30, 30
11 11 time_limits 1, 2
12 12 mem_limits 5, 6
13 13 end
14 14
15 15 run 2 do
16 16 tests 3, 4, 5, 6, 7
17 - score_each 10
17 + score_each 50
18 18 time_limit_each 3
19 19 mem_limit_each 3
20 20 end
21 21
22 22 run 3 do
23 23 tests 8, 9, 10
24 24 end
25 25
26 26 test 8 do
27 - score 30
27 + score 55
28 28 time_limit 3
29 29 mem_limit 10
30 30 end
31 31
32 32 test 9 do
33 - score 15
33 + score 55
34 34 end
35 35
36 36 test 10 do
37 + score 55
37 38 time_limit 1
38 39 end
39 40 end
You need to be logged in to leave comments. Log in now