Description:
fixed test run scoring bug: now it takes the minimum of each test case's score

git-svn-id: http://theory.cpe.ku.ac.th/grader/judge/trunk/scripts@273 6386c4cd-e34a-4fa8-8920-d93eb39b512e
Commit status:
[Not Reviewed]
References:
Comments:
0 commit comments, 0 inline comments
Unresolved TODOs:
There are no unresolved TODOs

r66:c03ebf30fb10 - 4 files changed: 19 inserted, 18 deleted

@@ -24,14 +24,16 @@
24 24
25 25 #
26 26 # Also copy additional submitted file to this directory as well.
27 27 # The program would see this file only if it is copied
28 28 # to the sandbox directory later. The run script should do it.
29 29 #
30 - cmd = "cp #{test_request.input_file_name}.files/* #{grading_room}"
31 - system(cmd)
30 + if FileTest.exists?("#{test_request.input_file_name}.files")
31 + cmd = "cp #{test_request.input_file_name}.files/* #{grading_room}"
32 + system(cmd)
33 + end
32 34
33 35 grading_room
34 36 end
35 37
36 38 def find_problem_home(test_request)
37 39 problem_name = test_request.problem_name
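The hunk above guards the copy of additional submitted files so that the cp call only runs when the optional "#{test_request.input_file_name}.files" directory actually exists, instead of producing an error for submissions that have no extra files. A minimal standalone sketch of the same pattern, assuming hypothetical paths and using FileUtils rather than shelling out to cp:

require 'fileutils'

# Hypothetical stand-ins for the values taken from the test request.
input_file_name = '/tmp/submission/input'
grading_room    = '/tmp/grading_room'

extra_dir = "#{input_file_name}.files"
if File.directory?(extra_dir)
  # Copy every additional submitted file into the grading room;
  # skipped entirely when no extra files were submitted.
  FileUtils.cp_r(Dir.glob("#{extra_dir}/*"), grading_room)
end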
@@ -43,46 +43,44 @@
43 43
44 44 all_score = 0
45 45 all_comment = ''
46 46 (1..(problem.runs.length-1)).each do |k|
47 47 log "grade run #{k}"
48 48 run = problem.runs[k]
49 - run_score = 0
49 + run_score = nil
50 50 run_comment = ''
51 51 run_comment_short = ''
52 52 run.tests.each do |test_num|
53 53 result_file_name = "#{test_num}/result"
54 54 if not File.exists?(result_file_name)
55 55 run_comment += "result file for test #{test_num} not found\n"
56 56 run_comment_short += RUN_ERROR_MARK
57 57 log "Cannot find the file #{test_num}/result!"
58 58 else
59 59 result_file = File.new(result_file_name, "r")
60 60 result_file_lines = result_file.readlines
61 61 if result_file_lines.length>=2
62 - run_score = run_score + result_file_lines[1].to_i
62 + current_run_score = result_file_lines[1].to_i
63 63 run_comment += result_file_lines[0]
64 64 run_comment_short += char_comment(result_file_lines[0].chomp)
65 65 else
66 + current_run_score = 0
66 67 run_comment += "result file for test #{test_num} error\n"
67 68 run_comment_short += RUN_ERROR_MARK
68 69 log "Error in #{test_num}/result!"
69 70 end
71 +
72 + # the score of this run should be the minimum of the score for
73 + # each test case
74 + if (run_score==nil) or (run_score>current_run_score)
75 + run_score = current_run_score
76 + end
70 77 result_file.close
71 78 end
72 79 end
73 80
74 - # find total score for this run
75 - run_total_score = 0
76 - problem = Problem.get_instance
77 - run.tests.each { |test_num| run_total_score += problem.get_score(test_num) }
78 -
79 - if run_total_score!=run_score # fail in some test cases, fail the run
80 - run_score = 0
81 - end
82 -
83 81 run_result_file = File.new("result-#{k}", "w")
84 82 run_result_file.write run_score
85 83 run_result_file.write "\n"
86 84 run_result_file.close
87 85
88 86 run_comment_file = File.new("comment-#{k}", "w")
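The hunk above replaces the old sum-and-compare logic (which zeroed a run whenever its accumulated score differed from the sum of the expected test scores) with a per-run minimum: each test case's score is read from its result file, and the run's score becomes the smallest of them, so one failed test pulls the whole run down to that test's score. A small sketch of the rule in isolation, assuming the same "<test_num>/result" layout; unlike the committed code (which only logs an error when a result file is missing), this sketch treats a missing or malformed result file as a score of 0:

# Hypothetical helper mirroring the result-file parsing in the script:
# the first line is a comment, the second line is the numeric score.
def read_test_score(test_num)
  result_file_name = "#{test_num}/result"
  return 0 unless File.exist?(result_file_name)
  lines = File.readlines(result_file_name)
  lines.length >= 2 ? lines[1].to_i : 0
end

# The score of a run is the minimum over its test cases.
def run_score(test_nums)
  test_nums.map { |t| read_test_score(t) }.min
end

# Example: if tests 3..7 score 50, 50, 0, 50, 50, the run scores 0.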
@@ -10,11 +10,11 @@
10 10 <% tr_num += 1 %>
11 11 run <%= tr_num %> do
12 12 tests <%= (testrun.collect {|testcase| testcase[0]}).join(", ") %>
13 13 <% if testrun.length==1 %>
14 14 scores 10
15 15 <% else %>
16 - scores 10 <% (testrun.length-1).times do %>,0 <% end %>
16 + scores 10 <% (testrun.length-1).times do %>,10 <% end %>
17 17 <% end %>
18 18 end
19 19 <% end %>
20 20 end
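The template change follows from the new rule: padding the extra tests of a run with 0 would make every multi-test run score 0 under the minimum, so each additional test now carries the same 10 points as the first. For a run with three tests (test numbers are illustrative) the template would now generate roughly:

run 2 do
  tests 4, 5, 6
  scores 10 ,10 ,10
end

where the old template would have emitted "scores 10 ,0 ,0" and the run could never score above 0.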
@@ -4,36 +4,37 @@
4 4 time_limit_each 1
5 5 mem_limit_each 11
6 6 score_each 10
7 7
8 8 run 1 do
9 9 tests 1, 2
10 - scores 10, 20
10 + scores 30, 30
11 11 time_limits 1, 2
12 12 mem_limits 5, 6
13 13 end
14 14
15 15 run 2 do
16 16 tests 3, 4, 5, 6, 7
17 - score_each 10
17 + score_each 50
18 18 time_limit_each 3
19 19 mem_limit_each 3
20 20 end
21 21
22 22 run 3 do
23 23 tests 8, 9, 10
24 24 end
25 25
26 26 test 8 do
27 - score 30
27 + score 55
28 28 time_limit 3
29 29 mem_limit 10
30 30 end
31 31
32 32 test 9 do
33 - score 15
33 + score 55
34 34 end
35 35
36 36 test 10 do
37 + score 55
37 38 time_limit 1
38 39 end
39 40 end
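The fixture now gives every test inside a run the run's full value, which is what the minimum rule expects. A quick sketch of the resulting arithmetic, with the per-test scores copied from the fixture above; summing the run scores into the problem total happens outside this diff, so that last step is an assumption:

# Per-run test scores taken from the updated fixture.
runs = {
  1 => [30, 30],              # run 1: tests 1, 2
  2 => [50, 50, 50, 50, 50],  # run 2: tests 3..7, score_each 50
  3 => [55, 55, 55],          # run 3: tests 8, 9, 10
}

# Each run contributes the minimum of its test scores;
# here the assumed total is 30 + 50 + 55 = 135.
total = runs.values.sum { |scores| scores.min }
puts total  # => 135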