Description:
merge to master
Commit status:
[Not Reviewed]
References:
merge default
Comments:
0 Commit comments 0 Inline Comments
Unresolved TODOs:
There are no unresolved TODOs
Add another comment

r253:0654ec1bf773 - - 6 files changed: 24 inserted, 13 deleted

@@ -0,0 +1,9
#!/bin/bash
# Watchdog: restart the grading-queue daemon when it is not running.
# Intended to be invoked periodically (e.g. from cron).
count=`ps aux | grep "algo_grader" | grep "grader grading queue" | wc -l`
install_dir=/home/john/cafe_grader
ruby_executable=/home/john/.rvm/wrappers/ruby-2.3.0/ruby
if [ $count -lt 1 ]; then
  # BUG FIX: the original did `cd $judge_dir/judge`, but $judge_dir is never
  # assigned anywhere in this script; the judge tree lives under $install_dir.
  cd $install_dir/judge
  $ruby_executable $install_dir/judge/scripts/grader grading queue > $install_dir/judge/grading.log &
fi
9 +
@@ -12,175 +12,177
12 12 FileTest.exist?(File.dirname(__FILE__) + "/stop.all") or
13 13 FileTest.exist?(File.dirname(__FILE__) + "/stop.#{Process.pid}")
14 14 end
15 15
# Deletes this process's personal stop file (stop.<pid>) located next to
# this script, if one exists; does nothing otherwise.
def clear_stopfile
  stopfile = File.dirname(__FILE__) + "/stop.#{Process.pid}"
  File.delete(stopfile) if FileTest.exist?(stopfile)
end
21 21
# Shorthand accessor for the process-wide grader configuration singleton.
def config
  Grader::Configuration.get_instance
end
25 25
# Builds the per-process log file path:
#   <log_dir>/<GRADER_ENV>_<grader_mode>.<pid>
# Raises a RuntimeError when the configured log directory does not exist.
def log_file_name
  # BUG FIX: File.exists? was deprecated and removed in Ruby 3.2;
  # File.exist? is the supported spelling.
  if !File.exist?(config.log_dir)
    raise "Log directory does not exist: #{config.log_dir}"
  end
  config.log_dir +
    "/#{GRADER_ENV}_#{config.grader_mode}.#{Process.pid}"
end
33 33
# Emits one log line: echoed to stdout when config.talkative, and appended
# with an HH:MM timestamp to the per-process log file when config.logging.
def log(str)
  if config.talkative
    puts str
  end
  if config.logging
    # Block form guarantees the handle is closed even if the write raises;
    # the original opened and closed the file manually and would leak the
    # descriptor on an exception.
    File.open(log_file_name, "a") do |fp|
      fp.puts("GRADER: #{Time.new.strftime("%H:%M")} #{str}")
    end
  end
end
44 44
# Prints the command-line usage/manual text for the grader to stdout.
def display_manual
  puts <<USAGE
Grader.
using: (1) grader
       (2) grader environment [mode] [options]
       (3) grader stop [all|pids-list]
       (4) grader --help
(1) call grader with environment = 'exam', mode = 'queue'
(2) possible modes are: 'queue', 'test_request', 'prob', 'sub', 'contest', and 'autonew'
    queue: repeatedly check the task queue and grade any available tasks

    prob: re-grade every user latest submission of the specific problem.
          the problem name must be specified by the next argument.

          additional options:
          --all-sub     re-grade every submissions instead of just the latest submission of each user.
          --only-error  re-grade only submissions that are "error during grading"

    sub: re-grader the specified submission.
         The submission ID to be re-graded must be specified by the next argument.

    options:
    --err-log  log error to a file in the log dir

(3) create stop-file to stop running grader in queue mode
(4) You are here.
USAGE
end
72 73
# Parses ARGV into an options hash and handles the side-effecting command
# forms.  Exits the process for the 'help' and 'stop' commands and when a
# stop file is present; otherwise returns a hash with keys
#   :environment, :mode, :dry_run, :report, :all_sub, :only_err, :err_log
# NOTE: mutates ARGV (shift/delete) as it consumes arguments.
def process_options_and_stop_file
  # The list of options are:
  # - stop [all|process ids]
  # -

  # Process 'help' option
  if (ARGV.length==1) and (/help/.match(ARGV[0]))
    display_manual
    exit(0)
  end

  # Process 'stop' option.
  if (ARGV.length >= 1) and (ARGV[0]=='stop')
    if ARGV.length==1
      puts "you should specify pid-list or 'all'"
      display_manual
    elsif (ARGV.length==2) and (ARGV[1]=='all')
      stop_grader(:all)
      puts "A global stop file ('stop.all') created."
      puts "You should remove it manually later."
    else
      # One stop file per pid listed after 'stop'.
      (1..ARGV.length-1).each do |i|
        stop_grader(ARGV[i])
      end
      puts "stop file(s) created"
    end
    exit(0)
  end

  # Check stop file.
  if check_stopfile
    puts "Stop file exists. Terminated."
    clear_stopfile
    exit(0)
  end

  #default options
  options = {
    :mode => 'queue',
    :environment => 'exam',
    :dry_run => false,
  }

  # Process mode and environment option
  if ARGV.length >= 1
    options[:environment] = ARGV.shift
    if ARGV.length >=1
      options[:mode] = ARGV.shift
    end
  else
    puts 'no argument specified, using default mode and environment.'
  end

  # Flag options: ARGV.delete returns nil when the flag was absent.
  options[:dry_run] = (ARGV.delete('--dry') != nil)
  if options[:dry_run] and (not ['prob','contest','autonew'].include? options[:mode])
    puts "Dry run currently works only for 'prob' or 'contest' modes."
    exit(0)
  end

  options[:report] = (ARGV.delete('--report') != nil)
  if options[:report] and (not ['prob','contest','autonew'].include? options[:mode])
    puts "Report currently works only for 'prob' or 'contest' modes."
    exit(0)
  end

  options[:all_sub] = (ARGV.delete('--all-sub') != nil)
  options[:only_err] = (ARGV.delete('--only-error') != nil)

  options[:err_log] = (ARGV.delete('--err-log') != nil)

  return options
end
144 146
145 147 class ResultCollector
# Sets up the empty registries populated by #save:
# results keyed by [user_id, problem_id]; problems and users keyed by id.
def initialize
  @users = {}
  @problems = {}
  @results = {}
end
151 153
# Extension point invoked by #save after each result is recorded.
# Default implementation does nothing; subclasses may override.
def after_save_hook(submission, grading_result)
end
154 156
# Records grading_result under the submission's (user, problem) pair,
# remembering the user and problem objects for later reporting, then
# invokes the after_save_hook extension point.
def save(submission, grading_result)
  user = submission.user
  problem = submission.problem
  @problems[problem.id] = problem unless @problems.has_key? problem.id
  @users[user.id] = user unless @users.has_key? user.id
  @results[[user.id, problem.id]] = grading_result

  after_save_hook(submission, grading_result)
end
168 170
169 171 def print_report_by_user
170 172 puts "---------------------"
171 173 puts " REPORT"
172 174 puts "---------------------"
173 175
174 176 print "login,email"
175 177 @problems.each_value do |problem|
176 178 print ",#{problem.name}"
177 179 end
178 180 print "\n"
179 181
180 182 @users.each_value do |user|
181 183 print "#{user.login},#{user.email}"
182 184 @problems.each_value do |problem|
183 185 if @results.has_key? [user.id, problem.id]
184 186 print ",#{@results[[user.id,problem.id]][:points]}"
185 187 else
186 188 print ","
@@ -1,67 +1,71
#
# A runner drives the engine into various tasks.
#
module Grader

  class Runner

    # engine: grading engine that performs the actual grading work.
    # grader_process: optional process record used to report activity.
    def initialize(engine, grader_process=nil)
      @engine = engine
      @grader_process = grader_process
    end

    # Pops the oldest queued task, grades its submission, and marks the
    # task complete.  Returns the task, or nil when the queue was empty.
    def grade_oldest_task
      task = Task.get_inqueue_and_change_status(Task::STATUS_GRADING)
      if task!=nil
        @grader_process.report_active(task) if @grader_process!=nil

        submission = Submission.find(task.submission_id)
        @engine.grade(submission)
        task.status_complete!
        @grader_process.report_inactive(task) if @grader_process!=nil
      end
      return task
    end

    # Re-grades submissions of +problem+ across every user.
    # options:
    #   :user_conditions - proc(user) -> bool; users failing it are skipped
    #   :all_sub         - grade every submission, not just each user's latest
    #   :only_err        - restrict to submissions whose grader_comment is
    #                      exactly 'error during grading'
    def grade_problem(problem, options={})
      user_index = 0
      user_count = User.count
      User.find_each do |u|
        # BUG FIX: increment before printing so progress reads 1/N..N/N
        # (the original printed 0/N for the first user and never N/N).
        user_index += 1
        puts "user: #{u.login} (#{user_index}/#{user_count})"
        if options[:user_conditions]!=nil
          con_proc = options[:user_conditions]
          next if not con_proc.call(u)
        end
        if options[:all_sub]
          Submission.where(user_id: u.id,problem_id: problem.id).find_each do |sub|
            next if options[:only_err] and sub.grader_comment != 'error during grading'
            @engine.grade(sub)
          end
        else
          last_sub = Submission.find_last_by_user_and_problem(u.id,problem.id)
          if last_sub!=nil
            @engine.grade(last_sub) unless options[:only_err] and last_sub.grader_comment != 'error during grading'
          end
        end
      end
    end

    # Grades a single submission, announcing it on stdout first.
    def grade_submission(submission)
      puts "Submission: #{submission.id} by #{submission.try(:user).try(:full_name)}"
      @engine.grade(submission)
    end

    # Pops the oldest queued test request and grades it.  Returns the
    # request, or nil when none was queued.
    def grade_oldest_test_request
      test_request = TestRequest.get_inqueue_and_change_status(Task::STATUS_GRADING)
      if test_request!=nil
        @grader_process.report_active(test_request) if @grader_process!=nil

        @engine.grade(test_request)
        test_request.status_complete!
        @grader_process.report_inactive(test_request) if @grader_process!=nil
      end
      return test_request
    end

  end

end
67 71
@@ -95,58 +95,60
95 95 peak_memory = -1
96 96 end
97 97
98 98
99 99 return {points: result,
100 100 comment: comment,
101 101 cmp_msg: cmp_msg,
102 102 max_runtime: max_runtime,
103 103 peak_memory: peak_memory
104 104 }
105 105 else
106 106 if FileTest.exist?("#{test_result_dir}/a.out")
107 107 return {:points => 0,
108 108 :comment => 'error during grading',
109 109 :cmp_msg => cmp_msg}
110 110 else
111 111 return {:points => 0,
112 112 :comment => 'compilation error',
113 113 :cmp_msg => cmp_msg}
114 114 end
115 115 end
116 116 end
117 117
# Copies a grading result hash onto the submission record and saves it
# (unless running in dry-run mode).
# result keys used: :points, :comment, :cmp_msg, :peak_memory, :max_runtime.
def save_result(submission,result)
  problem = submission.problem
  submission.graded_at = Time.now.gmtime
  points = result[:points]
  submission.points = points
  comment = @config.report_comment(result[:comment])

  submission.peak_memory = result[:peak_memory]
  submission.max_runtime = result[:max_runtime]
  submission.effective_code_length =submission.source.length

  #
  # TODO: FIX THIS MESSAGE
  #
  if problem == nil
    submission.grader_comment = 'PASSED: ' + comment + '(problem is nil)'
  elsif points == problem.full_score
    #submission.grader_comment = 'PASSED: ' + comment
    submission.grader_comment = comment
  elsif result[:comment].chomp =~ /^[\[\]P]+$/
    submission.grader_comment = 'PASSED: ' + comment + '(inconsistent score)'
  else
    #submission.grader_comment = 'FAILED: ' + comment
    submission.grader_comment = comment
  end

  # Lazily trim the compiler message so it fits the DB column.
  # BUG FIX: `result[:cmp_msg][0..60000] or ''` parsed as
  # `(submission.compiler_message = ...) or ''` because `or` binds looser
  # than `=`, so a nil cmp_msg raised NoMethodError instead of
  # defaulting to the empty string.  Default first, then trim.
  submission.compiler_message = (result[:cmp_msg] || '')[0..60000]

  if not @dry_run
    submission.save
  end
end
149 151
150 152 end
151 153
152 154 end
@@ -46,119 +46,119
# Load the problem definition DSL and this problem's test configuration.
# NOTE(review): problem_home and the log/compile helpers are defined
# earlier in this script, outside the visible chunk.
source_name = ENV['SOURCE_NAME']
require "#{problem_home}/script/test_dsl.rb"
load "#{problem_home}/test_cases/all_tests.cfg"
problem = Problem.get_instance

# Remember the sandbox (current working) directory for the path whitelists.
sandbox_dir = Dir.getwd

# Abort with exit code 127 when the problem specification is inconsistent.
if problem.well_formed? == false
  log "The problem specification is not well formed."
  exit(127)
end
57 57
# Check if the test number is okay.
# Valid test numbers are 1..problem.num_tests; anything else exits 127.
if test_num <= 0 || test_num > problem.num_tests
  log "You have specified a wrong test number."
  exit(127)
end
63 63
#####################################
# Set the relevant file names here. #
#####################################

input_file_name = "#{problem_home}/test_cases/#{test_num}/input-#{test_num}.txt"

#####################################

# Per-test limits; get_mem_limit returns MB, box expects KB.
time_limit = problem.get_time_limit test_num
mem_limit = problem.get_mem_limit(test_num) * 1024

# Copy the input file.
#`cp #{problem_home}/test_cases/#{test_num}/#{input_file_name} .`

# check if box is there, if not, compile it!
# BUG FIX: File.exists? was deprecated and removed in Ruby 3.2;
# File.exist? is the supported spelling.
if !File.exist?("#{problem_home}/script/box")
  log "WARNING: Compiling box: to increase efficiency, it should be compile manually"
  compile_box("#{problem_home}/script/box.cc",
              "#{problem_home}/script/box")
end
84 84
# Hide PROBLEM_HOME so the sandboxed program cannot read grader paths
# from its environment.
ENV['PROBLEM_HOME'] = nil
ENV['SOURCE_NAME'] = nil

# Run the program.
#run_command = "/usr/bin/time -f \"#{time_output_format}\" 2>run_result #{problem_home}/script/box_new -a 2 -f -t #{time_limit} -m #{mem_limit} -i #{input_file_name} -o output.txt #{program_name}"
#

# Per-language sandbox (box) whitelists: -s allows a syscall, -p allows a
# path, -E sets an environment variable for the sandboxed process.
JAVA_OPTION = "-s set_robust_list -s futex -s clone -s getppid -s clone -s wait4 -p /usr/bin/ -p ./"
RUBY_OPTION = "-p /usr/lib64/ -p /usr/local/lib64/ -p /usr/local/lib/ -p /lib64/ -p /dev/urandom -p #{sandbox_dir}/#{program_name} -p #{sandbox_dir}/ -s set_robust_list -s sched_getaffinity -s clock_gettime -s sigaltstack -s pipe2 -s clone -s futex -s openat -s pipe -s getrandom"
PYTHON_OPTION = "-p /usr/lib64/ -p /usr/local/lib64/ -p /usr/local/lib/ -p /usr/bin/ -p /lib64/ -p /dev/urandom -p /usr/ -p #{sandbox_dir}/#{program_name} -p ./#{program_name} -p #{sandbox_dir}/#{source_name} -p /proc/sys/crypto/fips_enabled -p /proc/self/status -p /proc/mounts -p /var/lib/dpkg/status -s statfs -s set_robust_list -s openat -s sysinfo -s recvmsg -s connect -s socket -s sendto -s futex -s sigaltstack -s getrandom -E PYTHONNOUSERSITE=yes"
PHP_OPTION = "-p /usr/lib64/ -p/lib64/ -p /usr/bin/ -p #{sandbox_dir}/#{program_name} -p ./#{program_name} -p /usr/share/ -s setfsuid -s setfsgid -s openat -s set_robust_list -s futex -s clone -s socket -s connect"
HASKELL_OPTION = "-s set_robust_list -s clock_gettime -s sysinfo -s timer_create -s timer_settime -s futex -s timer_delete"

case language
when "java"
  # for java, extract the classname
  # we have to add additional systemcall and we don't check the mem limit (dunno how to fix...)
  classname = 'DUMMY'
  File.open(program_name,"r").each do |line|
    classname = line
  end
  #for java, we cannot really check the memory limit...
  run_command = "#{problem_home}/script/box -a 3 -f -T -t #{time_limit} #{JAVA_OPTION} -i #{input_file_name} -o output.txt /usr/bin/java -A -Xmx#{mem_limit}k -A #{classname} "
when "ruby"
  # interpreted languages get a doubled time limit (time_limit*=2 inline)
  run_command = "#{problem_home}/script/box -a 2 -f -T -t #{time_limit*=2} -m #{mem_limit} #{RUBY_OPTION} -i #{input_file_name} -o output.txt /usr/bin/ruby #{program_name} "
when "python"
  # memory floor of 512 MB because the interpreter itself is heavy
  run_command = "#{problem_home}/script/box -a 2 -f -T -t #{time_limit*=2} -m #{[512 * 1024,mem_limit].max} #{PYTHON_OPTION} -i #{input_file_name} -o output.txt /usr/bin/python3 #{program_name} "
when "haskell"
  run_command = "#{problem_home}/script/box -a 2 -f -T -t #{time_limit} -m #{[512 * 1024,mem_limit].max} #{HASKELL_OPTION} -i #{input_file_name} -o output.txt #{program_name} "
when "php"
  run_command = "#{problem_home}/script/box -a 2 -f -T -t #{time_limit*=2} -m #{[512 * 1024,mem_limit].max} #{PHP_OPTION} -i #{input_file_name} -o output.txt /usr/bin/php -A -d -A memory_limit=#{mem_limit}k -A #{program_name} "
else # for c++, pascal, we do the normal checking
  run_command = "#{problem_home}/script/box -a 2 -f -T -t #{time_limit} -m #{mem_limit} -i #{input_file_name} -o output.txt #{program_name} "
end
120 120
121 121
log "Running test #{test_num}..."
log run_command
log
# The sandbox's stderr (its status/limit report) is captured in 'run_result'.
system(run_command,err: 'run_result')

# Restore PROBLEM_HOME
ENV['PROBLEM_HOME'] = problem_home

# Create the result file.
result_file = File.new("result", "w")
comment_file = File.new("comment", "w")

# Check if the program actually produced any output.
run_result_file = File.new("run_result", "r")
run_result = run_result_file.readlines
run_result_file.close

# run_stat is the last captured line; extract_time (defined elsewhere in
# this script) presumably parses the runtime out of it — confirm there.
run_stat = run_result[run_result.length-1]
running_time = extract_time(run_stat)
141 141
142 142 report = lambda{ |status, points, comment|
143 143 result_file.write status.strip
144 144 result_file.write "\n"
145 145 result_file.write points.to_s.strip
146 146 result_file.write "\n"
147 147 result_file.write run_stat.strip
148 148 result_file.write "\n"
149 149 result_file.close
150 150 FileUtils.rm "run_result"
151 151 # `rm output.txt` --- keep the output
152 152
153 153 comment_file.write comment
154 154
155 155 # added for debuggin --- jittat
156 156 comment_file.write "--run-result--\n"
157 157 run_result.each do |l|
158 158 comment_file.write l
159 159 end
160 160
161 161 comment_file.close
162 162
163 163 log "Done!"
164 164 exit(0)
You need to be logged in to leave comments. Login now