Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
Custom checker in Python
# Start of HEAD
import json
import string
import sys
# End of HEAD
# Start of BODY
'''
TestStruct::
testcase_id [int] ID of the test-case
testcase_input_path [str] File path to test-case input
testcase_output_path [str] File path to test-case output generated by the problem solver
testcase_expected_output_path [str] File path to test-case expected output to be matched with
metadata_file_paths [list<str>] File paths to Question metadata (Extra files usually used for defining training sets)
submission_code_path [str] File path to submission source code
testcase_result [bool] Set to True if test-case output matches test-case expected output. Matching is done line by line
testcase_signal [int] Exit code of the test-case process
testcase_time [float] Time taken by the test-case process in seconds
testcase_memory [int] Peak memory of the test-case process determined in bytes
data [str] <Future use>
ResultStruct::
result [bool] Assign test-case result. True determines success. False determines failure
score [float] Assign test-case score. Normalized between 0 to 1
message [str] Assign test-case message. This message is visible to the problem solver
'''
def run_custom_checker(t_obj, r_obj):
    """Evaluate one test case.

    Reads the test-case description from *t_obj* (TestStruct) and fills in
    *r_obj* (ResultStruct) with the verdict reported to the problem solver.
    This sample implementation echoes every input field for debugging and
    then reports an unconditional success.
    """
    # Debug dump of everything the harness passed in.
    print("testcase_id: ", t_obj.testcase_id)
    print("testcase_input_path: ", t_obj.testcase_input_path)
    print("testcase_output_path: ", t_obj.testcase_output_path)
    print("testcase_expected_output_path: ", t_obj.testcase_expected_output_path)
    for it in t_obj.metadata_file_paths:
        print("metadata_file_paths: ", it)
    print("submission_code_path: ", t_obj.submission_code_path)
    print("testcase_result: ", t_obj.testcase_result)
    print("testcase_signal: ", t_obj.testcase_signal)
    print("testcase_time: ", t_obj.testcase_time)
    print("testcase_memory: ", t_obj.testcase_memory)
    print("data: ", t_obj.data)
    r_obj.result = True
    # Score must be normalized to [0, 1] per the ResultStruct contract.
    # (The original 1.1 only appeared to work because write_result_json
    # clamps the value before emitting it.)
    r_obj.score = 1.0
    r_obj.message = "Success"
# End of BODY
# Start of TAIL
class TestStruct:
    """Mutable holder for one test case's inputs.

    Every field starts at an "empty" default and is overwritten from the
    harness-supplied JSON by read_input_json().
    """

    def __init__(self):
        # Integer fields: case id, process exit code, peak memory (bytes).
        self.testcase_id = self.testcase_signal = self.testcase_memory = 0
        # Wall time of the test-case process, in seconds.
        self.testcase_time = 0.0
        # True when the produced output matched the expected output.
        self.testcase_result = False
        # File-path fields (all immutable strings, so chained assignment is safe).
        self.testcase_input_path = self.testcase_output_path = ""
        self.testcase_expected_output_path = self.submission_code_path = ""
        # Extra metadata files (e.g. training sets) attached to the question.
        self.metadata_file_paths = []
        # Reserved for future use.
        self.data = ""
class ResultStruct:
    """Verdict the checker reports back to the harness.

    Defaults represent a failure with zero score and no message, so an
    unmodified instance is a safe result to emit on internal errors.
    """

    def __init__(self):
        self.result = False  # True marks the test case as passed
        self.score = 0.0     # normalized to the range [0, 1]
        self.message = ""    # feedback shown to the problem solver
def read_input_json(json_file_path, t_obj):
    """Populate *t_obj* (TestStruct) from the harness input JSON file.

    Returns 0 on success and 1 when the file is missing or unreadable,
    the JSON is malformed, or a required key is absent.  (The original
    leaked the file handle and raised an uncaught OSError on a missing
    file, even though the caller's comment promises that case is handled.)
    """
    try:
        with open(json_file_path, 'r') as file_obj:
            root = json.load(file_obj)
    except (OSError, ValueError):
        # Missing/unreadable file or malformed JSON.
        return 1
    try:
        # Copy each required field; any missing key aborts the read.
        t_obj.testcase_id = root["testcase_id"]
        t_obj.testcase_input_path = root["input_file_path"]
        t_obj.testcase_output_path = root["output_file_path"]
        t_obj.testcase_expected_output_path = root["expected_output_file_path"]
        t_obj.metadata_file_paths = root["metadata_file_paths"]
        t_obj.submission_code_path = root["submission_code_path"]
        t_obj.testcase_result = root["testcase_result"]
        t_obj.testcase_signal = root["testcase_signal"]
        t_obj.testcase_time = root["testcase_time"]
        t_obj.testcase_memory = root["testcase_memory"]
        t_obj.data = root["data"]
    except KeyError:
        return 1
    return 0
def write_result_json(r_obj):
    """Serialize *r_obj* (ResultStruct) to stdout as the harness result JSON.

    The score is clamped into [0, 1] and the message is truncated to the
    4096-character limit before emitting.
    """
    root = {
        "custom_result": int(r_obj.result),
        # Clamp into the documented [0, 1] range.
        "custom_score": min(max(r_obj.score, 0), 1.0),
        # Keep at most 4096 characters.  (The original sliced [0:4095],
        # dropping one allowed character whenever it truncated.)
        "custom_message": r_obj.message[:4096],
    }
    print(json.dumps(root))
if __name__ == '__main__':
    # Holds the decoded test-case description from the harness.
    t_obj = TestStruct()
    # Holds the verdict reported back; defaults describe a failure.
    r_obj = ResultStruct()

    # Guard: no input JSON path on the command line.
    if len(sys.argv) < 2:
        write_result_json(r_obj)
        sys.exit(1)

    # Decode the input JSON; bail out if it is malformed or missing.
    if read_input_json(sys.argv[1], t_obj) != 0:
        r_obj.message = "Unable to read input json"
        write_result_json(r_obj)
        sys.exit(2)

    # Run the custom checker evaluator, then emit the result JSON.
    run_custom_checker(t_obj, r_obj)
    write_result_json(r_obj)
    sys.exit(0)
# End of TAIL
@patilarpith

This comment has been minimized.

Copy link
Owner Author

patilarpith commented Aug 4, 2015

Testing:
Input file data. Path to be passed as command line argument

{"testcase_id":0,"input_file_path":"/run-XX/input00.txt","output_file_path":"/run-XX/output00.out","expected_output_file_path":"/run-XX/output_expected00.out","metadata_file_paths":["/run-XX/training.txt","/run-XX/training2.txt"],"submission_code_path":"/run-XX/solution.cpp", "testcase_result":true,"testcase_signal":0,"testcase_time":1.2,"testcase_memory":312322,"data":"Data"}

Expected output:

testcase_id:  0
testcase_input_path:  /run-XX/input00.txt
testcase_output_path:  /run-XX/output00.out
testcase_expected_output_path:  /run-XX/output_expected00.out
metadata_file_paths:  /run-XX/training.txt
metadata_file_paths:  /run-XX/training2.txt
submission_code_path:  /run-XX/solution.cpp
testcase_result:  True
testcase_signal:  0
testcase_time:  1.2
testcase_memory:  312322
data:  Data
{"custom_result": 1, "custom_score": 1.0, "custom_message": "Success"}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.