@LoniasGR
Last active December 30, 2020 15:54
Algorithms and Complexity tester.
#! /bin/python3
import os
import time
import subprocess
import argparse

DELIMITER = "############################################################"


class bcolors:
    """ANSI escape codes used to colour the terminal output."""

    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKCYAN = "\033[96m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    ENDC = "\033[0m"
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Tests algorithm exercises. Needs to be in the same directory as the executable and the tests folder. The tests folder needs to be named {filename}_tests."
    )
    parser.add_argument(
        "filename",
        type=str,
        help="The name of the executable. Tests need to be in the {filename}_tests folder.",
    )
    parser.add_argument(
        "-n_tests",
        type=int,
        default=0,
        help="Number of tests to run; the default is all of them.",
    )
    args = parser.parse_args()

    filename = args.filename
    tests_folder = f"{filename}_tests/"

    # If no test count was given, infer it from the folder contents
    # (each test is an input/output file pair).
    if args.n_tests > 0:
        n_tests = args.n_tests
    else:
        n_tests = len(os.listdir(tests_folder)) // 2
    print(f"There are {n_tests} tests to be done...")

    wrong_results = list()
    for i in range(0, n_tests + 1):
        in_file = f"{tests_folder}input{i}.txt"
        out_file = f"{tests_folder}output{i}.txt"

        # Skip the index if the test doesn't exist.
        # Used to adjust for different naming conventions (0-based or
        # 1-based numbering) between test suites.
        if not os.path.exists(in_file):
            continue

        print(DELIMITER)
        print(f"{bcolors.HEADER}RUNNING TEST {i}{bcolors.ENDC}")

        # Run the executable with the test input on stdin and time it.
        with open(in_file, "r") as input_file:
            start = time.perf_counter()
            ret = subprocess.run(
                [f"./{filename}"], stdin=input_file, capture_output=True, text=True
            )
            runtime = time.perf_counter() - start

        print(f"The program took {runtime:.4f} seconds")
        if ret.stderr != "":
            print(
                f"{bcolors.FAIL}PROGRAM EXITED WITH ERROR:\n{ret.stderr}{bcolors.ENDC}"
            )

        # Compare the program's output with the expected output,
        # stripping surrounding newlines.
        result = ret.stdout.strip("\n")
        with open(out_file, "r") as output_file:
            expected = output_file.read().strip("\n")

        if result == expected:
            print(f"{bcolors.OKBLUE}CORRECT RESULTS{bcolors.ENDC}")
        else:
            print(f"{bcolors.FAIL}WARNING: WRONG RESULT!{bcolors.ENDC}")
            print(f"Expected {expected}, but got {result}")
            wrong_results.append(i)

    print(DELIMITER)
    correct_results = n_tests - len(wrong_results)
    print(
        f"{bcolors.OKGREEN}CORRECT RESULTS: {correct_results}/{n_tests}{bcolors.ENDC}"
    )
    print(f"{bcolors.FAIL}WRONG RESULTS: {len(wrong_results)}/{n_tests}{bcolors.ENDC}")
    if len(wrong_results) > 0:
        print(f"Wrong tests were {wrong_results}")
@LoniasGR (Author)

Requires Python 3.7+ (f-strings and subprocess's capture_output).

@LoniasGR (Author)

The script needs to be placed in the same folder as the executable and the tests folder. The tests folder should be named {filename}_tests and contain numbered input{i}.txt / output{i}.txt pairs. Your source should already be compiled.
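For example, assuming the script is saved as tester.py and the compiled executable is called exercise1 (both names are placeholders, not part of the gist), the layout and invocation could look like this:

    tester.py
    exercise1
    exercise1_tests/
        input0.txt
        output0.txt
        input1.txt
        output1.txt

    python3 tester.py exercise1              # run every test found in exercise1_tests/
    python3 tester.py exercise1 -n_tests 1   # only check test indices up to 1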
