Last active
April 16, 2026 10:43
-
-
Save karlseguin/c6bea5b35e4e8d26af6f81c22cb5d76b to your computer and use it in GitHub Desktop.
Custom Zig Test Runner: better output, timing display, and support for special "tests:beforeAll" and "tests:afterAll" tests
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| // This is for the Zig 0.16. | |
| // See https://gist.github.com/karlseguin/c6bea5b35e4e8d26af6f81c22cb5d76b/eb15512d6ae49663fa9df6c7a9725b20dab43edd | |
| // for a version that works on Zig 0.15.2. | |
| // See https://gist.github.com/karlseguin/c6bea5b35e4e8d26af6f81c22cb5d76b/1f317ebc9cd09bc50fd5591d09c34255e15d1d85 | |
| // for a version that works on Zig 0.14.1. | |
| // in your build.zig, you can specify a custom test runner: | |
| // const tests = b.addTest(.{ | |
| //   .root_module = $MODULE_BEING_TESTED, | |
| //   .test_runner = .{ .path = b.path("test_runner.zig"), .mode = .simple }, | |
| // }); | |
const std = @import("std");
const Io = std.Io;
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;

// Horizontal rule used to frame failure / leak reports in the output.
const BORDER = "=" ** 80;

// Name of the test currently executing, or null when no test is running.
// Read by the custom panic handler (bottom of this file) so a crash can be
// attributed to a specific test.
var current_test: ?[]const u8 = null;
/// Entry point for the custom test runner. Walks `builtin.test_functions` in
/// three passes: setup tests ("tests:beforeAll"), regular tests, then teardown
/// tests ("tests:afterAll"). Tracks pass/fail/skip/leak counts, per-test
/// timings (via SlowTracker), and exits non-zero when any test failed.
pub fn main(init: std.process.Init) !void {
    // Fixed 8 KiB buffer backs all runner-internal allocations (the slow-test
    // queue); test bodies themselves use std.testing.allocator_instance below.
    var mem: [8192]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&mem);
    const allocator = fba.allocator();

    // Runner configuration from TEST_VERBOSE / TEST_FAIL_FIRST / TEST_FILTER.
    const env = Env.init(init.environ_map);

    // Wire up std.testing's Io instance so tests using std.testing.io work.
    // NOTE(review): relies on the Zig 0.16 std.process.Init and
    // std.testing.io_instance APIs — confirm against the exact toolchain.
    std.testing.io_instance = .init(init.gpa, .{
        .argv0 = .init(init.minimal.args),
        .environ = init.minimal.environ,
    });
    defer std.testing.io_instance.deinit();
    const io = std.testing.io;

    // Track the 5 slowest tests for the summary at the end.
    var slowest = SlowTracker.init(allocator, io, 5);
    defer slowest.deinit();

    var pass: usize = 0;
    var fail: usize = 0;
    var skip: usize = 0;
    var leak: usize = 0;

    Printer.fmt("\r\x1b[0K", .{}); // beginning of line and clear to end of line

    // Pass 1: run every "tests:beforeAll" test; abort the whole run on failure.
    for (builtin.test_functions) |t| {
        if (isSetup(t)) {
            t.func() catch |err| {
                Printer.status(.fail, "\nsetup \"{s}\" failed: {}\n", .{ t.name, err });
                return err;
            };
        }
    }

    // Pass 2: run the regular tests (setup/teardown excluded).
    for (builtin.test_functions) |t| {
        if (isSetup(t) or isTeardown(t)) {
            continue;
        }

        var status = Status.pass;
        slowest.startTiming(io);

        // Unnamed (compiler-generated) tests always run, even with a filter,
        // since their names carry no useful match text.
        const is_unnamed_test = isUnnamed(t);
        if (env.filter) |f| {
            if (!is_unnamed_test and std.mem.indexOf(u8, t.name, f) == null) {
                continue;
            }
        }

        // Strip everything up to and including the leading "test" namespace
        // component so e.g. "ns.test.my test" displays as "my test".
        const friendly_name = blk: {
            const name = t.name;
            var it = std.mem.splitScalar(u8, name, '.');
            while (it.next()) |value| {
                if (std.mem.eql(u8, value, "test")) {
                    const rest = it.rest();
                    break :blk if (rest.len > 0) rest else name;
                }
            }
            break :blk name;
        };

        // Publish the test name for the panic handler, fresh leak-checking
        // allocator per test.
        current_test = friendly_name;
        std.testing.allocator_instance = .{};
        const result = t.func();
        current_test = null;

        const ns_taken = slowest.endTiming(io, friendly_name);

        // Leaks are reported and counted, but do not affect the exit code
        // (only `fail` does) — presumably intentional; confirm if leaks
        // should fail CI.
        if (std.testing.allocator_instance.deinit() == .leak) {
            leak += 1;
            Printer.status(.fail, "\n{s}\n\"{s}\" - Memory Leak\n{s}\n", .{ BORDER, friendly_name, BORDER });
        }

        if (result) |_| {
            pass += 1;
        } else |err| switch (err) {
            error.SkipZigTest => {
                skip += 1;
                status = .skip;
            },
            else => {
                status = .fail;
                fail += 1;
                Printer.status(.fail, "\n{s}\n\"{s}\" - {s}\n{s}\n", .{ BORDER, friendly_name, @errorName(err), BORDER });
                if (@errorReturnTrace()) |trace| {
                    std.debug.dumpErrorReturnTrace(trace);
                }
                // TEST_FAIL_FIRST: stop at the first failure (teardown still runs).
                if (env.fail_first) {
                    break;
                }
            },
        }

        if (env.verbose) {
            const ms = @as(f64, @floatFromInt(ns_taken)) / 1_000_000.0;
            Printer.status(status, "{s} ({d:.2}ms)\n", .{ friendly_name, ms });
        } else {
            Printer.status(status, ".", .{});
        }
    }

    // Pass 3: run every "tests:afterAll" test, even after fail-first break.
    for (builtin.test_functions) |t| {
        if (isTeardown(t)) {
            t.func() catch |err| {
                Printer.status(.fail, "\nteardown \"{s}\" failed: {}\n", .{ t.name, err });
                return err;
            };
        }
    }

    // Summary. Note: skipped tests are excluded from the "of N" total.
    const total_tests = pass + fail;
    const status = if (fail == 0) Status.pass else Status.fail;
    Printer.status(status, "\n{d} of {d} test{s} passed\n", .{ pass, total_tests, if (total_tests != 1) "s" else "" });
    if (skip > 0) {
        Printer.status(.skip, "{d} test{s} skipped\n", .{ skip, if (skip != 1) "s" else "" });
    }
    if (leak > 0) {
        Printer.status(.fail, "{d} test{s} leaked\n", .{ leak, if (leak != 1) "s" else "" });
    }

    Printer.fmt("\n", .{});
    try slowest.display();
    Printer.fmt("\n", .{});
    std.process.exit(if (fail == 0) 0 else 1);
}
// Small stderr-printing helper; everything goes through std.debug.print so
// output is never buffered past a call.
const Printer = struct {
    // Writes formatted text to stderr, uncolored.
    fn fmt(comptime format: []const u8, args: anytype) void {
        std.debug.print(format, args);
    }

    // Writes formatted text wrapped in the ANSI color for `s`, always
    // emitting a reset afterwards. `.text` gets no color.
    fn status(s: Status, comptime format: []const u8, args: anytype) void {
        const color: []const u8 = switch (s) {
            .pass => "\x1b[32m", // green
            .fail => "\x1b[31m", // red
            .skip => "\x1b[33m", // yellow
            .text => "",
        };
        std.debug.print("{s}", .{color});
        std.debug.print(format ++ "\x1b[0m", args);
    }
};
// Rendering category for a line of runner output; maps to an ANSI color in
// Printer.status (.text gets no color).
const Status = enum {
    pass,
    fail,
    skip,
    text,
};
// Times each test and retains the N slowest in a bounded priority dequeue
// keyed on elapsed nanoseconds (min end = fastest of the tracked slow tests).
const SlowTracker = struct {
    max: usize, // how many slow tests to retain
    slowest: SlowestQueue,
    start: Io.Timestamp, // start time of the test currently being timed
    allocator: Allocator,
    const SlowestQueue = std.PriorityDequeue(TestInfo, void, compareTiming);
    fn init(allocator: Allocator, io: Io, count: u32) SlowTracker {
        // NOTE(review): Io.Clock.awake is a Zig 0.16 API — confirm against toolchain.
        const timestamp = Io.Clock.awake.now(io);
        var slowest: SlowestQueue = .empty;
        // Reserve the full capacity up-front; pushes below stay within it.
        slowest.ensureTotalCapacity(allocator, count) catch @panic("OOM");
        return .{
            .max = count,
            .start = timestamp,
            .slowest = slowest,
            .allocator = allocator,
        };
    }
    const TestInfo = struct {
        ns: u64, // elapsed wall-clock nanoseconds
        name: []const u8, // friendly test name (not owned; lives in the binary)
    };
    fn deinit(self: *SlowTracker) void {
        self.slowest.deinit(self.allocator);
    }
    // Marks the start of a test run; pair with endTiming().
    fn startTiming(self: *SlowTracker, io: Io) void {
        self.start = Io.Clock.awake.now(io);
    }
    // Returns nanoseconds elapsed since startTiming() and records the test
    // in the slowest-N queue when it qualifies.
    fn endTiming(self: *SlowTracker, io: Io, test_name: []const u8) u64 {
        const timestamp = Io.Clock.awake.now(io);
        const start = self.start;
        self.start = timestamp;
        const ns: u64 = @intCast(start.durationTo(timestamp).toNanoseconds());
        var slowest = &self.slowest;
        if (slowest.count() < self.max) {
            // Capacity is fixed to the # of slow tests we want to track
            // If we've tracked fewer tests than this capacity, then always add
            slowest.push(self.allocator, TestInfo{ .ns = ns, .name = test_name }) catch @panic("failed to track test timing");
            return ns;
        }
        {
            // Optimization to avoid shifting the dequeue for the common case
            // where the test isn't one of our slowest.
            const fastest_of_the_slow = slowest.peekMin() orelse unreachable;
            if (fastest_of_the_slow.ns > ns) {
                // the test was faster than our fastest slow test, don't add
                return ns;
            }
        }
        // the previous fastest of our slow tests, has been pushed off.
        _ = slowest.popMin();
        slowest.push(self.allocator, TestInfo{ .ns = ns, .name = test_name }) catch @panic("failed to track test timing");
        return ns;
    }
    // Prints the retained slow tests, fastest first.
    fn display(self: *SlowTracker) !void {
        // Pops from a shallow copy of the queue; the backing buffer is shared
        // with self.slowest. Acceptable here since display() is only called
        // once, just before deinit() — confirm if that ever changes.
        var slowest = self.slowest;
        const count = slowest.count();
        Printer.fmt("Slowest {d} test{s}: \n", .{ count, if (count != 1) "s" else "" });
        while (slowest.popMin()) |info| {
            const ms = @as(f64, @floatFromInt(info.ns)) / 1_000_000.0;
            Printer.fmt("  {d:.2}ms\t{s}\n", .{ ms, info.name });
        }
    }
    fn compareTiming(context: void, a: TestInfo, b: TestInfo) std.math.Order {
        _ = context;
        return std.math.order(a.ns, b.ns);
    }
};
// Runner configuration, read once from the process environment:
//   TEST_VERBOSE    - per-test name + timing lines (default: true)
//   TEST_FAIL_FIRST - stop at the first failing test (default: false)
//   TEST_FILTER     - only run tests whose name contains this substring
const Env = struct {
    verbose: bool,
    fail_first: bool,
    filter: ?[]const u8,

    fn init(map: *const std.process.Environ.Map) Env {
        return .{
            .verbose = readEnvBool(map, "TEST_VERBOSE", true),
            .fail_first = readEnvBool(map, "TEST_FAIL_FIRST", false),
            .filter = map.get("TEST_FILTER"),
        };
    }

    // A boolean env var is true only when set to "true" (case-insensitive);
    // unset falls back to `deflt`, any other value reads as false.
    fn readEnvBool(map: *const std.process.Environ.Map, key: []const u8, deflt: bool) bool {
        const raw = map.get(key) orelse return deflt;
        return std.ascii.eqlIgnoreCase(raw, "true");
    }
};
// Custom panic handler: when a test is running, prefixes the default panic
// output with that test's name (in red, framed by BORDER) so the crash is
// attributable, then delegates to std.debug.defaultPanic.
pub const panic = std.debug.FullPanic(struct {
    pub fn panicFn(msg: []const u8, first_trace_addr: ?usize) noreturn {
        if (current_test) |ct| {
            std.debug.print("\x1b[31m{s}\npanic running \"{s}\"\n{s}\x1b[0m\n", .{ BORDER, ct, BORDER });
        }
        std.debug.defaultPanic(msg, first_trace_addr);
    }
}.panicFn);
// Reports whether `t` is a compiler-generated unnamed test, i.e. its name
// contains ".test_" followed by a decimal index (e.g. "module.test_0").
fn isUnnamed(t: std.builtin.TestFn) bool {
    const marker = ".test_";
    const pos = std.mem.indexOf(u8, t.name, marker) orelse return false;
    const suffix = t.name[pos + marker.len ..];
    _ = std.fmt.parseInt(u32, suffix, 10) catch return false;
    return true;
}
// Reports whether `t` is a special setup test, run once before all others.
fn isSetup(t: std.builtin.TestFn) bool {
    const suffix = "tests:beforeAll";
    return std.mem.endsWith(u8, t.name, suffix);
}
// Reports whether `t` is a special teardown test, run once after all others.
fn isTeardown(t: std.builtin.TestFn) bool {
    const suffix = "tests:afterAll";
    return std.mem.endsWith(u8, t.name, suffix);
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Thanks for the wonderful test runner!
I personally added this line to mine to avoid trying to run on files with no tests so that it doesn't clog the output.