JS Asynchronous Test Runner, on the browser
<!DOCTYPE html>
<html>
<head>
  <title>Running asynchronous tests on the browser ...</title>
</head>
<body>
  <ul id="alltests">
  </ul>
  <ul id="teststats">
    <li>Passed <span id="passed"></span></li>
    <li>Failed <span id="failed"></span></li>
    <li>Running <span id="running"></span></li>
  </ul>
  <p id="finished" style="display: none;">FINISHED!</p>
  <button id="runtests">Run tests</button>
  <script type="text/javascript">
  function TestRunner(ptests, testsViewer) {
    testsViewer = testsViewer || {};
    // No parameter validation for simplicity
    var tests = [];
    var self = this;

    // Constants
    // Test status
    self.PASSED = 'Passed';
    self.FAILED = 'Failed';
    self.RUNNING = 'Running';
    self.NOT_STARTED = 'Not Started Yet';
    // Stats fields other than status
    self.TOTAL = 'Total';
    self.FINISHED = 'Finished';

    // How long we wait for the whole test suite to run.
    // Would have to be adjusted to actual usage.
    self.testsTimeout = 30 * 1000;

    self.addTest = function(test) {
      var newTest = {
        'id': '_test' + tests.length, // simple id generation
        'description': test.description,
        'run': test.run,
        'status': self.NOT_STARTED
      };
      tests.push(newTest);
      testsViewer.testAdded && testsViewer.testAdded(self, newTest);
    };

    function registerTestEnd(test, result) {
      // A test passes only when its callback is called with exactly true.
      if (result === true) {
        test.status = self.PASSED;
      } else {
        test.status = self.FAILED;
      }
      testsViewer.testEnded && testsViewer.testEnded(self, test);
    }

    function runTest(test) {
      test.status = self.RUNNING;
      testsViewer.beforeRun && testsViewer.beforeRun(self, test);
      // Create a new closure for each running test.
      try {
        test.run(function(result) {
          registerTestEnd(test, result);
        });
      } catch (e) {
        console.log('Error on test ', test.description, e);
        registerTestEnd(test, false);
      }
    }

    function failLongRunningTests() {
      tests.forEach(function(test) {
        if (test.status === self.RUNNING) {
          console.log('Timeout on test ', test.description);
          registerTestEnd(test, false);
        }
      });
    }

    function calcStats() {
      var stats = {};
      stats[self.TOTAL] = tests.length;
      tests.forEach(function(test) {
        stats[test.status] = (stats[test.status] || 0) + 1;
      });
      var totalFinished = (stats[self.PASSED] || 0) + (stats[self.FAILED] || 0);
      if (tests.length > 0 && tests.length === totalFinished) {
        stats[self.FINISHED] = true;
      }
      return stats;
    }
    // This could be optimized with a cache, or by calculating just the difference, if that ever gets important.
    self.getStats = calcStats;

    self.runAll = function() {
      console.log('Running all tests');
      setTimeout(failLongRunningTests, self.testsTimeout);
      tests.forEach(runTest);
      console.log('All tests launched.');
    };

    for (var i = 0; i < ptests.length; i++) {
      self.addTest(ptests[i]);
    }
  }
  function HtmlTestViewer(runtestsCallback) {
    var testList = document.getElementById("alltests");
    var statsPassed = document.getElementById("passed");
    var statsFailed = document.getElementById("failed");
    var statsRunning = document.getElementById("running");
    var allFinished = document.getElementById("finished");
    var runtestsButton = document.getElementById("runtests");

    runtestsButton.onclick = function() {
      runtestsButton.disabled = true;
      runtestsCallback();
    };

    function showStats(runner) {
      var stats = runner.getStats();
      statsPassed.innerHTML = stats[runner.PASSED] || '-';
      statsFailed.innerHTML = stats[runner.FAILED] || '-';
      statsRunning.innerHTML = stats[runner.RUNNING] || '-';
      if (stats[runner.FINISHED]) {
        allFinished.style.display = 'block';
      }
    }

    function showStatus(runner, test) {
      var elmt = document.getElementById(test.id + "_status");
      elmt.innerText = test.status;
      if (test.status === runner.FAILED) {
        elmt.style.background = "red";
      } else if (test.status === runner.PASSED) {
        elmt.style.background = "green";
      } else {
        elmt.style.background = "";
      }
      showStats(runner);
    }

    this.testAdded = function(runner, test) {
      var ttt = document.createElement("LI");
      ttt.innerHTML = test.description + " <span id='" + test.id + "_status'>" + test.status + "</span>";
      testList.appendChild(ttt);
      showStats(runner);
    };
    this.testEnded = showStatus;
    this.beforeRun = showStatus;
  }
  function runAllTests() {
    testRunner.runAll();
  }
  var htmlTestViewer = new HtmlTestViewer(runAllTests);

  function generateDummyTest() {
    var delay = 7000 + Math.random() * 7000;
    var testPassed = Math.random() > 0.5;
    return function(callback) {
      setTimeout(function() {
        callback(testPassed);
      }, delay);
    };
  }

  var tests = [
    { description: "commas are rotated properly", run: generateDummyTest() },
    { description: "exclamation points stand up straight", run: generateDummyTest() },
    { description: "run-on sentences don't run forever", run: generateDummyTest() },
    { description: "question marks curl down, not up", run: generateDummyTest() },
    { description: "semicolons are adequately waterproof", run: generateDummyTest() },
    { description: "capital letters can do yoga", run: generateDummyTest() }
  ];
  var testRunner = new TestRunner(tests, htmlTestViewer);
  </script>
</body>
</html>
Showing, as part of the process, the state of the notes:
Very simple first, make complicated later!
Engine:
done minimal test runner first, with console.log, for quick feedback cycle (code, refresh, validate)
Have:
done one passing,
done one failing,
done one exception,
done one never calls done.
done make tests asynchronous
done try catch to get errors when running test, before async
done test timeout, for errors that never call done
done no return true (not asked for), so only done - test passes only if return === true or done called with === true(?)
done keep (or calculate) stats on runner.
done show stats after each action (add test, run test finish test)
done show finished if all tests ended.
done test engine with supplied tests
(2h work up to here)
UI
done separate UI, extracting all(most) console.logs to be on UI. -> inject viewer on runner.
done actual callbacks: add one, run one, finish one, and a method to retrieve stats. callbacks for add one, run one, return one, finish all.
experiment UI, get simple one... (divs?)
done UI made to run tests once, so no reset for now.
done show test text, button, state
done jquery? or plain JS locators!
done button disabled if tests running
(1h:15 for the UI)
send in gist, live on firebase ...
future steps:
call runTest on timeout so one long test doesn't keep the others from starting?
(not asked for, even if useful ...) show error trace somewhere, if exception? Especially if "done" called with exception.
setup on cloud9
detect if test has parameter, meaning it is async
pass test if it returns true(?) -- no, the real way this is done is with assertions, not return values ...
ianribas commented Mar 9, 2016

This is the explanation of how I developed the solution to this problem. It is important to note that this is a very bare-bones solution, both in terms of code organization and, especially, of user interface, but I hope that it is good enough to give an idea of my development process. To keep it simple, I kept it all in a single HTML file, with all the code together in the same script tag.

Methodology

I'd like to point out some aspects of the methodology I used. Since the idea was to show how I solved the problem, and not only what solution I came up with, I tried to commit and push all major steps on my way to the final code. One other thing I decided to share was my "todo" list for the solution, where I kept things I thought about but wouldn't handle right at that moment. Unfortunately the commit comments are not there, but looking at the code and todo changes from the beginning to the final solution should give a pretty good idea of my process. The todo list also shows some features I thought of but decided not to implement.

To show this actually works, the link to a working version of the final solution is: https://blazing-inferno-2793.firebaseapp.com/test.html.

Description of the process

General definitions

I needed a quick feedback cycle (which I usually get using TDD), but creating unit tests here didn't seem very straightforward. So what I did was use a code, refresh, validate cycle: I did some coding, switched to the browser, refreshed the page and looked at the result to see that it worked. This way, programming errors and exceptions were easily corrected, since they were usually related to changes I had just made.

I separated the solution into two distinct modules: the test runner (which I called the engine) and the UI. At first I thought I'd need to work on some UI first, to have a feedback cycle, but then I decided to just use the browser console for feedback and worry about the UI later.

To simplify the development, I made a couple of initial decisions:

  • Keep a local copy of test data, so as not to alter what was received as parameter.
  • Do no input parameter validation.
  • Have an HTML UI, but a very bare-bones one, with no styling at all.
  • First separate UI code from engine, then add code to create HTML UI.

The test runner

To validate the behavior of the test runner I created fake deterministic tests to run, to be sure that the errors and problems were in my code and not in the tests. Then, in good TDD style, I iteratively added new fake tests for every new behavior I was implementing, and focused on getting that new behavior right before moving on. Since I kept all the fake tests, they assured me that new development didn't break what was done before. That allowed me to work in a red, green, refactor cycle, even if I didn't really do much refactoring, since the solution is very bare-bones.

In this manner, I added the following features:

  • handle a passing test
  • handle a failing test
  • handle a test that throws an exception
  • handle a test that calls its callback asynchronously
  • handle a test that never calls its callback
  • manage test run statistics on the test runner

The handling of the asynchronous tests actually worked without requiring any specific change. For the statistics management, I chose to recalculate them every time, considering that this could be optimized if and when that became necessary. The deterministic tests for each feature are no longer present in the code, but they can be seen in previous revisions; see the sketch below. The statistics feature required no particular test to validate, just more reporting.
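
For reference, here is a minimal sketch of what those deterministic fake tests could look like; the descriptions, delays and error message are illustrative (the originals are in the earlier revisions), but the callback convention matches the runner above, which passes a test only when its callback is called with exactly true:

  var deterministicTests = [
    // Passes: eventually calls the callback with exactly true.
    { description: "passing test", run: function(done) {
      setTimeout(function() { done(true); }, 100);
    } },
    // Fails: calls the callback with something other than true.
    { description: "failing test", run: function(done) {
      setTimeout(function() { done(false); }, 100);
    } },
    // Throws synchronously: caught by the try/catch in runTest.
    { description: "test with exception", run: function(done) {
      throw new Error("boom");
    } },
    // Never calls the callback: failed by failLongRunningTests on timeout.
    { description: "test that never calls done", run: function(done) {} }
  ];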

Having confidence that everything worked for my deterministic tests, I tried the test runner on the provided random tests. As everything worked for those too, it was time to move on to the UI.

The user interface

The first step was to create a console UI, with the same behavior as what was already in place in the test runner, but in a separate object. This way I could identify what interface the UI object needed and where to call it. I kept both behaviors to check that adding the viewer (UI) didn't break anything. When the console UI was working, I cleaned up the test runner; a sketch of that viewer follows.
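
The console viewer is no longer in the final code, but assuming the same callback names the HTML viewer implements (testAdded, beforeRun and testEnded), a minimal sketch of it would look something like this:

  function ConsoleTestViewer() {
    this.testAdded = function(runner, test) {
      console.log('Added test:', test.description);
    };
    this.beforeRun = function(runner, test) {
      console.log('Running test:', test.description);
    };
    this.testEnded = function(runner, test) {
      console.log(test.status + ':', test.description, runner.getStats());
    };
  }

It plugs into the runner the same way the HTML viewer does: new TestRunner(tests, new ConsoleTestViewer()).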

To create the actual HTML UI, I did some rapid prototyping, a spike, directly in the browser console and the HTML, to understand what would work and how to interact with it. This is useful for user interfaces, which place most of their importance on visual aspects and are hard to unit test. For example, I started with divs but later settled on ul/li's for the lists (tests and statistics).

With this knowledge, I then created the final HTML UI object, which receives the callbacks from the test runner and reflects the state changes on the page. I tested this UI with my deterministic tests and, when all was ready, reverted to the provided random tests. Then, all there was left to do was clean up the code and write this text!
