# notable flaws:
# totally unorthodox method for training embeddings (see lines 78, 81, and 82)
# extremely inefficient demo evaluation every 100,000 training steps (see lines 125-140)
# surprises:
# was actually extremely effective on small data sets
from __future__ import print_function
import tensorflow as tf
import numpy as np
import re, random, sys
View output
hdon@behemoth:~/src/git/ML-Tests/text-processing$ node --version
hdon@behemoth:~/src/git/ML-Tests/text-processing$ node test-stream-promise-inverter.js
inverter caught transformer promise rejection:
Error: this is handled but also unhandled!
at fs.createReadStream.pipe.Inverter (/home/hdon/src/git/ML-Tests/text-processing/test-stream-promise-inverter.js:9:9)
at <anonymous>
at process._tickCallback (internal/process/next_tick.js:188:7)
(node:8082) UnhandledPromiseRejectionWarning: Unhandled promise rejection (rejection id: 1): Error: this is handled but also unhandled!
(node:8082) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code.
View word2vec-extract-vocab.js
* Some examples of word2vec binary format:
* stream-promise-inverter:
* compare to JUST word2vec format decode without stream-promise-inverter:
View redux-shell.js
#!/usr/bin/env node
var util = require('../lib/util');
var fs = require('fs');
var path = require('path');
var opt;
var reducer;
var reducerFunction;
var transpiledModulePath;
opt = require('node-getopt').create([
View utils.js
var fs = require('fs');
var path = require('path');
var mkdirp = require('mkdirp');
var resolve = require('resolve');
function compileModule(babel, modulePath) {
var visitedFiles = {};
return _compileModule(modulePath);
View foo.js
var ll = new LazyLoad({
container: document.body
document.querySelector('#search').addEventListener('keypress', (ev) => {
var query =;
setTimeout(() => {'body > a'), a => { =
( query.length == 0
#import bleach # learn more:
from bs4 import BeautifulSoup as bs
from bs4 import SoupStrainer as ss
import urllib as ul
import urllib.request
import re
#TODO: spider to crawl for urls
earl = ''

For the given schema:

create table foo (a int not null, b int not null, primary key (a, b));
create table bar (a int, b int, c int, foreign key foo_fk (a, b) references foo (a, b));

I want to join on a and b, where there may be any number of b values for each a.

I can only come up with three solutions: