## Bookshelf
- AIMA (Russell & Norvig, *Artificial Intelligence: A Modern Approach*), chapters 22-23
- Bishop (*Pattern Recognition and Machine Learning*), chapter 13 (sequential data)
- *Introduction to Information Retrieval* (Manning, Raghavan & Schütze)
- CGEL (*The Cambridge Grammar of the English Language*): optional, but good for background
package main

// This program takes a .zim file and dumps all contained articles below the
// current directory. I tested it with a Wikipedia snapshot; that snapshot did
// not contain deleted articles or LinkTargetEntry records, so I'm unsure how
// to handle those and am ignoring them for now.
//
// Redirects are handled by simply writing out the page pointed to by the
// redirect. IPFS deduplication should take care of the copies, so I think this
// is the most economical solution, even better than writing out small HTML
// files with a redirect.
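A minimal sketch of that redirect strategy, assuming a hypothetical reader interface (Entry, URL, IsRedirect, FollowRedirect, and Data are stand-ins for whatever zim library the program actually uses, not a real API): resolve each redirect to its target, write the target's bytes under the redirect's own URL, and let IPFS deduplicate the identical contents.

import (
    "os"
    "path/filepath"
)

// Entry is a hypothetical stand-in for a zim reader's article handle.
type Entry interface {
    URL() string           // path of the article inside the archive
    IsRedirect() bool      // true if the entry only points at another entry
    FollowRedirect() Entry // the entry this redirect points to
    Data() ([]byte, error) // raw article bytes
}

// dumpEntry writes one article below the current directory. For a redirect it
// writes the *target's* content under the redirect's own URL; IPFS then
// deduplicates the identical file contents.
func dumpEntry(e Entry) error {
    target := e
    for i := 0; target.IsRedirect() && i < 8; i++ { // bound redirect chains defensively
        target = target.FollowRedirect()
    }
    data, err := target.Data()
    if err != nil {
        return err
    }
    path := filepath.Clean(e.URL())
    if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
        return err
    }
    return os.WriteFile(path, data, 0o644)
}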
#!/bin/sh
# https://nixos.org/wiki/How_to_install_nix_in_home_%28on_another_distribution%29#PRoot_Installation
OPT=$HOME/opt
ARCH=x86_64
VER_NIX=nix-1.10-$ARCH-linux
URL_NIX=http://hydra.nixos.org/build/25489771/download/1/$VER_NIX.tar.bz2
mkdir -p "$OPT/bin"   # -p: don't fail if the directories already exist
cd "$OPT/bin" && wget "http://static.proot.me/proot-$ARCH" && chmod u+x "proot-$ARCH"
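# The script as found stops after fetching PRoot. Per the wiki page linked
# above, the remaining steps are roughly the following (a sketch: the unpacked
# directory name is an assumption, adjust the bind path to wherever the
# tarball actually extracts):
cd "$OPT" && wget "$URL_NIX" && tar xjf "$VER_NIX.tar.bz2"
# Bind the unpacked tree to /nix and start a shell inside PRoot.
"$OPT/bin/proot-$ARCH" -b "$OPT/$VER_NIX:/nix" /bin/sh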
[
  "abstract.coffee",
  "animal.coffee",
  "applause.coffee",
  "aww.coffee",
  "bang-bang.coffee",
  "base58.coffee",
  "base64.coffee",
  "botsnack.coffee",
  "coin.coffee",
/* passable motion blur effect using frame blending
 * basically move your 'draw()' into 'sample()'; time runs from 0 to 1
 * by dave
 * http://beesandbombs.tumblr.com
 */

int samplesPerFrame = 32; // more is better but slower; 32 is probably enough
int numFrames = 48;
float shutterAngle = 2.0; // realistically between 0 and 1; exaggerated here for effect
int[][] result; // per-pixel RGB sums, accumulated across the samples of one frame
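The blending the header describes would look roughly like this (a sketch, not dave's actual template: sample() is assumed to draw the scene at normalized time t, and result is assumed to hold width*height rows of 3 channels):

void draw() {
  if (result == null) result = new int[width*height][3];
  for (int[] px : result) px[0] = px[1] = px[2] = 0;

  // Render several time-jittered samples within this frame's shutter window
  // and accumulate their pixel colors.
  for (int s = 0; s < samplesPerFrame; s++) {
    float t = map(frameCount - 1 + s * shutterAngle / samplesPerFrame, 0, numFrames, 0, 1);
    sample(t % 1);
    loadPixels();
    for (int i = 0; i < pixels.length; i++) {
      result[i][0] += pixels[i] >> 16 & 0xff;
      result[i][1] += pixels[i] >> 8 & 0xff;
      result[i][2] += pixels[i] & 0xff;
    }
  }

  // Average the accumulated samples back into the displayed frame.
  loadPixels();
  for (int i = 0; i < pixels.length; i++)
    pixels[i] = color(result[i][0] / samplesPerFrame,
                      result[i][1] / samplesPerFrame,
                      result[i][2] / samplesPerFrame);
  updatePixels();
}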
#!/usr/bin/env python3
import requests

# Fetch today's top posts from r/EarthPorn and redirect (CGI-style) to the
# first image that is at least 4000px wide.
d = requests.get('https://www.reddit.com/r/EarthPorn/top/.json',
                 headers={'User-agent': 'IOTD bot'}).json()
for c in d['data']['children']:
    src = c['data']['preview']['images'][0]['source']
    if src['width'] >= 4000:
        # The trailing '\n' plus print's own newline ends the CGI header block.
        print('Location: ' + src['url'] + '\n')
        break
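One caveat worth noting: reddit serves the preview source URLs HTML-escaped, so ampersands arrive as `&amp;`. If the redirect target misbehaves, unescaping them first is the likely fix, something like:

import html
url = html.unescape(src['url'])  # turn '&amp;' back into '&' before redirecting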
var jsdom = require("jsdom").jsdom;
var Readability = require("./index").Readability;

// Strip HTML comment nodes from the whole subtree so they don't interfere
// with parsing.
function removeCommentNodesRecursively(node) {
  for (var i = node.childNodes.length - 1; i >= 0; i--) {
    var child = node.childNodes[i];
    if (child.nodeType === child.COMMENT_NODE) {
      node.removeChild(child);
    } else if (child.nodeType === child.ELEMENT_NODE) {
      removeCommentNodesRecursively(child);
    }
  }
}
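Wired together, usage would look something like the following (a sketch: the single-argument Readability constructor matches the current mozilla/readability API, but older releases also took a URI argument, and jsdom's entry points vary by version):

var html = "<html><body><!-- nav boilerplate --><article><p>Hello</p></article></body></html>";
var doc = jsdom(html);               // old-style jsdom() call matching the require above
removeCommentNodesRecursively(doc);  // comments would otherwise pollute the content scoring
var article = new Readability(doc).parse();
if (article) console.log(article.title, article.textContent);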