Skip to content

Instantly share code, notes, and snippets.

@toinetoine
toinetoine / datastructs.h
Created February 28, 2016 07:18
datastructs.h
/* datastructs.h — LED animation frame definitions. */

#define LED_COUNT (11)     /* total number of addressable LEDs */
#define LED(n) (1 << (n))  /* single-bit mask selecting LED n */
/*
 * BUG FIX: the original read "#define ALL_LEDS(LED(LED_COUNT)-1)".
 * Without a space after ALL_LEDS, the preprocessor parses it as a
 * function-like macro with the invalid parameter list "LED(LED_COUNT)-1",
 * which is a compile error. The space makes it an object-like macro
 * expanding to the mask with all LED_COUNT low bits set.
 */
#define ALL_LEDS (LED(LED_COUNT) - 1)

/* One animation sequence: per-step LED bitmasks plus a resolution value. */
struct frame {
	unsigned short *data;    /* array of LED bitmasks, one per step */
	unsigned int resolution; /* NOTE(review): presumably step count or timing — confirm against users of struct frame */
};
#include "datastructs.h"

/* Element count of a statically-sized array (do not use on pointers). */
#define nitems(x) (sizeof(x) / sizeof((x)[0]))

/*
 * Spiral animation data: step k lights exactly LED k, so playing the
 * frames in order walks a single lit LED around the ring.
 */
static unsigned short spiral_data[LED_COUNT] = {
	LED(0), LED(1), LED(2),  LED(3),
	LED(4), LED(5), LED(6),  LED(7),
	LED(8), LED(9), LED(10),
};
struct frame spiral {
@toinetoine
toinetoine / reddit_top_search.js
Created October 15, 2015 21:42
Custom Reddit Top Search
// Search window: JS Date months are 0-indexed, so 5 = June and 8 = September.
var startTime = new Date(2015, 5, 1, 0, 0, 0, 0); // june 1st 2015
var endTime = new Date(2015, 8, 1, 0, 0, 0, 0); // sept 1st 2015
// Kick off the search; the window is passed as Unix epoch seconds.
// NOTE(review): null page id presumably means "start from the first
// results page" — confirm against searchPages.
searchPages(["microsoft", ".net"],
{start: Math.floor(startTime.getTime() / 1000), end: Math.floor(endTime.getTime() / 1000)},
null);
// Global kill switch, flipped after 5 minutes (300000 ms); presumably
// polled by searchPages to stop paging — confirm in its body.
var killProcess = false;
setTimeout(function() { killProcess = true;}, 300000);
function searchPages(searchTerms, timeInterval, redditPageId) {
@toinetoine
toinetoine / create.py
Created October 2, 2015 02:45
Create lynx timetables
station_times = list()
station_names = list()
def load_station_names(filename):
times_file = open(filename)
for line in times_file:
# skip empty lines
if len(line.strip()) > 0:
line_elements = line.split(" ")
# first append the station names
import math
from math import *
# utility for calculating the likelihood of having a value as many sd's
# away from the mean as the value is
def phi(val, mean, standard_deviation):
    """Two-tailed tail probability: the likelihood of drawing a value at
    least as many standard deviations away from the mean as ``val`` is.

    Assumes ``standard_deviation`` is nonzero (the division is unguarded).
    """
    z = float(abs(mean - val) / standard_deviation)
    # CDF evaluated at -z and +z via the error function.
    lower_tail = (1.0 + erf(-1.0 * z / sqrt(2.0))) / 2.0
    upper_tail = (1.0 + erf(z / sqrt(2.0))) / 2.0
    # Mass below -z plus mass above +z.
    return lower_tail + (1.0 - upper_tail)
import random
import json
data = {}
data['dataset'] = list()
# add each user to the dataset
for user_i in range(200):
new_user = {}
new_user["id"] = str(user_i)
@toinetoine
toinetoine / kmeans.py
Created September 15, 2015 16:17
kmeans classification with evenly-distributed initial mean selection for user pages-liked data for http://stats.stackexchange.com/questions/167922/classify-users-by-the-pages-they-liked
import json
# read data from the json file
def read_data(filename):
# open the file and read data
with open(filename) as data_file:
data = json.load(data_file)
# for each item in the file add their id and data as a
@toinetoine
toinetoine / data.json
Last active September 15, 2015 16:22
Data input for user page likes for kmeans.py (https://gist.github.com/Antoine-D/eb1f76efdea2d319e30d)
{
"dataset":[
{
"data":[
"10.com",
"9.com",
"9.com"
],
"id":"0"
},
@toinetoine
toinetoine / items.txt
Last active September 11, 2015 18:26
Reading items file (Stack Exchange answer for http://stats.stackexchange.com/questions/172100)
Item Search_Vol Competition Bid
item 1 10,000 17% $1.03
item 2 20,000 25% $2.7
item 3 5,000 5% $0.8
@toinetoine
toinetoine / items_ranking.py
Last active September 11, 2015 18:26
Reading items file (Stack Exchange answer for http://stats.stackexchange.com/questions/172100)
# parse the input file in format:
# more than one space between columns
def parse_item_file(items_info_filename):
items_file = open(items_info_filename, "r")
items_info = list()
column_names = list()
first_parsed = False
for item_row in items_file:
# grab the items in the row place in item_components list