I hereby claim:
- I am mutaku on github.
- I am mutaku (https://keybase.io/mutaku) on keybase.
- I have a public key ASCnrkSM4KJHlFnykV2setFw4AzJYt0HSY6-IUc1g-VClwo
To claim this, I am signing this object:
""" | |
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy) | |
BSD License | |
""" | |
import numpy as np | |
# data I/O | |
data = open('input.txt', 'r').read() # should be simple plain text file | |
chars = list(set(data)) | |
data_size, vocab_size = len(data), len(chars) |
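For context, the usual next step after this preamble is to build lookup tables between characters and integer indices (a sketch of that step; the remainder of the gist is not reproduced here):

# Sketch (assumed continuation): map each character to an integer index and back,
# so the text can be encoded as index sequences / one-hot vectors for the RNN.
char_to_ix = {ch: i for i, ch in enumerate(chars)}
ix_to_char = {i: ch for i, ch in enumerate(chars)}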
Latency Comparison Numbers
--------------------------
L1 cache reference                       0.5 ns
Branch mispredict                          5 ns
L2 cache reference                         7 ns                 14x L1 cache
Mutex lock/unlock                         25 ns
Main memory reference                    100 ns                 20x L2 cache, 200x L1 cache
Compress 1K bytes with Zippy           3,000 ns        3 us
Send 1K bytes over 1 Gbps network     10,000 ns       10 us
Read 4K randomly from SSD*           150,000 ns      150 us     ~1GB/sec SSD
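A quick arithmetic check of the relative multiples quoted in the table (an illustrative snippet, not part of the original list):

# latencies in nanoseconds, taken from the table above
l1_ref, l2_ref, mem_ref = 0.5, 7.0, 100.0
print(l2_ref / l1_ref)    # 14.0  -> one L2 reference costs ~14 L1 references
print(mem_ref / l1_ref)   # 200.0 -> one main memory reference costs ~200 L1 references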
import numpy as np
import dtw


class DTW_WINDOW(object):
    """Perform Dynamic Time Warping between two sequences"""
    def __init__(self, v1, v2, dist=dtw.DISTANCES['euclidean'], win=50):
        set1, set2 = np.asarray(v1), np.asarray(v2)
        # start every cell at an effectively infinite cost, then seed the origin
        self.cost_matrix = sys.maxsize * np.ones((set1.size, set2.size))
        self.cost_matrix[0, 0] = dist(set1[0], set2[0])
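The class body is truncated above. For context, here is a self-contained sketch of the standard windowed (Sakoe-Chiba) DTW recurrence that such an initialization usually feeds into; this illustrates the algorithm, not the original class's remaining code, and the helper name dtw_window_cost is hypothetical:

import numpy as np

def dtw_window_cost(v1, v2, dist=lambda a, b: abs(a - b), win=50):
    # Sketch of the standard windowed DTW recurrence (assumed, for illustration).
    s1, s2 = np.asarray(v1, dtype=float), np.asarray(v2, dtype=float)
    cost = np.full((s1.size, s2.size), np.inf)
    cost[0, 0] = dist(s1[0], s2[0])
    # first column and first row accumulate straight down / across
    for i in range(1, s1.size):
        cost[i, 0] = cost[i - 1, 0] + dist(s1[i], s2[0])
    for j in range(1, s2.size):
        cost[0, j] = cost[0, j - 1] + dist(s1[0], s2[j])
    # remaining cells, restricted to a band of width `win` around the diagonal
    for i in range(1, s1.size):
        for j in range(max(1, i - win), min(s2.size, i + win)):
            cost[i, j] = dist(s1[i], s2[j]) + min(cost[i - 1, j],      # insertion
                                                  cost[i, j - 1],      # deletion
                                                  cost[i - 1, j - 1])  # match
    return cost[-1, -1]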
from __future__ import division
import numpy as np
import pandas as pd


def standardization(x, args):
    """Zero mean and unit variance scaling"""
    return (x - args['mean']) / args['std']


def rescaling(x, args):
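    # Assumed body (the original is truncated here): the usual min-max
    # rescaling to [0, 1], with 'min' and 'max' keys analogous to the
    # 'mean'/'std' keys used by standardization above.
    return (x - args['min']) / (args['max'] - args['min'])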
In [20]: m = np.zeros((5, 5))

In [21]: for x in combinations(range(6), 2):
    ...:     m[(x[0], x[1] - 1)] = 3
    ...:

In [22]: m
Out[22]:
array([[ 3.,  3.,  3.,  3.,  3.],
       [ 0.,  3.,  3.,  3.,  3.],
       [ 0.,  0.,  3.,  3.,  3.],
       [ 0.,  0.,  0.,  3.,  3.],
       [ 0.,  0.,  0.,  0.,  3.]])
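The same pattern can be produced directly with numpy instead of looping over index pairs (an equivalent one-liner for illustration, not part of the original session):

import numpy as np

# every cell on or above the diagonal gets 3, matching the loop above
m = 3 * np.triu(np.ones((5, 5)))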
import pandas as pd
from ast import literal_eval

df = pd.read_csv('algorithms/parameterclustering/data/Parameters/M23_analyzed.csv',
                 index_col=0,
                 # pandas writes the vector list as a str of a list, so we have to convert back
                 converters={'vector': literal_eval})
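To see why the converter is needed, here is a small illustrative round trip (toy data, not the original file): a list-valued column is written by to_csv as its string representation, and comes back as a plain str unless it is parsed.

import io
import pandas as pd
from ast import literal_eval

buf = io.StringIO()
pd.DataFrame({'vector': [[1.0, 2.5, 3.1]]}).to_csv(buf)   # cell is written as "[1.0, 2.5, 3.1]"
buf.seek(0)
df_back = pd.read_csv(buf, index_col=0, converters={'vector': literal_eval})
print(type(df_back.vector.iloc[0]))  # <class 'list'>, not str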
# Let's build out the first cluster (0) of the first run (1)
# Calculate sums of diffs for all parameters in vector
grouped = df.groupby(['run', 'cluster'])
cluster_compare = pd.DataFrame(columns=('cluster', 'sums', 'cv'))
for group in [grouped.get_group((1, x))
              for x in range(max(df[df.run == 1].cluster))
              if len(grouped.get_group((1, x))) > 1]:
    for p in range(len(group.vector.iloc[0])):
        sums = list()
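The rest of the loop is cut off above; for reference, here is a small self-contained illustration of the groupby / get_group pattern it relies on, using toy data and hypothetical values:

import pandas as pd

# toy frame mimicking the (run, cluster, vector) layout assumed above
toy = pd.DataFrame({'run':     [1, 1, 1, 1],
                    'cluster': [0, 0, 1, 1],
                    'vector':  [[0.1, 0.2], [0.2, 0.3], [1.0, 1.1], [0.9, 1.2]]})
toy_grouped = toy.groupby(['run', 'cluster'])
print(toy_grouped.get_group((1, 0)).vector.tolist())  # [[0.1, 0.2], [0.2, 0.3]]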
import copy
import operator
from itertools import groupby

r = rois.all[38]
test_mask = copy.copy(r.metrics.flash_mask)  # + [108, 109, 111]
print(test_mask)

# Calculate periodicity approximation
# Method 1: Select max drop for each cluster
# |"```"|`
# -*****-* -> 5 points
working_mask_m1 = copy.copy(test_mask)
working_mask_m1_grouping = [list(map(operator.itemgetter(1), g))
                            for k, g in groupby(enumerate(working_mask_m1),
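The groupby call above is cut off mid-expression; the usual idiom for splitting a sorted index mask into runs of consecutive values looks like this (standalone illustration with toy values, not the original data):

import operator
from itertools import groupby

mask = [3, 4, 5, 9, 10, 14]
# index minus value is constant within a run of consecutive integers,
# so it works as the grouping key
runs = [list(map(operator.itemgetter(1), g))
        for k, g in groupby(enumerate(mask), lambda ix: ix[0] - ix[1])]
print(runs)  # [[3, 4, 5], [9, 10], [14]]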
import numpy as np
from scipy.stats import norm


def mad(data, b=None):
    """Median Absolute Deviation of data"""
    if b is None:
        # consistency constant: makes the scaled MAD estimate the standard
        # deviation for normally distributed data
        b = 1 / norm.ppf(0.75)
    data = np.asarray(data)
    median_of_data = np.median(data)
    median_distance_from_median = np.median(np.abs(data - median_of_data))
    return b * median_distance_from_median
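A quick usage note: with the default b = 1/Φ⁻¹(0.75) ≈ 1.4826, the scaled MAD is a robust estimate of the standard deviation for roughly normal data. A small self-contained check (illustrative, not from the original gist):

import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(0)
sample = rng.normal(loc=0.0, scale=2.0, size=100_000)
estimate = (1 / norm.ppf(0.75)) * np.median(np.abs(sample - np.median(sample)))
print(round(estimate, 2))  # close to 2.0, the true standard deviation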