I hereby claim:
- I am mrubash1 on github.
- I am mrubash1 (https://keybase.io/mrubash1) on keybase.
- I have a public key ASDSagzJIK0kOintkYJLPoVm59TF1VcxnwIwR-mAC-BcWgo
To claim this, I am signing this object:
# Install SSHFS:
sudo apt-get install sshfs
# Make a local directory to mount the volume to:
sudo mkdir /mnt/aws
# Mount the remote EC2 home directory onto /mnt/aws.
# Note: make sure you use absolute paths for all of the directories.
# allow_other lets users other than the mounting user access the mount;
# IdentityFile points at the EC2 key pair used for the ssh connection.
sudo sshfs -o allow_other,IdentityFile=/home/yourname/.ssh/key.pem ubuntu@ec2-XX-XXX-XX-XX.us-west-2.compute.amazonaws.com:/home/ubuntu /mnt/aws
# Parameter sets for generating different images: both runs share the same
# source image (img0) and objective filter (139) and differ only in
# octave_scale (1.4 vs 1.0). A fresh uuid names each run's output.
# NOTE(review): img0 and the rendering function are defined elsewhere in the
# original notebook; this snippet is truncated right after the timer starts.
parameters = [
    {'name': uuid.uuid4(), 't_obj_filter': 139, 'img0': img0, 'iter_n': 10,
     'step': 1.5, 'octave_n': 4, 'octave_scale': 1.4},
    {'name': uuid.uuid4(), 't_obj_filter': 139, 'img0': img0, 'iter_n': 10,
     'step': 1.5, 'octave_n': 4, 'octave_scale': 1.0},
]
output_data = {}
# Iterate through all parameter sets and track per-run wall-clock time.
for i in range(0, len(parameters)):
    start_time = time.time()
    # Perform the deep dream rendering and store output array, which is called output_image
def setup_summary_statistics(self):
    """Build the label-error-rate (LER) accuracy ops for summary reporting.

    Reads self.decoded (sparse decoder output; top path at index 0) and
    self.targets; defines self.ler (mean edit distance over the batch) and
    self.ler_placeholder (a scalar feed for an externally computed LER).
    """
    with tf.name_scope("accuracy"):
        # Edit (Levenshtein) distance between the top decoded path and the
        # target transcriptions. decoded paths come back as int64, so cast
        # to match the targets' dtype.
        distance = tf.edit_distance(tf.cast(self.decoded[0], tf.int32), self.targets)
        # The label error rate is the mean edit distance over the batch.
        self.ler = tf.reduce_mean(distance, name='label_error_rate')
        # Scalar placeholder used to feed an aggregated LER value at
        # summary-writing time.
        self.ler_placeholder = tf.placeholder(dtype=tf.float32, shape=[])
# NOTE(review): this snippet lost its indentation and carries markdown-table
# artifacts (" | |") from extraction, and the bidirectional_dynamic_rnn call
# is truncated after inputs=layer_3 — the remaining arguments (presumably
# dtype/sequence_length and the closing paren) are outside this view, so the
# code is left byte-identical rather than reconstructed.
with tf.name_scope('lstm'): | |
# Forward direction cell: | |
lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(n_cell_dim, forget_bias=1.0, state_is_tuple=True) | |
# Backward direction cell: | |
lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(n_cell_dim, forget_bias=1.0, state_is_tuple=True) | |
# Now we feed `layer_3` into the LSTM BRNN cell and obtain the LSTM BRNN output. | |
outputs, output_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fw_cell, | |
cell_bw=lstm_bw_cell, | |
inputs=layer_3, |
# Load the wav file: fs is the sample rate, audio the raw sample array.
fs, audio = wav.read(audio_filename)
# Get the MFCC coefficients, one row per time slice.
orig_inputs = mfcc(audio, samplerate=fs, numcep=numcep)
# For each time slice of the training set we also keep numcontext frames of
# context on each side, so a row holds numcep + 2 * numcep * numcontext
# values. np.zeros is equivalent to the original empty-array-then-resize
# idiom: ndarray.resize zero-fills the newly created elements.
train_inputs = np.zeros((orig_inputs.shape[0], numcep + 2 * numcep * numcontext), np.float32)
class DataSet:
    """Sketch of a dataset wrapper over transcript (.txt) files.

    NOTE(review): this is an excerpt — __init__'s body is elided in the
    original ("# ...") and next_batch is truncated mid-method. The free
    names txt_filenames, _start_idx, end_idx and _txt_files are defined
    outside this view; confirm against the full source.
    """

    def __init__(self, txt_files, thread_count, batch_size, numcep, numcontext):
        # Body elided in the original snippet ("# ...").
        ...

    def from_directory(self, dirpath, start_idx=0, limit=0, sort=None):
        # Delegate file discovery to the module-level txt_filenames helper.
        return txt_filenames(dirpath, start_idx=start_idx, limit=limit, sort=sort)

    def next_batch(self, batch_size=None):
        # Select the slice of transcript files that makes up this batch.
        # (Truncated in the original: batch assembly continues past this view.)
        idx_list = range(_start_idx, end_idx)
        txt_files = [_txt_files[i] for i in idx_list]
def create_and_persist_graph():
    """Load the serialized GraphDef at modelFullPath into a session's graph.

    Reads the frozen-model protobuf, imports its nodes (with no name
    prefix, so node names are preserved) and returns the session's graph.
    """
    # Fixed from the original paste: the typographic quotes around 'rb'
    # and '' were a Python syntax error.
    with tf.Session() as persisted_sess:
        # Load Graph: read the binary protobuf containing the frozen graph.
        with tf.gfile.FastGFile(modelFullPath, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        persisted_sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
        return persisted_sess.graph
I hereby claim:
To claim this, I am signing this object:
# -*- coding: utf-8 -*- | |
import itertools | |
import re | |
import urlparse | |
import boto | |
import warc | |
from boto.s3.key import Key | |
from gzipstream import GzipStreamFile |