View model_issues.py
from __future__ import print_function
from keras.models import Model, Sequential
from keras.layers import Input, Dense, TimeDistributed
from keras.layers.core import Reshape, Flatten, Dropout, TimeDistributedDense
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Convolution2D
from keras.layers.recurrent import LSTM
from keras.optimizers import Adam
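The preview shows only the imports. A minimal sketch of the kind of sequence model these imports suggest, with hypothetical layer sizes and input shape (Keras 1.x API, matching the import paths above):

# Hypothetical shapes: 100 timesteps of 513-dim spectra
model = Sequential()
model.add(LSTM(256, return_sequences=True, input_shape=(100, 513)))
model.add(Dropout(0.5))
model.add(TimeDistributed(Dense(513)))
model.compile(loss='mse', optimizer=Adam())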
View gist:0c3b19fb23ba680dbf3e
require 'rnn'  -- nn.Sequencer and nn.LSTM come from the Element-Research rnn package

model = nn.Sequential()
lstm = nn.Sequencer(
   nn.Sequential()
      :add(nn.LSTM(nFeatures, nHidden))
      :add(nn.Dropout())
      :add(nn.LSTM(nHidden, nHidden))
)
lstm:remember('neither') -- force model to call forget at each call to forward
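For comparison, a stateless two-layer LSTM like the one above, forgetting its state on every forward call as remember('neither') does, can be sketched in PyTorch (a different framework; sizes are hypothetical stand-ins for nFeatures/nHidden):

import torch
import torch.nn as nn

n_features, n_hidden = 129, 256
# dropout=0.5 applies between the two stacked layers,
# mirroring the nn.Dropout() between the two nn.LSTM modules above
lstm = nn.LSTM(n_features, n_hidden, num_layers=2, dropout=0.5, batch_first=True)
x = torch.randn(8, 100, n_features)
# Passing no hidden state re-initializes it to zero on each call,
# i.e. the model remembers nothing across forward calls
out, (h, c) = lstm(x)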
View istft.lua
signal = require "signal"
complex = require "signal.complex"

function istft(X, win, hop)
   local x = torch.zeros((X:size(1)-1)*hop + win)
   framesamp = X:size(2)
   hopsamp = hop
   for n=1,X:size(1) do
      i = 1 + (n-1)*hopsamp
      print(i, i + framesamp - 1)
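The preview cuts off inside the loop. The same overlap-add reconstruction can be sketched in NumPy, assuming X holds one complex FFT frame per row, as the Lua indexing implies:

import numpy as np

def istft(X, win, hop):
    # X: (n_frames, win) complex STFT, win: frame length in samples, hop: hop size
    x = np.zeros((X.shape[0] - 1) * hop + win)
    for n in range(X.shape[0]):
        i = n * hop
        # Overlap-add the inverse FFT of each frame
        x[i:i + win] += np.real(np.fft.ifft(X[n]))
    return x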
View error.log
creating an lstm with 2 layers
setting forget gate biases to 1 in LSTM layer 1
setting forget gate biases to 1 in LSTM layer 2
number of parameters in the model: 240321
cloning rnn
cloning criterion
/Users/jfsantos/torch/install/bin/luajit: /Users/jfsantos/torch/install/share/lua/5.1/nn/Identity.lua:13: bad argument #1 to 'set' (expecting number or Tensor or Storage at /tmp/luarocks_torch-scm-1-8315/torch7/generic/Tensor.c:1089)
stack traceback:
	[C]: in function 'set'
	/Users/jfsantos/torch/install/share/lua/5.1/nn/Identity.lua:13: in function 'func'
View gist:3dc73409d61add1243ad
fAed cBAG|BBBB gBeB|d2 BD E2 cB|[1cBAF G2 Bc:|[2B2 Bd e3G|
dcde fab2|afef g2 fg|affe fedB|GEDB E3 :|
View spectrum_compress.py
import numpy as np

x = np.random.rand(513)
# Converting to the frequency domain
X = np.fft.rfft(x)
# Taking the log of the magnitude and converting back to rectangular
def P2R(magnitude, phase):
    # Polar-to-rectangular: rebuild the complex spectrum from magnitude and phase
    return magnitude * np.exp(1j * phase)
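A round trip through the compressed representation then looks like this. log1p/expm1 is one invertible choice of compression; the gist's exact transform is cut off in the preview:

# Compress the magnitude, keep the phase
logmag, phase = np.log1p(np.abs(X)), np.angle(X)
# Invert the compression and rebuild the complex spectrum
X_rec = P2R(np.expm1(logmag), phase)
x_rec = np.fft.irfft(X_rec, n=len(x))
print(np.allclose(x, x_rec))  # True up to floating-point error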
View HDF5Matrix.py
from collections import defaultdict
import h5py, numpy

class HDF5Matrix:
    refs = defaultdict(int)
    def __init__(self, datapath, dataset, start, end, normalizer=None):
        # Share one open file handle per path across instances
        if datapath not in self.refs.keys():
            f = h5py.File(datapath)
            self.refs[datapath] = f
        else:
            f = self.refs[datapath]
        self.start = start
        self.end = end
        self.data = f[dataset]
        self.normalizer = normalizer
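The preview stops at the constructor. A minimal sketch of the slicing such a wrapper needs, hypothetical and not the gist's actual code: shift indices by the start offset and apply the optional normalizer.

    def __len__(self):
        return self.end - self.start

    def __getitem__(self, key):
        # Shift by the window start so the view is zero-based
        # (assumes explicit slice bounds for brevity)
        if isinstance(key, slice):
            data = self.data[key.start + self.start:key.stop + self.start]
        else:
            data = self.data[key + self.start]
        return self.normalizer(data) if self.normalizer is not None else data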
View test_lms.py
import numpy as np
import scipy.signal as sig

from adaptfilt import lms

if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from scipy.io import wavfile

    sigma = 0.1
    order = 100
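The preview ends at the parameters. Continuing under the same __main__ block, a self-contained LMS system-identification loop in plain NumPy, with sigma as the noise level and order as the filter length, might look like this (hypothetical system and signals; the gist's actual call into adaptfilt's lms is cut off):

    np.random.seed(0)
    u = np.random.randn(10000)                    # white input signal
    h = sig.firwin(order, 0.3)                    # "unknown" system to identify
    d = sig.lfilter(h, 1, u) + sigma * np.random.randn(len(u))  # noisy desired signal

    w = np.zeros(order)                           # adaptive filter taps
    mu = 0.005                                    # step size
    for n in range(order, len(u)):
        u_vec = u[n - order:n][::-1]              # most recent sample first
        e = d[n] - np.dot(w, u_vec)               # a-priori error
        w += mu * e * u_vec                       # LMS tap update
    print(np.max(np.abs(w - h)))                  # small once converged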
View eval_mlp.jl
ENV["MOCHA_USE_CUDA"] = "true" | |
using HDF5, JLD, Mocha | |
X = Array[] | |
push!(X, rand(Float32, 128,11*129,1,1)) | |
y = Array[] | |
push!(y, rand(Float32, 128, 129, 1, 1)) | |
#data_layer = AsyncHDF5DataLayer("train", "train.txt", 128, 1000, [:features, :targets], false, []) |
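The 11*129 input width suggests 11-frame context windows over 129-bin spectra. Building such spliced windows can be sketched in NumPy (Python rather than Julia, with hypothetical data):

import numpy as np

spec = np.random.rand(1000, 129).astype(np.float32)  # frames x bins
ctx = 11
half = ctx // 2
# Replicate edge frames so every frame gets a full context
padded = np.pad(spec, ((half, half), (0, 0)), mode='edge')
# Each row concatenates 11 consecutive frames: shape (1000, 11*129)
windows = np.stack([padded[i:i + ctx].ravel() for i in range(len(spec))])
print(windows.shape)  # (1000, 1419)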
View fft_gtgram_comparison.py
from gammatone.fftweight import fft_gtgram
from scipy.io.matlab import loadmat

s = loadmat("test.mat")["s"][:,0]
fs = 16000
# gt_py has 260 frames
gt_py = fft_gtgram(s, fs, 0.010, 0.0025, 23, 125)
# gt_mat has 269 frames
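A frame-count mismatch like 260 vs. 269 at a 2.5 ms hop often comes down to how the two implementations treat the signal edges. A quick check of both conventions, appended to the script above (hypothetical, since the MATLAB call is not shown):

import numpy as np

win, hop = int(0.010 * fs), int(0.0025 * fs)            # 160 and 40 samples at 16 kHz
n = len(s)
frames_full = 1 + (n - win) // hop                      # complete windows only
frames_padded = 1 + int(np.ceil(float(n - win) / hop))  # trailing partial window padded
print(frames_full, frames_padded)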