Wesley wcneill
# Assume 26 unique characters
import string
import numpy as np
from sklearn.preprocessing import OneHotEncoder

alphabet = list(string.ascii_lowercase)

# two sample sequences, inputs and targets
x = np.array(list('abc'))  # inputs
y = np.array(list('xyz'))  # targets

# define one-hot encoder and label encoder; sklearn expects 2D input, so
# reshape the alphabet to a column (in sklearn >= 1.2 use sparse_output=False)
onehot_encoder = OneHotEncoder(sparse=False).fit(np.array(alphabet).reshape(-1, 1))
label_encoder = {ch: i for i, ch in enumerate(alphabet)}
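A quick sketch of how these two encoders might be used together (the reshape to a column is needed because sklearn expects 2D input; the shapes in the comments assume the 26-letter alphabet above):

x_onehot = onehot_encoder.transform(x.reshape(-1, 1))  # shape (3, 26), one row per character
y_labels = [label_encoder[ch] for ch in y]             # [23, 24, 25], integer class labels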
import torch.nn as nn

input_size = 50    # size of the one-hot encoded input vectors
hidden_size = 100  # number of hidden nodes in the LSTM layer
n_layers = 2       # number of stacked LSTM layers
output_size = 50   # output of 50 scores for the next character
lstm = nn.LSTM(input_size, hidden_size, n_layers, batch_first=True)
linear = nn.Linear(hidden_size, output_size)
# Data Flow Protocol
# 1. network input shape: (batch_size, seq_length, num_features)
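A minimal shape check of the protocol above (the batch and sequence sizes here are arbitrary assumptions):

import torch
batch_size, seq_length = 4, 12
x = torch.randn(batch_size, seq_length, input_size)  # (4, 12, 50)
out, (h, c) = lstm(x)  # out: (4, 12, 100) with batch_first=True
scores = linear(out)   # scores: (4, 12, 50), one score vector per time step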
wcneill / lstm02.py
Created July 30, 2020 03:39
Medium lstm article
import torch.nn as nn

input_size = 1  # The number of variables in your sequence data.
n_hidden = 100  # The number of hidden nodes in the LSTM layer.
n_layers = 2    # The total number of LSTM layers to stack.
out_size = 1    # The size of the output you desire from your RNN.
lstm = nn.LSTM(input_size, n_hidden, n_layers, batch_first=True)
linear = nn.Linear(n_hidden, out_size)
# Data Flow Protocol:
# 1. network input shape: (batch_size, seq_length, num_features)
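For this univariate case, a quick sketch of the expected shapes (batch and window sizes are arbitrary assumptions):

import torch
x = torch.randn(8, 20, input_size)  # 8 sequences of 20 time steps, 1 feature each
out, hidden = lstm(x)               # out: (8, 20, 100)
pred = linear(out[:, -1, :])        # regress from the final time step: (8, 1)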
wcneill / lstm01.py
Last active July 30, 2020 03:38
for medium article on LSTMs
import torch.nn as nn

input_size = 1  # The number of variables in your sequence data.
n_hidden = 100  # The number of hidden nodes in the LSTM layer.
n_layers = 2    # The total number of stacked LSTM layers.
out_size = 1    # The size of the output you desire from your RNN.
lstm = nn.LSTM(input_size, n_hidden, n_layers)
linear = nn.Linear(n_hidden, out_size)
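Note that without batch_first=True, as in this earlier version, PyTorch's LSTM expects input shaped (seq_length, batch_size, num_features). A quick sketch (sizes are arbitrary assumptions):

import torch
x = torch.randn(20, 8, input_size)  # 20 time steps, 8 sequences, 1 feature each
out, hidden = lstm(x)               # out: (20, 8, 100)
pred = linear(out[-1])              # last time step for the whole batch: (8, 1)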
wcneill / st06.py
Last active July 12, 2020 20:12
Putting it all together
# load in content and style image, and create target image by copying content image
content = load_image('data/style/clouds-19.jpg').to(device)
style = load_image('data/style/abstract-art-freedom.jpg', shape=content.shape[-2:]).to(device)
target = content.clone().requires_grad_(True).to(device)
style_weights = {'conv1_1': .2,
                 'conv2_1': .2,
                 'conv3_1': .2,
                 'conv4_1': .2,
                 'conv5_1': .2}
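A minimal sketch of the optimization loop that puts these pieces together, assuming the get_features, gramian, content_loss, and style_loss helpers from st05.py below, a pretrained VGG19 feature extractor, and an already-defined device; the learning rate, step count, and style/content weighting here are assumptions, not the article's settings:

import torch.optim as optim
from torchvision import models

vgg = models.vgg19(pretrained=True).features.to(device).eval()

# precompute style gramians and content features once
s_features = get_features(vgg, style)
s_grams = {layer: gramian(s_features[layer]) for layer in s_features}
c_features = get_features(vgg, content)

# optimize the pixels of the target image directly
optimizer = optim.Adam([target], lr=0.01)
for step in range(2000):
    t_features = get_features(vgg, target)
    loss = content_loss(c_features, t_features) \
         + 1e6 * style_loss(s_grams, t_features, style_weights)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()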
wcneill / st05.py
Created July 12, 2020 19:51
style loss
import torch

def style_loss(s_grams, t_features, weights):
    """
    Compute style loss, i.e. the weighted sum of MSE of all layers.
    """
    # for each style layer, get the target and style gramians and compare;
    # the loop body below the gist preview cut-off is reconstructed, and the
    # d * h * w normalization is an assumption following the usual Gatys loss
    loss = 0
    for layer in weights:
        _, d, h, w = t_features[layer].shape
        t_gram = gramian(t_features[layer])
        loss += weights[layer] * torch.mean((t_gram - s_grams[layer]) ** 2) / (d * h * w)
    return loss
def content_loss(c_features, t_features):
    """
    Compute mean squared content loss at the conv4_2 feature maps.
    """
    loss = 0.5 * (t_features['conv4_2'] - c_features['conv4_2']) ** 2
    return torch.mean(loss)
def gramian(tensor):
    # Gram matrix of a (1, d, h, w) feature map: inner products
    # between the d flattened channels, giving a (d, d) matrix
    t = tensor.view(tensor.shape[1], -1)
    return t @ t.T
def get_features(model, image):
    layers = {
        '0' : 'conv1_1',
        '5' : 'conv2_1',
        '10': 'conv3_1',
        '19': 'conv4_1',
        '21': 'conv4_2',
        '28': 'conv5_1'
    }
    # the body below the gist preview cut-off is reconstructed: walk the
    # model's layers in order, saving activations at the layers named above
    features = {}
    x = image
    for name, layer in model._modules.items():
        x = layer(x)
        if name in layers:
            features[layers[name]] = x
    return features
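A quick shape check tying these helpers together (vgg is assumed to be the pretrained VGG19 features module from the sketch under st06.py):

features = get_features(vgg, content)
g = gramian(features['conv1_1'])
print(features['conv1_1'].shape)  # e.g. torch.Size([1, 64, H, W])
print(g.shape)                    # torch.Size([64, 64]): channel correlations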