@IcedDoggie
Created May 25, 2017 03:45
import torch
import torch.nn as nn
from torch.autograd import Variable


def text_encoding(vocab_len, original_text, nt, nz, cap_length, batch_size):
    # Layer setup. Building layers inside the function re-allocates fresh
    # parameters on every call; in a real model they belong in an nn.Module
    # (see the sketch further below).
    # Word embedding: one nt-dim vector per vocabulary entry. (The original
    # passed batch_size * cap_length + 1 here, which is not the vocab size.)
    text_embedding = nn.Embedding(vocab_len, nt).cuda()
    # RNN layer. nn.LSTM's signature is (input_size, hidden_size, num_layers);
    # the original passed batch_size as num_layers, stacking batch_size LSTM
    # layers -- that is where the memory exploded.
    rnn = nn.LSTM(nt, 256, num_layers=1, batch_first=True).cuda()
    # FC layer projecting the final LSTM output down to an nt-dim encoding.
    linear_layer = nn.Linear(256, nt).cuda()
    leaky_relu = nn.LeakyReLU(0.1).cuda()

    # Embed the caption token ids:
    # (batch_size, cap_length + 1) -> (batch_size, cap_length + 1, nt).
    original_text = Variable(original_text).cuda()
    original_text = text_embedding(original_text)

    # With batch_first=True the embedded sequence is already in
    # (batch, seq, feature) order, so no view() reshuffle is needed.
    out, hidden_state = rnn(original_text)

    # Take the last timestep as the sentence encoding and pass it to the
    # fully connected layer. (The original applied the linear layer to the
    # raw embeddings and never used the LSTM output.)
    output = leaky_relu(linear_layer(out[:, -1, :]))  # (batch_size, nt)

    # Noise concatenation: append an nz-dim noise vector to each sample.
    noise_z = Variable(torch.rand(output.size(0), nz)).cuda()
    output = torch.cat([output, noise_z], 1)  # (batch_size, nt + nz)
    return output
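
A quick usage sketch follows. All sizes below are assumed example values, not from the gist; captions is a LongTensor of token ids, and a CUDA device is required because the function calls .cuda():

vocab_len, nt, nz, cap_length, batch_size = 5000, 128, 100, 19, 64
captions = torch.LongTensor(batch_size, cap_length + 1).random_(0, vocab_len)
encoding = text_encoding(vocab_len, captions, nt, nz, cap_length, batch_size)
print(encoding.size())  # torch.Size([64, 228]), i.e. (batch_size, nt + nz)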
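Because the layers above are rebuilt on every call, one common refactor is to package the encoder as an nn.Module so parameters are created once and reused across batches. A minimal sketch under that assumption; the class name TextEncoder is hypothetical, not from the gist:

import torch
import torch.nn as nn
from torch.autograd import Variable

class TextEncoder(nn.Module):
    # Hypothetical refactor of text_encoding above: same layers, built once.
    def __init__(self, vocab_len, nt, nz):
        super(TextEncoder, self).__init__()
        self.nz = nz
        self.embed = nn.Embedding(vocab_len, nt)
        self.rnn = nn.LSTM(nt, 256, num_layers=1, batch_first=True)
        self.fc = nn.Linear(256, nt)
        self.act = nn.LeakyReLU(0.1)

    def forward(self, captions):
        emb = self.embed(captions)              # (batch, seq, nt)
        out, _ = self.rnn(emb)                  # (batch, seq, 256)
        enc = self.act(self.fc(out[:, -1, :]))  # (batch, nt)
        # GPU assumed here, matching the gist's use of .cuda() throughout.
        noise = Variable(torch.rand(enc.size(0), self.nz)).cuda()
        return torch.cat([enc, noise], 1)       # (batch, nt + nz)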