Skip to content

Instantly share code, notes, and snippets.

What would you like to do?
Word sequence embeddings
def forward(sentence):
    """Feed *sentence* through the pre-trained LM and return the last LSTM state.

    The sentence is tokenized into word ids and character ids, then fed to the
    model one token at a time; the embedding returned is the one produced after
    the final token.

    Args:
        sentence: whitespace-separated text; a '<S>' start marker is prepended
            if not already present.

    Returns:
        The value of the 'lstm/lstm_1/control_dependency' tensor after the last
        token, or None for an empty sentence.

    NOTE(review): relies on module-level state (`vocab`, `sess`, `t`, `inputs`,
    `char_ids_inputs`, `targets`, `weights`) defined elsewhere in the file —
    presumably the lm_1b graph setup; confirm against the surrounding script.
    """
    # Prepend the start token BEFORE tokenizing. The original code built
    # word_ids/char_ids first and only then prepended '<S>', so the marker
    # never actually reached the model.
    if sentence.find('<S>') != 0:
        sentence = '<S> ' + sentence

    # Tokenize characters and words (split once, reuse for both).
    words = sentence.split()
    word_ids = [vocab.word_to_id(w) for w in words]
    char_ids = [vocab.word_to_char_ids(w) for w in words]

    lstm_emb = None
    # range, not xrange: Python-3 compatible.
    for i in range(len(word_ids)):
        inputs[0, 0] = word_ids[i]
        char_ids_inputs[0, 0, :] = char_ids[i]
        # Use 'lstm/lstm_0/control_dependency' instead to dump the previous layer.
        # NOTE(review): the pasted source had lost the `sess.run(t[...])` call on
        # this line; restored from the standard lm_1b evaluation recipe — confirm
        # the tensor name against the checkpoint's graph.
        lstm_emb = sess.run(t['lstm/lstm_1/control_dependency'],
                            feed_dict={t['char_inputs_in']: char_ids_inputs,
                                       t['inputs_in']: inputs,
                                       t['targets_in']: targets,
                                       t['target_weights_in']: weights})
    return lstm_emb
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.