Video captioning (Seq2Seq in Keras)
# NOTE: this layer is written against the Keras 1.x API
# (initializations, consume_less, time_distributed_dense, output_dim).
from keras import backend as K
from keras import activations, initializations
from keras.engine import InputSpec
from keras.layers import LSTM
from keras.layers.recurrent import time_distributed_dense
class AttentionLSTM(LSTM):
    """LSTM with attention mechanism

    This is an LSTM incorporating an attention mechanism into its hidden states.
    Currently, the context vector calculated from the attended vector is fed
    into the model's internal states, closely following the model by Xu et al.
    (2016, Sec. 3.1.2), using a soft attention model following
    Bahdanau et al. (2014).

    The layer expects two inputs instead of the usual one:
        1. the "normal" layer input; and
        2. a 3D vector to attend.

    Args:
        attn_activation: Activation function for attentional components
        attn_init: Initialization function for attention weights
        output_alpha (boolean): If true, outputs the alpha values, i.e.,
            what parts of the attention vector the layer attends to at each
            timestep.

    References:
        * Bahdanau, Cho & Bengio (2014), "Neural Machine Translation by Jointly
          Learning to Align and Translate", <https://arxiv.org/pdf/1409.0473.pdf>
        * Xu, Ba, Kiros, Cho, Courville, Salakhutdinov, Zemel & Bengio (2016),
          "Show, Attend and Tell: Neural Image Caption Generation with Visual
          Attention", <http://arxiv.org/pdf/1502.03044.pdf>

    See Also:
        `LSTM`_ in the Keras documentation.

        .. _LSTM: http://keras.io/layers/recurrent/#lstm
    """
    def __init__(self, *args, attn_activation='tanh', attn_init='orthogonal',
                 output_alpha=False, **kwargs):
        self.attn_activation = activations.get(attn_activation)
        self.attn_init = initializations.get(attn_init)
        self.output_alpha = output_alpha
        super().__init__(*args, **kwargs)

    def build(self, input_shape):
        if not (isinstance(input_shape, list) and len(input_shape) == 2):
            raise Exception('Input to AttentionLSTM must be a list of '
                            'two tensors [lstm_input, attn_input].')
        input_shape, attn_input_shape = input_shape
        super().build(input_shape)
        self.input_spec.append(InputSpec(shape=attn_input_shape))

        # weights for attention model
        self.U_att = self.inner_init((self.output_dim, self.output_dim),
                                     name='{}_U_att'.format(self.name))
        self.W_att = self.attn_init((attn_input_shape[-1], self.output_dim),
                                    name='{}_W_att'.format(self.name))
        self.v_att = self.init((self.output_dim, 1),
                               name='{}_v_att'.format(self.name))
        self.b_att = K.zeros((self.output_dim,), name='{}_b_att'.format(self.name))
        self.trainable_weights += [self.U_att, self.W_att, self.v_att, self.b_att]

        # weights for incorporating attention into hidden states
        if self.consume_less == 'gpu':
            self.Z = self.init((attn_input_shape[-1], 4 * self.output_dim),
                               name='{}_Z'.format(self.name))
            self.trainable_weights += [self.Z]
        else:
            self.Z_i = self.attn_init((attn_input_shape[-1], self.output_dim),
                                      name='{}_Z_i'.format(self.name))
            self.Z_f = self.attn_init((attn_input_shape[-1], self.output_dim),
                                      name='{}_Z_f'.format(self.name))
            self.Z_c = self.attn_init((attn_input_shape[-1], self.output_dim),
                                      name='{}_Z_c'.format(self.name))
            self.Z_o = self.attn_init((attn_input_shape[-1], self.output_dim),
                                      name='{}_Z_o'.format(self.name))
            self.trainable_weights += [self.Z_i, self.Z_f, self.Z_c, self.Z_o]
            self.Z = K.concatenate([self.Z_i, self.Z_f, self.Z_c, self.Z_o])

        # weights for initializing states based on attention vector
        if not self.stateful:
            self.W_init_c = self.attn_init((attn_input_shape[-1], self.output_dim),
                                           name='{}_W_init_c'.format(self.name))
            self.W_init_h = self.attn_init((attn_input_shape[-1], self.output_dim),
                                           name='{}_W_init_h'.format(self.name))
            self.b_init_c = K.zeros((self.output_dim,),
                                    name='{}_b_init_c'.format(self.name))
            self.b_init_h = K.zeros((self.output_dim,),
                                    name='{}_b_init_h'.format(self.name))
            self.trainable_weights += [self.W_init_c, self.b_init_c,
                                       self.W_init_h, self.b_init_h]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights

    def get_output_shape_for(self, input_shape):
        # output shape is not affected by the attention component
        return super().get_output_shape_for(input_shape[0])

    def compute_mask(self, input, input_mask=None):
        if input_mask is not None:
            input_mask = input_mask[0]
        return super().compute_mask(input, input_mask=input_mask)

    def get_initial_states(self, x_input, x_attn, mask_attn):
        # set initial states from mean attention vector fed through a dense
        # activation
        mean_attn = K.mean(x_attn * K.expand_dims(mask_attn), axis=1)
        h0 = K.dot(mean_attn, self.W_init_h) + self.b_init_h
        c0 = K.dot(mean_attn, self.W_init_c) + self.b_init_c
        return [self.attn_activation(h0), self.attn_activation(c0)]
    def call(self, x, mask=None):
        assert isinstance(x, list) and len(x) == 2
        x_input, x_attn = x
        if mask is not None:
            mask_input, mask_attn = mask
        else:
            mask_input, mask_attn = None, None

        # input shape: (nb_samples, time (padded with zeros), input_dim)
        input_shape = self.input_spec[0].shape
        if K._BACKEND == 'tensorflow':
            if not input_shape[1]:
                raise Exception('When using TensorFlow, you should define '
                                'explicitly the number of timesteps of '
                                'your sequences.\n'
                                'If your first layer is an Embedding, '
                                'make sure to pass it an "input_length" '
                                'argument. Otherwise, make sure '
                                'the first layer has '
                                'an "input_shape" or "batch_input_shape" '
                                'argument, including the time axis. '
                                'Found input shape at layer ' + self.name +
                                ': ' + str(input_shape))
        if self.stateful:
            initial_states = self.states
        else:
            initial_states = self.get_initial_states(x_input, x_attn, mask_attn)
        constants = self.get_constants(x_input, x_attn, mask_attn)
        preprocessed_input = self.preprocess_input(x_input)

        last_output, outputs, states = K.rnn(self.step, preprocessed_input,
                                             initial_states,
                                             go_backwards=self.go_backwards,
                                             mask=mask_input,
                                             constants=constants,
                                             unroll=self.unroll,
                                             input_length=input_shape[1])
        if self.stateful:
            self.updates = []
            for i in range(len(states)):
                self.updates.append((self.states[i], states[i]))

        if self.return_sequences:
            return outputs
        else:
            return last_output
    def step(self, x, states):
        h_tm1 = states[0]
        c_tm1 = states[1]
        B_U = states[2]
        B_W = states[3]
        x_attn = states[4]
        mask_attn = states[5]
        attn_shape = self.input_spec[1].shape

        #### attentional component
        # alignment model
        # -- keeping weight matrices for x_attn and h_s separate has the advantage
        #    that the feature dimensions of the vectors can be different
        h_att = K.repeat(h_tm1, attn_shape[1])
        att = time_distributed_dense(x_attn, self.W_att, self.b_att)
        energy = self.attn_activation(K.dot(h_att, self.U_att) + att)
        energy = K.squeeze(K.dot(energy, self.v_att), 2)

        # make probability tensor
        alpha = K.exp(energy)
        if mask_attn is not None:
            alpha *= mask_attn
        alpha /= K.sum(alpha, axis=1, keepdims=True)
        alpha_r = K.repeat(alpha, attn_shape[2])
        alpha_r = K.permute_dimensions(alpha_r, (0, 2, 1))

        # make context vector -- soft attention after Bahdanau et al.
        z_hat = x_attn * alpha_r
        z_hat = K.sum(z_hat, axis=1)

        if self.consume_less == 'gpu':
            z = K.dot(x * B_W[0], self.W) + K.dot(h_tm1 * B_U[0], self.U) \
                + K.dot(z_hat, self.Z) + self.b
            z0 = z[:, :self.output_dim]
            z1 = z[:, self.output_dim: 2 * self.output_dim]
            z2 = z[:, 2 * self.output_dim: 3 * self.output_dim]
            z3 = z[:, 3 * self.output_dim:]
        else:
            if self.consume_less == 'cpu':
                x_i = x[:, :self.output_dim]
                x_f = x[:, self.output_dim: 2 * self.output_dim]
                x_c = x[:, 2 * self.output_dim: 3 * self.output_dim]
                x_o = x[:, 3 * self.output_dim:]
            elif self.consume_less == 'mem':
                x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
                x_f = K.dot(x * B_W[1], self.W_f) + self.b_f
                x_c = K.dot(x * B_W[2], self.W_c) + self.b_c
                x_o = K.dot(x * B_W[3], self.W_o) + self.b_o
            else:
                raise Exception('Unknown `consume_less` mode.')
            z0 = x_i + K.dot(h_tm1 * B_U[0], self.U_i) + K.dot(z_hat, self.Z_i)
            z1 = x_f + K.dot(h_tm1 * B_U[1], self.U_f) + K.dot(z_hat, self.Z_f)
            z2 = x_c + K.dot(h_tm1 * B_U[2], self.U_c) + K.dot(z_hat, self.Z_c)
            z3 = x_o + K.dot(h_tm1 * B_U[3], self.U_o) + K.dot(z_hat, self.Z_o)

        i = self.inner_activation(z0)
        f = self.inner_activation(z1)
        c = f * c_tm1 + i * self.activation(z2)
        o = self.inner_activation(z3)
        h = o * self.activation(c)

        if self.output_alpha:
            return alpha, [h, c]
        else:
            return h, [h, c]
    def get_constants(self, x_input, x_attn, mask_attn):
        constants = super().get_constants(x_input)
        attn_shape = self.input_spec[1].shape
        if mask_attn is not None:
            if K.ndim(mask_attn) == 3:
                mask_attn = K.all(mask_attn, axis=-1)
        constants.append(x_attn)
        constants.append(mask_attn)
        return constants

    def get_config(self):
        cfg = super().get_config()
        cfg['output_alpha'] = self.output_alpha
        cfg['attn_activation'] = self.attn_activation.__name__
        return cfg

    @classmethod
    def from_config(cls, config):
        instance = super(AttentionLSTM, cls).from_config(config)
        if 'output_alpha' in config:
            instance.output_alpha = config['output_alpha']
        if 'attn_activation' in config:
            instance.attn_activation = activations.get(config['attn_activation'])
        return instance
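The docstring above only states that the layer takes a list of two tensors, so here is a minimal usage sketch. It assumes the Keras 1.x functional API this class is written against; the shapes (80 attended timesteps of 4096-d frame features, 20 caption steps over a 3000-word vocabulary) and all variable names are illustrative placeholders, not part of the gist.

# Minimal usage sketch for AttentionLSTM (assumes Keras 1.x; placeholder shapes/names).
from keras.models import Model
from keras.layers import Input, Dense, TimeDistributed

frame_feats = Input(shape=(80, 4096))   # 3D tensor to attend over (e.g. CNN frame features)
caption_in = Input(shape=(20, 3000))    # "normal" recurrent input (shifted one-hot captions)
# The layer is called on a list: [normal_input, attention_input].
h = AttentionLSTM(256, return_sequences=True)([caption_in, frame_feats])
word_probs = TimeDistributed(Dense(3000, activation='softmax'))(h)
captioner = Model([caption_in, frame_feats], word_probs)
captioner.compile(optimizer='rmsprop', loss='categorical_crossentropy')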
https://github.com/fchollet/keras/issues/4962
https://gist.github.com/mbollmann/ccc735366221e4dba9f89d2aab86da1e
https://gist.github.com/mbollmann/29fd21931820c64095617125824ea246
https://github.com/fchollet/keras/issues/2995
https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html
http://colah.github.io/posts/2015-08-Understanding-LSTMs/
https://docs.google.com/presentation/d/1O0eohFR5J4AStQVAyaJOfLXw2CO_KEOC_BL75MnxxV4/edit#slide=id.g2910edee95_0_150
beam search
https://arxiv.org/pdf/1606.02960.pdf
scheduled sampling
https://arxiv.org/pdf/1506.03099.pdf
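The two links above are only pointers. As a rough, hypothetical illustration of beam search, the greedy argmax loop in `decode_sequence` (defined in the script below) could be replaced by something like the following sketch; it assumes the `encoder_model` / `decoder_model` interface and token indexes from that script, and the function name is made up. Scheduled sampling (the second link) instead changes training: with a probability that grows over epochs, the decoder is fed its own sampled previous prediction rather than the ground-truth previous token.

import numpy as np

def beam_search_decode(input_seq, beam_width=3, max_len=20):
    # Hypothetical beam-search variant of decode_sequence() from the script below.
    # Keeps the beam_width highest log-probability partial sequences at each step
    # instead of greedily taking the argmax.
    states_value = encoder_model.predict(input_seq)
    start = np.zeros((1, 1, num_decoder_tokens))
    start[0, 0, target_token_index['\t']] = 1.
    # Each hypothesis: (log-prob, decoded text, last one-hot input, [h, c] states).
    beams = [(0.0, '', start, states_value)]
    for _ in range(max_len):
        candidates = []
        for logp, text, target_seq, states in beams:
            if text.endswith('\n'):   # finished hypothesis: carry it over unchanged
                candidates.append((logp, text, target_seq, states))
                continue
            probs, h, c = decoder_model.predict([target_seq] + states)
            for idx in np.argsort(probs[0, -1, :])[-beam_width:]:
                char = reverse_target_char_index[idx]
                nxt = np.zeros((1, 1, num_decoder_tokens))
                nxt[0, 0, idx] = 1.
                candidates.append((logp + np.log(probs[0, -1, idx]),
                                   text + char, nxt, [h, c]))
        # Prune to the beam_width best hypotheses.
        beams = sorted(candidates, key=lambda b: b[0], reverse=True)[:beam_width]
    return beams[0][1]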
'''Sequence to sequence example in Keras (character-level).

This script demonstrates how to implement a basic character-level
sequence-to-sequence model. We apply it to translating
short English sentences into short French sentences,
character-by-character. Note that it is fairly unusual to
do character-level machine translation, as word-level
models are more common in this domain.

# Summary of the algorithm:
- We start with input sequences from a domain (e.g. English sentences)
  and corresponding target sequences from another domain
  (e.g. French sentences).
- An encoder LSTM turns input sequences into 2 state vectors
  (we keep the last LSTM state and discard the outputs).
- A decoder LSTM is trained to turn the target sequences into
  the same sequence but offset by one timestep in the future,
  a training process called "teacher forcing" in this context.
  It uses as initial state the state vectors from the encoder.
  Effectively, the decoder learns to generate `targets[t+1...]`
  given `targets[...t]`, conditioned on the input sequence.
- In inference mode, when we want to decode unknown input sequences, we:
    - Encode the input sequence into state vectors
    - Start with a target sequence of size 1
      (just the start-of-sequence character)
    - Feed the state vectors and 1-char target sequence
      to the decoder to produce predictions for the next character
    - Sample the next character using these predictions
      (we simply use argmax).
    - Append the sampled character to the target sequence
    - Repeat until we generate the end-of-sequence character or we
      hit the character limit.

# Data download:
English to French sentence pairs.
http://www.manythings.org/anki/fra-eng.zip
Lots of neat sentence pairs datasets can be found at:
http://www.manythings.org/anki/

# References:
- Sequence to Sequence Learning with Neural Networks
  https://arxiv.org/abs/1409.3215
- Learning Phrase Representations using
  RNN Encoder-Decoder for Statistical Machine Translation
  https://arxiv.org/abs/1406.1078
'''
from __future__ import print_function

from keras.models import Model
from keras.layers import Input, LSTM, Dense
import numpy as np

from AttentionLSTM import AttentionLSTM
batch_size = 64      # Batch size for training.
epochs = 100         # Number of epochs to train for.
latent_dim = 256     # Latent dimensionality of the encoding space.
num_samples = 10000  # Number of samples to train on.
# Path to the data txt file on disk.
data_path = 'fra-eng/fra.txt'

# Vectorize the data.
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
# fra.txt is UTF-8; specify the encoding so this also works on platforms
# whose default codec is not UTF-8.
lines = open(data_path, encoding='utf-8').read().split('\n')
for line in lines[: min(num_samples, len(lines) - 1)]:
    input_text, target_text = line.split('\t')
    # We use "tab" as the "start sequence" character
    # for the targets, and "\n" as "end sequence" character.
    target_text = '\t' + target_text + '\n'
    input_texts.append(input_text)
    target_texts.append(target_text)
    for char in input_text:
        if char not in input_characters:
            input_characters.add(char)
    for char in target_text:
        if char not in target_characters:
            target_characters.add(char)
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)  # 4096
num_decoder_tokens = len(target_characters)  # 3000
max_encoder_seq_length = max([len(txt) for txt in input_texts])  # 80
max_decoder_seq_length = max([len(txt) for txt in target_texts])  # 20

print('Number of samples:', len(input_texts))
print('Number of unique input tokens:', num_encoder_tokens)
print('Number of unique output tokens:', num_decoder_tokens)
print('Max sequence length for inputs:', max_encoder_seq_length)
print('Max sequence length for outputs:', max_decoder_seq_length)

input_token_index = dict(
    [(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict(
    [(char, i) for i, char in enumerate(target_characters)])

encoder_input_data = np.zeros(
    (len(input_texts), max_encoder_seq_length, num_encoder_tokens),
    dtype='float32')
decoder_input_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype='float32')
decoder_target_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype='float32')
for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
        encoder_input_data[i, t, input_token_index[char]] = 1.
    for t, char in enumerate(target_text):
        # decoder_target_data is ahead of decoder_input_data by one timestep
        decoder_input_data[i, t, target_token_index[char]] = 1.
        if t > 0:
            # decoder_target_data will be ahead by one timestep
            # and will not include the start character.
            decoder_target_data[i, t - 1, target_token_index[char]] = 1.

# feat : 1450*M x 80 x 4096
# label: 1450*M x 20 x 3000 (max decode timestep = 20)
############ teacher forcing
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]

# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
#############
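The `# feat` / `# label` comment above and the title of this gist suggest the intended adaptation to video captioning: the one-hot character inputs on the encoder side become per-frame CNN features (80 frames x 4096 dimensions) and the decoder predicts a 3000-word vocabulary over at most 20 steps. A sketch of that variant, reusing the teacher-forcing structure just defined (all names and shapes here are assumptions taken from those comments, not code from the gist):

# Sketch: video-captioning variant of the teacher-forcing model above (assumed shapes).
video_inputs = Input(shape=(80, 4096))      # pre-extracted CNN features per frame
video_encoder = LSTM(latent_dim, return_state=True)
_, video_h, video_c = video_encoder(video_inputs)
caption_inputs = Input(shape=(None, 3000))  # one-hot caption words, shifted by one step
caption_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
caption_outputs, _, _ = caption_lstm(caption_inputs, initial_state=[video_h, video_c])
caption_probs = Dense(3000, activation='softmax')(caption_outputs)
video_caption_model = Model([video_inputs, caption_inputs], caption_probs)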
#################### non-teacher forcing
from keras.layers import Lambda
from keras import backend as K

# The first part is unchanged
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
states = [state_h, state_c]

# Set up the decoder, which will only process one timestep at a time.
decoder_inputs = Input(shape=(1, num_decoder_tokens))
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')

all_outputs = []
inputs = decoder_inputs
for _ in range(max_decoder_seq_length):
    # Run the decoder on one timestep
    outputs, state_h, state_c = decoder_lstm(inputs,
                                             initial_state=states)
    outputs = decoder_dense(outputs)
    # Store the current prediction (we will concatenate all predictions later)
    all_outputs.append(outputs)
    # Reinject the outputs as inputs for the next loop iteration
    # as well as update the states
    inputs = outputs
    states = [state_h, state_c]

# Concatenate all predictions
decoder_outputs = Lambda(lambda x: K.concatenate(x, axis=1))(all_outputs)

# Prepare decoder input data that just contains the start character
# Note that we could have made it a constant hard-coded in the model
decoder_input_data = np.zeros((num_samples, 1, num_decoder_tokens))
decoder_input_data[:, 0, target_token_index['\t']] = 1.
####################
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# Run training
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=0.2)
# Save model
model.save('s2s.h5')

# Next: inference mode (sampling).
# Here's the drill:
# 1) encode input and retrieve initial decoder state
# 2) run one step of decoder with this initial state
#    and a "start of sequence" token as target.
#    Output will be the next target token
# 3) Repeat with the current target token and current states
# Define sampling models
encoder_model = Model(encoder_inputs, encoder_states)

decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs] + decoder_states)

# Reverse-lookup token index to decode sequences back to
# something readable.
reverse_input_char_index = dict(
    (i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict(
    (i, char) for char, i in target_token_index.items())
def decode_sequence(input_seq):
    # Encode the input as state vectors.
    states_value = encoder_model.predict(input_seq)

    # Generate empty target sequence of length 1.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    # Populate the first character of target sequence with the start character.
    target_seq[0, 0, target_token_index['\t']] = 1.

    # Sampling loop for a batch of sequences
    # (to simplify, here we assume a batch of size 1).
    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict(
            [target_seq] + states_value)

        # Sample a token
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char

        # Exit condition: either hit max length
        # or find stop character.
        if (sampled_char == '\n' or
                len(decoded_sentence) > max_decoder_seq_length):
            stop_condition = True

        # Update the target sequence (of length 1).
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.

        # Update states
        states_value = [h, c]

    return decoded_sentence
for seq_index in range(100):
    # Take one sequence (part of the training set)
    # for trying out decoding.
    input_seq = encoder_input_data[seq_index: seq_index + 1]
    decoded_sentence = decode_sequence(input_seq)
    print('-')
    print('Input sentence:', input_texts[seq_index])
    print('Decoded sentence:', decoded_sentence)