@tawnkramer
Last active November 10, 2017 00:07
def imu_rnn_lstm(seq_length, num_outputs, imu_vec_size=6):
    from keras.layers import Input, Dense
    from keras.models import Model
    from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
    from keras.layers import Activation, Dropout, Flatten, Cropping2D, Lambda
    from keras.layers.merge import concatenate
    from keras.layers import LSTM

    # note: batch_shape puts seq_length on the batch axis, so each input is a single 4-D image
    img_in = Input(batch_shape=(seq_length, 120, 160, 3), name='img_in')
    imu_in = Input(batch_shape=(seq_length, imu_vec_size), name="imu_in")

    # vision branch
    x = img_in
    x = Cropping2D(cropping=((60, 0), (0, 0)))(x)  # trim 60 pixels off top
    x = Convolution2D(24, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(32, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(32, (3, 3), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(32, (3, 3), strides=(1, 1), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Flatten(name='flattened')(x)
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)

    # IMU branch
    y = imu_in
    y = Dense(14, activation='relu')(y)
    y = Dense(14, activation='relu')(y)
    y = Dense(14, activation='relu')(y)

    # merge the two branches and run the result through the LSTM stack
    z = concatenate([x, y])
    z = Dense(256, activation='relu')(z)
    z = Reshape((256, 1))(z)
    z = LSTM(128, return_sequences=True, name="LSTM_seq")(z)
    z = Dropout(.1)(z)
    z = LSTM(128, return_sequences=False, name="LSTM_out")(z)
    z = Dropout(.1)(z)
    z = Dense(50, activation='relu')(z)
    z = Dropout(.1)(z)

    # one linear output head per target
    outputs = []
    for i in range(num_outputs):
        outputs.append(Dense(1, activation='linear', name='n_outputs' + str(i))(z))

    model = Model(inputs=[img_in, imu_in], outputs=outputs)
    model.compile(optimizer='adam', loss='mse')
    return model
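For a quick sanity check the model can be built on its own and inspected with model.summary(); a minimal sketch, assuming seq_length=5 and num_outputs=2 (both illustrative values), which shows that img_in is declared with only 4 dimensions because seq_length sits on the batch axis:

# Sketch only: build the model standalone and inspect the declared input shapes.
# seq_length=5 and num_outputs=2 are assumed values for illustration.
if __name__ == '__main__':
    m = imu_rnn_lstm(seq_length=5, num_outputs=2)
    m.summary()  # img_in appears as (5, 120, 160, 3): batch axis of 5, single 4-D images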
def generator(data, batch_size=128):
    from random import shuffle
    import numpy as np
    from PIL import Image

    num_records = len(data)

    while True:
        shuffle(data)
        for offset in range(0, num_records, batch_size):
            batch_data = data[offset:offset + batch_size]

            b_inputs_img = []
            b_inputs_imu = []
            b_labels = []

            for seq in batch_data:
                inputs_img = []
                inputs_imu = []
                labels = []
                for record in seq:
                    # load the image lazily if we don't already have it
                    if record['img_data'] is None:
                        record['img_data'] = np.array(Image.open(record['image_path']))
                    inputs_img.append(record['img_data'])
                    labels.append(record['target_output'])
                    inputs_imu.append(record['imu_array'])

                b_inputs_img.append(inputs_img)
                b_inputs_imu.append(inputs_imu)
                b_labels.append(labels)

            X = [np.array(b_inputs_img), np.array(b_inputs_imu)]
            y = np.array([b_labels])  # note: the extra brackets add a leading axis of size 1
            yield X, y
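To see the array shapes the generator actually yields, here is a small sketch with synthetic records; the list-of-sequences layout and the record keys are taken from the loop above, while the sizes (256 sequences of length 5, 6-element IMU vectors, one target) are assumptions for illustration:

# Sketch with synthetic data matching the record layout the generator iterates over.
import numpy as np

def fake_record():
    return {'img_data': np.zeros((120, 160, 3), dtype=np.uint8),
            'image_path': None,
            'imu_array': np.zeros(6),
            'target_output': 0.0}

fake_data = [[fake_record() for _ in range(5)] for _ in range(256)]
X, y = next(generator(fake_data, batch_size=128))
print(X[0].shape)  # (128, 5, 120, 160, 3) -- 5-D: batch, sequence, H, W, C
print(X[1].shape)  # (128, 5, 6)
print(y.shape)     # (1, 128, 5) because of the extra brackets around b_labels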
'''
With:
  batch size      - 128
  sequence length - 5
  image dim       - 160 x 120 x 3

Error:
  ValueError: Error when checking input: expected img_in to have 4 dimensions, but got array with shape (128, 5, 120, 160, 3)
'''
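The mismatch comes from img_in being declared as a 4-D tensor (a batch of single images, with seq_length on the batch axis) while the generator yields 5-D arrays of shape (batch, sequence, 120, 160, 3). One possible direction, sketched below and not necessarily the intended fix, is to declare the sequence axis on the inputs and wrap the per-frame layers in TimeDistributed:

# Sketch only: a TimeDistributed variant that accepts (batch, seq, 120, 160, 3).
# Layer sizes follow the model above; the function name is made up.
from keras.layers import Input, Dense, Dropout, Flatten, Cropping2D
from keras.layers import Convolution2D, MaxPooling2D, LSTM, TimeDistributed as TD
from keras.layers.merge import concatenate
from keras.models import Model

def imu_rnn_lstm_td(seq_length, num_outputs, imu_vec_size=6):
    img_seq_in = Input(shape=(seq_length, 120, 160, 3), name='img_in')
    imu_seq_in = Input(shape=(seq_length, imu_vec_size), name='imu_in')

    # apply the vision layers to every frame in the sequence
    x = TD(Cropping2D(cropping=((60, 0), (0, 0))))(img_seq_in)
    x = TD(Convolution2D(24, (5, 5), strides=(2, 2), activation='relu'))(x)
    x = TD(Convolution2D(32, (5, 5), strides=(2, 2), activation='relu'))(x)
    x = TD(Convolution2D(32, (3, 3), strides=(2, 2), activation='relu'))(x)
    x = TD(MaxPooling2D(pool_size=(2, 2)))(x)
    x = TD(Flatten())(x)
    x = TD(Dense(100, activation='relu'))(x)

    # apply the IMU layers to every timestep as well
    y = TD(Dense(14, activation='relu'))(imu_seq_in)

    z = concatenate([x, y])                   # (batch, seq, features)
    z = LSTM(128, return_sequences=False)(z)  # collapse the sequence axis
    z = Dropout(.1)(z)
    z = Dense(50, activation='relu')(z)
    outputs = [Dense(1, activation='linear', name='out_%d' % i)(z)
               for i in range(num_outputs)]
    model = Model(inputs=[img_seq_in, imu_seq_in], outputs=outputs)
    model.compile(optimizer='adam', loss='mse')
    return model

With this shape convention the generator's labels would also need to become one target per sequence per output head, rather than one per frame.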