---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-18-ceb10af85fb3> in <module>()
     18                       max_evals=5,
     19                       trials=Trials(),
---> 20                       notebook_name='con_seq_inception-1d_5400_hyperas')

~/.conda/envs/tensorflow-gpu/lib/python3.6/site-packages/hyperas/optim.py in minimize(model, data, algo, max_evals, trials, functions, rseed, notebook_name, verbose, eval_space, return_space, keep_temp)
     67                           notebook_name=notebook_name,
     68                           verbose=verbose,
---> 69                           keep_temp=keep_temp)
     70
     71     best_model = None

~/.conda/envs/tensorflow-gpu/lib/python3.6/site-packages/hyperas/optim.py in base_minimizer(model, data, functions, algo, max_evals, trials, rseed, full_model_string, notebook_name, verbose, stack, keep_temp)
     96         model_str = full_model_string
     97     else:
---> 98         model_str = get_hyperopt_model_string(model, data, functions, notebook_name, verbose, stack)
     99     temp_file = './temp_model.py'
    100     write_temp_files(model_str, temp_file)

~/.conda/envs/tensorflow-gpu/lib/python3.6/site-packages/hyperas/optim.py in get_hyperopt_model_string(model, data, functions, notebook_name, verbose, stack)
    196
    197     functions_string = retrieve_function_string(functions, verbose)
--> 198     data_string = retrieve_data_string(data, verbose)
    199     model = hyperopt_keras_model(model_string, parts, aug_parts, verbose)
    200

~/.conda/envs/tensorflow-gpu/lib/python3.6/site-packages/hyperas/optim.py in retrieve_data_string(data, verbose)
    219     data_string = inspect.getsource(data)
    220     first_line = data_string.split("\n")[0]
--> 221     indent_length = len(determine_indent(data_string))
    222     data_string = data_string.replace(first_line, "")
    223     r = re.compile(r'^\s*return.*')

TypeError: object of type 'NoneType' has no len()
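
The failure is inside hyperas, not Keras: retrieve_data_string calls determine_indent on the source text of data(), gets None back because it cannot recognize the body's indentation, and len(None) raises the TypeError. Two plausible triggers are inconsistent indentation inside data() (mixed tabs and spaces) and a notebook_name that does not point at the current, saved .ipynb, so hyperas recovers stale or malformed source. Below is a minimal sketch of the shape hyperas expects, with uniformly four-space-indented bodies; the data here is synthetic placeholder data, not the AVEC pickles used in the notebook:

from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice
import numpy as np

def data():
    # everything the model needs must be created inside this function,
    # since hyperas re-parses and inlines its source text
    x_train = np.random.rand(100, 10)
    y_train = np.random.randint(0, 2, size=(100, 1))
    x_test = np.random.rand(20, 10)
    y_test = np.random.randint(0, 2, size=(20, 1))
    return x_train, y_train, x_test, y_test

def model(x_train, y_train, x_test, y_test):
    from keras.models import Sequential
    from keras.layers import Dense
    m = Sequential()
    # {{choice(...)}} is hyperas template syntax, expanded before execution
    m.add(Dense({{choice([32, 64])}}, activation='relu', input_shape=(10,)))
    m.add(Dense(1, activation='sigmoid'))
    m.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    m.fit(x_train, y_train, epochs=2, verbose=0)
    _, acc = m.evaluate(x_test, y_test, verbose=0)
    return {'loss': -acc, 'status': STATUS_OK, 'model': m}

The notebook's own data() and model function follow.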
def data():
    train_dataset = pd.read_pickle('/shared/eee_comms1/Shared/Avec2018Project/auc_pkls/5400/train_tw_dataset_5400_ro_scaler.pkl')
    aur_train_tw = train_dataset['aur_dataset_mat']
    auc_train_tw = train_dataset['auc_dataset_mat']
    eye_train_tw = train_dataset['eye_dataset_mat']
    pose_train_tw = train_dataset['pose_dataset_mat']
    Y_int = train_dataset['Y']
    Ymrs_train = train_dataset['Ymrs'] / 40
    del train_dataset
    print('aur_train', aur_train_tw.shape)
    print('auc_train', auc_train_tw.shape)
    print('eye_train', eye_train_tw.shape)
    print('Ymrs_train', Ymrs_train.shape)
    print('Y_train', Y_int.shape)
    y_encoder = OneHotEncoder()
    y_sparse = y_encoder.fit_transform(Y_int.reshape((len(Y_int), 1)))
    Y_oh_tr = y_sparse.toarray()
    print('Y_oh.shape', Y_oh_tr.shape)
    dev_dataset = pd.read_pickle('/shared/eee_comms1/Shared/Avec2018Project/auc_pkls/5400/dev_tw_dataset_5400_ro_scaler.pkl')
    aur_dev_tw = dev_dataset['aur_dataset_mat']
    auc_dev_tw = dev_dataset['auc_dataset_mat']
    eye_dev_tw = dev_dataset['eye_dataset_mat']
    pose_dev_tw = dev_dataset['pose_dataset_mat']
    Y_int = dev_dataset['Y']
    Ymrs_dev = dev_dataset['Ymrs'] / 40  # maybe standardization is better
    del dev_dataset
    print('aur_dev', aur_dev_tw.shape)
    print('auc_dev', auc_dev_tw.shape)
    print('eye_dev', eye_dev_tw.shape)
    print('Ymrs_dev', Ymrs_dev.shape)
    print('Y_dev', Y_int.shape)
    y_encoder = OneHotEncoder()
    y_sparse = y_encoder.fit_transform(Y_int.reshape((len(Y_int), 1)))
    Y_oh_dev = y_sparse.toarray()
    print('Y_oh.shape', Y_oh_dev.shape)
    # reg_train_preds, reg_dev_preds, mask and mask_dev are never defined in this
    # gist; they must exist before hyperas inlines this function, or data() will fail.
    x_train = [auc_train_tw, eye_train_tw, pose_train_tw, Y_oh_tr, reg_train_preds, mask]
    y_train = [Y_oh_tr, Ymrs_train, Y_oh_tr]
    x_test = [auc_dev_tw, eye_dev_tw, pose_dev_tw, Y_oh_dev, reg_dev_preds, mask_dev]
    y_test = [Y_oh_dev, Ymrs_dev, Y_oh_dev]
    return x_train, y_train, x_test, y_test
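
# Note: hyperas pastes the source of data() ahead of the model function in the
# temp_model.py it generates, so names defined above (auc_train_tw, Y_int,
# mask_dev, ...) are visible inside incptn_seq below even though they are not
# among its arguments.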
def incptn_seq(x_train, y_train, x_test, y_test):
    inshape_auc = auc_train_tw.shape[1:]
    inshape_eye = eye_train_tw.shape[1:]
    inshape_pose = pose_train_tw.shape[1:]
    # inshape_emo = emo_tr_tw.shape[1:]
    # inshape_emo = tr_emo_feat.shape[1:]
    # inshape_ofsum = selected_from_all_dev.shape[1:]
    mask_inpt = Input(shape=mask_dev.shape[1:], name="mask")
    print('mask_inpt ', mask_inpt.shape)
    print('inshape_aur', aur_train_tw.shape[1:])
    # print('inshape_emo ', tr_emo_feat.shape[1:])
    outshape = len(np.unique(Y_int))
    print('outshape', outshape)
    ## some defaults
    # initializers, optimizers, constraints
    adam = optimizers.Adam(lr=0.03)
    init = initializers.he_normal()
    fc_act = 'relu'
    init_c = initializers.he_uniform()
    binit = 'zeros'
    # binit_c = initializers.glorot_normal()
    binit_c = 'zeros'
    outlist = []
    ### Model declaration
    ## Inputs
    # AU
    # aur_in = Input(shape=inshape_aur, name='inshape_aur')
    # aur = aur_in
    # raw_globalpool = Lambda(global_wavg_lite, arguments={'weights': mask_inpt})(aur)
    # outlist.append(raw_globalpool)
    auc_in = Input(shape=inshape_auc, name='inshape_auc')
    auc = auc_in
    raw_globalpool = Lambda(global_wavg_lite, arguments={'weights': mask_inpt})(auc)
    outlist.append(raw_globalpool)
    # EYE
    eye_in = Input(shape=inshape_eye, name='EYE_input')
    eye = eye_in
    raw_globalpool = Lambda(global_wavg_lite, arguments={'weights': mask_inpt})(eye)
    outlist.append(raw_globalpool)
    # POSE
    pose_in = Input(shape=inshape_pose, name='Pose_input')
    pose = pose_in
    raw_globalpool = Lambda(global_wavg_lite, arguments={'weights': mask_inpt})(pose)
    outlist.append(raw_globalpool)
    # emo_in = Input(shape=inshape_emo, name='emo_input')
    # emo = emo_in
    # raw_globalpool = Lambda(global_wavg_lite, arguments={'weights': mask_inpt})(emo)
    # outlist.append(raw_globalpool)
    # emo_in = Input(shape=inshape_emo, name='inshape_emo')
    # emo = BatchNormalization()(emo_in)
    # outlist.append(emo)
    # ofsum_in = Input(shape=inshape_ofsum, name='OFsum_input')
    # ofsum = BatchNormalization()(ofsum_in)
    # outlist.append(ofsum)
    # Four stride-2 poolings shrink the mask timeline 16x, presumably to match
    # the temporal downsampling inside seq_event_block (not shown in this gist).
    # Note that the pooled mask is not consumed by any later layer in this version.
    mask = mask_inpt
    print('mask ', mask.shape)
    mask = MaxPool1D(pool_size=2, strides=2)(mask)
    print('mask 1 ', mask.shape)
    mask = MaxPool1D(pool_size=2, strides=2)(mask)
    print('mask 2 ', mask.shape)
    mask = MaxPool1D(pool_size=2, strides=2)(mask)
    print('mask 3 ', mask.shape)
    mask = MaxPool1D(pool_size=2, strides=2)(mask)
    print('mask 4 ', mask.shape)
    # mask = MaxPool1D(pool_size=2, strides=2)(mask)
    # print('mask 5 ', mask.shape)
    # mask = MaxPool1D(pool_size=2, strides=2)(mask)
    # print('mask 6 ', mask.shape)
    # mask = MaxPool1D(pool_size=2, strides=2)(mask)
    # print('mask 7 ', mask.shape)
    # sequence extraction
    kreg = regularizers.l1_l2(l1=0.001)
    # seq1 = seq_event_block(aur, fn_inc=32, fn_out=32, kreg=kreg)
    # seq_pool = GlobalRichPool(seq1, name='cnn_globalpool_aur')
    # outlist.append(seq_pool)
    seq2 = seq_event_block(auc, fn_inc=32, fn_out=32, kreg=kreg)
    seq_pool = GlobalRichPool(seq2, name='cnn_globalpool_auc')
    outlist.append(seq_pool)
    seq3 = seq_event_block(eye, fn_inc=32, fn_out=32, kreg=kreg)
    seq_pool = GlobalRichPool(seq3, name='cnn_globalpool_eye')
    outlist.append(seq_pool)
    seq4 = seq_event_block(pose, fn_inc=32, fn_out=32, kreg=kreg)
    seq_pool = GlobalRichPool(seq4, name='cnn_globalpool_pose')
    outlist.append(seq_pool)
    # seq5 = seq_event_block(emo, fn_inc=32, fn_out=32, kreg=kreg)
    # seq_pool = GlobalRichPool(seq5, name='cnn_globalpool_emo')
    # outlist.append(seq_pool)
    # Channel-concat RNN
    # combined = concatenate([seq1, seq2, seq3, seq4], name='Seq_chan_concat')
    # combined = ActivityRegularization(l1=0.003)(combined)
    # rnn = LSTM(32)(combined)
    # outlist.append(rnn)
    # Fully connected layers
    out_reg = concatenate(outlist, name='feat_concat')
    print('Feat combined shape', out_reg.shape)
    out_reg = BatchNormalization()(out_reg)
    out_reg = Dropout(0.5)(out_reg)
    kreg = regularizers.l1_l2(l1=0, l2=0.001)
    out_reg = Dense(200, activation=fc_act, kernel_regularizer=kreg)(out_reg)
    out_reg = bnrelu(out_reg)
    out_reg = Dense(70, activation=fc_act, kernel_regularizer=kreg)(out_reg)
    out_reg = bnrelu(out_reg)
    # out = Dense(768, activation=fc_act)(out)
    # out_reg2clf = Lambda(cont2cat, name='out_reg2clf')(m_out_reg)
    et_in_reg = Input(shape=(1,))
    # reg_bias = Dense(1, activation='linear', name='m_out_reg')(out_reg)
    m_out_reg = Dense(1, activation='linear', name='m_out_reg')(out_reg)
    out_reg_biascorr = Add()([m_out_reg, et_in_reg])
    out_reg2clf = Lambda(cont2cat, name='out_reg2clf')(out_reg_biascorr)
    # clf output
    out_clf = concatenate(outlist, name='clf_feat_concat', axis=1)
    print('Feat combined shape', out_clf.shape)
    out_clf = BatchNormalization()(out_clf)
    out_clf = Dropout(0.5)(out_clf)
    out_clf = Dense(512, activation=fc_act, kernel_regularizer=kreg)(out_clf)
    out_clf = bnrelu(out_clf)
    out_clf = Dense(220, activation=fc_act, kernel_regularizer=kreg)(out_clf)
    out_clf = bnrelu(out_clf)
    out_clf = Dense(120, activation=fc_act, kernel_regularizer=kreg)(out_clf)
    et_in_clf = Input(shape=(3,))
    clf_bias = Dense(3, activation='linear')(out_clf)
    out_bias = Add(name='out_bias')([clf_bias, et_in_clf])
    # out_class = Dense(3, activation='softmax', name='out_class')(out_biascorr_clf)
    # out_class = Dense(3, activation='softmax', name='out_class')(out_clf)
    model = Model(inputs=[auc_in, eye_in, pose_in, et_in_clf, et_in_reg, mask_inpt],
                  outputs=[out_bias, m_out_reg, out_reg2clf])
    # model.add(InputLayer(inshape_emo))
    opti = optimizers.SGD(lr=0.001, clipvalue=0.5)
    loss = dict(
        m_out_reg='mse',
        out_bias='categorical_crossentropy',
        out_reg2clf='categorical_crossentropy'
    )
    metrics = dict(
        m_out_reg='mae',
        out_bias='accuracy',
        out_reg2clf='accuracy'
    )
    loss_weights = dict(
        m_out_reg=20,
        out_bias=1,
        out_reg2clf=1
    )
    model.compile(optimizer=opti, loss=loss, metrics=metrics, loss_weights=loss_weights)
    pth_r = '/shared/eee_comms1/Shared/Avec2018Project/chkpoints2/checkpoint_r_con_seq_4500.hdf5'
    pth_c = '/shared/eee_comms1/Shared/Avec2018Project/chkpoints2/checkpoint_c_con_seq_4500.hdf5'
    pth_r2c = '/shared/eee_comms1/Shared/Avec2018Project/chkpoints2/checkpoint_r2c_con_seq_4500.hdf5'
    reduceLR = ReduceLROnPlateau(monitor='loss', factor=0.42, patience=9, verbose=1, mode='auto', cooldown=10)
    chkpt_c = ModelCheckpoint(pth_c, monitor='val_out_bias_acc', mode='max', verbose=2, save_best_only=True, save_weights_only=True, period=1)
    chkpt_r = ModelCheckpoint(pth_r, monitor='val_m_out_reg_mean_absolute_error', mode='min', verbose=2, save_best_only=True, save_weights_only=True, period=1)
    chkpt_r2c = ModelCheckpoint(pth_r2c, monitor='val_out_reg2clf_acc', mode='max', verbose=2, save_best_only=True, save_weights_only=True, period=1)
    class_weight = {
        0: 3,
        1: 1,
        2: 1.5}
    model.fit(x_train, y_train,
              batch_size={{choice([64, 128])}},
              epochs=120,
              verbose=2,
              validation_data=(x_test, y_test),
              callbacks=[chkpt_r, chkpt_c, chkpt_r2c, reduceLR], class_weight=class_weight)
    # evaluate() on a three-output model returns a list of losses and metrics,
    # so look up the out_bias accuracy by name instead of `score, acc = ...`
    scores = model.evaluate(x_test, y_test, verbose=0)
    acc = scores[model.metrics_names.index('out_bias_acc')]
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
# imports this script actually uses (the original block was left over from the
# hyperas MNIST example and was missing pandas, numpy, sklearn and the Keras layers)
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Lambda, Add, concatenate, BatchNormalization, MaxPool1D
from keras import optimizers, initializers, regularizers
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
x_train, y_train, x_test, y_test = data()
best_run, best_model = optim.minimize(model=incptn_seq,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=5,
                                      trials=Trials(),
                                      # helpers such as global_wavg_lite, seq_event_block,
                                      # GlobalRichPool, bnrelu and cont2cat are not defined in
                                      # this gist; hyperas needs them passed via functions=[...]
                                      # so they end up in the generated temp_model.py
                                      notebook_name='con_seq_inception-1d_5400_hyperas')
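
If minimize() gets past the source-parsing step, best_run holds the winning hyperparameter sample and best_model the corresponding trained model; a quick sanity check (evaluate() returns the same list of losses and metrics as above) might be:

print('Best hyperparameters:', best_run)
print('Dev-set scores:', best_model.evaluate(x_test, y_test, verbose=0))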