import tensorflow as tf
from transformers import BertConfig, TFBertModel

def create_model(model_name):
    # We only need the final hidden states, not every intermediate layer
    config = BertConfig()
    config.output_hidden_states = False
    # Two separate BERT encoders: one for the question, one for the answer
    question_bert_model = TFBertModel.from_pretrained(model_name, config=config)
    answer_bert_model = TFBertModel.from_pretrained(model_name, config=config)
    question_enc = tf.keras.layers.Input((MAX_SEQUENCE_LENGTH,), dtype=tf.int32)
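The snippet above is cut off right after the first input layer. As a rough sketch of how the two encoders might be joined downstream, assuming average pooling, concatenation, and a sigmoid head sized to the label matrix (none of which are confirmed by the original code):

# Hypothetical completion of create_model; everything past the input layers
# is an assumption, not the original code.
def create_model_sketch(model_name, max_len, n_targets):
    config = BertConfig()
    config.output_hidden_states = False
    question_bert = TFBertModel.from_pretrained(model_name, config=config)
    answer_bert = TFBertModel.from_pretrained(model_name, config=config)

    question_ids = tf.keras.layers.Input((max_len,), dtype=tf.int32)
    answer_ids = tf.keras.layers.Input((max_len,), dtype=tf.int32)

    # Average-pool each encoder's last hidden state into one vector per sequence
    q_vec = tf.keras.layers.GlobalAveragePooling1D()(question_bert(question_ids)[0])
    a_vec = tf.keras.layers.GlobalAveragePooling1D()(answer_bert(answer_ids)[0])

    merged = tf.keras.layers.Concatenate()([q_vec, a_vec])
    out = tf.keras.layers.Dense(n_targets, activation='sigmoid')(merged)
    return tf.keras.Model(inputs=[question_ids, answer_ids], outputs=out)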
import numpy as np
from tqdm import tqdm

def tokenize(sentences, tokenizer):
    # Convert raw sentences into fixed-length id / mask / segment arrays
    input_ids, input_masks, input_segments = [], [], []
    for sentence in tqdm(sentences):
        inputs = tokenizer.encode_plus(sentence, add_special_tokens=True, max_length=128,
                                       pad_to_max_length=True, return_attention_mask=True,
                                       return_token_type_ids=True)
        input_ids.append(inputs['input_ids'])
        input_masks.append(inputs['attention_mask'])
        input_segments.append(inputs['token_type_ids'])
    return (np.asarray(input_ids, dtype='int32'),
            np.asarray(input_masks, dtype='int32'),
            np.asarray(input_segments, dtype='int32'))
from transformers import DistilBertTokenizer, RobertaTokenizer

distil_bert = 'distilbert-base-uncased'  # Pick any desired pre-trained model
roberta = 'roberta-base'  # the hub has no 'roberta-base-uncase' checkpoint

# Defining the DistilBERT tokenizer (distinct names so the second call doesn't overwrite the first)
distilbert_tokenizer = DistilBertTokenizer.from_pretrained(distil_bert, do_lower_case=True,
                                                           add_special_tokens=True,
                                                           max_length=128, pad_to_max_length=True)

# Defining the RoBERTa tokenizer
roberta_tokenizer = RobertaTokenizer.from_pretrained(roberta, do_lower_case=True,
                                                     add_special_tokens=True,
                                                     max_length=128, pad_to_max_length=True)
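As a quick sanity check, the tokenize helper above can be run on a couple of toy sentences (made up here purely for illustration) with whichever tokenizer you picked:

# Toy example: each array should come back padded to 128 tokens
sentences = ["How do I fix a flat tire?", "Use a patch kit or replace the tube."]
ids, masks, segments = tokenize(sentences, distilbert_tokenizer)
print(ids.shape, masks.shape, segments.shape)  # (2, 128) each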
X_train = [padded_docs_train_question_title, padded_docs_train_question_body, padded_docs_train_question_answer,
           X_train_category, X_train_host, num_field_train_scalar]
x_cv = [padded_docs_cv_question_title, padded_docs_cv_question_body, padded_docs_cv_question_answer,
        X_cv_category, X_cv_host, num_field_cv_scalar]
model_4.compile(optimizer='adam', loss='binary_crossentropy', metrics=['mse', 'mae'])
history_4 = model_4.fit(X_train, y_train, epochs=60, batch_size=400, verbose=1, validation_data=(x_cv, y_cv))
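As an optional follow-up, the History object returned by fit records per-epoch metrics, which makes it easy to eyeball over-fitting. A minimal sketch (with tf.keras the keys are 'mse'/'val_mse'; older standalone Keras reports 'mean_squared_error'/'val_mean_squared_error' instead):

import matplotlib.pyplot as plt

# Compare train vs. validation MSE across the 60 epochs
plt.plot(history_4.history['mse'], label='train mse')
plt.plot(history_4.history['val_mse'], label='val mse')
plt.xlabel('epoch')
plt.ylabel('mse')
plt.legend()
plt.show()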
from keras.layers import Input, Embedding, LSTM, Dropout, Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D

# Title branch: frozen pre-trained embeddings -> Conv1D -> max-pooling -> LSTM
preprocessed_question_title_input = Input(shape=(max_length,), name="preprocessed_question_title")
emb = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=max_length, trainable=False)(preprocessed_question_title_input)
conv1 = Conv1D(filters=32, kernel_size=4, activation='relu')(emb)
pool1 = MaxPooling1D(pool_size=2)(conv1)
lstm_CNN = LSTM(128, return_sequences=True)(pool1)
dropout_CNN_LSTM = Dropout(0.2)(lstm_CNN)
flat_title = Flatten()(dropout_CNN_LSTM)
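The question-body and answer branches repeat this exact pattern, so one optional refactor (mine, not the original code) is to wrap the branch in a small helper and call it once per text input:

def text_branch(inp, vocab_size, embedding_matrix, max_length):
    # Frozen embeddings -> Conv1D -> pooling -> LSTM -> flatten, as above
    e = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=max_length, trainable=False)(inp)
    c = Conv1D(filters=32, kernel_size=4, activation='relu')(e)
    p = MaxPooling1D(pool_size=2)(c)
    l = LSTM(128, return_sequences=True)(p)
    return Flatten()(Dropout(0.2)(l))

# e.g. flat_title = text_branch(preprocessed_question_title_input, vocab_size, embedding_matrix, max_length)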
X_train = [padded_docs_train_question_title, padded_docs_train_question_body, padded_docs_train_question_answer,
           X_train_category, X_train_host, num_field_train_scalar]
x_cv = [padded_docs_cv_question_title, padded_docs_cv_question_body, padded_docs_cv_question_answer,
        X_cv_category, X_cv_host, num_field_cv_scalar]
model_3.compile(optimizer='adam', loss='binary_crossentropy', metrics=['mse', 'mae'])
history_3 = model_3.fit(X_train, y_train, epochs=60, batch_size=400, verbose=1, validation_data=(x_cv, y_cv))
from keras.layers import Input, Embedding, Dropout, Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D

# Title branch: frozen pre-trained embeddings -> Conv1D -> max-pooling (no LSTM here)
preprocessed_question_title_input = Input(shape=(max_length,), name="preprocessed_question_title")
emb = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=max_length, trainable=False)(preprocessed_question_title_input)
conv1 = Conv1D(filters=32, kernel_size=4, activation='relu')(emb)
pool1 = MaxPooling1D(pool_size=2)(conv1)
dropoutcnv = Dropout(0.2)(pool1)
flat_title = Flatten()(dropoutcnv)
concat_layers = []
X_train = [padded_docs_train_question_title, padded_docs_train_question_body, padded_docs_train_question_answer,
           X_train_category, X_train_host, num_field_train_scalar]
x_cv = [padded_docs_cv_question_title, padded_docs_cv_question_body, padded_docs_cv_question_answer,
        X_cv_category, X_cv_host, num_field_cv_scalar]
model_2.compile(optimizer='adam', loss='binary_crossentropy', metrics=['mse', 'mae'])
history_2 = model_2.fit(X_train, y_train, epochs=60, batch_size=400, verbose=1, validation_data=(x_cv, y_cv))
from keras.layers import Concatenate

# Flattened outputs of every branch: three text inputs, two categorical
# embeddings, and the dense numeric features
concat_layers = []
concat_layers.append(flat_title)
concat_layers.append(flat_question)
concat_layers.append(flat_answer)
concat_layers.append(category_flat)
concat_layers.append(host_flat)
concat_layers.append(num_field_dense)
# Merge everything into one feature vector (note: this rebinds the list name to a tensor)
concat_layers = Concatenate()(concat_layers)
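From here the merged vector typically feeds a small dense head. The layer sizes below and the input tensors other than the title input are my placeholders for what the notebook defines elsewhere, not the original code:

from keras.layers import Dense
from keras.models import Model

# Hypothetical head: sizes are assumptions; the output width matches y_train
x = Dense(256, activation='relu')(concat_layers)
x = Dropout(0.2)(x)
output = Dense(y_train.shape[1], activation='sigmoid')(x)

# All input names except the title input are placeholders for the other branches
model = Model(inputs=[preprocessed_question_title_input, question_body_input, question_answer_input,
                      category_input, host_input, num_field_input],
              outputs=output)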
from keras.layers import Input, Embedding, LSTM, Dropout, Flatten

# Title branch: frozen pre-trained embeddings -> LSTM (no convolution)
preprocessed_question_title_input = Input(shape=(max_length,), name="preprocessed_question_title")
emb = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=max_length, trainable=False)(preprocessed_question_title_input)
lstm = LSTM(128, return_sequences=True)(emb)
dropoutlstm = Dropout(0.2)(lstm)
flat_title = Flatten()(dropoutlstm)