# http://zetcode.com/python/prettytable/
from prettytable import PrettyTable

x = PrettyTable()
x.field_names = ["Model", "Train Loss", "Spearman Validation Score"]
x.add_row(["BERT Base", 0.3617, 0.40097])
x.add_row(["RoBERTa", 0.3817, 0.3953])
x.add_row(["XLNet", 0.3614, 0.40099])
print(x)
from prettytable import PrettyTable

x = PrettyTable()
x.field_names = ["Sentence Vectoriser", "Model", "Train Loss", "Validation Loss", "Spearman Validation Score"]
x.add_row(["Universal Sentence Encoder", "Model_1", 0.38, 0.40, 0.349])
x.add_row(["FastText Word Vectors", "Model_2", 0.51, 0.54, 0.30])
x.add_row(["FastText Word Vectors", "Model_3", 0.50, 0.53, 0.31])
x.add_row(["FastText Word Vectors", "Model_4", 0.46, 0.51, 0.29])
print(x)
import tensorflow as tf
from transformers import XLNetConfig, TFXLNetModel

def create_model(model_name):
    config = XLNetConfig()
    config.output_hidden_states = False
    # Two separate XLNet encoders: one for the question, one for the answer
    question_bert_model = TFXLNetModel.from_pretrained(model_name)
    answer_bert_model = TFXLNetModel.from_pretrained(model_name)
    question_enc = tf.keras.layers.Input((MAX_SEQUENCE_LENGTH,), dtype=tf.int32)
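# --- Hedged sketch (not from the source): the create_model snippets in this
# --- gist are truncated after the first input layer. One plausible way to
# --- finish such a two-tower model; N_TARGETS and the mean-pooling head are
# --- assumptions, and MAX_SEQUENCE_LENGTH is taken to match the tokenizers
# --- below (128).
N_TARGETS = 30  # placeholder: number of regression targets

def create_model_sketch(model_name):
    question_bert_model = TFXLNetModel.from_pretrained(model_name)
    answer_bert_model = TFXLNetModel.from_pretrained(model_name)
    question_enc = tf.keras.layers.Input((MAX_SEQUENCE_LENGTH,), dtype=tf.int32)
    answer_enc = tf.keras.layers.Input((MAX_SEQUENCE_LENGTH,), dtype=tf.int32)
    q_seq = question_bert_model(question_enc)[0]  # last hidden states
    a_seq = answer_bert_model(answer_enc)[0]
    # Mean-pool each tower over the sequence axis, then merge
    q_vec = tf.keras.layers.GlobalAveragePooling1D()(q_seq)
    a_vec = tf.keras.layers.GlobalAveragePooling1D()(a_seq)
    merged = tf.keras.layers.Concatenate()([q_vec, a_vec])
    output = tf.keras.layers.Dense(N_TARGETS, activation='sigmoid')(merged)
    return tf.keras.Model(inputs=[question_enc, answer_enc], outputs=output)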
from transformers import XLNetTokenizer

model = 'xlnet-base-cased'  # Pick any desired pre-trained model
# Defining the XLNet tokenizer (note: the checkpoint is cased, so
# do_lower_case=True lowercases text the model was not trained on)
tokenizer = XLNetTokenizer.from_pretrained(model, do_lower_case=True, add_special_tokens=True,
                                           max_length=128, pad_to_max_length=True)
from transformers import RobertaConfig, TFRobertaModel

def create_model(model_name):
    config = RobertaConfig()
    config.output_hidden_states = False
    # Note: config is built but never passed to from_pretrained here,
    # so it has no effect on the loaded encoders
    question_bert_model = TFRobertaModel.from_pretrained(model_name)
    answer_bert_model = TFRobertaModel.from_pretrained(model_name)
    question_enc = tf.keras.layers.Input((MAX_SEQUENCE_LENGTH,), dtype=tf.int32)
from transformers import BertConfig, TFBertModel

def create_model(model_name):
    # A fresh BertConfig() matches bert-base defaults; for other checkpoints
    # use BertConfig.from_pretrained(model_name) instead
    config = BertConfig()
    config.output_hidden_states = False
    question_bert_model = TFBertModel.from_pretrained(model_name, config=config)
    answer_bert_model = TFBertModel.from_pretrained(model_name, config=config)
    question_enc = tf.keras.layers.Input((MAX_SEQUENCE_LENGTH,), dtype=tf.int32)
from transformers import BertTokenizer

model_name = 'bert-base-uncased'  # Pick any desired pre-trained model
# Defining the BertTokenizer tokenizer
tokenizer = BertTokenizer.from_pretrained(model_name, do_lower_case=True, add_special_tokens=True,
                                          max_length=128, pad_to_max_length=True)
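# --- Illustrative usage (the sample sentence is ours, not from the source):
# --- encode_plus pads/truncates to max_length and adds [CLS]/[SEP] markers.
enc = tokenizer.encode_plus("How do I freeze encoder layers in Keras?",
                            max_length=128, pad_to_max_length=True)
print(len(enc["input_ids"]))   # -> 128 after padding
print(enc["input_ids"][:8])    # first few token ids, starting with [CLS]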
from transformers import RobertaTokenizer, TFRobertaModel

roberta = 'roberta-base'  # Pick any desired pre-trained model (RoBERTa has no uncased variant)
# Defining the RoBERTa tokenizer
tokenizer = RobertaTokenizer.from_pretrained(roberta, do_lower_case=True, add_special_tokens=True,
                                             max_length=128, pad_to_max_length=True)
model = create_model(model_name)
# Low learning rate for fine-tuning the pre-trained encoders
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-5)
model.compile(loss='binary_crossentropy', optimizer=optimizer)
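# --- Hedged sketch (all names are placeholders, not from the source): the
# --- two-tower model expects the padded question and answer id matrices as
# --- inputs, and a multi-label target matrix in [0, 1] as y, which is what
# --- binary_crossentropy assumes.
model.fit([q_train_ids, a_train_ids], y_train,
          validation_data=([q_val_ids, a_val_ids], y_val),
          epochs=3, batch_size=8)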