# Training a GPT-2 language model from scratch with simpletransformers.
# Gist by @ThilinaRajapakse, created April 27, 2020.
import logging

from simpletransformers.language_modeling import LanguageModelingModel

# Show training progress, but silence the noisy transformers internals.
logging.basicConfig(level=logging.INFO)
transformers_logger = logging.getLogger("transformers")
transformers_logger.setLevel(logging.WARNING)
train_args = {
    "reprocess_input_data": True,
    "overwrite_output_dir": True,
    "num_train_epochs": 20,
    "save_eval_checkpoints": True,
    # Examples longer than this are split up by the sliding window below.
    "block_size": 509,
    "max_seq_length": 509,
    # "save_model_every_epoch": False,
    "learning_rate": 1e-4,
    # Effective batch size: 16 * 4 = 64 via gradient accumulation.
    "train_batch_size": 16,
    "gradient_accumulation_steps": 4,
    # GPT-2 is a causal LM, so masked language modeling is disabled.
    "mlm": False,
    "dataset_type": "simple",
    "logging_steps": 100,
    "evaluate_during_training": True,
    "evaluate_during_training_steps": 3000,
    "evaluate_during_training_verbose": True,
    "use_cached_eval_features": True,
    "sliding_window": True,
    "use_multiprocessing": False,
    # Vocabulary size for the tokenizer trained from scratch.
    "vocab_size": 10000,
    "output_dir": "outputs/from_scratch",
    "best_model_dir": "outputs/from_scratch/best_model",
}
train_file = "data/train.txt"
test_file = "data/test.txt"
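
# A minimal sanity check (added here, not part of the original gist): both
# files are expected to be plain-text corpora, typically one example per
# line, which is what the "simple" dataset type consumes.
import os

for path in (train_file, test_file):
    assert os.path.isfile(path), f"expected a text file at {path}"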
# Passing None as the model name (together with train_files) trains a new
# model from scratch. Swap in one of the commented paths to resume from a
# saved model or checkpoint instead.
model = LanguageModelingModel(
    "gpt2",
    None,
    # "outputs/best_model",
    # "outputs/checkpoint-28000",
    args=train_args,
    train_files=train_file,
    # use_cuda=False,
)
# When the model name is None, a new tokenizer is trained from train_files
# during initialization, so the explicit call is optional:
# model.train_tokenizer(train_file)
# Train, evaluating on the test file every evaluate_during_training_steps.
model.train_model(
    train_file,
    eval_file=test_file,
)

# Final evaluation on the held-out test set.
model.eval_model(test_file)
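
# Once training finishes, the saved model can be loaded for text generation.
# A minimal sketch, assuming simpletransformers' LanguageGenerationModel API;
# the prompt below is purely illustrative.
from simpletransformers.language_generation import LanguageGenerationModel

generator = LanguageGenerationModel("gpt2", train_args["best_model_dir"])
print(generator.generate("The meaning of"))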