# Multitask impact-regression training script (RoBERTa + auxiliary task).
# Author: @ravenscroftj — created August 8, 2022.
# Source gist: https://gist.github.com/ravenscroftj/a3bb94069605ccbf5a2296749b213bd0
#%%
from audioop import mul
import json
from statistics import mean
from typing import List
from matplotlib.pyplot import cla
from requests import delete
import spacy
from spacy import matcher
import torch
import os
import tqdm
import numpy as np
import pandas as pd
from collections import defaultdict
from transformers import TrainingArguments, Trainer
from transformers import DataCollatorWithPadding
from sklearn.metrics import f1_score, mean_absolute_error, r2_score
from datasets import load_dataset
# %%
# --- Experiment configuration (toggle by editing the assignments below). ---
#BASE_IMAGE = "roberta-large"
# Base checkpoint for the shared encoder; "august" loads a locally
# fine-tuned model from AUGUST_MODEL_PATH instead (see model_init below).
BASE_IMAGE="roberta-base"
#BASE_IMAGE="august"
#AUX_TASK = "sciclasses"
#AUX_TASK="claimstrength"
# Auxiliary classification task trained alongside the "impact" regression head.
AUX_TASK="readability"
AUGUST_MODEL_PATH = "../../scientific-writing-strategies/models/IMPACT"
#DATASET = "50shot"
# Which few-shot variant of the impact-regression dataset to load.
DATASET="fewshot"
# One full train/eval run is performed per seed (see the main loop below).
SEEDS=[2, 42, 52]
# %%
# --- "sciclasses" auxiliary task: RCT-200k sentence-classification data. ---
# Reads the raw train/test CSVs ONCE each (the original re-read every file a
# second time), builds a deterministic label -> index mapping over the union
# of labels from both folds, writes integer-labelled copies to <fold>_nlp.csv,
# and leaves `sciclass_files` pointing at the processed files.
# (A dead `fold = 2` assignment, overwritten before any use, was removed.)
data = {}  # filled later with the impact-regression train/test frames

sciclass_files = {
    "train_raw": "../../data/pet/support_tasks/rct-200k/train.csv",
    "test_raw": "../../data/pet/support_tasks/rct-200k/test.csv",
}

# Read each fold once and keep the frame for the rewrite pass below.
_sci_frames = {
    fold: pd.read_csv(sciclass_files[f'{fold}_raw'], names=['label', 'text'])
    for fold in ['test', 'train']
}

# Union of labels across folds; sorted for a stable index assignment.
sci_labels = set()
for _df in _sci_frames.values():
    sci_labels.update(_df.label.unique())
scilabel2idx = {lbl: i for i, lbl in enumerate(sorted(sci_labels))}

for fold in ['test', 'train']:
    df = _sci_frames[fold]
    filepath = os.path.join(os.path.dirname(sciclass_files[f'{fold}_raw']), fold + "_nlp.csv")
    df['label'] = df.label.apply(lambda x: scilabel2idx[x])
    df.to_csv(filepath, index=False)
    # Replace the raw entry so downstream load_dataset() calls only ever
    # see the integer-labelled files.
    sciclass_files[fold] = filepath
    del sciclass_files[f'{fold}_raw']
# --- "readability" auxiliary task: 2-class readability data. ---
# Same preprocessing as the sciclasses task: read each raw CSV once (the
# original read every file twice), build a deterministic label -> index
# mapping, and write integer-labelled copies to <fold>_nlp.csv.
readability_files = {
    "train_raw": "../../data/pet/support_tasks/readability-2class/train.csv",
    "test_raw": "../../data/pet/support_tasks/readability-2class/test.csv",
}

# NOTE: column order here is (text, label) — the opposite of the rct-200k files.
_read_frames = {
    fold: pd.read_csv(readability_files[f'{fold}_raw'], names=['text', 'label'])
    for fold in ['test', 'train']
}

readability_labels = set()
for _df in _read_frames.values():
    readability_labels.update(_df.label.unique())
readlabel2idx = {lbl: i for i, lbl in enumerate(sorted(readability_labels))}

for fold in ['test', 'train']:
    df = _read_frames[fold]
    filepath = os.path.join(os.path.dirname(readability_files[f'{fold}_raw']), fold + "_nlp.csv")
    df['label'] = df.label.apply(lambda x: readlabel2idx[x])
    df.to_csv(filepath, index=False)
    readability_files[fold] = filepath
    del readability_files[f'{fold}_raw']
# --- "claimstrength" auxiliary task. ---
# BUG FIX: `claim_labels` was a flat list of per-fold uniques, so any label
# appearing in both folds was counted twice and len(claim_labels) — used as
# num_labels for the classification head — was inflated. Deduplicate with a
# set, matching how the other auxiliary tasks count their labels.
claim_labels = set()
for fold in ['test', 'train']:
    # NOTE(review): labels are collected from {fold}_pet.csv, but the
    # processed files below are built from {fold}.csv — confirm this
    # filename mismatch is intentional.
    df = pd.read_csv(
        f"../../data/pet/support_tasks/claimstrength/{fold}_pet.csv",
        names=['text', 'label', 'source'],
    )
    claim_labels.update(df.label.unique())

claimstrength_files = {}
for fold in ['test', 'train']:
    df = pd.read_csv(
        f"../../data/pet/support_tasks/claimstrength/{fold}.csv",
        names=['text', 'label', 'source'],
    )
    filepath = os.path.join("../../data/pet/support_tasks/claimstrength/", fold + "_processed.csv")
    df.to_csv(filepath, index=False)
    claimstrength_files[fold] = filepath
# %%
import transformers
import torch.nn as nn
class MultitaskModel(transformers.PreTrainedModel):
    """A bundle of single-task heads that all share one encoder transformer.

    Subclassing PreTrainedModel lets the HF Trainer treat the bundle as a
    single model (device placement, checkpointing, etc.).
    """

    def __init__(self, encoder, taskmodels_dict):
        super().__init__(transformers.PretrainedConfig())
        self.encoder = encoder
        # ModuleDict so every per-task head is registered as a submodule.
        self.taskmodels_dict = nn.ModuleDict(taskmodels_dict)

    @classmethod
    def create(cls, model_name, model_type_dict, model_config_dict):
        """Build one single-task model per task, then splice in a shared encoder.

        The first task's encoder becomes the shared one; every subsequent
        task model has its encoder attribute replaced by that same module.
        """
        shared_encoder = None
        taskmodels_dict = {}
        for task_name, model_type in model_type_dict.items():
            task_model = model_type.from_pretrained(
                model_name,
                config=model_config_dict[task_name],
            )
            encoder_attr = cls.get_encoder_attr_name(task_model)
            if shared_encoder is None:
                shared_encoder = getattr(task_model, encoder_attr)
            else:
                setattr(task_model, encoder_attr, shared_encoder)
            taskmodels_dict[task_name] = task_model
        return cls(encoder=shared_encoder, taskmodels_dict=taskmodels_dict)

    @classmethod
    def get_encoder_attr_name(cls, model):
        """Return the attribute name of the encoder submodule inside `model`.

        Each architecture family names its encoder after itself ("bert",
        "roberta", ...), so dispatch on the class-name prefix.
        """
        model_class_name = model.__class__.__name__
        for prefix, attr in (("Bert", "bert"), ("Roberta", "roberta"), ("Albert", "albert")):
            if model_class_name.startswith(prefix):
                return attr
        raise KeyError(f"Add support for new model {model_class_name}")

    def forward(self, task_name, **kwargs):
        # Route the batch to the head owned by `task_name`.
        return self.taskmodels_dict[task_name](**kwargs)
# %%
# "august" still tokenizes/configures from roberta-base; its fine-tuned
# weights are loaded separately inside model_init().
model_name = "roberta-base" if BASE_IMAGE == "august" else BASE_IMAGE
def model_init():
    """Build a fresh MultitaskModel; invoked by the Trainer once per run.

    NOTE(review): reads the module-level `label_count`, which is only
    assigned later inside the per-seed loop (before the Trainer first calls
    this) — confirm before reordering the script.
    """
    multitask_model = MultitaskModel.create(
        model_name=model_name,
        model_type_dict={
            "impact": transformers.AutoModelForSequenceClassification,
            AUX_TASK: transformers.AutoModelForSequenceClassification,
        },
        model_config_dict={
            # num_labels=1 -> regression head for the impact score.
            "impact": transformers.AutoConfig.from_pretrained(model_name, num_labels=1),
            AUX_TASK: transformers.AutoConfig.from_pretrained(model_name, num_labels=label_count),
        },
    )
    if BASE_IMAGE == "august":
        # Swap in the encoder of a previously fine-tuned model so both task
        # heads share its weights instead of the fresh roberta-base encoder.
        old_model = transformers.AutoModelForSequenceClassification.from_pretrained(AUGUST_MODEL_PATH)
        multitask_model.encoder = old_model.roberta
        multitask_model.taskmodels_dict["impact"].roberta = old_model.roberta
        multitask_model.taskmodels_dict[AUX_TASK].roberta = old_model.roberta
    # Sanity check: identical data pointers mean the word embeddings really
    # are shared across the encoder and both task heads.
    print(multitask_model.encoder.embeddings.word_embeddings.weight.data_ptr())
    print(multitask_model.taskmodels_dict["impact"].roberta.embeddings.word_embeddings.weight.data_ptr())
    print(multitask_model.taskmodels_dict[AUX_TASK].roberta.embeddings.word_embeddings.weight.data_ptr())
    return multitask_model
# %%
# Tokenizer matching the base checkpoint; shared by every task's dataset.
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
# %%
def tokenize_dataset(dataset):
    """Tokenize every split of `dataset` and set torch-tensor output format.

    Fix: the original called dataset.map(...) a second time after
    set_format and discarded the result — pure wasted work, removed.

    Returns the tokenized DatasetDict formatted to yield
    input_ids / label / attention_mask tensors.
    """
    ds = dataset.map(lambda x: tokenizer(x['text'], truncation=True, padding=True))
    ds.set_format(type='torch', columns=['input_ids', 'label', 'attention_mask'])
    return ds
# %%
from torch.utils.data.dataloader import DataLoader
from transformers.data.data_collator import DataCollator, InputDataClass
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from typing import List, Union, Dict
#%%
class StrIgnoreDevice(str):
    """A str that silently ignores .to(device).

    Hack: the Trainer calls .to(device) on every value in a batch, but we
    smuggle an extra `task_name` string into each batch — this keeps that
    call from raising.
    """

    def to(self, device):
        return self


class DataLoaderWithTaskname:
    """Wrap a DataLoader so that every batch it yields carries a task name."""

    def __init__(self, task_name, data_loader):
        self.task_name = task_name
        self.data_loader = data_loader
        # Mirror the attributes the Trainer expects to find on a DataLoader.
        self.batch_size = data_loader.batch_size
        self.dataset = data_loader.dataset

    def __len__(self):
        return len(self.data_loader)

    def __iter__(self):
        # Tag each batch with the task it belongs to before yielding it.
        for batch in self.data_loader:
            batch["task_name"] = StrIgnoreDevice(self.task_name)
            yield batch
class MultitaskDataloader:
    """Combine several single-task data loaders into one iterable.

    Each step draws a batch from one task's loader. Tasks are sampled in
    proportion to their number of batches, so every batch of every task is
    seen exactly once per epoch, in a shuffled task order.
    """

    def __init__(self, dataloader_dict):
        self.dataloader_dict = dataloader_dict
        self.num_batches_dict = {
            name: len(loader) for name, loader in dataloader_dict.items()
        }
        self.task_name_list = list(self.dataloader_dict)
        # Dummy dataset of the combined length — the Trainer only needs len().
        total_examples = sum(len(loader.dataset) for loader in dataloader_dict.values())
        self.dataset = [None] * total_examples

    def __len__(self):
        return sum(self.num_batches_dict.values())

    def __iter__(self):
        """Yield batches in a shuffled, size-proportional task schedule.

        The schedule holds one entry per batch (the index of its task);
        shuffling it gives size-proportional sampling — swap this out to
        sample from some other distribution.
        """
        schedule = []
        for task_index, task_name in enumerate(self.task_name_list):
            schedule.extend([task_index] * self.num_batches_dict[task_name])
        schedule = np.array(schedule)
        np.random.shuffle(schedule)
        iterators = {
            name: iter(loader) for name, loader in self.dataloader_dict.items()
        }
        for task_index in schedule:
            yield next(iterators[self.task_name_list[task_index]])
class MultitaskTrainer(transformers.Trainer):
    """Trainer whose train dataloader interleaves batches from several tasks."""

    def get_single_train_dataloader(self, task_name, train_dataset):
        """Build one task's DataLoader, wrapped so its batches carry the task name."""
        if self.train_dataset is None:
            raise ValueError("Trainer: training requires a train_dataset.")
        # Distributed runs need a DistributedSampler; otherwise shuffle locally.
        if self.args.local_rank == -1:
            sampler = RandomSampler(train_dataset)
        else:
            sampler = DistributedSampler(train_dataset)
        return DataLoaderWithTaskname(
            task_name=task_name,
            data_loader=DataLoader(
                train_dataset,
                batch_size=self.args.train_batch_size,
                sampler=sampler,
                collate_fn=self.data_collator,
            ),
        )

    def get_train_dataloader(self):
        """Return a MultitaskDataloader over all tasks.

        Not an actual DataLoader — just an iterable the Trainer can consume,
        sampling each step's batch from one of the per-task loaders.
        """
        per_task = {
            name: self.get_single_train_dataloader(name, dataset)
            for name, dataset in self.train_dataset.items()
        }
        return MultitaskDataloader(per_task)
# %%
# --- Main experiment loop: one full train + eval + metrics run per seed. ---
# NOTE(review): the original paste lost its indentation; everything through
# the metrics dump is reconstructed as being inside the seed loop, since the
# prediction/metrics output paths depend on RANDOM_SEED — confirm intent.
# Cleanups vs. the original: duplicate sklearn import removed (already
# imported at the top), duplicate unlabelled AUX_TASK/BASE_IMAGE prints
# merged into the labelled ones, redundant second set_format on the train
# splits dropped, and makedirs uses exist_ok instead of an exists() check.
for RANDOM_SEED in SEEDS:
    # Load this seed's impact-regression fold and write plain text/label
    # CSVs for load_dataset() to pick up.
    for split in ['test', 'train']:
        data[split] = pd.read_csv(
            f"../../data/pet/1sent_regression_all3_{DATASET}/{RANDOM_SEED}/fold_2/{split}.csv",
            names=['sent', 'logit'],
        )
        # The regression target is the raw logit; keep text/label column
        # names that load_dataset/tokenize_dataset expect.
        data[split]['label'] = data[split]['logit']
        data[split]['text'] = data[split]['sent']
        data[split][['text', 'label']].to_csv(f"{split}.csv", index=False)

    data_files = {
        "train": "train.csv",
        "test": "test.csv",
    }
    dataset_dict = {
        "impact": load_dataset("csv", data_files=data_files),
    }

    # Attach the configured auxiliary task and record its label count,
    # which model_init() reads to size the classification head.
    if AUX_TASK == "sciclasses":
        dataset_dict["sciclasses"] = load_dataset("csv", data_files=sciclass_files)
        label_count = len(sci_labels)
    elif AUX_TASK == "claimstrength":
        dataset_dict["claimstrength"] = load_dataset("csv", data_files=claimstrength_files)
        label_count = len(claim_labels)
    elif AUX_TASK == "readability":
        dataset_dict["readability"] = load_dataset("csv", data_files=readability_files)
        label_count = len(readability_labels)

    # Quick visual sanity check of each task's first training example.
    for task_name in dataset_dict:
        print(task_name)
        print(dataset_dict[task_name]["train"][0])
        print()

    features_dict = {name: tokenize_dataset(ds) for name, ds in dataset_dict.items()}
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer, padding=True)

    train_dataset = {
        task_name: dataset["train"] for task_name, dataset in features_dict.items()
    }
    for ds in train_dataset.values():
        ds.set_format(type='torch', columns=['input_ids', 'label', 'attention_mask'])

    trainer = MultitaskTrainer(
        model_init=model_init,
        args=transformers.TrainingArguments(
            output_dir="./results",
            overwrite_output_dir=True,
            learning_rate=2e-5,
            do_train=True,
            num_train_epochs=5,
            weight_decay=0.01,
            # Adjust batch size if this doesn't fit on the GPU.
            per_device_train_batch_size=8,
            per_device_eval_batch_size=16,
            save_steps=3000,
            seed=RANDOM_SEED,
        ),
        data_collator=data_collator,
        train_dataset=train_dataset,
    )

    trainer.train()

    # --- Evaluate on the impact test split. ---
    test_sampler = trainer._get_eval_sampler(features_dict['impact']['test'])
    features_dict['impact']['test'].set_format(
        type='torch', columns=['input_ids', 'label', 'attention_mask']
    )
    eval_loader = DataLoaderWithTaskname("impact", DataLoader(
        features_dict['impact']['test'],
        sampler=test_sampler,
        batch_size=trainer.args.eval_batch_size,
        collate_fn=trainer.data_collator,
        drop_last=trainer.args.dataloader_drop_last,
        pin_memory=trainer.args.dataloader_pin_memory,
    ))
    preds = trainer.prediction_loop(eval_loader, description="Eval")

    # Binarize gold logits and predictions at 0.5 for the F1 scores;
    # MAE/R2 are computed on the raw values.
    y_test = data['test'].logit >= 0.5
    pred_logits = np.ravel(preds.predictions)
    y_pred = pred_logits >= 0.5

    # Persist per-example predictions and summary metrics for this run.
    df = data['test'].copy()
    df['pred'] = pred_logits
    result_dir = f"../../results/baselines/{BASE_IMAGE}_{AUX_TASK}_{RANDOM_SEED}_{DATASET}"
    os.makedirs(result_dir, exist_ok=True)
    df.to_csv(os.path.join(result_dir, "predictions.csv"), index=False)

    print("Dataset:", DATASET)
    print("Aux:", AUX_TASK)
    print("F1:", f1_score(y_test, y_pred))
    print("MAE:", mean_absolute_error(data['test'].logit, pred_logits))
    print("R2:", r2_score(data['test'].logit, pred_logits))
    with open(os.path.join(result_dir, "metrics.json"), "w") as f:
        json.dump({
            "r2": r2_score(data['test'].logit, pred_logits),
            "mae": mean_absolute_error(data['test'].logit, pred_logits),
            "f1_binary": f1_score(y_test, y_pred, average="binary"),
            "f1_macro": f1_score(y_test, y_pred, average="macro"),
            "f1_micro": f1_score(y_test, y_pred, average="micro"),
        }, f, indent=2)
# (End of gist; GitHub comment-footer chrome removed.)