Multilabel Sequence Classification with BERT
@dainis-boumber · Last active April 29, 2019
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "Multilabel Sequence Classification with BERT",
"version": "0.3.2",
"provenance": [],
"collapsed_sections": [],
"toc_visible": true,
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/dainis-boumber/f7acc0f31eccdeca8f69bb1987e6d040/copy-of-bert-end-to-end-fine-tuning-predicting-with-cloud-tpu-sentence-and-sentence-pair-classification-tasks.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"metadata": {
"id": "meO7ZaISZfZ1",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"<a href=\"https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"metadata": {
"id": "9H2GhO39xneB",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"# Multilabel BERT\n",
"\n",
"### Author: Dainis Boumber\n",
"### Modification of work by Google and extended from pytorch-pretrained-bert"
]
},
{
"metadata": {
"id": "VyD16g4Zdr5Z",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"import sys\n",
"import os\n",
"import pprint\n",
"import torch\n",
"\n",
"torch.cuda.is_available()\n",
"\n",
"!test -d bert_repo || git clone https://github.com/google-research/bert bert_repo\n",
"!pip install pytorch-pretrained-bert\n",
" \n",
"if not 'bert_repo' in sys.path:\n",
" sys.path += ['bert_repo']\n",
" \n",
"!test -d data || mkdir data mkdir data/twitter; mkdir data/twitter/tmp\n",
" \n"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "ZGIYKTOQe6Ow",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"# This takes a while dont run if you wont use fp16"
]
},
{
"metadata": {
"id": "ms2cQQ9Ee3s1",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"! test -d ./apex || git clone https://github.com/NVIDIA/apex \n",
"! pip install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./apex\n",
"\n",
"if not 'apex' in sys.path:\n",
" sys.path += ['apex']"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "HUBP35oCDmbF",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"### Prepare and import BERT and other modules\n"
]
},
{
"metadata": {
"id": "7wzwke0sxS6W",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"from pytorch_pretrained_bert.tokenization import BertTokenizer, WordpieceTokenizer\n",
"from pytorch_pretrained_bert.modeling import BertForPreTraining, BertPreTrainedModel, BertModel, BertConfig, BertForMaskedLM, BertForSequenceClassification\n",
"from pathlib import Path\n",
"import torch\n",
"import re\n",
"from torch import Tensor\n",
"from torch.nn import BCEWithLogitsLoss\n",
"from fastai.text import Tokenizer, Vocab\n",
"import pandas as pd\n",
"import collections\n",
"import os\n",
"import pdb\n",
"from tqdm import tqdm, trange\n",
"import sys\n",
"import random\n",
"import numpy as np\n",
"from tqdm import tqdm_notebook as tqdm\n",
"import apex\n",
"from sklearn.model_selection import train_test_split\n",
"module_path = os.path.abspath(os.path.join('..'))\n",
"if module_path not in sys.path:\n",
" sys.path.append(module_path)\n",
"\n",
"from sklearn.metrics import roc_curve, auc\n",
"\n",
"\n",
"from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n",
"from torch.utils.data.distributed import DistributedSampler\n",
"from pytorch_pretrained_bert.optimization import BertAdam\n",
"import logging\n",
"logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n",
" datefmt='%m/%d/%Y %H:%M:%S',\n",
" level=logging.INFO)\n",
"logger = logging.getLogger(__name__)"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "DHTo0-wYV-cz",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"class BertForMultiLabelClassification(BertPreTrainedModel):\n",
" \"\"\"BERT model for classification.\n",
" This module is composed of the BERT model with a linear layer on top of\n",
" the pooled output.\n",
" Params:\n",
" `config`: a BertConfig class instance with the configuration to build a new model.\n",
" `num_labels`: the number of classes for the classifier. Default = 2.\n",
" Inputs:\n",
" `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n",
" with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n",
" `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n",
" `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n",
" types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n",
" a `sentence B` token (see BERT paper for more details).\n",
" `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n",
" selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n",
" input sequence length in the current batch. It's the mask that we typically use for attention when\n",
" a batch has varying length sentences.\n",
" `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n",
" with indices selected in [0, ..., num_labels].\n",
" Outputs:\n",
" if `labels` is not `None`:\n",
" Outputs the CrossEntropy classification loss of the output with the labels.\n",
" if `labels` is `None`:\n",
" Outputs the classification logits of shape [batch_size, num_labels].\n",
" Example usage:\n",
" ```python\n",
" # Already been converted into WordPiece token ids\n",
" input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n",
" input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n",
" token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n",
" config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n",
" num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n",
" num_labels = 2\n",
" model = BertForSequenceClassification(config, num_labels)\n",
" logits = model(input_ids, token_type_ids, input_mask)\n",
" ```\n",
" \"\"\"\n",
" def __init__(self, config, num_labels=4):\n",
" super(BertForMultiLabelClassification, self).__init__(config)\n",
" self.num_labels = num_labels\n",
" self.bert = BertModel(config)\n",
" self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)\n",
" self.classifier = torch.nn.Linear(config.hidden_size, num_labels)\n",
" self.apply(self.init_bert_weights)\n",
"\n",
" def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n",
" _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n",
" pooled_output = self.dropout(pooled_output)\n",
" logits = self.classifier(pooled_output)\n",
"\n",
" if labels is not None:\n",
" loss_fct = BCEWithLogitsLoss()\n",
" loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1, self.num_labels))\n",
" return loss\n",
" else:\n",
" return logits\n",
"\n",
" def freeze_bert_encoder(self):\n",
" for param in self.bert.parameters():\n",
" param.requires_grad = False\n",
"\n",
" def unfreeze_bert_encoder(self):\n",
" for param in self.bert.parameters():\n",
" param.requires_grad = True"
],
"execution_count": 0,
"outputs": []
},
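{
"metadata": {
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"A minimal sketch of the multilabel head in isolation, assuming only the class defined above: it builds a deliberately tiny random-weight `BertConfig` (not the real `bert-base-uncased` weights used later) and pushes made-up token ids and multi-hot labels through the model to check the logit shape and the BCE-with-logits loss path."
]
},
{
"metadata": {
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# Sketch only: toy config and made-up inputs, purely to sanity-check shapes.\n",
"toy_config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=96,\n",
"                        num_hidden_layers=2, num_attention_heads=2, intermediate_size=128)\n",
"toy_model = BertForMultiLabelClassification(toy_config, num_labels=4)\n",
"toy_input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])   # arbitrary WordPiece ids\n",
"toy_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n",
"toy_segments = torch.zeros_like(toy_input_ids)\n",
"toy_labels = torch.FloatTensor([[1, 0, 1, 0], [0, 1, 1, 1]])   # multi-hot targets, not class indices\n",
"with torch.no_grad():\n",
"    toy_logits = toy_model(toy_input_ids, toy_segments, toy_mask)            # shape [2, 4]\n",
"    toy_loss = toy_model(toy_input_ids, toy_segments, toy_mask, toy_labels)  # scalar BCEWithLogitsLoss\n",
"print(toy_logits.shape, toy_loss.item())"
],
"execution_count": 0,
"outputs": []
},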
{
"metadata": {
"id": "872USawseROx",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"\"\"\"The following code overwrites some core BERT functionality\"\"\"\n",
"\n",
"class InputExample(object):\n",
" \"\"\"A single training/test example for simple sequence classification.\"\"\"\n",
"\n",
" def __init__(self, guid, text_a, text_b=None, label=None):\n",
" \"\"\"Constructs a InputExample.\n",
"\n",
" Args:\n",
" guid: Unique id for the example.\n",
" text_a: string. The untokenized text of the first sequence. For single\n",
" sequence tasks, only this sequence must be specified.\n",
" text_b: (Optional) string. The untokenized text of the second sequence.\n",
" Only must be specified for sequence pair tasks.\n",
" labels: (Optional) [string]. The label of the example. This should be\n",
" specified for train and dev examples, but not for test examples.\n",
" \"\"\"\n",
" self.guid = guid\n",
" self.text_a = text_a\n",
" self.text_b = text_b\n",
" self.label = label\n",
"\n",
"\n",
"class InputFeatures(object):\n",
" \"\"\"A single set of features of data.\"\"\"\n",
"\n",
" def __init__(self, input_ids, input_mask, segment_ids, label_id):\n",
" self.input_ids = input_ids\n",
" self.input_mask = input_mask\n",
" self.segment_ids = segment_ids\n",
" self.label_id = label_id\n",
"\n",
"class DataProcessor(object):\n",
" \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n",
"\n",
" def get_train_examples(self, data_dir):\n",
" \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n",
" raise NotImplementedError()\n",
"\n",
" def get_dev_examples(self, data_dir):\n",
" \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n",
" raise NotImplementedError()\n",
"\n",
" def get_test_examples(self, data_dir, data_file_name, size=-1):\n",
" \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n",
" raise NotImplementedError() \n",
"\n",
" def get_labels(self):\n",
" \"\"\"Gets the list of labels for this data set.\"\"\"\n",
" raise NotImplementedError()\n",
" \n",
"class MultilabelProcessor(DataProcessor):\n",
" \"\"\"Custom Processor for the Myers-Briggs Data.\n",
"\n",
" Assumes input data as follows:\n",
"\n",
" id at index 0,text at index 1, labels at [2:]\n",
"\n",
" Example for our problem:\n",
" 0,sometext,0,1,1,1\n",
" 1,moretext,1,0,0,1\n",
" OR\n",
" ENFJ,sometext,0,1,1,1\n",
" ENFJ,moretext,0,1,1,1\n",
"\n",
" Meaning - id does not need to be unique, it can be used as a label\n",
" to try multi-class solution of the same task.\n",
" \"\"\"\n",
" \n",
" \n",
" def __init__(self, labels=None):\n",
" \"\"\"Constructs a MultilabelProcessor\n",
" \n",
" Args:\n",
" labels: array-like or list. Columns under which labels are stored in \n",
" the input dataframes. Default is None: assumes labels are [2:]\n",
" \"\"\"\n",
" \n",
" self.labels = labels\n",
"\n",
" def get_train_examples(self, data_dir):\n",
" \"\"\"Pass in train data by specifying it's directory. Looks for train.csv\"\"\"\n",
" path = Path(data_dir, \"train.csv\")\n",
" logger.info(\"LOOKING AT {}\".format(path))\n",
" return self._create_examples(pd.read_csv(path), \"train\")\n",
"\n",
" def get_dev_examples(self, data_dir):\n",
" \"\"\"Pass in val (development) data by specifying it's directory. Looks for val.csv.\"\"\"\n",
" path = Path(data_dir, \"val.csv\")\n",
" logger.info(\"LOOKING AT {}\".format(path))\n",
" return self._create_examples(pd.read_csv(path), \"val\")\n",
"\n",
" def get_test_examples(self, data_dir, data_file_name, size=-1):\n",
" \"\"\"See base class.\"\"\"\n",
" data_df = pd.read_csv(os.path.join(data_dir, data_file_name))\n",
"\n",
" if size == -1:\n",
" return self._create_examples(data_df, \"test\")\n",
" else:\n",
" return self._create_examples(data_df.sample(size), \"val\")\n",
"\n",
" def get_labels(self):\n",
" \"\"\"4 binary labels: IE, NS, FT, JP. 1 means I,N,F,J; 0 means E,S,T,P\"\"\"\n",
" return self.labels\n",
"\n",
" def _create_examples(self, df, set_type, labels_available=True):\n",
" \"\"\"Creates examples for the train, val and test sets.\"\"\"\n",
" if len(df.columns) <= 2:\n",
" raise IndexError(\"Invalid dataframe format. Must have guid, text, labels\")\n",
" if self.labels is None:\n",
" self.labels = df.columns[2:]\n",
" \n",
" examples = []\n",
"\n",
" for i, row in enumerate(df.values):\n",
" guid = \"%s-%s-%s\" % (i, set_type, row[0])\n",
" text_a = row[1]\n",
" if labels_available:\n",
" labels = row[2:]\n",
" else:\n",
" labels = []\n",
" examples.append(\n",
" InputExample(guid=guid, text_a=text_a, label=labels))\n",
" return examples\n",
" \n",
"def convert_examples_to_features(examples, label_list, max_seq_length,\n",
" tokenizer, output_mode=\"classification\"):\n",
" \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n",
"\n",
" features = []\n",
" for (ex_index, example) in enumerate(examples):\n",
" if ex_index % 10000 == 0:\n",
" logger.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n",
"\n",
" tokens_a = tokenizer.tokenize(example.text_a)\n",
" tokens_b = None\n",
" \n",
" if example.text_b:\n",
" tokens_b = tokenizer.tokenize(example.text_b)\n",
" # Modifies `tokens_a` and `tokens_b` in place so that the total\n",
" # length is less than the specified length.\n",
" # Account for [CLS], [SEP], [SEP] with \"- 3\"\n",
" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n",
" else:\n",
" # Account for [CLS] and [SEP] with \"- 2\"\n",
" if len(tokens_a) > max_seq_length - 2:\n",
" tokens_a = tokens_a[:(max_seq_length - 2)]\n",
"\n",
" # The convention in BERT is:\n",
" # (a) For sequence pairs:\n",
" # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n",
" # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n",
" # (b) For single sequences:\n",
" # tokens: [CLS] the dog is hairy . [SEP]\n",
" # type_ids: 0 0 0 0 0 0 0\n",
" #\n",
" # Where \"type_ids\" are used to indicate whether this is the first\n",
" # sequence or the second sequence. The embedding vectors for `type=0` and\n",
" # `type=1` were learned during pre-training and are added to the wordpiece\n",
" # embedding vector (and position vector). This is not *strictly* necessary\n",
" # since the [SEP] token unambigiously separates the sequences, but it makes\n",
" # it easier for the model to learn the concept of sequences.\n",
" #\n",
" # For classification tasks, the first vector (corresponding to [CLS]) is\n",
" # used as as the \"sentence vector\". Note that this only makes sense because\n",
" # the entire model is fine-tuned.\n",
" tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n",
" segment_ids = [0] * len(tokens)\n",
"\n",
" if tokens_b:\n",
" tokens += tokens_b + [\"[SEP]\"]\n",
" segment_ids += [1] * (len(tokens_b) + 1)\n",
"\n",
" input_ids = tokenizer.convert_tokens_to_ids(tokens)\n",
"\n",
" # The mask has 1 for real tokens and 0 for padding tokens. Only real\n",
" # tokens are attended to.\n",
" input_mask = [1] * len(input_ids)\n",
"\n",
" # Zero-pad up to the sequence length.\n",
" padding = [0] * (max_seq_length - len(input_ids))\n",
" input_ids += padding\n",
" input_mask += padding\n",
" segment_ids += padding\n",
"\n",
" assert len(input_ids) == max_seq_length\n",
" assert len(input_mask) == max_seq_length\n",
" assert len(segment_ids) == max_seq_length\n",
"\n",
" labels_ids = []\n",
" for label in example.label:\n",
" labels_ids.append(int(label))\n",
"\n",
" if ex_index < 1:\n",
" logger.info(\"*** Example ***\")\n",
" logger.info(\"guid: %s\" % (example.guid))\n",
" logger.info(\"tokens: %s\" % \" \".join(\n",
" [str(x) for x in tokens]))\n",
" logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n",
" logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n",
" logger.info(\n",
" \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n",
" logger.info(\"label: %s (id = %s)\" % (example.label, labels_ids))\n",
"\n",
" features.append(InputFeatures(input_ids=input_ids,\n",
" input_mask=input_mask,\n",
" segment_ids=segment_ids,\n",
" label_id=labels_ids,\n",
" ))\n",
" return features\n",
"\n",
"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n",
" \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n",
"\n",
" # This is a simple heuristic which will always truncate the longer sequence\n",
" # one token at a time. This makes more sense than truncating an equal percent\n",
" # of tokens from each, since if one sequence is very short then each token\n",
" # that's truncated likely contains more information than a longer sequence.\n",
" while True:\n",
" total_length = len(tokens_a) + len(tokens_b)\n",
" if total_length <= max_length:\n",
" break\n",
" if len(tokens_a) > len(tokens_b):\n",
" tokens_a.pop()\n",
" else:\n",
" tokens_b.pop()\n",
"\n",
"def accuracy(y_pred:Tensor, y_true:Tensor):\n",
" \"\"\"Simple accuracy. A bad metric for multilabel.\"\"\"\n",
" return (y_pred == y_true).sum() / len(y_true)\n",
"\n",
"def accuracy_thresh(y_pred:Tensor, y_true:Tensor, thresh:float=0.5, sigmoid:bool=True):\n",
" \"\"\"Compute accuracy when `y_pred` and `y_true` are the same size.\"\"\"\n",
" if sigmoid: \n",
" y_pred = y_pred.sigmoid()\n",
" return np.mean(((y_pred>thresh)==y_true.byte()).float().cpu().numpy(), axis=1).sum()\n",
" \n",
"def fbeta(y_pred:Tensor, y_true:Tensor, thresh:float=0.2, beta:float=2, \n",
" eps:float=1e-9, sigmoid:bool=True):\n",
" \"\"\"Computes the f_beta between `preds` and `targets`.\"\n",
" F1-metric version, `beta` controls importantce of precision vs recall,\n",
" where beta=1 is equivalent to F1.\n",
" \"\"\"\n",
" beta2 = beta ** 2\n",
" if sigmoid: y_pred = y_pred.sigmoid()\n",
" y_pred = (y_pred>thresh).float()\n",
" y_true = y_true.float()\n",
" TP = (y_pred*y_true).sum(dim=1)\n",
" prec = TP/(y_pred.sum(dim=1)+eps)\n",
" rec = TP/(y_true.sum(dim=1)+eps)\n",
" res = (prec*rec)/(prec*beta2+rec+eps)*(1+beta2)\n",
" return res.mean().item()\n",
"\n"
],
"execution_count": 0,
"outputs": []
},
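{
"metadata": {
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"The thresholded metrics above act on raw logits. A quick sketch with arbitrary numbers (nothing from the real data) shows what `accuracy_thresh` and `fbeta` return for a small batch."
]
},
{
"metadata": {
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# Sketch only: arbitrary logits/targets to illustrate the multilabel metrics above.\n",
"toy_logits = torch.tensor([[ 2.0, -1.0, 0.5, -3.0],\n",
"                           [-0.5,  1.5, 2.0,  0.1]])\n",
"toy_targets = torch.tensor([[1., 0., 1., 0.],\n",
"                            [0., 1., 1., 1.]])\n",
"print(accuracy_thresh(toy_logits, toy_targets))  # per-example mean accuracy, summed over the batch\n",
"print(fbeta(toy_logits, toy_targets, thresh=0.2, beta=2))"
],
"execution_count": 0,
"outputs": []
},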
{
"metadata": {
"id": "TC7hHzQjRfJy",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"! cp test.csv data/twitter/tmp ; cp train.csv data/twitter/tmp ; cp val.csv data/twitter/tmp"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "A0WJgiuwV3rr",
"colab_type": "code",
"outputId": "a9e489c9-5463-4419-d558-4dd5369fe5ee",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
}
},
"cell_type": "code",
"source": [
"\"\"\"Set pytorch parameters\"\"\"\n",
"\n",
"LABEL_COLS=['I','N','F','J']\n",
"\n",
"DATA=Path('./data')\n",
"DATA.mkdir(exist_ok=True)\n",
"\n",
"DATA_PATH=DATA/'twitter'\n",
"DATA_PATH.mkdir(exist_ok=True)\n",
"BERT_MODEL = 'uncased_L-12_H-768_A-12'\n",
"PATH=DATA_PATH/'tmp'\n",
"PATH.mkdir(exist_ok=True)\n",
"\n",
"CLAS_DATA_PATH=PATH/'class'\n",
"CLAS_DATA_PATH.mkdir(exist_ok=True)\n",
"\n",
"model_state_dict = None\n",
"\n",
"args = {\n",
" \"train_size\": -1,\n",
" \"val_size\": -1,\n",
" \"full_data_dir\": DATA_PATH,\n",
" \"data_dir\": PATH,\n",
" \"task_name\": \"twitter\",\n",
" \"labels\": LABEL_COLS,\n",
" \"no_cuda\": False,\n",
" \"bert_model\": 'bert-base-uncased',\n",
" \"output_dir\": CLAS_DATA_PATH/'output',\n",
" \"max_seq_length\": 128,\n",
" \"do_train\": True,\n",
" \"do_eval\": True,\n",
" \"do_lower_case\": True,\n",
" \"train_batch_size\": 16,\n",
" \"eval_batch_size\": 8,\n",
" \"learning_rate\": 3e-5,\n",
" \"num_train_epochs\": 4.0,\n",
" \"warmup_proportion\": 0.1,\n",
" \"local_rank\": -1,\n",
" \"seed\": 42,\n",
" \"gradient_accumulation_steps\": 1,\n",
" \"optimize_on_cpu\": False,\n",
" \"fp16\": False,\n",
" \"loss_scale\": 128,\n",
" \n",
"}\n",
"\n",
"\n",
"\n",
"if args[\"local_rank\"] == -1 or args[\"no_cuda\"]:\n",
" device = torch.device(\"cuda\" if torch.cuda.is_available() and not args[\"no_cuda\"] else \"cpu\")\n",
" n_gpu = torch.cuda.device_count()\n",
"else:\n",
" torch.cuda.set_device(args['local_rank'])\n",
" device = torch.device(\"cuda\", args['local_rank'])\n",
" n_gpu = 1\n",
" # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n",
" torch.distributed.init_process_group(backend='nccl')\n",
"logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n",
" device, n_gpu, bool(args['local_rank'] != -1), args['fp16']))\n",
"\n",
"processors = {\n",
" \"twitter\": MultilabelProcessor\n",
"}\n",
"\n",
"args['train_batch_size'] = int(args['train_batch_size'] / args['gradient_accumulation_steps'])\n",
"\n",
"random.seed(args['seed'])\n",
"np.random.seed(args['seed'])\n",
"torch.manual_seed(args['seed'])\n",
"task_name = args['task_name'].lower()\n",
"\n",
"if task_name not in processors:\n",
" raise ValueError(\"Task not found: %s\" % (task_name))\n",
"\n",
"label_list=args['labels']\n",
"processor = processors[task_name](label_list)\n",
"num_labels = len(args['labels'])\n",
"\n"
],
"execution_count": 0,
"outputs": [
{
"output_type": "stream",
"text": [
"04/29/2019 09:51:12 - INFO - __main__ - device: cuda n_gpu: 1, distributed training: False, 16-bits training: False\n"
],
"name": "stderr"
}
]
},
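{
"metadata": {
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"A small convenience sketch, using only `pprint` from the setup cell and the names defined above: dump the resolved configuration and the selected device so a run can be matched to its log."
]
},
{
"metadata": {
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# Sketch only: log the resolved configuration and device for reproducibility.\n",
"pprint.pprint({k: str(v) for k, v in args.items()})\n",
"print('device:', device, '| n_gpu:', n_gpu)"
],
"execution_count": 0,
"outputs": []
},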
{
"metadata": {
"id": "Z9W1U0sMOv-k",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"tokenizer = BertTokenizer.from_pretrained(args['bert_model'], do_lower_case=args['do_lower_case'])\n",
"\n",
"logger.info(\"***** Loading data *****\")\n",
"\n",
"train_examples = None\n",
"num_train_steps = None\n",
"if args['do_train']:\n",
" train_examples = processor.get_train_examples(args['full_data_dir'])\n",
" num_train_steps = int(\n",
" len(train_examples)/args['train_batch_size']/args['gradient_accumulation_steps']*args['num_train_epochs']\n",
" )\n",
"eval_examples = processor.get_dev_examples(args['data_dir'])\n",
"train_features = convert_examples_to_features(train_examples, args['labels'], args['max_seq_length'], tokenizer)\n",
"\n",
"\n",
"logger.info(\" Num examples = %d\", len(train_examples))\n",
"logger.info(\" Batch size = %d\", args['train_batch_size'])\n",
"logger.info(\" Num steps = %d\", num_train_steps)\n",
"all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n",
"all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n",
"all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n",
"all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)\n",
"train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n",
"if args['local_rank'] == -1:\n",
" train_sampler = RandomSampler(train_data)\n",
"else:\n",
" train_sampler = DistributedSampler(train_data)\n",
"train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args['train_batch_size'])"
],
"execution_count": 0,
"outputs": []
},
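{
"metadata": {
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"A sketch of the expected CSV layout, using two made-up rows rather than the real Twitter data: `MultilabelProcessor` assumes `id, text, <one column per binary label>`, and `convert_examples_to_features` pads every example to `max_seq_length` and keeps the label vector as a list of ints."
]
},
{
"metadata": {
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# Sketch only: made-up rows in the id,text,<labels...> layout the processor expects.\n",
"toy_df = pd.DataFrame([\n",
"    ['INFJ', 'i stayed home and read all weekend', 1, 1, 1, 1],\n",
"    ['ESTP', 'spontaneous road trip with the whole crew', 0, 0, 0, 0],\n",
"], columns=['id', 'text'] + LABEL_COLS)\n",
"\n",
"toy_examples = MultilabelProcessor(labels=LABEL_COLS)._create_examples(toy_df, 'train')\n",
"toy_features = convert_examples_to_features(toy_examples, LABEL_COLS, 32, tokenizer)\n",
"print(len(toy_features[0].input_ids), toy_features[0].label_id)  # 32 padded ids, labels [1, 1, 1, 1]"
],
"execution_count": 0,
"outputs": []
},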
{
"metadata": {
"id": "AJvK45ZkSpmg",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"def get_model():\n",
" model = BertForMultiLabelClassification.from_pretrained(args['bert_model'], num_labels = num_labels)\n",
" return model\n",
"\n",
"model = get_model()\n",
"\n",
"\n",
"\n",
"if args['fp16']:\n",
" model.half()\n",
"model.to(device)"
],
"execution_count": 0,
"outputs": []
},
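{
"metadata": {
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"A short sketch of the freeze/unfreeze helpers defined on the model class, assuming the `model` instance created above: counting trainable parameters makes the effect of `freeze_bert_encoder` visible before training turns everything back on."
]
},
{
"metadata": {
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# Sketch only: show what freeze_bert_encoder()/unfreeze_bert_encoder() toggle.\n",
"def count_trainable(m):\n",
"    return sum(p.numel() for p in m.parameters() if p.requires_grad)\n",
"\n",
"model.freeze_bert_encoder()\n",
"print('trainable with frozen encoder:', count_trainable(model))\n",
"model.unfreeze_bert_encoder()\n",
"print('trainable with full model:    ', count_trainable(model))"
],
"execution_count": 0,
"outputs": []
},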
{
"metadata": {
"id": "jAVIfyBiUspe",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"from torch.optim.lr_scheduler import _LRScheduler, Optimizer\n",
"\n",
"def warmup_linear(x, warmup=0.002):\n",
" \"\"\"Warmup for how long?\"\"\"\n",
" if x < warmup:\n",
" return x/warmup\n",
" return 1.0 - x\n",
"\n",
"class CyclicLR(object):\n",
" \"\"\"Sets the learning rate of each parameter group according to\n",
" cyclical learning rate policy (CLR). The policy cycles the learning\n",
" rate between two boundaries with a constant frequency, as detailed in\n",
" the paper `Cyclical Learning Rates for Training Neural Networks`_.\n",
" The distance between the two boundaries can be scaled on a per-iteration\n",
" or per-cycle basis.\n",
" Cyclical learning rate policy changes the learning rate after every batch.\n",
" `batch_step` should be called after a batch has been used for training.\n",
" To resume training, save `last_batch_iteration` and use it to instantiate `CycleLR`.\n",
" This class has three built-in policies, as put forth in the paper:\n",
" \"triangular\":\n",
" A basic triangular cycle w/ no amplitude scaling.\n",
" \"triangular2\":\n",
" A basic triangular cycle that scales initial amplitude by half each cycle.\n",
" \"exp_range\":\n",
" A cycle that scales initial amplitude by gamma**(cycle iterations) at each\n",
" cycle iteration.\n",
" This implementation was adapted from the github repo: `bckenstler/CLR`_\n",
" Args:\n",
" optimizer (Optimizer): Wrapped optimizer.\n",
" base_lr (float or list): Initial learning rate which is the\n",
" lower boundary in the cycle for eachparam groups.\n",
" Default: 0.001\n",
" max_lr (float or list): Upper boundaries in the cycle for\n",
" each parameter group. Functionally,\n",
" it defines the cycle amplitude (max_lr - base_lr).\n",
" The lr at any cycle is the sum of base_lr\n",
" and some scaling of the amplitude; therefore\n",
" max_lr may not actually be reached depending on\n",
" scaling function. Default: 0.006\n",
" step_size (int): Number of training iterations per\n",
" half cycle. Authors suggest setting step_size\n",
" 2-8 x training iterations in epoch. Default: 2000\n",
" mode (str): One of {triangular, triangular2, exp_range}.\n",
" Values correspond to policies detailed above.\n",
" If scale_fn is not None, this argument is ignored.\n",
" Default: 'triangular'\n",
" gamma (float): Constant in 'exp_range' scaling function:\n",
" gamma**(cycle iterations)\n",
" Default: 1.0\n",
" scale_fn (function): Custom scaling policy defined by a single\n",
" argument lambda function, where\n",
" 0 <= scale_fn(x) <= 1 for all x >= 0.\n",
" mode paramater is ignored\n",
" Default: None\n",
" scale_mode (str): {'cycle', 'iterations'}.\n",
" Defines whether scale_fn is evaluated on\n",
" cycle number or cycle iterations (training\n",
" iterations since start of cycle).\n",
" Default: 'cycle'\n",
" last_batch_iteration (int): The index of the last batch. Default: -1\n",
" Example:\n",
" >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n",
" >>> scheduler = torch.optim.CyclicLR(optimizer)\n",
" >>> data_loader = torch.utils.data.DataLoader(...)\n",
" >>> for epoch in range(10):\n",
" >>> for batch in data_loader:\n",
" >>> scheduler.batch_step()\n",
" >>> train_batch(...)\n",
" .. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186\n",
" .. _bckenstler/CLR: https://github.com/bckenstler/CLR\n",
" \"\"\"\n",
"\n",
" def __init__(self,\n",
" optimizer,\n",
" base_lr=1e-3,\n",
" max_lr=6e-3,\n",
" step_size=2000,\n",
" mode='triangular',\n",
" gamma=1.,\n",
" scale_fn=None,\n",
" scale_mode='cycle',\n",
" last_batch_iteration=-1,\n",
" ):\n",
" \n",
" self.optimizer = optimizer\n",
"\n",
" if isinstance(base_lr, list) or isinstance(base_lr, tuple):\n",
" if len(base_lr) != len(optimizer.param_groups):\n",
" raise ValueError(\"expected {} base_lr, got {}\".format(\n",
" len(optimizer.param_groups), len(base_lr)))\n",
" self.base_lrs = list(base_lr)\n",
" else:\n",
" self.base_lrs = [base_lr] * len(optimizer.param_groups)\n",
"\n",
" if isinstance(max_lr, list) or isinstance(max_lr, tuple):\n",
" if len(max_lr) != len(optimizer.param_groups):\n",
" raise ValueError(\"expected {} max_lr, got {}\".format(\n",
" len(optimizer.param_groups), len(max_lr)))\n",
" self.max_lrs = list(max_lr)\n",
" else:\n",
" self.max_lrs = [max_lr] * len(optimizer.param_groups)\n",
"\n",
" self.step_size = step_size\n",
"\n",
" if mode not in ['triangular', 'triangular2', 'exp_range'] and scale_fn is None:\n",
" raise ValueError('mode is invalid and scale_fn is None')\n",
"\n",
" self.mode = mode\n",
" self.gamma = gamma\n",
"\n",
" if scale_fn is None:\n",
" if self.mode == 'triangular':\n",
" self.scale_fn = self._triangular_scale_fn\n",
" self.scale_mode = 'cycle'\n",
" elif self.mode == 'triangular2':\n",
" self.scale_fn = self._triangular2_scale_fn\n",
" self.scale_mode = 'cycle'\n",
" elif self.mode == 'exp_range':\n",
" self.scale_fn = self._exp_range_scale_fn\n",
" self.scale_mode = 'iterations'\n",
" else:\n",
" self.scale_fn = scale_fn\n",
" self.scale_mode = scale_mode\n",
"\n",
" self.batch_step(last_batch_iteration + 1)\n",
" self.last_batch_iteration = last_batch_iteration\n",
"\n",
" def batch_step(self, batch_iteration=None):\n",
" if batch_iteration is None:\n",
" batch_iteration = self.last_batch_iteration + 1\n",
" self.last_batch_iteration = batch_iteration\n",
" for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):\n",
" param_group['lr'] = lr\n",
"\n",
" def _triangular_scale_fn(self, x):\n",
" return 1.\n",
"\n",
" def _triangular2_scale_fn(self, x):\n",
" return 1 / (2. ** (x - 1))\n",
"\n",
" def _exp_range_scale_fn(self, x):\n",
" return self.gamma**(x)\n",
"\n",
" def get_lr(self):\n",
" step_size = float(self.step_size)\n",
" cycle = np.floor(1 + self.last_batch_iteration / (2 * step_size))\n",
" x = np.abs(self.last_batch_iteration / step_size - 2 * cycle + 1)\n",
"\n",
" lrs = []\n",
" param_lrs = zip(self.optimizer.param_groups, self.base_lrs, self.max_lrs)\n",
" for param_group, base_lr, max_lr in param_lrs:\n",
" base_height = (max_lr - base_lr) * np.maximum(0, (1 - x))\n",
" if self.scale_mode == 'cycle':\n",
" lr = base_lr + base_height * self.scale_fn(cycle)\n",
" else:\n",
" lr = base_lr + base_height * self.scale_fn(self.last_batch_iteration)\n",
" lrs.append(lr)\n",
" return lrs\n",
" \n",
"# Prepare optimizer\n",
"param_optimizer = list(model.named_parameters())\n",
"no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n",
"\n",
"optimizer_grouped_parameters = [\n",
" {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n",
" {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},\n",
" ]\n",
"\n",
"t_total = num_train_steps\n",
"\n",
"if args['local_rank'] != -1:\n",
" t_total = t_total // torch.distributed.get_world_size()\n",
"\n",
"bert_adam=BertAdam(optimizer_grouped_parameters,lr=args['learning_rate'],warmup=args['warmup_proportion'],t_total=t_total)\n",
"\n",
"scheduler = CyclicLR(optimizer=bert_adam, base_lr=2e-5, max_lr=5e-5, step_size=2500, last_batch_iteration=0)"
],
"execution_count": 0,
"outputs": []
},
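{
"metadata": {
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"A throwaway sketch of the triangular policy implemented above, assuming nothing beyond this cell: `step_size` is shrunk to 4 so a full cycle between `base_lr` and `max_lr` is visible in a handful of iterations (the real scheduler above uses `step_size=2500`)."
]
},
{
"metadata": {
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# Sketch only: trace the triangular CLR schedule on a dummy optimizer.\n",
"toy_opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=2e-5)\n",
"toy_clr = CyclicLR(toy_opt, base_lr=2e-5, max_lr=5e-5, step_size=4)\n",
"toy_lrs = []\n",
"for _ in range(16):\n",
"    toy_clr.batch_step()\n",
"    toy_lrs.append(toy_opt.param_groups[0]['lr'])\n",
"print(['%.1e' % lr for lr in toy_lrs])  # rises to max_lr over 4 steps, falls back over the next 4"
],
"execution_count": 0,
"outputs": []
},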
{
"metadata": {
"id": "E0l12eQkOFSV",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# Prepare optimizer\n",
"param_optimizer = list(model.named_parameters())\n",
"no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n",
"optimizer_grouped_parameters = [\n",
" {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n",
" {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n",
" ]\n",
"t_total = num_train_steps\n",
"if args['local_rank'] != -1:\n",
" t_total = t_total // torch.distributed.get_world_size()\n",
"if args['fp16']:\n",
" try:\n",
" from apex.optimizers import FP16_Optimizer\n",
" from apex.optimizers import FusedAdam\n",
" except ImportError:\n",
" raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n",
"\n",
" optimizer = FusedAdam(optimizer_grouped_parameters,\n",
" lr=args['learning_rate'],\n",
" bias_correction=False,\n",
" max_grad_norm=1.0)\n",
" if args['loss_scale'] == 0:\n",
" optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n",
" else:\n",
" optimizer = FP16_Optimizer(optimizer, static_loss_scale=args['loss_scale'])\n",
"\n",
"else:\n",
" optimizer = BertAdam(optimizer_grouped_parameters,\n",
" lr=args['learning_rate'],\n",
" warmup=args['warmup_proportion'],\n",
" t_total=t_total)\n",
"\n",
"scheduler = CyclicLR(optimizer, base_lr=2e-5, max_lr=5e-5, step_size=2500, last_batch_iteration=0)"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "hyfO_jQgwkB-",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"## Validation function"
]
},
{
"metadata": {
"id": "6Qk2E8YCvniS",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"\n",
"def eval():\n",
" args['output_dir'].mkdir(exist_ok=True)\n",
"\n",
" eval_features = convert_examples_to_features(eval_examples, args['labels'], args['max_seq_length'], tokenizer)\n",
" logger.info(\"***** Running evaluation *****\")\n",
" logger.info(\" Num examples = %d\", len(eval_examples))\n",
" logger.info(\" Batch size = %d\", args['eval_batch_size'])\n",
" all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n",
" all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n",
" all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n",
" all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)\n",
" eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n",
" # Run prediction for full data\n",
" eval_sampler = SequentialSampler(eval_data)\n",
" eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args['eval_batch_size'])\n",
"\n",
" all_logits = None\n",
" all_labels = None\n",
"\n",
" model.eval()\n",
" eval_loss, eval_accuracy = 0, 0\n",
" nb_eval_steps, nb_eval_examples = 0, 0\n",
" for input_ids, input_mask, segment_ids, label_id in eval_dataloader:\n",
" input_ids = input_ids.to(device)\n",
" input_mask = input_mask.to(device)\n",
" segment_ids = segment_ids.to(device)\n",
" label_id = label_id.to(device)\n",
"\n",
" with torch.no_grad():\n",
" tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_id)\n",
" logits = model(input_ids, segment_ids, input_mask)\n",
"\n",
"\n",
" tmp_eval_accuracy = accuracy_thresh(logits, label_id)\n",
" if all_logits is None:\n",
" all_logits = logits.detach().cpu().numpy()\n",
" else:\n",
" all_logits = np.concatenate((all_logits, logits.detach().cpu().numpy()), axis=0)\n",
"\n",
" if all_labels is None:\n",
" all_labels = label_id.detach().cpu().numpy()\n",
" else: \n",
" all_labels = np.concatenate((all_labels, label_id.detach().cpu().numpy()), axis=0)\n",
"\n",
"\n",
" eval_loss += tmp_eval_loss.mean().item()\n",
" eval_accuracy += tmp_eval_accuracy\n",
"\n",
" nb_eval_examples += input_ids.size(0)\n",
" nb_eval_steps += 1\n",
"\n",
" eval_loss = eval_loss / nb_eval_steps\n",
" eval_accuracy = eval_accuracy / nb_eval_examples\n",
"\n",
" # Compute ROC curve and ROC area for each class\n",
" fpr = dict()\n",
" tpr = dict()\n",
" roc_auc = dict()\n",
"\n",
" for i in range(num_labels):\n",
" fpr[i], tpr[i], _ = roc_curve(all_labels[:, i], all_logits[:, i])\n",
" roc_auc[i] = auc(fpr[i], tpr[i])\n",
"\n",
" # Compute micro-average ROC curve and ROC area\n",
" fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(all_labels.ravel(), all_logits.ravel())\n",
" roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n",
"\n",
" result = {'eval_loss': eval_loss,\n",
" 'eval_accuracy': eval_accuracy,\n",
" 'roc_auc': roc_auc }\n",
"\n",
" output_eval_file = os.path.join(args['output_dir'], \"eval_results.txt\")\n",
" with open(output_eval_file, \"w\") as writer:\n",
" logger.info(\"***** Eval results *****\")\n",
" for key in sorted(result.keys()):\n",
" logger.info(\" %s = %s\", key, str(result[key]))\n",
" return result"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "7uOAu1lA0EuH",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"## Train model"
]
},
{
"metadata": {
"id": "4YTbMxsP0H9I",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"\n",
"def fit(num_epocs=args['num_train_epochs']):\n",
" global_step = 0\n",
" model.train()\n",
" for i_ in tqdm(range(int(num_epocs)), desc=\"Epoch\"):\n",
"\n",
" tr_loss = 0\n",
" nb_tr_examples, nb_tr_steps = 0, 0\n",
" for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n",
"\n",
" batch = tuple(t.to(device) for t in batch)\n",
" input_ids, input_mask, segment_ids, label_id = batch\n",
" loss = model(input_ids, segment_ids, input_mask, label_id)\n",
" if n_gpu > 1:\n",
" loss = loss.mean() # mean() to average on multi-gpu.\n",
" if args['gradient_accumulation_steps'] > 1:\n",
" loss = loss / args['gradient_accumulation_steps']\n",
"\n",
" if args['fp16']:\n",
" optimizer.backward(loss)\n",
" else:\n",
" loss.backward()\n",
"\n",
" tr_loss += loss.item()\n",
" nb_tr_examples += input_ids.size(0)\n",
" nb_tr_steps += 1\n",
" if (step + 1) % args['gradient_accumulation_steps'] == 0:\n",
" # modify learning rate with special warm up BERT uses\n",
" lr_this_step = args['learning_rate'] * warmup_linear(global_step/t_total, args['warmup_proportion'])\n",
" for param_group in optimizer.param_groups:\n",
" param_group['lr'] = lr_this_step\n",
" optimizer.step()\n",
" optimizer.zero_grad()\n",
" global_step += 1\n",
"\n",
" logger.info('Loss after epoc {}'.format(tr_loss / nb_tr_steps))\n",
" logger.info('Eval after epoc {}'.format(i_+1))\n",
" eval()"
],
"execution_count": 0,
"outputs": []
},
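{
"metadata": {
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"A sketch of the schedule `fit()` actually applies, assuming `warmup_linear` and `args` from the cells above: the multiplier ramps up over the first `warmup_proportion` of training and then decays linearly; the progress values below are arbitrary."
]
},
{
"metadata": {
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# Sketch only: the LR multiplier fit() applies at a few arbitrary progress points.\n",
"for progress in [0.0, 0.05, 0.1, 0.25, 0.5, 0.9]:\n",
"    print(progress, round(warmup_linear(progress, warmup=args['warmup_proportion']), 3))"
],
"execution_count": 0,
"outputs": []
},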
{
"metadata": {
"id": "sfQeTnCu0t2z",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"model.unfreeze_bert_encoder()"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "qPcBRUib0xUy",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"fit()"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "cx_8BAUH1HWn",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"## Save pretrained model "
]
},
{
"metadata": {
"id": "wVH6k5cJiWOG",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"! mkdir data/twitter/models\n",
"\n",
"PYTORCH_PRETRAINED_BERT_CACHE=Path('./data/twitter/models')"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "cP9SO7fI02uW",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# Save a trained model\n",
"model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n",
"output_model_file = os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, \"finetuned_pytorch_model.bin\")\n",
"torch.save(model_to_save.state_dict(), output_model_file)\n",
"\n"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "lkUNQumj1NWl",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"## Reload it"
]
},
{
"metadata": {
"id": "V8RbMBpg1PCx",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# Load a trained model that you have fine-tuned\n",
"model_state_dict = torch.load(output_model_file)\n",
"model = BertForMultiLabelClassification.from_pretrained(args['bert_model'], num_labels = num_labels, state_dict=model_state_dict)\n",
"model.to(device)"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "CUuIsGgP1TQZ",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"## Run on val"
]
},
{
"metadata": {
"id": "3fvFaHFR1WeS",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"eval()"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "eOBavSrp1YWd",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"## Run on test"
]
},
{
"metadata": {
"id": "qV_NYfsV1i6q",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"def predict(model, path, test_filename='test.csv'):\n",
" predict_processor = MultilabelProcessor(path)\n",
" test_examples = predict_processor.get_test_examples(path, test_filename, size=-1)\n",
"\n",
" # Hold input data for returning it \n",
" input_data = [{ 'id': input_example.guid, 'comment_text': input_example.text_a } for input_example in test_examples]\n",
"\n",
" test_features = convert_examples_to_features(test_examples, label_list, args['max_seq_length'], tokenizer)\n",
"\n",
" logger.info(\"***** Running prediction *****\")\n",
" logger.info(\" Num examples = %d\", len(test_examples))\n",
" logger.info(\" Batch size = %d\", args['eval_batch_size'])\n",
"\n",
" all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)\n",
" all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)\n",
" all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)\n",
"\n",
" test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)\n",
"\n",
" # Run prediction for full data\n",
" test_sampler = SequentialSampler(test_data)\n",
" test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args['eval_batch_size'])\n",
"\n",
" all_logits = None\n",
"\n",
" model.eval()\n",
" eval_loss, eval_accuracy = 0, 0\n",
" nb_eval_steps, nb_eval_examples = 0, 0\n",
" for step, batch in enumerate(tqdm(test_dataloader, desc=\"Prediction Iteration\")):\n",
" input_ids, input_mask, segment_ids = batch\n",
" input_ids = input_ids.to(device)\n",
" input_mask = input_mask.to(device)\n",
" segment_ids = segment_ids.to(device)\n",
"\n",
" with torch.no_grad():\n",
" logits = model(input_ids, segment_ids, input_mask)\n",
" logits = logits.sigmoid()\n",
"\n",
" if all_logits is None:\n",
" all_logits = logits.detach().cpu().numpy()\n",
" else:\n",
" all_logits = np.concatenate((all_logits, logits.detach().cpu().numpy()), axis=0)\n",
"\n",
" nb_eval_examples += input_ids.size(0)\n",
" nb_eval_steps += 1\n",
"\n",
" return pd.merge(pd.DataFrame(input_data), pd.DataFrame(all_logits, columns=label_list), left_index=True, right_index=True)"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "hJ5X7vRs1vdi",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"result = predict(model, DATA_PATH)"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "0_xYq-RF1xtP",
"colab_type": "code",
"outputId": "608d068b-7858-4102-a76e-51df3a14ffcc",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
}
},
"cell_type": "code",
"source": [
"result.shape"
],
"execution_count": 0,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"(296, 6)"
]
},
"metadata": {
"tags": []
},
"execution_count": 39
}
]
},
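{
"metadata": {
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"`predict` returns sigmoid probabilities per label; the 0.5 cut-off below is an arbitrary choice (not tuned here) that turns them into hard 0/1 predictions."
]
},
{
"metadata": {
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# Sketch only: threshold the predicted probabilities into hard labels (0.5 is arbitrary).\n",
"hard_preds = (result[LABEL_COLS] > 0.5).astype(int)\n",
"hard_preds.head()"
],
"execution_count": 0,
"outputs": []
},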
{
"metadata": {
"id": "4j4YyYj-2NdZ",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"result[LABEL_COLS].to_csv(DATA_PATH/'classification_result.csv', index=None)"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "2Q4MNbRAorri",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"result[LABEL_COLS]"
],
"execution_count": 0,
"outputs": []
}
]
}