{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "bow.ipynb",
"version": "0.3.2",
"provenance": [],
"collapsed_sections": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"metadata": {
"id": "IFuAZHhpErir",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"# Intro to Neural Networks for NLP\n",
"\n",
"We will look at BOW(bag of words) model in pytorch building sentiment analyzer.\n",
"\n",
"Labels = \"very bad\":0, \"bad\":1, \"neutral\":2, \"good\":3, \"very good\":4"
]
},
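{
"metadata": {},
"cell_type": "markdown",
"source": [
"To make the idea concrete before any PyTorch code: a bag-of-words representation discards word order and keeps only word counts. The toy example below (plain Python, no dependencies, purely illustrative) shows that mapping on a made-up sentence."
]
},
{
"metadata": {},
"cell_type": "code",
"source": [
"# Illustrative only: a sentence becomes a mapping of word -> count.\n",
"from collections import Counter\n",
"\n",
"print(Counter(\"the movie was good , really good .\".split()))\n",
"# Counter({'good': 2, 'the': 1, 'movie': 1, 'was': 1, ',': 1, 'really': 1, '.': 1})"
],
"execution_count": 0,
"outputs": []
},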
{
"metadata": {
"id": "Dkhmcu-NEgWw",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"#mount google drive change directory and download the dataset\n",
"\n",
"#mount google drive\n",
"from google.colab import drive\n",
"drive.mount('/content/drive')\n",
"\n",
"import os\n",
"os.chdir(\"path\")\n",
"\n",
"# uncomment to download the files and install seaborn==0.9.0\n",
"# !wget -c https://github.com/neubig/nn4nlp-code/raw/master/data/classes/dev.txt\n",
"# !wget -c https://github.com/neubig/nn4nlp-code/raw/master/data/classes/test.txt\n",
"# !wget -c https://github.com/neubig/nn4nlp-code/raw/master/data/classes/train.txt"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "qUQhsLUqFXKA",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"import torch\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"import torch.optim as optim"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "a_HOGmSnPn0k",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "3d5a7c24-0fa0-436a-ace1-894445c4c519"
},
"cell_type": "code",
"source": [
"use_cuda = True\n",
"print(\"CUDA Available: \",torch.cuda.is_available())\n",
"device = torch.device(\"cuda\" if (use_cuda and torch.cuda.is_available()) else \"cpu\")"
],
"execution_count": 54,
"outputs": [
{
"output_type": "stream",
"text": [
"CUDA Available: True\n"
],
"name": "stdout"
}
]
},
{
"metadata": {
"id": "TlQClPTIHIfL",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"#read the data\n",
"import pandas as pd\n",
"\n",
"train_data = pd.read_table(\"./train.txt\", delimiter=\"|\", names = ['label','remove','remove1','text']).drop([\"remove\",\"remove1\"], axis=1)\n",
"test_data = pd.read_table(\"./test.txt\", delimiter=\"|\", names = ['label','remove','remove1','text']).drop([\"remove\",\"remove1\"],axis=1)"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "5ThY8WNIIgTw",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 204
},
"outputId": "2c2fc6f1-6db0-41b4-9c08-6d4ef85030c9"
},
"cell_type": "code",
"source": [
"#look at traing data\n",
"train_data.head()"
],
"execution_count": 20,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>label</th>\n",
" <th>text</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>3</td>\n",
" <td>The Rock is destined to be the 21st Century '...</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>4</td>\n",
" <td>The gorgeously elaborate continuation of `` T...</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>3</td>\n",
" <td>Singer\\/composer Bryan Adams contributes a sl...</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>2</td>\n",
" <td>You 'd think by now America would have had en...</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>3</td>\n",
" <td>Yet the act is still charming here .</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" label text\n",
"0 3 The Rock is destined to be the 21st Century '...\n",
"1 4 The gorgeously elaborate continuation of `` T...\n",
"2 3 Singer\\/composer Bryan Adams contributes a sl...\n",
"3 2 You 'd think by now America would have had en...\n",
"4 3 Yet the act is still charming here ."
]
},
"metadata": {
"tags": []
},
"execution_count": 20
}
]
},
{
"metadata": {
"id": "8Baw5WXBIqaA",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "c96d25a4-9cc1-46f0-f31a-ff4a1c0a3041"
},
"cell_type": "code",
"source": [
"#create word to index vocabulary, as machines plays with numbers.\n",
"word_to_inx = {}\n",
"sentences = \" \".join(train_data[\"text\"].values)\n",
"\n",
"word_to_inx = dict([(y,i) for i,y in enumerate(set(sentences.split()))])\n",
"print(\"Vocab size : \", len(word_to_inx))"
],
"execution_count": 25,
"outputs": [
{
"output_type": "stream",
"text": [
"Vocab size : 18278\n"
],
"name": "stdout"
}
]
},
{
"metadata": {
"id": "XfTDWQrhK0VZ",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 51
},
"outputId": "479a14c6-cd71-416f-82ce-6d3e084a463a"
},
"cell_type": "code",
"source": [
"vocab_size = len(word_to_inx)\n",
"num_labels = len(set(train_data[\"label\"].values))\n",
"print(\"Vocab Size : \",vocab_size)\n",
"print(\"No. Lables : \", num_labels)"
],
"execution_count": 27,
"outputs": [
{
"output_type": "stream",
"text": [
"Vocab Size : 18278\n",
"No. Lables : 5\n"
],
"name": "stdout"
}
]
},
{
"metadata": {
"id": "_Wsy4Ef5LkYi",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"class BOW(nn.Module):\n",
" \n",
" def __init__(self, num_labels, vocab_size):\n",
" super(BOW, self).__init__()\n",
" \n",
" self.linear = nn.Linear(vocab_size, num_labels)\n",
" \n",
" def forward(self, bow_vec):\n",
" return F.log_softmax(self.linear(bow_vec), dim=1)"
],
"execution_count": 0,
"outputs": []
},
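{
"metadata": {},
"cell_type": "markdown",
"source": [
"In other words, the model is a single linear layer followed by a log-softmax. For a count vector $x \\in \\mathbb{R}^{|V|}$ (here $|V| = 18278$), weights $W \\in \\mathbb{R}^{5 \\times |V|}$ and bias $b \\in \\mathbb{R}^{5}$, it computes\n",
"\n",
"$$\\hat{y} = \\log\\,\\mathrm{softmax}(Wx + b), \\qquad \\mathrm{softmax}(z)_i = \\frac{e^{z_i}}{\\sum_j e^{z_j}},$$\n",
"\n",
"so each output entry is a log-probability over the 5 sentiment labels."
]
},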
{
"metadata": {
"id": "yvpaMaGkMR3L",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"def make_bow_vec(sentence, word_to_inx):\n",
" vec = torch.zeros(len(word_to_inx))\n",
" for word in sentence.split():\n",
" if word in word_to_inx:\n",
" vec[word_to_inx[word]] += 1\n",
" \n",
" return vec.view(1,-1)"
],
"execution_count": 0,
"outputs": []
},
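{
"metadata": {},
"cell_type": "markdown",
"source": [
"A quick sanity check (the toy vocabulary below is made up for illustration): `make_bow_vec` should count each in-vocabulary word into its slot of a `1 x vocab_size` tensor and ignore everything else."
]
},
{
"metadata": {},
"cell_type": "code",
"source": [
"# toy_vocab is a hypothetical 3-word vocabulary, just for this check\n",
"toy_vocab = {\"good\": 0, \"bad\": 1, \"movie\": 2}\n",
"print(make_bow_vec(\"good movie , really good\", toy_vocab))\n",
"# expected: tensor([[2., 0., 1.]]) -- \",\" and \"really\" are out of vocabulary"
],
"execution_count": 0,
"outputs": []
},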
{
"metadata": {
"id": "iujvvGelMkIh",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"def make_target(label, label_to_inx):\n",
" return torch.LongTensor([label_to_ix[\"%s\"%label]])"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "hbKVja6RMsLe",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 68
},
"outputId": "1998d659-134a-4651-c914-2db3af20623b"
},
"cell_type": "code",
"source": [
"model = BOW(num_labels, vocab_size)\n",
"model.to(device)"
],
"execution_count": 78,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"BOW(\n",
" (linear): Linear(in_features=18278, out_features=5, bias=True)\n",
")"
]
},
"metadata": {
"tags": []
},
"execution_count": 78
}
]
},
{
"metadata": {
"id": "d_CmRWKXMyNl",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 255
},
"outputId": "b6ec0310-0a4a-470c-f96a-ee7fd3dda525"
},
"cell_type": "code",
"source": [
"for param in model.parameters():\n",
" print(param)"
],
"execution_count": 79,
"outputs": [
{
"output_type": "stream",
"text": [
"Parameter containing:\n",
"tensor([[-3.0609e-03, -3.7802e-03, -7.2286e-03, ..., -4.3003e-03,\n",
" -2.4255e-03, -6.9896e-03],\n",
" [ 2.9169e-03, -5.9267e-03, -2.3873e-03, ..., -2.4500e-03,\n",
" 4.1482e-03, 7.5482e-04],\n",
" [ 4.8378e-03, 3.5321e-03, 2.8201e-03, ..., 3.7384e-03,\n",
" 6.3972e-03, -3.5305e-05],\n",
" [ 5.4210e-03, 4.8197e-03, 3.7114e-03, ..., -6.0604e-03,\n",
" -1.1903e-03, -6.7124e-03],\n",
" [ 2.3258e-03, -2.1585e-03, -3.2275e-04, ..., -6.0369e-03,\n",
" 3.4184e-03, 7.2604e-03]], device='cuda:0', requires_grad=True)\n",
"Parameter containing:\n",
"tensor([ 0.0054, 0.0073, -0.0067, -0.0031, -0.0048], device='cuda:0',\n",
" requires_grad=True)\n"
],
"name": "stdout"
}
]
},
{
"metadata": {
"id": "Js8M4FCOM07M",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "cb060e1f-632b-492a-d20a-5e2d33a74485"
},
"cell_type": "code",
"source": [
"with torch.no_grad():\n",
" sample = train_data[\"text\"].iloc[0]\n",
" bow_vector = make_bow_vec(sample, word_to_inx).to(device)\n",
" log_probs = model(bow_vector)\n",
" print(log_probs)"
],
"execution_count": 80,
"outputs": [
{
"output_type": "stream",
"text": [
"tensor([[-1.6042, -1.6215, -1.6007, -1.6191, -1.6019]], device='cuda:0')\n"
],
"name": "stdout"
}
]
},
{
"metadata": {
"id": "XTYb8I3oNTBh",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "8eb3e892-1ff1-4a97-dba0-9abd46ed2d28"
},
"cell_type": "code",
"source": [
"set(train_data[\"label\"].values)"
],
"execution_count": 81,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"{0, 1, 2, 3, 4}"
]
},
"metadata": {
"tags": []
},
"execution_count": 81
}
]
},
{
"metadata": {
"id": "2PNO4E-7M_2-",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"label_to_ix = {\"0\": 0, \"1\": 1, \"2\": 2, \"3\":3, \"4\": 4}"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "fdWTchGMNfJY",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"loss_function = nn.NLLLoss()\n",
"optimizer = optim.SGD(model.parameters(), lr=0.1)"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "twLAXUBmNu4F",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"data = train_data[[\"text\",\"label\"]].values\n",
"t_data = test_data[[\"text\",\"label\"]].values"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "0Q1xf0lFN6mL",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 51
},
"outputId": "04559936-f201-437b-ec4e-676b10c9fce0"
},
"cell_type": "code",
"source": [
"data[0]"
],
"execution_count": 85,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"array([\" The Rock is destined to be the 21st Century 's new `` Conan '' and that he 's going to make a splash even greater than Arnold Schwarzenegger , Jean-Claud Van Damme or Steven Segal .\",\n",
" 3], dtype=object)"
]
},
"metadata": {
"tags": []
},
"execution_count": 85
}
]
},
{
"metadata": {
"id": "s-WuTQZBRlQL",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"import time\n",
"import numpy as np"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "352NIrPCN7kn",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 896
},
"outputId": "740cacd1-b5dc-4ef3-902b-af42f53530f5"
},
"cell_type": "code",
"source": [
"for epoch in range(100):\n",
" start = time.time()\n",
" total_loss = 0\n",
" \n",
" \n",
" for instance in data:\n",
" text = instance[0]\n",
" label = instance[1]\n",
"\n",
" # Step 1. Remember that PyTorch accumulates gradients.\n",
" # We need to clear them out before each instance\n",
" model.zero_grad()\n",
"\n",
" # Step 2. Make our BOW vector\n",
" bow_vec = make_bow_vec(text, word_to_inx).to(device)\n",
" target = make_target(label, label_to_ix).to(device)\n",
"\n",
" # Step 3. Run our forward pass.\n",
" prediction = model(bow_vec)\n",
"\n",
" # Step 4. Compute the loss, gradients, and update the parameters by\n",
" # calling optimizer.step()\n",
" loss = loss_function(prediction, target)\n",
" total_loss += loss.item()\n",
" loss.backward()\n",
" optimizer.step()\n",
" \n",
" \n",
" \n",
" \n",
" #testing accuracy\n",
" \n",
" test_correct = 0.0\n",
" for instance in t_data:\n",
" model.eval()\n",
" text = instance[0]\n",
" label = instance[1]\n",
" \n",
" # Step 2. Make our BOW vector\n",
" bow_vec = make_bow_vec(text, word_to_inx).to(device)\n",
" target = make_target(label, label_to_ix).to(device)\n",
"\n",
" scores = model(bow_vec).detach().cpu().numpy()\n",
" \n",
" predict = np.argmax(scores)\n",
" \n",
" if predict==label:\n",
" test_correct += 1\n",
" \n",
" print(\"Epoch : {}, Loss : {}, Val Accuracy : {}\".format(epoch,total_loss/len(data),test_correct/len(t_data)))\n",
" model.train()\n",
" \n",
"print(\"Time taken : \", time.time()-start)"
],
"execution_count": 89,
"outputs": [
{
"output_type": "stream",
"text": [
"Epoch : 0, Loss : 1.0280600509184241, Val Accuracy : 0.23891402714932128\n",
"Epoch : 1, Loss : 0.7755610834639729, Val Accuracy : 0.26289592760180996\n",
"Epoch : 2, Loss : 0.6225328992856911, Val Accuracy : 0.27918552036199096\n",
"Epoch : 3, Loss : 0.5197520237067219, Val Accuracy : 0.2909502262443439\n",
"Epoch : 4, Loss : 0.4464171468890673, Val Accuracy : 0.29638009049773756\n",
"Epoch : 5, Loss : 0.3913967481182979, Val Accuracy : 0.3036199095022624\n",
"Epoch : 6, Loss : 0.34859211772177995, Val Accuracy : 0.3054298642533937\n",
"Epoch : 7, Loss : 0.31438403238266815, Val Accuracy : 0.31040723981900453\n",
"Epoch : 8, Loss : 0.28647941092119905, Val Accuracy : 0.31266968325791855\n",
"Epoch : 9, Loss : 0.2632945368157121, Val Accuracy : 0.31447963800904977\n",
"Epoch : 10, Loss : 0.24371491451416347, Val Accuracy : 0.316289592760181\n",
"Epoch : 11, Loss : 0.2269521335467752, Val Accuracy : 0.31493212669683257\n",
"Epoch : 12, Loss : 0.21243254667364256, Val Accuracy : 0.316289592760181\n",
"Epoch : 13, Loss : 0.1997268639211909, Val Accuracy : 0.32036199095022627\n",
"Epoch : 14, Loss : 0.18850702608932046, Val Accuracy : 0.3199095022624434\n",
"Epoch : 15, Loss : 0.17851887773792172, Val Accuracy : 0.3180995475113122\n",
"Epoch : 16, Loss : 0.16956340705745676, Val Accuracy : 0.3208144796380091\n",
"Epoch : 17, Loss : 0.16148315462866303, Val Accuracy : 0.3217194570135747\n",
"Epoch : 18, Loss : 0.1541520461774944, Val Accuracy : 0.3212669683257919\n",
"Epoch : 19, Loss : 0.14746801294944484, Val Accuracy : 0.3217194570135747\n",
"Epoch : 20, Loss : 0.1413474517843027, Val Accuracy : 0.3226244343891403\n",
"Epoch : 21, Loss : 0.13572116872846857, Val Accuracy : 0.3226244343891403\n",
"Epoch : 22, Loss : 0.13053131462789877, Val Accuracy : 0.3239819004524887\n",
"Epoch : 23, Loss : 0.12572903542566613, Val Accuracy : 0.3226244343891403\n",
"Epoch : 24, Loss : 0.1212727145225591, Val Accuracy : 0.3235294117647059\n"
],
"name": "stdout"
},
{
"output_type": "error",
"ename": "KeyboardInterrupt",
"evalue": "ignored",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-89-64323abe62d9>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mprediction\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 24\u001b[0m \u001b[0mtotal_loss\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 25\u001b[0;31m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 26\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python3.6/dist-packages/torch/tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(self, gradient, retain_graph, create_graph)\u001b[0m\n\u001b[1;32m 100\u001b[0m \u001b[0mproducts\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mDefaults\u001b[0m \u001b[0mto\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 101\u001b[0m \"\"\"\n\u001b[0;32m--> 102\u001b[0;31m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 103\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python3.6/dist-packages/torch/autograd/__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables)\u001b[0m\n\u001b[1;32m 88\u001b[0m Variable._execution_engine.run_backward(\n\u001b[1;32m 89\u001b[0m \u001b[0mtensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgrad_tensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 90\u001b[0;31m allow_unreachable=True) # allow_unreachable flag\n\u001b[0m\u001b[1;32m 91\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
]
},
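{
"metadata": {},
"cell_type": "markdown",
"source": [
"Once trained, the same `make_bow_vec` pipeline can score a new sentence. The sentence below is an arbitrary example; words outside the training vocabulary are simply ignored by the BOW vector."
]
},
{
"metadata": {},
"cell_type": "code",
"source": [
"# predict the sentiment of an arbitrary example sentence\n",
"sentence = \"a charming and gorgeously elaborate film\"\n",
"\n",
"model.eval()\n",
"with torch.no_grad():\n",
"    bow_vec = make_bow_vec(sentence, word_to_inx).to(device)\n",
"    predict = model(bow_vec).argmax(dim=1).item()\n",
"\n",
"labels = [\"very bad\", \"bad\", \"neutral\", \"good\", \"very good\"]\n",
"print(sentence, \"->\", labels[predict])"
],
"execution_count": 0,
"outputs": []
}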
]
}