Skip to content

Instantly share code, notes, and snippets.

@h5li
Created February 24, 2019 19:04
Show Gist options
  • Save h5li/26c5b3cfc4c83068188ac14183ae1f1e to your computer and use it in GitHub Desktop.
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "code",
"execution_count": 136,
"metadata": {},
"outputs": [],
"source": [
"import imageio\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.image as mpimg\n",
"import os\n",
"import warnings\n",
"import cv2\n",
"import keras\n",
"import numpy as np\n",
"from keras.utils import to_categorical\n",
"from keras.models import Sequential\n",
"from keras.layers import RNN\n",
"from keras.layers import SimpleRNN,LSTM,GRU\n",
"from keras.layers import Input, Dense\n",
"from keras.optimizers import Adam"
]
},
{
"cell_type": "code",
"execution_count": 144,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# We are going to use Videos from fall_floor,sit,situp,somersault,stand and walk\n",
"file_to_read = ['walk','fall_floor','sit','situp','somersault','stand']\n",
"files_labels = {'fall_floor':0,'sit':1,'situp':2,'somersault':3,'stand':4,'walk':5}\n",
"data_dir = 'hmdb51_org'\n",
"image_size = (50,50)\n",
"data_dim = 2500"
]
},
{
"cell_type": "code",
"execution_count": 145,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def load_data(file_list):\n",
" train_data = []\n",
" train_labels = []\n",
" test_data = []\n",
" test_labels = []\n",
" for f in file_list:\n",
" print('Reading '+ f + '...')\n",
" file_name = f + '_test_split1.txt'\n",
" file = open(file_name,'r')\n",
" for i,line in enumerate(file.readlines()):\n",
" if i % 100 == 0:\n",
" print('\\tReading '+ str(i) + 'th videos')\n",
" line = line.split(' ')\n",
" video_name = line[0]\n",
" label = int(line[1])\n",
" file_path = data_dir + '/' + f + '/' + video_name\n",
" vid = imageio.get_reader(file_path, 'ffmpeg')\n",
" \n",
" video_screenshot = []\n",
" for image in vid.iter_data():\n",
" warnings.simplefilter(\"ignore\")\n",
" image = cv2.resize(image,dsize = image_size,interpolation = cv2.INTER_CUBIC)\n",
" gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n",
" video_screenshot.append(gray_image)\n",
" if label != 2:\n",
" train_data.append(video_screenshot)\n",
" train_labels.append(files_labels[f])\n",
" else:\n",
" test_data.append(video_screenshot)\n",
" test_labels.append(files_labels[f])\n",
" print('Finish Reading '+ f + '...')\n",
" return train_data,train_labels,test_data,test_labels"
]
},
{
"cell_type": "code",
"execution_count": 146,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Reading walk...\n",
"\tReading 0th videos\n",
"\tReading 100th videos\n",
"\tReading 200th videos\n",
"\tReading 300th videos\n",
"\tReading 400th videos\n",
"\tReading 500th videos\n",
"Finish Reading walk...\n",
"Reading fall_floor...\n",
"\tReading 0th videos\n",
"\tReading 100th videos\n",
"Finish Reading fall_floor...\n",
"Reading sit...\n",
"\tReading 0th videos\n",
"\tReading 100th videos\n",
"Finish Reading sit...\n",
"Reading situp...\n",
"\tReading 0th videos\n",
"\tReading 100th videos\n",
"Finish Reading situp...\n",
"Reading somersault...\n",
"\tReading 0th videos\n",
"\tReading 100th videos\n",
"Finish Reading somersault...\n",
"Reading stand...\n",
"\tReading 0th videos\n",
"\tReading 100th videos\n",
"Finish Reading stand...\n"
]
}
],
"source": [
"train_data,train_labels,test_data,test_labels = load_data(file_to_read)"
]
},
{
"cell_type": "code",
"execution_count": 147,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{(50, 50)}\n",
"532 18\n"
]
}
],
"source": [
"size = set()\n",
"length = []\n",
"for i in train_data:\n",
" size.add(i[0].shape)\n",
" length.append(len(i))\n",
"print(size)\n",
"print(max(length),min(length))"
]
},
{
"cell_type": "code",
"execution_count": 148,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def convertData(data):\n",
" convertedData = []\n",
" for i,d in enumerate(data):\n",
" if i % 100 == 0:\n",
" print(\"Finish \"+str(i))\n",
" length = len(d)\n",
" interval = length//18\n",
" video_filtered = []\n",
" for j in range(18):\n",
" video_filtered.append(d[0 + j*interval].reshape(-1,1).flatten())\n",
" convertedData.append(np.array(video_filtered))\n",
" convertedData = np.array(convertedData)\n",
" print(convertedData.shape)\n",
" return convertedData"
]
},
{
"cell_type": "code",
"execution_count": 149,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Finish 0\n",
"Finish 100\n",
"Finish 200\n",
"Finish 300\n",
"Finish 400\n",
"Finish 500\n",
"Finish 600\n",
"Finish 700\n",
"Finish 800\n",
"Finish 900\n",
"Finish 1000\n",
"(1045, 18, 2500)\n"
]
}
],
"source": [
"new_train_data = convertData(train_data)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"new_train_labels = []\n",
"for i,l in enumerate(train_labels):\n",
" if l != 5:\n",
" new_train_labels.append(0)\n",
" else:\n",
" new_train_labels.append(1)\n",
"new_train_labels = to_categorical(new_train_labels)\n",
"test_labels = to_categorical(test_labels)"
]
},
{
"cell_type": "code",
"execution_count": 140,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"gru_2 (GRU) (None, 2) 618 \n",
"_________________________________________________________________\n",
"dense_37 (Dense) (None, 2) 6 \n",
"=================================================================\n",
"Total params: 624\n",
"Trainable params: 624\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
"model = Sequential()\n",
"model.add(GRU(2,\n",
" input_shape = (18,data_dim)))\n",
"model.add(Dense(2,activation = 'softmax'))\n",
"model.compile(loss='categorical_crossentropy',\n",
" optimizer=Adam(lr = 0.001),\n",
" metrics=['accuracy'])\n",
"model.summary()"
]
},
{
"cell_type": "code",
"execution_count": 141,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Train on 836 samples, validate on 209 samples\n",
"Epoch 1/200\n",
"836/836 [==============================] - 4s 5ms/step - loss: 0.7937 - acc: 0.5538 - val_loss: 1.1049 - val_acc: 0.1148\n",
"Epoch 2/200\n",
"836/836 [==============================] - 1s 710us/step - loss: 0.7121 - acc: 0.6100 - val_loss: 1.0743 - val_acc: 0.0622\n",
"Epoch 3/200\n",
"836/836 [==============================] - 1s 810us/step - loss: 0.6984 - acc: 0.6160 - val_loss: 1.0637 - val_acc: 0.0957\n",
"Epoch 4/200\n",
"836/836 [==============================] - 1s 771us/step - loss: 0.6986 - acc: 0.6089 - val_loss: 1.0550 - val_acc: 0.0622\n",
"Epoch 5/200\n",
"836/836 [==============================] - 1s 768us/step - loss: 0.6923 - acc: 0.6124 - val_loss: 1.0613 - val_acc: 0.0431\n",
"Epoch 6/200\n",
"836/836 [==============================] - 1s 753us/step - loss: 0.6904 - acc: 0.6112 - val_loss: 1.0512 - val_acc: 0.0383\n",
"Epoch 7/200\n",
"836/836 [==============================] - 1s 901us/step - loss: 0.6849 - acc: 0.6160 - val_loss: 1.0455 - val_acc: 0.0431\n",
"Epoch 8/200\n",
"836/836 [==============================] - 1s 811us/step - loss: 0.6818 - acc: 0.6148 - val_loss: 1.0334 - val_acc: 0.0478\n",
"Epoch 9/200\n",
"836/836 [==============================] - 1s 898us/step - loss: 0.6807 - acc: 0.6172 - val_loss: 1.0316 - val_acc: 0.0526\n",
"Epoch 10/200\n",
"836/836 [==============================] - 1s 864us/step - loss: 0.6760 - acc: 0.6232 - val_loss: 1.0296 - val_acc: 0.0526\n",
"Epoch 11/200\n",
"836/836 [==============================] - 1s 757us/step - loss: 0.6743 - acc: 0.6244 - val_loss: 1.0161 - val_acc: 0.0526\n",
"Epoch 12/200\n",
"836/836 [==============================] - 1s 834us/step - loss: 0.6727 - acc: 0.6256 - val_loss: 1.0146 - val_acc: 0.0526\n",
"Epoch 13/200\n",
"836/836 [==============================] - 1s 905us/step - loss: 0.6717 - acc: 0.6256 - val_loss: 1.0094 - val_acc: 0.0526\n",
"Epoch 14/200\n",
"836/836 [==============================] - 1s 832us/step - loss: 0.6715 - acc: 0.6256 - val_loss: 1.0152 - val_acc: 0.0478\n",
"Epoch 15/200\n",
"836/836 [==============================] - 1s 796us/step - loss: 0.6707 - acc: 0.6256 - val_loss: 1.0183 - val_acc: 0.0478\n",
"Epoch 16/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6703 - acc: 0.6256 - val_loss: 1.0214 - val_acc: 0.0526\n",
"Epoch 17/200\n",
"836/836 [==============================] - 1s 940us/step - loss: 0.6701 - acc: 0.6256 - val_loss: 1.0248 - val_acc: 0.0478\n",
"Epoch 18/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6695 - acc: 0.6256 - val_loss: 1.0209 - val_acc: 0.0478\n",
"Epoch 19/200\n",
"836/836 [==============================] - 1s 966us/step - loss: 0.6691 - acc: 0.6256 - val_loss: 1.0173 - val_acc: 0.0478\n",
"Epoch 20/200\n",
"836/836 [==============================] - 1s 828us/step - loss: 0.6687 - acc: 0.6268 - val_loss: 1.0146 - val_acc: 0.0526\n",
"Epoch 21/200\n",
"836/836 [==============================] - 1s 740us/step - loss: 0.6687 - acc: 0.6268 - val_loss: 1.0145 - val_acc: 0.0526\n",
"Epoch 22/200\n",
"836/836 [==============================] - 1s 725us/step - loss: 0.6679 - acc: 0.6268 - val_loss: 1.0092 - val_acc: 0.0574\n",
"Epoch 23/200\n",
"836/836 [==============================] - 1s 785us/step - loss: 0.6676 - acc: 0.6268 - val_loss: 1.0082 - val_acc: 0.0526\n",
"Epoch 24/200\n",
"836/836 [==============================] - 1s 805us/step - loss: 0.6673 - acc: 0.6268 - val_loss: 1.0031 - val_acc: 0.0526\n",
"Epoch 25/200\n",
"836/836 [==============================] - 1s 753us/step - loss: 0.6670 - acc: 0.6268 - val_loss: 1.0038 - val_acc: 0.0574\n",
"Epoch 26/200\n",
"836/836 [==============================] - 1s 795us/step - loss: 0.6664 - acc: 0.6280 - val_loss: 0.9987 - val_acc: 0.0478\n",
"Epoch 27/200\n",
"836/836 [==============================] - 1s 793us/step - loss: 0.6662 - acc: 0.6268 - val_loss: 1.0020 - val_acc: 0.0574\n",
"Epoch 28/200\n",
"836/836 [==============================] - 1s 835us/step - loss: 0.6656 - acc: 0.6280 - val_loss: 1.0013 - val_acc: 0.0574\n",
"Epoch 29/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6713 - acc: 0.6244 - val_loss: 0.9999 - val_acc: 0.0574\n",
"Epoch 30/200\n",
"836/836 [==============================] - 1s 2ms/step - loss: 0.6648 - acc: 0.6292 - val_loss: 1.0008 - val_acc: 0.0478\n",
"Epoch 31/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6698 - acc: 0.6220 - val_loss: 1.0002 - val_acc: 0.0383\n",
"Epoch 32/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6739 - acc: 0.6148 - val_loss: 1.0012 - val_acc: 0.0287\n",
"Epoch 33/200\n",
"836/836 [==============================] - 1s 924us/step - loss: 0.6724 - acc: 0.6208 - val_loss: 0.9854 - val_acc: 0.0287\n",
"Epoch 34/200\n",
"836/836 [==============================] - 1s 987us/step - loss: 0.6706 - acc: 0.6208 - val_loss: 0.9868 - val_acc: 0.0287\n",
"Epoch 35/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6685 - acc: 0.6220 - val_loss: 0.9771 - val_acc: 0.0335\n",
"Epoch 36/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6696 - acc: 0.6208 - val_loss: 0.9801 - val_acc: 0.0287\n",
"Epoch 37/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6699 - acc: 0.6220 - val_loss: 0.9824 - val_acc: 0.0335\n",
"Epoch 38/200\n",
"836/836 [==============================] - 1s 959us/step - loss: 0.6697 - acc: 0.6208 - val_loss: 0.9787 - val_acc: 0.0335\n",
"Epoch 39/200\n",
"836/836 [==============================] - 1s 840us/step - loss: 0.6688 - acc: 0.6196 - val_loss: 0.9853 - val_acc: 0.0335\n",
"Epoch 40/200\n",
"836/836 [==============================] - 1s 891us/step - loss: 0.6689 - acc: 0.6232 - val_loss: 0.9912 - val_acc: 0.0335\n",
"Epoch 41/200\n",
"836/836 [==============================] - 1s 884us/step - loss: 0.6663 - acc: 0.6256 - val_loss: 0.9765 - val_acc: 0.0239\n",
"Epoch 42/200\n",
"836/836 [==============================] - 1s 877us/step - loss: 0.6655 - acc: 0.6268 - val_loss: 0.9666 - val_acc: 0.0335\n",
"Epoch 43/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6660 - acc: 0.6268 - val_loss: 0.9638 - val_acc: 0.0287\n",
"Epoch 44/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6650 - acc: 0.6244 - val_loss: 0.9754 - val_acc: 0.0239\n",
"Epoch 45/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6667 - acc: 0.6208 - val_loss: 0.9685 - val_acc: 0.0191\n",
"Epoch 46/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6654 - acc: 0.6208 - val_loss: 0.9563 - val_acc: 0.0239A: 0s - loss: 0.6701 - acc: 0.61\n",
"Epoch 47/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6646 - acc: 0.6208 - val_loss: 0.9605 - val_acc: 0.0239\n",
"Epoch 48/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6640 - acc: 0.6220 - val_loss: 0.9665 - val_acc: 0.0239\n",
"Epoch 49/200\n",
"836/836 [==============================] - 1s 954us/step - loss: 0.6624 - acc: 0.6244 - val_loss: 0.9652 - val_acc: 0.0239\n",
"Epoch 50/200\n",
"836/836 [==============================] - 1s 866us/step - loss: 0.6620 - acc: 0.6244 - val_loss: 0.9754 - val_acc: 0.0239\n",
"Epoch 51/200\n",
"836/836 [==============================] - 1s 923us/step - loss: 0.6619 - acc: 0.6244 - val_loss: 0.9696 - val_acc: 0.0239\n",
"Epoch 52/200\n",
"836/836 [==============================] - 1s 801us/step - loss: 0.6622 - acc: 0.6244 - val_loss: 0.9633 - val_acc: 0.0239\n",
"Epoch 53/200\n",
"836/836 [==============================] - 1s 797us/step - loss: 0.6622 - acc: 0.6244 - val_loss: 0.9531 - val_acc: 0.0239\n",
"Epoch 54/200\n",
"836/836 [==============================] - 1s 818us/step - loss: 0.6618 - acc: 0.6244 - val_loss: 0.9587 - val_acc: 0.0239\n",
"Epoch 55/200\n",
"836/836 [==============================] - 1s 839us/step - loss: 0.6618 - acc: 0.6244 - val_loss: 0.9565 - val_acc: 0.0191\n",
"Epoch 56/200\n",
"836/836 [==============================] - 1s 913us/step - loss: 0.6619 - acc: 0.6244 - val_loss: 0.9552 - val_acc: 0.0239\n",
"Epoch 57/200\n",
"836/836 [==============================] - 1s 803us/step - loss: 0.6619 - acc: 0.6244 - val_loss: 0.9587 - val_acc: 0.0239\n",
"Epoch 58/200\n",
"836/836 [==============================] - 1s 791us/step - loss: 0.6617 - acc: 0.6244 - val_loss: 0.9665 - val_acc: 0.0191\n",
"Epoch 59/200\n",
"836/836 [==============================] - 1s 813us/step - loss: 0.6615 - acc: 0.6244 - val_loss: 0.9607 - val_acc: 0.0191\n",
"Epoch 60/200\n",
"836/836 [==============================] - 1s 834us/step - loss: 0.6606 - acc: 0.6256 - val_loss: 0.9683 - val_acc: 0.0191\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 61/200\n",
"836/836 [==============================] - 1s 779us/step - loss: 0.6628 - acc: 0.6232 - val_loss: 0.9576 - val_acc: 0.0239\n",
"Epoch 62/200\n",
"836/836 [==============================] - 1s 857us/step - loss: 0.6602 - acc: 0.6256 - val_loss: 0.9581 - val_acc: 0.0287\n",
"Epoch 63/200\n",
"836/836 [==============================] - 1s 821us/step - loss: 0.6592 - acc: 0.6292 - val_loss: 0.9570 - val_acc: 0.0287\n",
"Epoch 64/200\n",
"836/836 [==============================] - 1s 808us/step - loss: 0.6571 - acc: 0.6328 - val_loss: 0.9656 - val_acc: 0.0287\n",
"Epoch 65/200\n",
"836/836 [==============================] - 1s 824us/step - loss: 0.6572 - acc: 0.6316 - val_loss: 0.9769 - val_acc: 0.0287\n",
"Epoch 66/200\n",
"836/836 [==============================] - 1s 810us/step - loss: 0.6810 - acc: 0.6148 - val_loss: 1.0128 - val_acc: 0.0431\n",
"Epoch 67/200\n",
"836/836 [==============================] - 1s 818us/step - loss: 0.6632 - acc: 0.6280 - val_loss: 0.9517 - val_acc: 0.0383\n",
"Epoch 68/200\n",
"836/836 [==============================] - 1s 834us/step - loss: 0.6562 - acc: 0.6352 - val_loss: 0.9540 - val_acc: 0.0383\n",
"Epoch 69/200\n",
"836/836 [==============================] - 1s 771us/step - loss: 0.6559 - acc: 0.6352 - val_loss: 0.9840 - val_acc: 0.0287\n",
"Epoch 70/200\n",
"836/836 [==============================] - 1s 884us/step - loss: 0.6557 - acc: 0.6328 - val_loss: 0.9755 - val_acc: 0.0431\n",
"Epoch 71/200\n",
"836/836 [==============================] - 1s 914us/step - loss: 0.6531 - acc: 0.6352 - val_loss: 0.9958 - val_acc: 0.0335\n",
"Epoch 72/200\n",
"836/836 [==============================] - 1s 983us/step - loss: 0.6539 - acc: 0.6364 - val_loss: 0.9825 - val_acc: 0.0287\n",
"Epoch 73/200\n",
"836/836 [==============================] - 1s 895us/step - loss: 0.6511 - acc: 0.6364 - val_loss: 0.9733 - val_acc: 0.0383\n",
"Epoch 74/200\n",
"836/836 [==============================] - 1s 959us/step - loss: 0.6498 - acc: 0.6388 - val_loss: 0.9834 - val_acc: 0.0431\n",
"Epoch 75/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6488 - acc: 0.6411 - val_loss: 0.9795 - val_acc: 0.0383\n",
"Epoch 76/200\n",
"836/836 [==============================] - 1s 962us/step - loss: 0.6482 - acc: 0.6411 - val_loss: 0.9847 - val_acc: 0.0431\n",
"Epoch 77/200\n",
"836/836 [==============================] - 1s 939us/step - loss: 0.6477 - acc: 0.6411 - val_loss: 0.9840 - val_acc: 0.0431\n",
"Epoch 78/200\n",
"836/836 [==============================] - 1s 939us/step - loss: 0.6476 - acc: 0.6411 - val_loss: 0.9814 - val_acc: 0.0431\n",
"Epoch 79/200\n",
"836/836 [==============================] - 1s 934us/step - loss: 0.6474 - acc: 0.6411 - val_loss: 0.9710 - val_acc: 0.0431\n",
"Epoch 80/200\n",
"836/836 [==============================] - 1s 878us/step - loss: 0.6473 - acc: 0.6411 - val_loss: 0.9706 - val_acc: 0.0431\n",
"Epoch 81/200\n",
"836/836 [==============================] - 1s 920us/step - loss: 0.6473 - acc: 0.6411 - val_loss: 0.9726 - val_acc: 0.0431\n",
"Epoch 82/200\n",
"836/836 [==============================] - 1s 981us/step - loss: 0.6471 - acc: 0.6411 - val_loss: 0.9811 - val_acc: 0.0431\n",
"Epoch 83/200\n",
"836/836 [==============================] - 1s 963us/step - loss: 0.6470 - acc: 0.6411 - val_loss: 0.9847 - val_acc: 0.0431\n",
"Epoch 84/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6468 - acc: 0.6411 - val_loss: 0.9810 - val_acc: 0.0431\n",
"Epoch 85/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6470 - acc: 0.6411 - val_loss: 0.9781 - val_acc: 0.0431\n",
"Epoch 86/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6466 - acc: 0.6411 - val_loss: 0.9754 - val_acc: 0.0431\n",
"Epoch 87/200\n",
"836/836 [==============================] - 1s 897us/step - loss: 0.6467 - acc: 0.6411 - val_loss: 0.9769 - val_acc: 0.0431\n",
"Epoch 88/200\n",
"836/836 [==============================] - 1s 896us/step - loss: 0.6465 - acc: 0.6411 - val_loss: 0.9847 - val_acc: 0.0431\n",
"Epoch 89/200\n",
"836/836 [==============================] - 1s 810us/step - loss: 0.6465 - acc: 0.6411 - val_loss: 0.9909 - val_acc: 0.0431\n",
"Epoch 90/200\n",
"836/836 [==============================] - 1s 782us/step - loss: 0.6465 - acc: 0.6411 - val_loss: 0.9948 - val_acc: 0.0431\n",
"Epoch 91/200\n",
"836/836 [==============================] - 1s 826us/step - loss: 0.6463 - acc: 0.6411 - val_loss: 0.9833 - val_acc: 0.0431\n",
"Epoch 92/200\n",
"836/836 [==============================] - 1s 774us/step - loss: 0.6464 - acc: 0.6411 - val_loss: 0.9803 - val_acc: 0.0431\n",
"Epoch 93/200\n",
"836/836 [==============================] - 1s 778us/step - loss: 0.6461 - acc: 0.6411 - val_loss: 0.9784 - val_acc: 0.0383\n",
"Epoch 94/200\n",
"836/836 [==============================] - 1s 768us/step - loss: 0.6461 - acc: 0.6411 - val_loss: 0.9819 - val_acc: 0.0431\n",
"Epoch 95/200\n",
"836/836 [==============================] - 1s 800us/step - loss: 0.6460 - acc: 0.6411 - val_loss: 0.9792 - val_acc: 0.0431\n",
"Epoch 96/200\n",
"836/836 [==============================] - 1s 857us/step - loss: 0.6461 - acc: 0.6411 - val_loss: 0.9891 - val_acc: 0.0383\n",
"Epoch 97/200\n",
"836/836 [==============================] - 1s 877us/step - loss: 0.6460 - acc: 0.6411 - val_loss: 0.9911 - val_acc: 0.0383\n",
"Epoch 98/200\n",
"836/836 [==============================] - 1s 957us/step - loss: 0.6460 - acc: 0.6411 - val_loss: 0.9841 - val_acc: 0.0383\n",
"Epoch 99/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6485 - acc: 0.6376 - val_loss: 0.9797 - val_acc: 0.0431\n",
"Epoch 100/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6499 - acc: 0.6364 - val_loss: 0.9801 - val_acc: 0.0431\n",
"Epoch 101/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6467 - acc: 0.6411 - val_loss: 0.9805 - val_acc: 0.0431\n",
"Epoch 102/200\n",
"836/836 [==============================] - 1s 918us/step - loss: 0.6533 - acc: 0.6328 - val_loss: 0.9792 - val_acc: 0.0478\n",
"Epoch 103/200\n",
"836/836 [==============================] - 1s 995us/step - loss: 0.6503 - acc: 0.6388 - val_loss: 0.9818 - val_acc: 0.0431\n",
"Epoch 104/200\n",
"836/836 [==============================] - 1s 890us/step - loss: 0.6452 - acc: 0.6423 - val_loss: 0.9867 - val_acc: 0.0478\n",
"Epoch 105/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6443 - acc: 0.6423 - val_loss: 0.9738 - val_acc: 0.0431\n",
"Epoch 106/200\n",
"836/836 [==============================] - 1s 977us/step - loss: 0.6445 - acc: 0.6411 - val_loss: 0.9748 - val_acc: 0.0431\n",
"Epoch 107/200\n",
"836/836 [==============================] - 1s 957us/step - loss: 0.6435 - acc: 0.6435 - val_loss: 0.9820 - val_acc: 0.0478\n",
"Epoch 108/200\n",
"836/836 [==============================] - 1s 957us/step - loss: 0.6487 - acc: 0.6388 - val_loss: 0.9735 - val_acc: 0.0383\n",
"Epoch 109/200\n",
"836/836 [==============================] - 1s 809us/step - loss: 0.6464 - acc: 0.6388 - val_loss: 0.9943 - val_acc: 0.0574\n",
"Epoch 110/200\n",
"836/836 [==============================] - 1s 823us/step - loss: 0.6570 - acc: 0.6340 - val_loss: 0.9889 - val_acc: 0.0287\n",
"Epoch 111/200\n",
"836/836 [==============================] - 1s 764us/step - loss: 0.6489 - acc: 0.6376 - val_loss: 1.0131 - val_acc: 0.0383\n",
"Epoch 112/200\n",
"836/836 [==============================] - 1s 926us/step - loss: 0.6681 - acc: 0.6232 - val_loss: 0.9557 - val_acc: 0.1148\n",
"Epoch 113/200\n",
"836/836 [==============================] - 1s 787us/step - loss: 0.6870 - acc: 0.6029 - val_loss: 0.9429 - val_acc: 0.0957\n",
"Epoch 114/200\n",
"836/836 [==============================] - 1s 818us/step - loss: 0.6702 - acc: 0.6172 - val_loss: 0.9576 - val_acc: 0.0861\n",
"Epoch 115/200\n",
"836/836 [==============================] - 1s 872us/step - loss: 0.6624 - acc: 0.6256 - val_loss: 0.9781 - val_acc: 0.0478\n",
"Epoch 116/200\n",
"836/836 [==============================] - 1s 864us/step - loss: 0.6630 - acc: 0.6268 - val_loss: 0.9776 - val_acc: 0.0478\n",
"Epoch 117/200\n",
"836/836 [==============================] - 1s 819us/step - loss: 0.6601 - acc: 0.6292 - val_loss: 0.9707 - val_acc: 0.0526\n",
"Epoch 118/200\n",
"836/836 [==============================] - 1s 771us/step - loss: 0.6596 - acc: 0.6304 - val_loss: 0.9557 - val_acc: 0.0670\n",
"Epoch 119/200\n",
"836/836 [==============================] - 1s 815us/step - loss: 0.6598 - acc: 0.6304 - val_loss: 0.9619 - val_acc: 0.0670\n",
"Epoch 120/200\n",
"836/836 [==============================] - 1s 751us/step - loss: 0.6595 - acc: 0.6304 - val_loss: 0.9662 - val_acc: 0.0670\n",
"Epoch 121/200\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"836/836 [==============================] - 1s 782us/step - loss: 0.6594 - acc: 0.6304 - val_loss: 0.9714 - val_acc: 0.0670\n",
"Epoch 122/200\n",
"836/836 [==============================] - 1s 736us/step - loss: 0.6593 - acc: 0.6304 - val_loss: 0.9687 - val_acc: 0.0670\n",
"Epoch 123/200\n",
"836/836 [==============================] - 1s 830us/step - loss: 0.6593 - acc: 0.6304 - val_loss: 0.9715 - val_acc: 0.0670\n",
"Epoch 124/200\n",
"836/836 [==============================] - 1s 829us/step - loss: 0.6592 - acc: 0.6304 - val_loss: 0.9742 - val_acc: 0.0670\n",
"Epoch 125/200\n",
"836/836 [==============================] - 1s 800us/step - loss: 0.6592 - acc: 0.6304 - val_loss: 0.9756 - val_acc: 0.0670\n",
"Epoch 126/200\n",
"836/836 [==============================] - 1s 688us/step - loss: 0.6591 - acc: 0.6304 - val_loss: 0.9811 - val_acc: 0.0670\n",
"Epoch 127/200\n",
"836/836 [==============================] - 1s 720us/step - loss: 0.6593 - acc: 0.6304 - val_loss: 0.9863 - val_acc: 0.0670\n",
"Epoch 128/200\n",
"836/836 [==============================] - 1s 723us/step - loss: 0.6591 - acc: 0.6304 - val_loss: 0.9887 - val_acc: 0.0670\n",
"Epoch 129/200\n",
"836/836 [==============================] - 1s 772us/step - loss: 0.6591 - acc: 0.6304 - val_loss: 0.9777 - val_acc: 0.0670\n",
"Epoch 130/200\n",
"836/836 [==============================] - 1s 767us/step - loss: 0.6582 - acc: 0.6304 - val_loss: 0.9659 - val_acc: 0.0718\n",
"Epoch 131/200\n",
"836/836 [==============================] - 1s 810us/step - loss: 0.6573 - acc: 0.6316 - val_loss: 0.9772 - val_acc: 0.0574\n",
"Epoch 132/200\n",
"836/836 [==============================] - 1s 791us/step - loss: 0.6569 - acc: 0.6328 - val_loss: 0.9621 - val_acc: 0.0813\n",
"Epoch 133/200\n",
"836/836 [==============================] - 1s 846us/step - loss: 0.6580 - acc: 0.6304 - val_loss: 0.9718 - val_acc: 0.0718\n",
"Epoch 134/200\n",
"836/836 [==============================] - 1s 802us/step - loss: 0.6566 - acc: 0.6328 - val_loss: 0.9539 - val_acc: 0.0957\n",
"Epoch 135/200\n",
"836/836 [==============================] - 1s 767us/step - loss: 0.6581 - acc: 0.6316 - val_loss: 0.9855 - val_acc: 0.0622\n",
"Epoch 136/200\n",
"836/836 [==============================] - 1s 766us/step - loss: 0.6550 - acc: 0.6352 - val_loss: 0.9841 - val_acc: 0.0670\n",
"Epoch 137/200\n",
"836/836 [==============================] - 1s 815us/step - loss: 0.6590 - acc: 0.6292 - val_loss: 1.0067 - val_acc: 0.0335\n",
"Epoch 138/200\n",
"836/836 [==============================] - 1s 809us/step - loss: 0.6603 - acc: 0.6244 - val_loss: 0.9832 - val_acc: 0.0574\n",
"Epoch 139/200\n",
"836/836 [==============================] - 1s 753us/step - loss: 0.6557 - acc: 0.6328 - val_loss: 0.9716 - val_acc: 0.0766\n",
"Epoch 140/200\n",
"836/836 [==============================] - 1s 921us/step - loss: 0.6543 - acc: 0.6352 - val_loss: 0.9649 - val_acc: 0.0861\n",
"Epoch 141/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6539 - acc: 0.6364 - val_loss: 0.9574 - val_acc: 0.0861\n",
"Epoch 142/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6534 - acc: 0.6376 - val_loss: 0.9557 - val_acc: 0.0813\n",
"Epoch 143/200\n",
"836/836 [==============================] - 1s 931us/step - loss: 0.6523 - acc: 0.6388 - val_loss: 0.9823 - val_acc: 0.0670\n",
"Epoch 144/200\n",
"836/836 [==============================] - 1s 886us/step - loss: 0.6521 - acc: 0.6388 - val_loss: 0.9969 - val_acc: 0.0670\n",
"Epoch 145/200\n",
"836/836 [==============================] - 1s 927us/step - loss: 0.6500 - acc: 0.6411 - val_loss: 0.9894 - val_acc: 0.0670\n",
"Epoch 146/200\n",
"836/836 [==============================] - 1s 893us/step - loss: 0.6488 - acc: 0.6411 - val_loss: 0.9995 - val_acc: 0.0574\n",
"Epoch 147/200\n",
"836/836 [==============================] - 1s 925us/step - loss: 0.6477 - acc: 0.6423 - val_loss: 0.9947 - val_acc: 0.0622\n",
"Epoch 148/200\n",
"836/836 [==============================] - 1s 837us/step - loss: 0.6467 - acc: 0.6435 - val_loss: 0.9837 - val_acc: 0.0622\n",
"Epoch 149/200\n",
"836/836 [==============================] - 1s 838us/step - loss: 0.6473 - acc: 0.6447 - val_loss: 1.0195 - val_acc: 0.0478\n",
"Epoch 150/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6452 - acc: 0.6459 - val_loss: 1.0054 - val_acc: 0.0478\n",
"Epoch 151/200\n",
"836/836 [==============================] - 1s 930us/step - loss: 0.6461 - acc: 0.6447 - val_loss: 1.0105 - val_acc: 0.0526\n",
"Epoch 152/200\n",
"836/836 [==============================] - 1s 920us/step - loss: 0.6413 - acc: 0.6507 - val_loss: 1.0042 - val_acc: 0.0478\n",
"Epoch 153/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6475 - acc: 0.6388 - val_loss: 1.0082 - val_acc: 0.0526\n",
"Epoch 154/200\n",
"836/836 [==============================] - 1s 880us/step - loss: 0.6439 - acc: 0.6459 - val_loss: 0.9986 - val_acc: 0.0431\n",
"Epoch 155/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6411 - acc: 0.6483 - val_loss: 1.0032 - val_acc: 0.0526\n",
"Epoch 156/200\n",
"836/836 [==============================] - 1s 892us/step - loss: 0.6401 - acc: 0.6495 - val_loss: 0.9990 - val_acc: 0.0526\n",
"Epoch 157/200\n",
"836/836 [==============================] - 1s 938us/step - loss: 0.6421 - acc: 0.6471 - val_loss: 0.9816 - val_acc: 0.0526\n",
"Epoch 158/200\n",
"836/836 [==============================] - 1s 899us/step - loss: 0.6394 - acc: 0.6495 - val_loss: 0.9886 - val_acc: 0.0526\n",
"Epoch 159/200\n",
"836/836 [==============================] - 1s 919us/step - loss: 0.6400 - acc: 0.6495 - val_loss: 0.9737 - val_acc: 0.0478\n",
"Epoch 160/200\n",
"836/836 [==============================] - 1s 796us/step - loss: 0.6396 - acc: 0.6495 - val_loss: 0.9777 - val_acc: 0.0526\n",
"Epoch 161/200\n",
"836/836 [==============================] - 1s 828us/step - loss: 0.6391 - acc: 0.6495 - val_loss: 1.0011 - val_acc: 0.0526\n",
"Epoch 162/200\n",
"836/836 [==============================] - 1s 859us/step - loss: 0.6380 - acc: 0.6507 - val_loss: 0.9951 - val_acc: 0.0526\n",
"Epoch 163/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6378 - acc: 0.6507 - val_loss: 0.9976 - val_acc: 0.0526\n",
"Epoch 164/200\n",
"836/836 [==============================] - 1s 897us/step - loss: 0.6376 - acc: 0.6543 - val_loss: 0.9907 - val_acc: 0.0526\n",
"Epoch 165/200\n",
"836/836 [==============================] - 1s 804us/step - loss: 0.6375 - acc: 0.6555 - val_loss: 0.9987 - val_acc: 0.0526\n",
"Epoch 166/200\n",
"836/836 [==============================] - 1s 746us/step - loss: 0.6373 - acc: 0.6555 - val_loss: 1.0008 - val_acc: 0.0526\n",
"Epoch 167/200\n",
"836/836 [==============================] - 1s 735us/step - loss: 0.6381 - acc: 0.6543 - val_loss: 1.0155 - val_acc: 0.0526\n",
"Epoch 168/200\n",
"836/836 [==============================] - 1s 783us/step - loss: 0.6410 - acc: 0.6543 - val_loss: 0.9900 - val_acc: 0.0478\n",
"Epoch 169/200\n",
"836/836 [==============================] - 1s 803us/step - loss: 0.6371 - acc: 0.6531 - val_loss: 1.0120 - val_acc: 0.0526\n",
"Epoch 170/200\n",
"836/836 [==============================] - 1s 753us/step - loss: 0.6357 - acc: 0.6567 - val_loss: 1.0071 - val_acc: 0.0526\n",
"Epoch 171/200\n",
"836/836 [==============================] - 1s 685us/step - loss: 0.6353 - acc: 0.6567 - val_loss: 0.9786 - val_acc: 0.0478\n",
"Epoch 172/200\n",
"836/836 [==============================] - 1s 717us/step - loss: 0.6384 - acc: 0.6519 - val_loss: 1.0028 - val_acc: 0.0526\n",
"Epoch 173/200\n",
"836/836 [==============================] - 1s 715us/step - loss: 0.6372 - acc: 0.6555 - val_loss: 0.9935 - val_acc: 0.0526\n",
"Epoch 174/200\n",
"836/836 [==============================] - 1s 668us/step - loss: 0.6354 - acc: 0.6567 - val_loss: 1.0105 - val_acc: 0.0574\n",
"Epoch 175/200\n",
"836/836 [==============================] - 1s 728us/step - loss: 0.6343 - acc: 0.6567 - val_loss: 1.0024 - val_acc: 0.0574\n",
"Epoch 176/200\n",
"836/836 [==============================] - 1s 724us/step - loss: 0.6336 - acc: 0.6567 - val_loss: 1.0004 - val_acc: 0.0574\n",
"Epoch 177/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6337 - acc: 0.6567 - val_loss: 0.9989 - val_acc: 0.0574\n",
"Epoch 178/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6338 - acc: 0.6567 - val_loss: 0.9960 - val_acc: 0.0478\n",
"Epoch 179/200\n",
"836/836 [==============================] - 1s 979us/step - loss: 0.6340 - acc: 0.6567 - val_loss: 0.9892 - val_acc: 0.0526\n",
"Epoch 180/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6336 - acc: 0.6567 - val_loss: 0.9963 - val_acc: 0.0574\n",
"Epoch 181/200\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"836/836 [==============================] - 1s 961us/step - loss: 0.6337 - acc: 0.6543 - val_loss: 1.0068 - val_acc: 0.0574\n",
"Epoch 182/200\n",
"836/836 [==============================] - 1s 880us/step - loss: 0.6323 - acc: 0.6567 - val_loss: 1.0081 - val_acc: 0.0526\n",
"Epoch 183/200\n",
"836/836 [==============================] - ETA: 0s - loss: 0.6330 - acc: 0.657 - 1s 926us/step - loss: 0.6323 - acc: 0.6567 - val_loss: 1.0123 - val_acc: 0.0526\n",
"Epoch 184/200\n",
"836/836 [==============================] - 1s 818us/step - loss: 0.6356 - acc: 0.6555 - val_loss: 0.9923 - val_acc: 0.0526\n",
"Epoch 185/200\n",
"836/836 [==============================] - 1s 901us/step - loss: 0.6335 - acc: 0.6567 - val_loss: 1.0056 - val_acc: 0.0526\n",
"Epoch 186/200\n",
"836/836 [==============================] - 1s 920us/step - loss: 0.6348 - acc: 0.6555 - val_loss: 1.0016 - val_acc: 0.0526\n",
"Epoch 187/200\n",
"836/836 [==============================] - 1s 826us/step - loss: 0.6330 - acc: 0.6555 - val_loss: 0.9910 - val_acc: 0.0526\n",
"Epoch 188/200\n",
"836/836 [==============================] - 1s 839us/step - loss: 0.6322 - acc: 0.6567 - val_loss: 0.9973 - val_acc: 0.0526\n",
"Epoch 189/200\n",
"836/836 [==============================] - 1s 781us/step - loss: 0.6323 - acc: 0.6567 - val_loss: 0.9932 - val_acc: 0.0526\n",
"Epoch 190/200\n",
"836/836 [==============================] - 1s 756us/step - loss: 0.6321 - acc: 0.6567 - val_loss: 1.0003 - val_acc: 0.0526\n",
"Epoch 191/200\n",
"836/836 [==============================] - 1s 813us/step - loss: 0.6320 - acc: 0.6567 - val_loss: 0.9995 - val_acc: 0.0526\n",
"Epoch 192/200\n",
"836/836 [==============================] - 1s 845us/step - loss: 0.6320 - acc: 0.6567 - val_loss: 1.0077 - val_acc: 0.0526\n",
"Epoch 193/200\n",
"836/836 [==============================] - 1s 887us/step - loss: 0.6320 - acc: 0.6567 - val_loss: 1.0090 - val_acc: 0.0526\n",
"Epoch 194/200\n",
"836/836 [==============================] - 1s 753us/step - loss: 0.6320 - acc: 0.6567 - val_loss: 1.0108 - val_acc: 0.0526\n",
"Epoch 195/200\n",
"836/836 [==============================] - 1s 809us/step - loss: 0.6320 - acc: 0.6567 - val_loss: 1.0062 - val_acc: 0.0526\n",
"Epoch 196/200\n",
"836/836 [==============================] - 1s 804us/step - loss: 0.6318 - acc: 0.6567 - val_loss: 1.0096 - val_acc: 0.0526\n",
"Epoch 197/200\n",
"836/836 [==============================] - 1s 767us/step - loss: 0.6318 - acc: 0.6567 - val_loss: 1.0092 - val_acc: 0.0526\n",
"Epoch 198/200\n",
"836/836 [==============================] - 1s 806us/step - loss: 0.6318 - acc: 0.6567 - val_loss: 1.0058 - val_acc: 0.0526\n",
"Epoch 199/200\n",
"836/836 [==============================] - 1s 915us/step - loss: 0.6317 - acc: 0.6567 - val_loss: 1.0059 - val_acc: 0.0526\n",
"Epoch 200/200\n",
"836/836 [==============================] - 1s 1ms/step - loss: 0.6318 - acc: 0.6567 - val_loss: 0.9995 - val_acc: 0.0526\n"
]
},
{
"data": {
"text/plain": [
"<keras.callbacks.History at 0x1833d34d68>"
]
},
"execution_count": 141,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"history = model.fit(new_train_data,new_train_labels,epochs = 200,validation_split = 0.2,shuffle = True)"
]
},
{
"cell_type": "code",
"execution_count": 100,
"metadata": {},
"outputs": [
{
"ename": "AttributeError",
"evalue": "'list' object has no attribute 'shape'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-100-efa91b743fc3>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_labels\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mAttributeError\u001b[0m: 'list' object has no attribute 'shape'"
]
}
],
"source": [
"print(train_labels.shape)"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"150528"
]
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"224*224*3"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [],
"source": [
"a = np.array([1,2,3]).reshape(-1,1)"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[1],\n",
" [2],\n",
" [3]])"
]
},
"execution_count": 35,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a"
]
},
{
"cell_type": "code",
"execution_count": 40,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(3,)"
]
},
"execution_count": 40,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a.T.flatten().shape"
]
},
{
"cell_type": "code",
"execution_count": 86,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[0. 1.]\n",
" [0. 1.]\n",
" [0. 1.]\n",
" ...\n",
" [1. 0.]\n",
" [1. 0.]\n",
" [1. 0.]]\n"
]
}
],
"source": [
"print(new_train_labels)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment