Skip to content

Instantly share code, notes, and snippets.

@Vincent-Stragier
Created May 16, 2023 14:16
Show Gist options
  • Save Vincent-Stragier/1562c159b2e0e7cda804ce153f1a83d4 to your computer and use it in GitHub Desktop.
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"number of instances: 35888\n"
]
}
],
"source": [
"import numpy as np\n",
"\n",
"# Load the raw FER2013 CSV as one line per instance (header row included).\n",
"# A context manager guarantees the file handle is closed after reading.\n",
"with open(\"./data/fer2013/fer2013.csv\") as dataset_file:\n",
"    lines = np.array(dataset_file.readlines())\n",
"\n",
"number_of_instances = lines.size\n",
"print(f\"number of instances: {number_of_instances}\")\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### Store train and test instances in separated variables."
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"number of classes: 7\n"
]
}
],
"source": [
"# Count the distinct emotion labels (first CSV column), skipping the header row.\n",
"unique_emotions = {line.split(\",\")[0] for line in lines[1:]}\n",
"number_of_classes = len(unique_emotions)\n",
"print(f\"number of classes: {number_of_classes}\")"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"# np_utils was removed from standalone Keras (>= 2.10); use the\n",
"# tensorflow.keras helper, consistent with the rest of the notebook.\n",
"from tensorflow.keras.utils import to_categorical\n",
"\n",
"x_train, y_train, x_test, y_test = [], [], [], []\n",
"\n",
"# Each instance is \"emotion,pixels,usage\": one-hot encode the label and\n",
"# reshape the space-separated pixel string into a 48x48x1 grayscale image.\n",
"for instance in lines[1:]:\n",
"    try:\n",
"        emotion, img, usage = instance.split(\",\")\n",
"\n",
"        emotion = to_categorical(int(emotion), number_of_classes)\n",
"        pixels = np.array(img.split(\" \"), 'float32')\n",
"        pixels = np.reshape(pixels, (48, 48, 1))\n",
"\n",
"        if 'Training' in usage:\n",
"            y_train.append(emotion)\n",
"            x_train.append(pixels)\n",
"        elif 'PublicTest' in usage:\n",
"            y_test.append(emotion)\n",
"            x_test.append(pixels)\n",
"\n",
"    except ValueError:\n",
"        # Malformed row (wrong field count or non-numeric data): report it\n",
"        # and keep going rather than aborting the whole parse.\n",
"        import traceback\n",
"        traceback.print_exc()\n",
"\n",
"\n",
"x_train, y_train, x_test, y_test = np.asarray(x_train), np.asarray(y_train), np.asarray(x_test), np.asarray(y_test)"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"# Workaround for the \"duplicate OpenMP runtime\" crash seen on some setups.\n",
"os.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n",
"\n",
"import tensorflow as tf\n",
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"\n",
"# CNN for 48x48x1 facial-expression images: three convolution stages,\n",
"# a two-layer dense classifier with dropout, and a softmax output head.\n",
"model = keras.Sequential([\n",
"    # 1st convolution stage\n",
"    layers.Conv2D(64, (5, 5), activation='relu', input_shape=(48, 48, 1)),\n",
"    layers.MaxPooling2D(pool_size=(5, 5), strides=(2, 2)),\n",
"    # 2nd convolution stage\n",
"    layers.Conv2D(64, (3, 3), activation='relu'),\n",
"    layers.Conv2D(64, (3, 3), activation='relu'),\n",
"    layers.AveragePooling2D(pool_size=(3, 3), strides=(2, 2)),\n",
"    # 3rd convolution stage\n",
"    layers.Conv2D(128, (3, 3), activation='relu'),\n",
"    layers.Conv2D(128, (3, 3), activation='relu'),\n",
"    layers.AveragePooling2D(pool_size=(3, 3), strides=(2, 2)),\n",
"    layers.Flatten(),\n",
"    # Fully connected classifier\n",
"    layers.Dense(1024, activation='relu'),\n",
"    layers.Dropout(0.2),\n",
"    layers.Dense(1024, activation='relu'),\n",
"    layers.Dropout(0.2),\n",
"    # One softmax unit per emotion class\n",
"    layers.Dense(number_of_classes, activation='softmax'),\n",
"])"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### Training"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/300\n",
"128/128 [==============================] - 4s 19ms/step - loss: 1.6781 - accuracy: 0.3329\n",
"Epoch 2/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 1.5306 - accuracy: 0.4102\n",
"Epoch 3/300\n",
"128/128 [==============================] - 2s 16ms/step - loss: 1.4558 - accuracy: 0.4318\n",
"Epoch 4/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 1.4023 - accuracy: 0.4625\n",
"Epoch 5/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 1.3618 - accuracy: 0.4803\n",
"Epoch 6/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 1.3085 - accuracy: 0.5009\n",
"Epoch 7/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 1.2693 - accuracy: 0.5139\n",
"Epoch 8/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 1.2321 - accuracy: 0.5302\n",
"Epoch 9/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 1.2150 - accuracy: 0.5406\n",
"Epoch 10/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 1.1937 - accuracy: 0.5488\n",
"Epoch 11/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 1.1587 - accuracy: 0.5600\n",
"Epoch 12/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 1.1372 - accuracy: 0.5735\n",
"Epoch 13/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 1.1131 - accuracy: 0.5820\n",
"Epoch 14/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 1.0703 - accuracy: 0.5970\n",
"Epoch 15/300\n",
"128/128 [==============================] - 3s 25ms/step - loss: 1.0372 - accuracy: 0.6108\n",
"Epoch 16/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 1.0112 - accuracy: 0.6225\n",
"Epoch 17/300\n",
"128/128 [==============================] - 3s 27ms/step - loss: 0.9920 - accuracy: 0.6279\n",
"Epoch 18/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.9749 - accuracy: 0.6335\n",
"Epoch 19/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.9485 - accuracy: 0.6494\n",
"Epoch 20/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.9075 - accuracy: 0.6642\n",
"Epoch 21/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 0.8809 - accuracy: 0.6706\n",
"Epoch 22/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.8440 - accuracy: 0.6866\n",
"Epoch 23/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.8200 - accuracy: 0.6971\n",
"Epoch 24/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.8008 - accuracy: 0.7021\n",
"Epoch 25/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.7746 - accuracy: 0.7119\n",
"Epoch 26/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.7274 - accuracy: 0.7295\n",
"Epoch 27/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.7106 - accuracy: 0.7347\n",
"Epoch 28/300\n",
"128/128 [==============================] - 3s 27ms/step - loss: 0.6781 - accuracy: 0.7506\n",
"Epoch 29/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.6310 - accuracy: 0.7661\n",
"Epoch 30/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.6266 - accuracy: 0.7704\n",
"Epoch 31/300\n",
"128/128 [==============================] - 3s 25ms/step - loss: 0.5994 - accuracy: 0.7787\n",
"Epoch 32/300\n",
"128/128 [==============================] - 3s 19ms/step - loss: 0.5536 - accuracy: 0.7996\n",
"Epoch 33/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.5707 - accuracy: 0.7927\n",
"Epoch 34/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 0.5493 - accuracy: 0.7978\n",
"Epoch 35/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.4931 - accuracy: 0.8185\n",
"Epoch 36/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 0.4587 - accuracy: 0.8314\n",
"Epoch 37/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 0.4462 - accuracy: 0.8397\n",
"Epoch 38/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 0.4568 - accuracy: 0.8308\n",
"Epoch 39/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.4092 - accuracy: 0.8511\n",
"Epoch 40/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 0.4070 - accuracy: 0.8545\n",
"Epoch 41/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.4043 - accuracy: 0.8527\n",
"Epoch 42/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.3737 - accuracy: 0.8625\n",
"Epoch 43/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 0.3557 - accuracy: 0.8715\n",
"Epoch 44/300\n",
"128/128 [==============================] - 3s 22ms/step - loss: 0.3419 - accuracy: 0.8743\n",
"Epoch 45/300\n",
"128/128 [==============================] - 3s 25ms/step - loss: 0.3249 - accuracy: 0.8815\n",
"Epoch 46/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 0.2993 - accuracy: 0.8922\n",
"Epoch 47/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.3060 - accuracy: 0.8884\n",
"Epoch 48/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.2911 - accuracy: 0.8943\n",
"Epoch 49/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.2634 - accuracy: 0.9068\n",
"Epoch 50/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.2459 - accuracy: 0.9105\n",
"Epoch 51/300\n",
"128/128 [==============================] - 3s 19ms/step - loss: 0.2771 - accuracy: 0.9036\n",
"Epoch 52/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.2521 - accuracy: 0.9095\n",
"Epoch 53/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 0.2394 - accuracy: 0.9142\n",
"Epoch 54/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.2354 - accuracy: 0.9170\n",
"Epoch 55/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.2100 - accuracy: 0.9267\n",
"Epoch 56/300\n",
"128/128 [==============================] - 3s 25ms/step - loss: 0.2221 - accuracy: 0.9224\n",
"Epoch 57/300\n",
"128/128 [==============================] - 3s 26ms/step - loss: 0.2140 - accuracy: 0.9240\n",
"Epoch 58/300\n",
"128/128 [==============================] - 3s 25ms/step - loss: 0.2217 - accuracy: 0.9214\n",
"Epoch 59/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.1989 - accuracy: 0.9305\n",
"Epoch 60/300\n",
"128/128 [==============================] - 3s 19ms/step - loss: 0.2060 - accuracy: 0.9270\n",
"Epoch 61/300\n",
"128/128 [==============================] - 3s 24ms/step - loss: 0.1887 - accuracy: 0.9352\n",
"Epoch 62/300\n",
"128/128 [==============================] - 3s 26ms/step - loss: 0.2006 - accuracy: 0.9287\n",
"Epoch 63/300\n",
"128/128 [==============================] - 4s 27ms/step - loss: 0.1954 - accuracy: 0.9332\n",
"Epoch 64/300\n",
"128/128 [==============================] - 3s 22ms/step - loss: 0.1722 - accuracy: 0.9387\n",
"Epoch 65/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.2120 - accuracy: 0.9269\n",
"Epoch 66/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.1798 - accuracy: 0.9366\n",
"Epoch 67/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 0.1724 - accuracy: 0.9405\n",
"Epoch 68/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.1608 - accuracy: 0.9440\n",
"Epoch 69/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.1706 - accuracy: 0.9396\n",
"Epoch 70/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.1612 - accuracy: 0.9440\n",
"Epoch 71/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.1551 - accuracy: 0.9468\n",
"Epoch 72/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1728 - accuracy: 0.9403\n",
"Epoch 73/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1679 - accuracy: 0.9414\n",
"Epoch 74/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 0.1783 - accuracy: 0.9388\n",
"Epoch 75/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 0.1628 - accuracy: 0.9443\n",
"Epoch 76/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1561 - accuracy: 0.9468\n",
"Epoch 77/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 0.1707 - accuracy: 0.9417\n",
"Epoch 78/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.1564 - accuracy: 0.9488\n",
"Epoch 79/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 0.1452 - accuracy: 0.9537\n",
"Epoch 80/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1216 - accuracy: 0.9593\n",
"Epoch 81/300\n",
"128/128 [==============================] - 3s 19ms/step - loss: 0.1421 - accuracy: 0.9510\n",
"Epoch 82/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1801 - accuracy: 0.9397\n",
"Epoch 83/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1380 - accuracy: 0.9542\n",
"Epoch 84/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.1480 - accuracy: 0.9485\n",
"Epoch 85/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 0.1555 - accuracy: 0.9472\n",
"Epoch 86/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1493 - accuracy: 0.9514\n",
"Epoch 87/300\n",
"128/128 [==============================] - 3s 22ms/step - loss: 0.1308 - accuracy: 0.9550\n",
"Epoch 88/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1531 - accuracy: 0.9487\n",
"Epoch 89/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 0.1549 - accuracy: 0.9485\n",
"Epoch 90/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1444 - accuracy: 0.9520\n",
"Epoch 91/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.1107 - accuracy: 0.9624\n",
"Epoch 92/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 0.1280 - accuracy: 0.9561\n",
"Epoch 93/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.1335 - accuracy: 0.9552\n",
"Epoch 94/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.1429 - accuracy: 0.9524\n",
"Epoch 95/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.1301 - accuracy: 0.9590\n",
"Epoch 96/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.1445 - accuracy: 0.9517\n",
"Epoch 97/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.1339 - accuracy: 0.9554\n",
"Epoch 98/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1345 - accuracy: 0.9543\n",
"Epoch 99/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1066 - accuracy: 0.9641\n",
"Epoch 100/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1335 - accuracy: 0.9554\n",
"Epoch 101/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.1405 - accuracy: 0.9541\n",
"Epoch 102/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.1300 - accuracy: 0.9557\n",
"Epoch 103/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1120 - accuracy: 0.9633\n",
"Epoch 104/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1225 - accuracy: 0.9575\n",
"Epoch 105/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1202 - accuracy: 0.9611\n",
"Epoch 106/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1397 - accuracy: 0.9545\n",
"Epoch 107/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1397 - accuracy: 0.9546\n",
"Epoch 108/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1209 - accuracy: 0.9607\n",
"Epoch 109/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1405 - accuracy: 0.9527\n",
"Epoch 110/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.1227 - accuracy: 0.9601\n",
"Epoch 111/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.1024 - accuracy: 0.9667\n",
"Epoch 112/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1210 - accuracy: 0.9612\n",
"Epoch 113/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 0.1590 - accuracy: 0.9471\n",
"Epoch 114/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 0.1227 - accuracy: 0.9577\n",
"Epoch 115/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.1127 - accuracy: 0.9631\n",
"Epoch 116/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.1281 - accuracy: 0.9588\n",
"Epoch 117/300\n",
"128/128 [==============================] - 3s 22ms/step - loss: 0.1206 - accuracy: 0.9595\n",
"Epoch 118/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 0.1277 - accuracy: 0.9572\n",
"Epoch 119/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1230 - accuracy: 0.9589\n",
"Epoch 120/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1178 - accuracy: 0.9629\n",
"Epoch 121/300\n",
"128/128 [==============================] - 3s 22ms/step - loss: 0.1342 - accuracy: 0.9579\n",
"Epoch 122/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.1227 - accuracy: 0.9605\n",
"Epoch 123/300\n",
"128/128 [==============================] - 2s 19ms/step - loss: 0.1244 - accuracy: 0.9578\n",
"Epoch 124/300\n",
"128/128 [==============================] - 3s 22ms/step - loss: 0.1167 - accuracy: 0.9623\n",
"Epoch 125/300\n",
"128/128 [==============================] - 3s 25ms/step - loss: 0.0986 - accuracy: 0.9676\n",
"Epoch 126/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1144 - accuracy: 0.9618\n",
"Epoch 127/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1233 - accuracy: 0.9580\n",
"Epoch 128/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1451 - accuracy: 0.9522\n",
"Epoch 129/300\n",
"128/128 [==============================] - 2s 17ms/step - loss: 0.1042 - accuracy: 0.9661\n",
"Epoch 130/300\n",
"128/128 [==============================] - 3s 23ms/step - loss: 0.1094 - accuracy: 0.9647\n",
"Epoch 131/300\n",
"128/128 [==============================] - 3s 23ms/step - loss: 0.1149 - accuracy: 0.9641\n",
"Epoch 132/300\n",
"128/128 [==============================] - 3s 20ms/step - loss: 0.1082 - accuracy: 0.9623\n",
"Epoch 133/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1355 - accuracy: 0.9561\n",
"Epoch 134/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1424 - accuracy: 0.9562\n",
"Epoch 135/300\n",
"128/128 [==============================] - 2s 18ms/step - loss: 0.1010 - accuracy: 0.9669\n",
"Epoch 136/300\n",
"128/128 [==============================] - 3s 21ms/step - loss: 0.0910 - accuracy: 0.9691\n",
"Epoch 137/300\n",
" 45/128 [=========>....................] - ETA: 1s - loss: 0.1442 - accuracy: 0.9545"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_57768/2396853543.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 12\u001b[0m optimizer=keras.optimizers.Adam(), metrics=['accuracy'])\n\u001b[0;32m 13\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 14\u001b[1;33m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtrain_generator\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msteps_per_epoch\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mepochs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mepochs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 15\u001b[0m \u001b[1;31m# model.fit(x_train, y_train, steps_per_epoch=batch_size, epochs=epochs)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mc:\\Users\\Vincent\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\wandb\\integration\\keras\\keras.py\u001b[0m in \u001b[0;36mnew_v2\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 172\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mcbk\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mcbks\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 173\u001b[0m \u001b[0mset_wandb_attrs\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcbk\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mval_data\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 174\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mold_v2\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 175\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 176\u001b[0m \u001b[0mtraining_arrays\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0morig_fit_loop\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mold_arrays\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mc:\\Users\\Vincent\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\wandb\\integration\\keras\\keras.py\u001b[0m in \u001b[0;36mnew_v2\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 172\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mcbk\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mcbks\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 173\u001b[0m \u001b[0mset_wandb_attrs\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcbk\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mval_data\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 174\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mold_v2\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 175\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 176\u001b[0m \u001b[0mtraining_arrays\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0morig_fit_loop\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mold_arrays\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mc:\\Users\\Vincent\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\wandb\\integration\\keras\\keras.py\u001b[0m in \u001b[0;36mnew_v2\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 172\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mcbk\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mcbks\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 173\u001b[0m \u001b[0mset_wandb_attrs\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcbk\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mval_data\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 174\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mold_v2\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 175\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 176\u001b[0m \u001b[0mtraining_arrays\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0morig_fit_loop\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mold_arrays\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mc:\\Users\\Vincent\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\keras\\utils\\traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 62\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 63\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 64\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 65\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m \u001b[1;31m# pylint: disable=broad-except\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 66\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mc:\\Users\\Vincent\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\keras\\engine\\training.py\u001b[0m in \u001b[0;36mfit\u001b[1;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)\u001b[0m\n\u001b[0;32m 1382\u001b[0m _r=1):\n\u001b[0;32m 1383\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mon_train_batch_begin\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstep\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1384\u001b[1;33m \u001b[0mtmp_logs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain_function\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0miterator\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1385\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mdata_handler\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshould_sync\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1386\u001b[0m \u001b[0mcontext\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0masync_wait\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mc:\\Users\\Vincent\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\tensorflow\\python\\util\\traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 148\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 149\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 150\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 151\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 152\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mc:\\Users\\Vincent\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\tensorflow\\python\\eager\\def_function.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *args, **kwds)\u001b[0m\n\u001b[0;32m 913\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 914\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0mOptionalXlaContext\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_jit_compile\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 915\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 916\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 917\u001b[0m \u001b[0mnew_tracing_count\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mexperimental_get_tracing_count\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mc:\\Users\\Vincent\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\tensorflow\\python\\eager\\def_function.py\u001b[0m in \u001b[0;36m_call\u001b[1;34m(self, *args, **kwds)\u001b[0m\n\u001b[0;32m 945\u001b[0m \u001b[1;31m# In this case we have created variables on the first call, so we run the\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 946\u001b[0m \u001b[1;31m# defunned version which is guaranteed to never create variables.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 947\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_stateless_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;31m# pylint: disable=not-callable\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 948\u001b[0m \u001b[1;32melif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_stateful_fn\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 949\u001b[0m \u001b[1;31m# Release the lock early so that multiple threads can perform the call\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mc:\\Users\\Vincent\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\tensorflow\\python\\eager\\function.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 2954\u001b[0m (graph_function,\n\u001b[0;32m 2955\u001b[0m filtered_flat_args) = self._maybe_define_function(args, kwargs)\n\u001b[1;32m-> 2956\u001b[1;33m return graph_function._call_flat(\n\u001b[0m\u001b[0;32m 2957\u001b[0m filtered_flat_args, captured_inputs=graph_function.captured_inputs) # pylint: disable=protected-access\n\u001b[0;32m 2958\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mc:\\Users\\Vincent\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\tensorflow\\python\\eager\\function.py\u001b[0m in \u001b[0;36m_call_flat\u001b[1;34m(self, args, captured_inputs, cancellation_manager)\u001b[0m\n\u001b[0;32m 1851\u001b[0m and executing_eagerly):\n\u001b[0;32m 1852\u001b[0m \u001b[1;31m# No tape is watching; skip to running the function.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1853\u001b[1;33m return self._build_call_outputs(self._inference_function.call(\n\u001b[0m\u001b[0;32m 1854\u001b[0m ctx, args, cancellation_manager=cancellation_manager))\n\u001b[0;32m 1855\u001b[0m forward_backward = self._select_forward_and_backward_functions(\n",
"\u001b[1;32mc:\\Users\\Vincent\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\tensorflow\\python\\eager\\function.py\u001b[0m in \u001b[0;36mcall\u001b[1;34m(self, ctx, args, cancellation_manager)\u001b[0m\n\u001b[0;32m 497\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0m_InterpolateFunctionError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 498\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mcancellation_manager\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 499\u001b[1;33m outputs = execute.execute(\n\u001b[0m\u001b[0;32m 500\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msignature\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 501\u001b[0m \u001b[0mnum_outputs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_num_outputs\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32mc:\\Users\\Vincent\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\tensorflow\\python\\eager\\execute.py\u001b[0m in \u001b[0;36mquick_execute\u001b[1;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001b[0m\n\u001b[0;32m 52\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 53\u001b[0m \u001b[0mctx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mensure_initialized\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 54\u001b[1;33m tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,\n\u001b[0m\u001b[0;32m 55\u001b[0m inputs, attrs, num_outputs)\n\u001b[0;32m 56\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mcore\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_NotOkStatusException\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"gen = keras.preprocessing.image.ImageDataGenerator()\n",
"\n",
"# Batch and epoch sizes are arbitrary.\n",
"batch_size = 128\n",
"epochs = 300\n",
"# NOTE(review): pixels are reshaped to (48, 48, 1) when the dataset is\n",
"# loaded; these 128x128 values look stale -- confirm they are still needed.\n",
"image_shape = (128, 128, 1)\n",
"image_size = (128, 128)\n",
"\n",
"train_generator = gen.flow(x_train, y_train, batch_size=batch_size)\n",
"\n",
"model.compile(loss='categorical_crossentropy',\n",
"              optimizer=keras.optimizers.Adam(), metrics=['accuracy'])\n",
"\n",
"# steps_per_epoch is the number of batches drawn per epoch, not the batch\n",
"# size itself; derive it from the training-set size so every sample is\n",
"# seen once per epoch.\n",
"model.fit(train_generator, steps_per_epoch=len(x_train) // batch_size, epochs=epochs)\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Training with Weights & Biases (wandb)"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"Finishing last run (ID:l8f2yfp4) before initializing another..."
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Waiting for W&B process to finish... <strong style=\"color:green\">(success).</strong>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<style>\n",
" table.wandb td:nth-child(1) { padding: 0 10px; text-align: left ; width: auto;} td:nth-child(2) {text-align: left ; width: 100%}\n",
" .wandb-row { display: flex; flex-direction: row; flex-wrap: wrap; justify-content: flex-start; width: 100% }\n",
" .wandb-col { display: flex; flex-direction: column; flex-basis: 100%; flex: 1; padding: 10px; }\n",
" </style>\n",
"<div class=\"wandb-row\"><div class=\"wandb-col\"><h3>Run history:</h3><br/><table class=\"wandb\"><tr><td>accuracy</td><td>▁</td></tr><tr><td>epoch</td><td>▁</td></tr><tr><td>loss</td><td>▁</td></tr><tr><td>val_accuracy</td><td>▁</td></tr><tr><td>val_loss</td><td>▁</td></tr></table><br/></div><div class=\"wandb-col\"><h3>Run summary:</h3><br/><table class=\"wandb\"><tr><td>accuracy</td><td>0.2362</td></tr><tr><td>best_epoch</td><td>0</td></tr><tr><td>best_val_loss</td><td>1.81816</td></tr><tr><td>epoch</td><td>0</td></tr><tr><td>loss</td><td>2.04756</td></tr><tr><td>val_accuracy</td><td>0.24965</td></tr><tr><td>val_loss</td><td>1.81816</td></tr></table><br/></div></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
" View run <strong style=\"color:#cdcd00\">dazzling-leaf-8</strong> at: <a href='https://wandb.ai/vincents/emotion-classification/runs/l8f2yfp4' target=\"_blank\">https://wandb.ai/vincents/emotion-classification/runs/l8f2yfp4</a><br/>Synced 5 W&B file(s), 37 media file(s), 4 artifact file(s) and 1 other file(s)"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Find logs at: <code>.\\wandb\\run-20230516_105400-l8f2yfp4\\logs</code>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Successfully finished last run (ID:l8f2yfp4). Initializing new run:<br/>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Tracking run with wandb version 0.15.2"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Run data is saved locally in <code>c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta</code>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Syncing run <strong><a href='https://wandb.ai/vincents/emotion-classification/runs/gb41t2ta' target=\"_blank\">vital-wave-9</a></strong> to <a href='https://wandb.ai/vincents/emotion-classification' target=\"_blank\">Weights & Biases</a> (<a href='https://wandb.me/run' target=\"_blank\">docs</a>)<br/>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
" View project at <a href='https://wandb.ai/vincents/emotion-classification' target=\"_blank\">https://wandb.ai/vincents/emotion-classification</a>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
" View run at <a href='https://wandb.ai/vincents/emotion-classification/runs/gb41t2ta' target=\"_blank\">https://wandb.ai/vincents/emotion-classification/runs/gb41t2ta</a>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/300\n",
"127/128 [============================>.] - ETA: 0s - loss: 1.8249 - accuracy: 0.2483INFO:tensorflow:Assets written to: c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best\\assets\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best)... Done. 0.2s\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"128/128 [==============================] - 14s 87ms/step - loss: 1.8243 - accuracy: 0.2491 - val_loss: 1.8012 - val_accuracy: 0.2597\n",
"Epoch 2/300\n",
"127/128 [============================>.] - ETA: 0s - loss: 1.7413 - accuracy: 0.2877INFO:tensorflow:Assets written to: c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best\\assets\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best)... Done. 0.2s\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"128/128 [==============================] - 10s 77ms/step - loss: 1.7406 - accuracy: 0.2880 - val_loss: 1.6436 - val_accuracy: 0.3369\n",
"Epoch 3/300\n",
"127/128 [============================>.] - ETA: 0s - loss: 1.6035 - accuracy: 0.3652INFO:tensorflow:Assets written to: c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best\\assets\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best)... Done. 0.2s\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"128/128 [==============================] - 10s 76ms/step - loss: 1.6024 - accuracy: 0.3657 - val_loss: 1.5916 - val_accuracy: 0.3742\n",
"Epoch 4/300\n",
"128/128 [==============================] - ETA: 0s - loss: 1.5022 - accuracy: 0.4152INFO:tensorflow:Assets written to: c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best\\assets\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best)... Done. 0.2s\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"128/128 [==============================] - 10s 76ms/step - loss: 1.5022 - accuracy: 0.4152 - val_loss: 1.4435 - val_accuracy: 0.4358\n",
"Epoch 5/300\n",
"128/128 [==============================] - 6s 43ms/step - loss: 1.4442 - accuracy: 0.4424 - val_loss: 1.4616 - val_accuracy: 0.4422\n",
"Epoch 6/300\n",
"128/128 [==============================] - ETA: 0s - loss: 1.3823 - accuracy: 0.4675INFO:tensorflow:Assets written to: c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best\\assets\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best)... Done. 0.2s\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"128/128 [==============================] - 11s 83ms/step - loss: 1.3823 - accuracy: 0.4675 - val_loss: 1.3840 - val_accuracy: 0.4653\n",
"Epoch 7/300\n",
"126/128 [============================>.] - ETA: 0s - loss: 1.3514 - accuracy: 0.4828INFO:tensorflow:Assets written to: c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best\\assets\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best)... Done. 0.2s\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"128/128 [==============================] - 11s 83ms/step - loss: 1.3514 - accuracy: 0.4822 - val_loss: 1.3511 - val_accuracy: 0.4859\n",
"Epoch 8/300\n",
"127/128 [============================>.] - ETA: 0s - loss: 1.3016 - accuracy: 0.5009INFO:tensorflow:Assets written to: c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best\\assets\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best)... Done. 0.2s\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"128/128 [==============================] - 10s 77ms/step - loss: 1.3016 - accuracy: 0.5004 - val_loss: 1.3115 - val_accuracy: 0.4943\n",
"Epoch 9/300\n",
"128/128 [==============================] - 6s 43ms/step - loss: 1.2739 - accuracy: 0.5150 - val_loss: 1.3404 - val_accuracy: 0.4831\n",
"Epoch 10/300\n",
"128/128 [==============================] - ETA: 0s - loss: 1.2392 - accuracy: 0.5264INFO:tensorflow:Assets written to: c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best\\assets\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best)... Done. 0.2s\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"128/128 [==============================] - 11s 86ms/step - loss: 1.2392 - accuracy: 0.5264 - val_loss: 1.2924 - val_accuracy: 0.4999\n",
"Epoch 11/300\n",
"126/128 [============================>.] - ETA: 0s - loss: 1.1975 - accuracy: 0.5425INFO:tensorflow:Assets written to: c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best\\assets\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best)... Done. 0.2s\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"128/128 [==============================] - 11s 82ms/step - loss: 1.1965 - accuracy: 0.5427 - val_loss: 1.2498 - val_accuracy: 0.5224\n",
"Epoch 12/300\n",
"128/128 [==============================] - 6s 45ms/step - loss: 1.1648 - accuracy: 0.5555 - val_loss: 1.2731 - val_accuracy: 0.5180\n",
"Epoch 13/300\n",
"128/128 [==============================] - 6s 43ms/step - loss: 1.1416 - accuracy: 0.5670 - val_loss: 1.2598 - val_accuracy: 0.5280\n",
"Epoch 14/300\n",
"128/128 [==============================] - ETA: 0s - loss: 1.1291 - accuracy: 0.5750INFO:tensorflow:Assets written to: c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best\\assets\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best)... Done. 0.3s\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"128/128 [==============================] - 11s 83ms/step - loss: 1.1291 - accuracy: 0.5750 - val_loss: 1.2457 - val_accuracy: 0.5291\n",
"Epoch 15/300\n",
"126/128 [============================>.] - ETA: 0s - loss: 1.0938 - accuracy: 0.5871INFO:tensorflow:Assets written to: c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best\\assets\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (c:\\Users\\Vincent\\OneDrive - UMONS\\Documents\\emotions_with_keras\\wandb\\run-20230516_105444-gb41t2ta\\files\\model-best)... Done. 0.2s\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"128/128 [==============================] - 10s 81ms/step - loss: 1.0932 - accuracy: 0.5869 - val_loss: 1.2392 - val_accuracy: 0.5386\n",
"Epoch 16/300\n",
"128/128 [==============================] - 7s 51ms/step - loss: 1.0501 - accuracy: 0.6055 - val_loss: 1.2521 - val_accuracy: 0.5344\n",
"Epoch 17/300\n",
"128/128 [==============================] - 5s 41ms/step - loss: 1.0287 - accuracy: 0.6101 - val_loss: 1.2545 - val_accuracy: 0.5325\n",
"Epoch 18/300\n",
"128/128 [==============================] - 6s 47ms/step - loss: 1.0099 - accuracy: 0.6221 - val_loss: 1.2628 - val_accuracy: 0.5358\n",
"Epoch 19/300\n",
"128/128 [==============================] - 7s 51ms/step - loss: 0.9784 - accuracy: 0.6309 - val_loss: 1.2493 - val_accuracy: 0.5475\n",
"Epoch 20/300\n",
"128/128 [==============================] - 7s 51ms/step - loss: 0.9474 - accuracy: 0.6465 - val_loss: 1.2840 - val_accuracy: 0.5391\n",
"Epoch 21/300\n",
"128/128 [==============================] - 7s 52ms/step - loss: 0.9053 - accuracy: 0.6580 - val_loss: 1.2767 - val_accuracy: 0.5492\n",
"Epoch 22/300\n",
"128/128 [==============================] - 6s 44ms/step - loss: 0.8807 - accuracy: 0.6722 - val_loss: 1.3167 - val_accuracy: 0.5464\n",
"Epoch 23/300\n",
"128/128 [==============================] - 6s 43ms/step - loss: 0.8471 - accuracy: 0.6825 - val_loss: 1.3115 - val_accuracy: 0.5403\n",
"Epoch 24/300\n",
"128/128 [==============================] - 5s 41ms/step - loss: 0.8060 - accuracy: 0.7005 - val_loss: 1.3176 - val_accuracy: 0.5514\n",
"Epoch 25/300\n",
"128/128 [==============================] - 7s 57ms/step - loss: 0.7844 - accuracy: 0.7071 - val_loss: 1.3398 - val_accuracy: 0.5467\n",
"Epoch 26/300\n",
"128/128 [==============================] - 6s 49ms/step - loss: 0.7425 - accuracy: 0.7253 - val_loss: 1.4042 - val_accuracy: 0.5405\n",
"Epoch 27/300\n",
"128/128 [==============================] - 6s 46ms/step - loss: 0.6860 - accuracy: 0.7458 - val_loss: 1.4474 - val_accuracy: 0.5553\n",
"Epoch 28/300\n",
"128/128 [==============================] - 7s 56ms/step - loss: 0.6693 - accuracy: 0.7499 - val_loss: 1.4420 - val_accuracy: 0.5567\n",
"Epoch 29/300\n",
"128/128 [==============================] - 7s 53ms/step - loss: 0.6419 - accuracy: 0.7637 - val_loss: 1.4690 - val_accuracy: 0.5578\n",
"Epoch 30/300\n",
"128/128 [==============================] - 7s 56ms/step - loss: 0.6373 - accuracy: 0.7662 - val_loss: 1.4857 - val_accuracy: 0.5503\n",
"Epoch 31/300\n",
"128/128 [==============================] - 6s 44ms/step - loss: 0.5958 - accuracy: 0.7827 - val_loss: 1.5619 - val_accuracy: 0.5472\n",
"Epoch 32/300\n",
"128/128 [==============================] - 7s 54ms/step - loss: 0.5724 - accuracy: 0.7911 - val_loss: 1.5975 - val_accuracy: 0.5478\n",
"Epoch 33/300\n",
"128/128 [==============================] - 7s 57ms/step - loss: 0.5329 - accuracy: 0.8040 - val_loss: 1.7070 - val_accuracy: 0.5528\n",
"Epoch 34/300\n",
"128/128 [==============================] - 7s 53ms/step - loss: 0.4991 - accuracy: 0.8194 - val_loss: 1.7840 - val_accuracy: 0.5581\n",
"Epoch 35/300\n",
"128/128 [==============================] - 7s 52ms/step - loss: 0.4793 - accuracy: 0.8269 - val_loss: 1.8621 - val_accuracy: 0.5461\n",
"Epoch 36/300\n",
"128/128 [==============================] - 6s 47ms/step - loss: 0.4765 - accuracy: 0.8286 - val_loss: 1.7893 - val_accuracy: 0.5492\n",
"Epoch 37/300\n",
"128/128 [==============================] - 6s 48ms/step - loss: 0.4324 - accuracy: 0.8446 - val_loss: 1.8654 - val_accuracy: 0.5550\n",
"Epoch 38/300\n",
"128/128 [==============================] - 9s 68ms/step - loss: 0.4151 - accuracy: 0.8490 - val_loss: 1.8698 - val_accuracy: 0.5578\n",
"Epoch 39/300\n",
"128/128 [==============================] - 7s 57ms/step - loss: 0.4013 - accuracy: 0.8552 - val_loss: 2.0011 - val_accuracy: 0.5492\n",
"Epoch 40/300\n",
"128/128 [==============================] - 6s 48ms/step - loss: 0.3863 - accuracy: 0.8611 - val_loss: 1.9482 - val_accuracy: 0.5536\n",
"Epoch 41/300\n",
"128/128 [==============================] - 7s 53ms/step - loss: 0.3725 - accuracy: 0.8646 - val_loss: 2.1449 - val_accuracy: 0.5539\n",
"Epoch 42/300\n",
"128/128 [==============================] - 6s 44ms/step - loss: 0.3453 - accuracy: 0.8761 - val_loss: 2.1732 - val_accuracy: 0.5475\n",
"Epoch 43/300\n",
"128/128 [==============================] - 7s 51ms/step - loss: 0.3384 - accuracy: 0.8775 - val_loss: 2.2212 - val_accuracy: 0.5550\n",
"Epoch 44/300\n",
"128/128 [==============================] - 6s 48ms/step - loss: 0.3157 - accuracy: 0.8849 - val_loss: 2.2126 - val_accuracy: 0.5361\n",
"Epoch 45/300\n",
"128/128 [==============================] - 7s 54ms/step - loss: 0.3152 - accuracy: 0.8871 - val_loss: 2.3328 - val_accuracy: 0.5486\n",
"Epoch 46/300\n",
"128/128 [==============================] - 8s 63ms/step - loss: 0.2847 - accuracy: 0.9005 - val_loss: 2.4204 - val_accuracy: 0.5598\n",
"Epoch 47/300\n",
"128/128 [==============================] - 10s 78ms/step - loss: 0.2990 - accuracy: 0.8925 - val_loss: 2.3677 - val_accuracy: 0.5550\n",
"Epoch 48/300\n",
"128/128 [==============================] - 10s 73ms/step - loss: 0.2700 - accuracy: 0.9014 - val_loss: 2.3853 - val_accuracy: 0.5433\n",
"Epoch 49/300\n",
"128/128 [==============================] - 8s 59ms/step - loss: 0.2657 - accuracy: 0.9032 - val_loss: 2.5410 - val_accuracy: 0.5550\n",
"Epoch 50/300\n",
"128/128 [==============================] - 8s 62ms/step - loss: 0.2493 - accuracy: 0.9134 - val_loss: 2.4437 - val_accuracy: 0.5483\n",
"Epoch 51/300\n",
"128/128 [==============================] - 8s 62ms/step - loss: 0.2519 - accuracy: 0.9097 - val_loss: 2.6863 - val_accuracy: 0.5570\n",
"Epoch 52/300\n",
"128/128 [==============================] - 8s 60ms/step - loss: 0.2490 - accuracy: 0.9142 - val_loss: 2.4397 - val_accuracy: 0.5570\n",
"Epoch 53/300\n",
"128/128 [==============================] - 8s 65ms/step - loss: 0.2399 - accuracy: 0.9166 - val_loss: 2.5987 - val_accuracy: 0.5450\n",
"Epoch 54/300\n",
"128/128 [==============================] - 8s 58ms/step - loss: 0.2200 - accuracy: 0.9230 - val_loss: 2.6906 - val_accuracy: 0.5626\n",
"Epoch 55/300\n",
"128/128 [==============================] - 7s 54ms/step - loss: 0.2133 - accuracy: 0.9257 - val_loss: 2.7780 - val_accuracy: 0.5539\n",
"Epoch 56/300\n",
"128/128 [==============================] - 6s 44ms/step - loss: 0.2123 - accuracy: 0.9259 - val_loss: 2.8422 - val_accuracy: 0.5453\n",
"Epoch 57/300\n",
"128/128 [==============================] - 5s 39ms/step - loss: 0.2172 - accuracy: 0.9238 - val_loss: 2.7059 - val_accuracy: 0.5520\n",
"Epoch 58/300\n",
"128/128 [==============================] - 5s 42ms/step - loss: 0.1934 - accuracy: 0.9327 - val_loss: 2.7811 - val_accuracy: 0.5506\n",
"Epoch 59/300\n",
"128/128 [==============================] - 5s 41ms/step - loss: 0.2080 - accuracy: 0.9250 - val_loss: 2.8946 - val_accuracy: 0.5486\n",
"Epoch 60/300\n",
"128/128 [==============================] - 5s 42ms/step - loss: 0.1946 - accuracy: 0.9333 - val_loss: 2.9259 - val_accuracy: 0.5436\n",
"Epoch 61/300\n",
"128/128 [==============================] - 5s 42ms/step - loss: 0.2270 - accuracy: 0.9206 - val_loss: 3.0898 - val_accuracy: 0.5542\n",
"Epoch 62/300\n",
"128/128 [==============================] - 5s 40ms/step - loss: 0.1575 - accuracy: 0.9441 - val_loss: 3.1171 - val_accuracy: 0.5517\n",
"Epoch 63/300\n",
"128/128 [==============================] - 6s 45ms/step - loss: 0.1851 - accuracy: 0.9354 - val_loss: 2.9162 - val_accuracy: 0.5573\n",
"Epoch 64/300\n",
"128/128 [==============================] - 5s 41ms/step - loss: 0.1572 - accuracy: 0.9451 - val_loss: 3.2835 - val_accuracy: 0.5531\n",
"Epoch 65/300\n",
"128/128 [==============================] - 6s 45ms/step - loss: 0.1707 - accuracy: 0.9408 - val_loss: 3.0406 - val_accuracy: 0.5397\n",
"Epoch 66/300\n",
"128/128 [==============================] - 5s 42ms/step - loss: 0.1631 - accuracy: 0.9415 - val_loss: 3.1832 - val_accuracy: 0.5475\n",
"Epoch 67/300\n",
"128/128 [==============================] - 5s 40ms/step - loss: 0.1552 - accuracy: 0.9454 - val_loss: 3.3466 - val_accuracy: 0.5506\n",
"Epoch 68/300\n",
"128/128 [==============================] - 5s 41ms/step - loss: 0.1656 - accuracy: 0.9434 - val_loss: 3.1675 - val_accuracy: 0.5520\n",
"Epoch 69/300\n",
"128/128 [==============================] - 5s 41ms/step - loss: 0.1542 - accuracy: 0.9467 - val_loss: 3.2296 - val_accuracy: 0.5573\n",
"Epoch 70/300\n",
"128/128 [==============================] - 5s 41ms/step - loss: 0.1527 - accuracy: 0.9461 - val_loss: 3.1687 - val_accuracy: 0.5458\n",
"Epoch 71/300\n",
"128/128 [==============================] - 6s 43ms/step - loss: 0.1584 - accuracy: 0.9459 - val_loss: 3.2012 - val_accuracy: 0.5481\n",
"Epoch 72/300\n",
"128/128 [==============================] - 5s 40ms/step - loss: 0.1584 - accuracy: 0.9453 - val_loss: 3.0802 - val_accuracy: 0.5489\n",
"Epoch 73/300\n",
"128/128 [==============================] - 5s 43ms/step - loss: 0.1536 - accuracy: 0.9496 - val_loss: 3.1408 - val_accuracy: 0.5581\n",
"Epoch 74/300\n",
"128/128 [==============================] - 5s 41ms/step - loss: 0.1253 - accuracy: 0.9590 - val_loss: 3.2236 - val_accuracy: 0.5525\n",
"Epoch 75/300\n",
"128/128 [==============================] - 6s 45ms/step - loss: 0.1521 - accuracy: 0.9492 - val_loss: 3.1938 - val_accuracy: 0.5369\n",
"Epoch 76/300\n",
"128/128 [==============================] - 5s 41ms/step - loss: 0.1676 - accuracy: 0.9430 - val_loss: 3.3184 - val_accuracy: 0.5458\n",
"Epoch 77/300\n",
"128/128 [==============================] - 6s 44ms/step - loss: 0.1429 - accuracy: 0.9543 - val_loss: 3.3849 - val_accuracy: 0.5511\n",
"Epoch 78/300\n",
"128/128 [==============================] - 6s 45ms/step - loss: 0.1412 - accuracy: 0.9528 - val_loss: 3.4873 - val_accuracy: 0.5520\n",
"Epoch 79/300\n",
"128/128 [==============================] - 7s 52ms/step - loss: 0.1443 - accuracy: 0.9508 - val_loss: 3.2795 - val_accuracy: 0.5400\n",
"Epoch 80/300\n",
"128/128 [==============================] - 6s 49ms/step - loss: 0.1526 - accuracy: 0.9488 - val_loss: 3.2916 - val_accuracy: 0.5603\n",
"Epoch 81/300\n",
"128/128 [==============================] - 7s 55ms/step - loss: 0.1401 - accuracy: 0.9519 - val_loss: 3.3555 - val_accuracy: 0.5536\n",
"Epoch 82/300\n",
"128/128 [==============================] - 7s 52ms/step - loss: 0.1455 - accuracy: 0.9497 - val_loss: 3.2454 - val_accuracy: 0.5483\n",
"Epoch 83/300\n",
"128/128 [==============================] - 6s 47ms/step - loss: 0.1440 - accuracy: 0.9486 - val_loss: 3.5384 - val_accuracy: 0.5442\n",
"Epoch 84/300\n",
"128/128 [==============================] - 5s 42ms/step - loss: 0.1111 - accuracy: 0.9638 - val_loss: 3.4603 - val_accuracy: 0.5609\n",
"Epoch 85/300\n",
"128/128 [==============================] - 6s 43ms/step - loss: 0.1564 - accuracy: 0.9479 - val_loss: 3.2920 - val_accuracy: 0.5450\n",
"Epoch 86/300\n",
"128/128 [==============================] - 8s 61ms/step - loss: 0.1155 - accuracy: 0.9611 - val_loss: 3.5356 - val_accuracy: 0.5447\n",
"Epoch 87/300\n",
"128/128 [==============================] - 7s 54ms/step - loss: 0.1395 - accuracy: 0.9522 - val_loss: 3.2904 - val_accuracy: 0.5439\n",
"Epoch 88/300\n",
"128/128 [==============================] - 6s 48ms/step - loss: 0.1192 - accuracy: 0.9573 - val_loss: 3.3654 - val_accuracy: 0.5450\n",
"Epoch 89/300\n",
"128/128 [==============================] - 6s 50ms/step - loss: 0.1245 - accuracy: 0.9570 - val_loss: 3.5589 - val_accuracy: 0.5467\n",
"Epoch 90/300\n",
"128/128 [==============================] - 7s 51ms/step - loss: 0.1229 - accuracy: 0.9602 - val_loss: 3.5770 - val_accuracy: 0.5417\n",
"Epoch 91/300\n",
"128/128 [==============================] - 7s 53ms/step - loss: 0.1506 - accuracy: 0.9513 - val_loss: 3.4320 - val_accuracy: 0.5419\n",
"Epoch 92/300\n",
"128/128 [==============================] - 7s 58ms/step - loss: 0.1236 - accuracy: 0.9583 - val_loss: 3.4313 - val_accuracy: 0.5478\n",
"Epoch 93/300\n",
"128/128 [==============================] - 9s 67ms/step - loss: 0.1176 - accuracy: 0.9615 - val_loss: 3.5019 - val_accuracy: 0.5548\n",
"Epoch 94/300\n",
"128/128 [==============================] - 8s 58ms/step - loss: 0.1033 - accuracy: 0.9669 - val_loss: 3.6996 - val_accuracy: 0.5344\n",
"Epoch 95/300\n",
"128/128 [==============================] - 7s 51ms/step - loss: 0.1399 - accuracy: 0.9522 - val_loss: 3.5506 - val_accuracy: 0.5450\n",
"Epoch 96/300\n",
"128/128 [==============================] - 7s 52ms/step - loss: 0.1086 - accuracy: 0.9629 - val_loss: 3.7280 - val_accuracy: 0.5514\n",
"Epoch 97/300\n",
"128/128 [==============================] - 6s 48ms/step - loss: 0.1565 - accuracy: 0.9487 - val_loss: 3.5658 - val_accuracy: 0.5461\n",
"Epoch 98/300\n",
"128/128 [==============================] - 7s 56ms/step - loss: 0.1123 - accuracy: 0.9617 - val_loss: 3.8387 - val_accuracy: 0.5587\n",
"Epoch 99/300\n",
"128/128 [==============================] - 6s 47ms/step - loss: 0.1118 - accuracy: 0.9640 - val_loss: 3.6447 - val_accuracy: 0.5467\n",
"Epoch 100/300\n",
"128/128 [==============================] - 6s 46ms/step - loss: 0.1035 - accuracy: 0.9644 - val_loss: 3.7561 - val_accuracy: 0.5411\n",
"Epoch 101/300\n",
"128/128 [==============================] - 6s 44ms/step - loss: 0.1174 - accuracy: 0.9628 - val_loss: 3.7154 - val_accuracy: 0.5556\n",
"Epoch 102/300\n",
"128/128 [==============================] - 6s 43ms/step - loss: 0.1458 - accuracy: 0.9524 - val_loss: 3.3345 - val_accuracy: 0.5559\n",
"Epoch 103/300\n",
"128/128 [==============================] - 5s 38ms/step - loss: 0.1249 - accuracy: 0.9584 - val_loss: 3.6254 - val_accuracy: 0.5536\n",
"Epoch 104/300\n",
"128/128 [==============================] - 5s 41ms/step - loss: 0.1142 - accuracy: 0.9604 - val_loss: 3.6865 - val_accuracy: 0.5598\n",
"Epoch 105/300\n",
"128/128 [==============================] - 5s 40ms/step - loss: 0.1205 - accuracy: 0.9605 - val_loss: 3.6990 - val_accuracy: 0.5506\n",
"Epoch 106/300\n",
"128/128 [==============================] - 5s 40ms/step - loss: 0.1241 - accuracy: 0.9589 - val_loss: 3.5510 - val_accuracy: 0.5528\n",
"Epoch 107/300\n",
"128/128 [==============================] - 5s 41ms/step - loss: 0.1282 - accuracy: 0.9584 - val_loss: 3.6177 - val_accuracy: 0.5553\n",
"Epoch 108/300\n",
"128/128 [==============================] - 5s 39ms/step - loss: 0.1178 - accuracy: 0.9611 - val_loss: 3.7374 - val_accuracy: 0.5614\n",
"Epoch 109/300\n",
"128/128 [==============================] - 6s 47ms/step - loss: 0.1062 - accuracy: 0.9642 - val_loss: 3.7431 - val_accuracy: 0.5556\n",
"Epoch 110/300\n",
"128/128 [==============================] - 7s 53ms/step - loss: 0.1135 - accuracy: 0.9630 - val_loss: 3.7662 - val_accuracy: 0.5433\n",
"Epoch 111/300\n",
"128/128 [==============================] - 7s 53ms/step - loss: 0.0958 - accuracy: 0.9674 - val_loss: 3.9025 - val_accuracy: 0.5461\n",
"Epoch 112/300\n",
"128/128 [==============================] - 6s 48ms/step - loss: 0.1127 - accuracy: 0.9618 - val_loss: 3.8452 - val_accuracy: 0.5486\n",
"Epoch 113/300\n",
"128/128 [==============================] - 6s 49ms/step - loss: 0.1318 - accuracy: 0.9567 - val_loss: 3.4513 - val_accuracy: 0.5447\n",
"Epoch 114/300\n",
"128/128 [==============================] - 7s 53ms/step - loss: 0.1275 - accuracy: 0.9578 - val_loss: 3.5793 - val_accuracy: 0.5453\n",
"Epoch 115/300\n",
"128/128 [==============================] - 6s 45ms/step - loss: 0.1064 - accuracy: 0.9645 - val_loss: 3.7907 - val_accuracy: 0.5481\n"
]
},
{
"data": {
"text/plain": [
"<keras.callbacks.History at 0x19ceea437c0>"
]
},
"execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import wandb\n",
"from wandb.keras import WandbCallback\n",
"\n",
"gen = keras.preprocessing.image.ImageDataGenerator()\n",
"\n",
"# Batch and epoch sizes are arbitrary.\n",
"batch_size = 128\n",
"epochs = 300\n",
"image_shape = (128, 128, 1)\n",
"image_size = (128, 128)\n",
"\n",
"# Log hyperparameters and metrics with Weights & Biases.\n",
"run = wandb.init(project='emotion-classification', entity='vincents', config={\n",
"    \"learning_rate\": 0.001,\n",
"    \"epochs\": epochs,\n",
"    \"batch_size\": batch_size,\n",
"    \"loss_function\": \"categorical_crossentropy\",\n",
"    \"architecture\": \"CNN\",\n",
"    \"dataset\": \"fer2013\",\n",
"    \"num_classes\": number_of_classes,\n",
"})\n",
"\n",
"train_generator = gen.flow(x_train, y_train, batch_size=batch_size)\n",
"\n",
"model.compile(loss='categorical_crossentropy',\n",
"              optimizer=keras.optimizers.Adam(), metrics=['accuracy'])\n",
"\n",
"# FER2013 emotion classes, in label order 0..6.\n",
"emotion_labels = [\n",
"    \"Angry\", \"Disgust\", \"Fear\", \"Happy\", \"Sad\", \"Surprise\", \"Neutral\"]\n",
"\n",
"# steps_per_epoch is the number of batches drawn per epoch, not the batch\n",
"# size itself; derive it from the training-set size.\n",
"model.fit(\n",
"    train_generator,\n",
"    steps_per_epoch=len(x_train) // batch_size,\n",
"    epochs=epochs,\n",
"    validation_data=(x_test, y_test),\n",
"    callbacks=[\n",
"        WandbCallback(data_type=\"image\",\n",
"                      validation_data=(x_test, y_test),\n",
"                      labels=emotion_labels),\n",
"        # Use keras (not tf.keras) for consistency with the rest of the\n",
"        # cell, and restore the best weights once val_loss stops improving.\n",
"        keras.callbacks.EarlyStopping(patience=100, restore_best_weights=True),\n",
"    ],\n",
")\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"# model.save(\"emotion_classification.h5\")"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"113/113 [==============================] - 1s 11ms/step - loss: 4.6099 - accuracy: 0.5631\n",
"Test loss: 4.609869480133057\n",
"Test accuracy: 56.310951709747314\n"
]
}
],
"source": [
"# Sanity-check the serialized model: reload it from disk and evaluate it\n",
"# on the held-out (PublicTest) split.\n",
"model_test = keras.models.load_model(\"emotion_classification.h5\")\n",
"\n",
"# evaluate() returns [loss, accuracy], matching the compile() metrics.\n",
"score = model_test.evaluate(x_test, y_test)\n",
"print('Test loss:', score[0])\n",
"print('Test accuracy:', 100*score[1])"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Train loss: 1.0293246507644653\n",
"Train accuracy: 61.026161909103394\n",
"Test loss: 1.2392393350601196\n",
"Test accuracy: 53.8590133190155\n"
]
}
],
"source": [
"# Evaluate the current in-memory model on both splits to gauge overfitting.\n",
"train_score = model.evaluate(x_train, y_train, verbose=0)\n",
"test_score = model.evaluate(x_test, y_test, verbose=0)\n",
"\n",
"print('Train loss:', train_score[0])\n",
"print('Train accuracy:', 100 * train_score[1])\n",
"print('Test loss:', test_score[0])\n",
"print('Test accuracy:', 100 * test_score[1])\n",
"\n",
"# Reference numbers from the previous (heavily overfit) model:\n",
"#   Train loss: 0.03706146404147148,  Train accuracy: 98.80873560905457\n",
"#   Test loss:  4.6098809242248535,   Test accuracy:  56.310951709747314\n"
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[187, 3, 16, 55, 86, 32, 88],\n",
" [ 18, 5, 5, 5, 10, 2, 11],\n",
" [ 47, 0, 63, 67, 128, 88, 103],\n",
" [ 24, 0, 11, 714, 48, 27, 71],\n",
" [ 66, 1, 21, 67, 278, 30, 190],\n",
" [ 5, 0, 18, 30, 19, 317, 26],\n",
" [ 39, 0, 14, 72, 96, 17, 369]], dtype=int64)"
]
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from sklearn.metrics import confusion_matrix\n",
"\n",
"predictions = model.predict(x_test)\n",
"\n",
"# argmax along the class axis converts probability / one-hot vectors to\n",
"# integer label ids; vectorized instead of per-row Python loops.\n",
"predicted_labels = np.argmax(predictions, axis=1)\n",
"actual_labels = np.argmax(y_test, axis=1)\n",
"\n",
"# Rows: true class, columns: predicted class.\n",
"confusion_matrix(actual_labels, predicted_labels)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment