{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "NRP NN",
"provenance": [],
"collapsed_sections": [],
"include_colab_link": true
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/JediKnightChan/a270c8dfa1509c5a07c6e0bd07bdb224/nrp-nn.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"metadata": {
"id": "JhXglJ_cPqSt"
},
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import keras\n",
"import tensorflow as tf\n",
"from sklearn.model_selection import train_test_split"
],
"execution_count": 2,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "_-K5QZt1bvLp",
"outputId": "fefb1eee-48e4-4327-bf4c-c53d158220d1"
},
"source": [
"!wget https://raw.githubusercontent.com/prometneus/Nonribosomal-Peptides/main/data/FullDB.csv"
],
"execution_count": 3,
"outputs": [
{
"output_type": "stream",
"text": [
"--2021-07-07 15:31:54-- https://raw.githubusercontent.com/prometneus/Nonribosomal-Peptides/main/data/FullDB.csv\n",
"Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\n",
"Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 612651 (598K) [text/plain]\n",
"Saving to: ‘FullDB.csv’\n",
"\n",
"\rFullDB.csv 0%[ ] 0 --.-KB/s \rFullDB.csv 100%[===================>] 598.29K --.-KB/s in 0.04s \n",
"\n",
"2021-07-07 15:31:54 (13.5 MB/s) - ‘FullDB.csv’ saved [612651/612651]\n",
"\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 363
},
"id": "4yLtHwmqPriQ",
"outputId": "b5d868a8-6e0a-42f8-801c-89932644c46b"
},
"source": [
"data = pd.read_csv(\"FullDB.csv\")\n",
"data = data[data.groupby(\"spec\")[\"spec\"].transform('count').ge(5)]\n",
"data.head(10)"
],
"execution_count": 30,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>spec</th>\n",
" <th>Sequence</th>\n",
" <th>8A-signature</th>\n",
" <th>stachelhaus-code</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>pro</td>\n",
" <td>YRELDERANRLAHLLAAHGVGPERIVALALPRSVDLVVAVLAVLKA...</td>\n",
" <td>LWHTFDVSAQESFAAQAGEHNHYGPTETHVVTAH</td>\n",
" <td>DVQFAAHVV-</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>pro</td>\n",
" <td>TYKELNESANRLARHLRKKGVMRQEPVAIMMERSTEFVTGILGILK...</td>\n",
" <td>LFEAFDVCYQESVSITAGEHNHYGPSETHVVSAY</td>\n",
" <td>DVQVIAHVV-</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>pro</td>\n",
" <td>TYRELHERSNQLARFLREKGVKKESIIGIMMERSVEMIVGILGILK...</td>\n",
" <td>LFTTFDVCYQESSFITAGEHNHYGPSETHVVTTY</td>\n",
" <td>DVQSIAHVV-</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>pro</td>\n",
" <td>SYRRLDQLSSSLAEELIGHGVGVEMTIPVLLEKTCWTPVAMLAVLK...</td>\n",
" <td>AHLSWDIPVTDLLLVLAGEIQGYGPAECSLVSTV</td>\n",
" <td>DITLVAGLV-</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>pro</td>\n",
" <td>TYRELNKAANRLARHLRMKGVVRQEPVAIMMERSAAFITGVLGILK...</td>\n",
" <td>LFEAFDVCYQESFLIAAGEHNHYGPSETHVVSTY</td>\n",
" <td>DVQFIAHVV-</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>pro</td>\n",
" <td>TYRELWAHSSFFARQLQRYGVTRGTPVAVCLDRSRWSIAVILGVLL...</td>\n",
" <td>SHHSFDVSIYETFLVLGGEINGYGPAEATICGVG</td>\n",
" <td>DVYFVGGIC-</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6</th>\n",
" <td>pro</td>\n",
" <td>TYQQLNQKANQLAHSLQCLGVTPDSLVAICLERSLDMAVAILGTLK...</td>\n",
" <td>LHHAFDVSYHEAILITAGEYNFYGPSEADLVTAY</td>\n",
" <td>DVHIIAFLV-</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7</th>\n",
" <td>pro</td>\n",
" <td>SYTELERVSSTWARQLQKQGISQGSWVLFCFEKSRLAVVSMIAILK...</td>\n",
" <td>AAYSFDISIADTVLIFTGEYNVYGPAENTLITTA</td>\n",
" <td>DIAVITVLI-</td>\n",
" </tr>\n",
" <tr>\n",
" <th>8</th>\n",
" <td>pro</td>\n",
" <td>TYSELDRQSDELAGWLRQQRLPAESLVAVLAPRSCQTIVAFLGILK...</td>\n",
" <td>VSPTFDVSLWETVFFSSGDHNAYGPTENGIQSTI</td>\n",
" <td>DVWVFSAIQ-</td>\n",
" </tr>\n",
" <tr>\n",
" <th>9</th>\n",
" <td>pro</td>\n",
" <td>TYYELNYRANQLAHYLQSLGVGSDALVGLCVERSLEMVIGLLGILK...</td>\n",
" <td>LWHAFDVSFQETFLITAGEHNHYGPSESHLATSF</td>\n",
" <td>DVQFIAHLA-</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" spec ... stachelhaus-code\n",
"0 pro ... DVQFAAHVV-\n",
"1 pro ... DVQVIAHVV-\n",
"2 pro ... DVQSIAHVV-\n",
"3 pro ... DITLVAGLV-\n",
"4 pro ... DVQFIAHVV-\n",
"5 pro ... DVYFVGGIC-\n",
"6 pro ... DVHIIAFLV-\n",
"7 pro ... DIAVITVLI-\n",
"8 pro ... DVWVFSAIQ-\n",
"9 pro ... DVQFIAHLA-\n",
"\n",
"[10 rows x 4 columns]"
]
},
"metadata": {
"tags": []
},
"execution_count": 30
}
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "NasdkwDfOsfb",
"outputId": "ea9d4c86-8bf3-4c3b-8b64-416066b4fe0a"
},
"source": [
"data.dropna()\n",
"data.isna().sum()"
],
"execution_count": 31,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"spec 0\n",
"Sequence 0\n",
"8A-signature 0\n",
"stachelhaus-code 0\n",
"dtype: int64"
]
},
"metadata": {
"tags": []
},
"execution_count": 31
}
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "zPDP1qrspZwD",
"outputId": "34f4d528-fb9e-4f09-b25d-c58c30200a96"
},
"source": [
"X = data['8A-signature']\n",
"y = data['spec']\n",
"classes_number = len(y.unique())\n",
"classes_number"
],
"execution_count": 32,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"26"
]
},
"metadata": {
"tags": []
},
"execution_count": 32
}
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Lltn-8NJ0-Wm",
"outputId": "5196de5c-ad2a-4654-ca66-be09155b9ff2"
},
"source": [
"uarr = []\n",
"for line in X:\n",
" uarr += set(line)\n",
"uarr = set(uarr)\n",
"uarr"
],
"execution_count": 33,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"{'-',\n",
" 'A',\n",
" 'C',\n",
" 'D',\n",
" 'E',\n",
" 'F',\n",
" 'G',\n",
" 'H',\n",
" 'I',\n",
" 'K',\n",
" 'L',\n",
" 'M',\n",
" 'N',\n",
" 'P',\n",
" 'Q',\n",
" 'R',\n",
" 'S',\n",
" 'T',\n",
" 'V',\n",
" 'W',\n",
" 'Y'}"
]
},
"metadata": {
"tags": []
},
"execution_count": 33
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "fRaDRm4XtTX_"
},
"source": [
"uarr_dum = pd.get_dummies(list(uarr))"
],
"execution_count": 34,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "x97jLP0euAiz"
},
"source": [
"result = []\n",
"for line in X:\n",
" new_line = []\n",
" for char in line:\n",
" new_line.append(uarr_dum[char])\n",
" result.append(new_line)"
],
"execution_count": 35,
"outputs": []
},
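{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick optional check (not part of the original run): each character should map to a 21-dimensional 0/1 vector, one position per symbol in `uarr`."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Sketch: inspect the one-hot column for a single residue, e.g. alanine ('A').\n",
"# It should contain a single 1 and have length len(uarr) == 21.\n",
"print(uarr_dum['A'].values)\n",
"print(uarr_dum['A'].sum(), len(uarr_dum['A']))"
],
"execution_count": null,
"outputs": []
},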
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "wGS-1K2psTq_",
"outputId": "7bd10f09-5019-4930-d7be-fe06a28597f7"
},
"source": [
"np.array(result).shape"
],
"execution_count": 36,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"(1340, 34, 21)"
]
},
"metadata": {
"tags": []
},
"execution_count": 36
}
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "cXzf4srScYAQ",
"outputId": "78c9bd96-cf9c-4f10-b28b-9bff8952a452"
},
"source": [
"_X_ = np.array(result)\n",
"_X_"
],
"execution_count": 37,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"array([[[0, 0, 1, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" ...,\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0]],\n",
"\n",
" [[0, 0, 1, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 1, 0, 0],\n",
" ...,\n",
" [0, 0, 0, ..., 0, 0, 1],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0]],\n",
"\n",
" [[0, 0, 1, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" ...,\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0]],\n",
"\n",
" ...,\n",
"\n",
" [[0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" ...,\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0]],\n",
"\n",
" [[0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" ...,\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0]],\n",
"\n",
" [[0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" ...,\n",
" [0, 0, 0, ..., 0, 0, 1],\n",
" [0, 0, 0, ..., 0, 0, 0],\n",
" [0, 0, 0, ..., 0, 0, 0]]], dtype=uint8)"
]
},
"metadata": {
"tags": []
},
"execution_count": 37
}
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "VgyBtb5Yfj9V",
"outputId": "44f76635-918d-43c5-ad8c-bf076b07bf40"
},
"source": [
"y = pd.factorize(y)[0]\n",
"print(y)"
],
"execution_count": 38,
"outputs": [
{
"output_type": "stream",
"text": [
"[ 0 0 0 ... 25 25 25]\n"
],
"name": "stdout"
}
]
},
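{
"cell_type": "markdown",
"metadata": {},
"source": [
"`pd.factorize` also returns the array of unique labels, which the cell above discards. A minimal optional sketch for keeping that mapping so predicted class indices can be translated back to specificity names later."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Sketch only: re-run factorize keeping both return values.\n",
"# codes matches the y used above; spec_names[i] is the original label of class i.\n",
"codes, spec_names = pd.factorize(data['spec'])\n",
"print(len(spec_names))\n",
"print(spec_names[codes[0]])"
],
"execution_count": null,
"outputs": []
},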
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "qMQmn4cSQlpn",
"outputId": "05598bc4-e889-4129-b24d-db554d56537b"
},
"source": [
"_X_.shape, y.shape"
],
"execution_count": 39,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"((1340, 34, 21), (1340,))"
]
},
"metadata": {
"tags": []
},
"execution_count": 39
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "5o2lr1kXo4lY"
},
"source": [
"X_train, X_test, y_train, y_test = train_test_split(_X_, y, train_size = 0.8)"
],
"execution_count": 40,
"outputs": []
},
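{
"cell_type": "markdown",
"metadata": {},
"source": [
"Some classes have as few as five examples, so a plain random split can leave rare classes unrepresented in the test set. A minimal sketch, not used in the runs below, of a stratified split with a fixed seed."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Sketch: stratify on y so every class keeps roughly the same share in train and test.\n",
"# Separate variable names are used so the cells below still refer to the original split.\n",
"X_train_s, X_test_s, y_train_s, y_test_s = train_test_split(\n",
"    _X_, y, train_size=0.8, stratify=y, random_state=42)"
],
"execution_count": null,
"outputs": []
},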
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "yqcNhQGdQoRX",
"outputId": "d5a3d2ca-158b-4230-8e64-052e197c1f67"
},
"source": [
"X_train.shape, y_train.shape"
],
"execution_count": 41,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"((1072, 34, 21), (1072,))"
]
},
"metadata": {
"tags": []
},
"execution_count": 41
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6enmf63rrAc2"
},
"source": [
"# Dense nn"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "bN926EHirI-o",
"outputId": "95910591-b036-42a4-9452-9cf860c88dcc"
},
"source": [
"%tensorflow_version 2.x\n",
"import tensorflow as tf\n",
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"\n",
"print(tf.test.gpu_device_name())\n",
"\n",
"\n",
"model = keras.Sequential([\n",
" layers.Dense(256, activation='relu'),\n",
" layers.Dense(512, activation='relu'),\n",
" layers.Dense(512, activation='relu'),\n",
" layers.Dense(classes_number, activation=\"softmax\")\n",
"])\n",
"\n",
"\n",
"\n",
"model.compile(optimizer=\"rmsprop\",\n",
" loss='sparse_categorical_crossentropy',\n",
" metrics=[\"accuracy\"])\n",
"\n",
"batch_size = 128\n",
"\n",
"X_train_ = X_train.reshape(X_train.shape[0], X_train.shape[1]*X_train.shape[2]).astype(\"float32\")\n",
"y_train = y_train.astype(\"float32\")\n",
"\n",
"from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\n",
"\n",
"early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')\n",
"mcp_save = ModelCheckpoint('model.hdf5', save_best_only=True, monitor='val_loss', mode='min')\n",
"reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min')\n",
"\n",
"print(X_train_.shape)\n",
"print(y_train.shape)\n",
"\n",
"model.fit(X_train_, y_train, batch_size=batch_size, epochs=50, validation_split=0.25, callbacks=[mcp_save, reduce_lr_loss])\n",
"model.summary()"
],
"execution_count": 55,
"outputs": [
{
"output_type": "stream",
"text": [
"/device:GPU:0\n",
"WARNING:tensorflow:`epsilon` argument is deprecated and will be removed, use `min_delta` instead.\n",
"(1072, 714)\n",
"(1072,)\n",
"Epoch 1/50\n",
"7/7 [==============================] - 1s 24ms/step - loss: 1.8717 - accuracy: 0.5137 - val_loss: 1.0086 - val_accuracy: 0.7575\n",
"Epoch 2/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.9079 - accuracy: 0.7662 - val_loss: 0.8197 - val_accuracy: 0.7537\n",
"Epoch 3/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.6505 - accuracy: 0.8159 - val_loss: 0.6805 - val_accuracy: 0.8097\n",
"Epoch 4/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.4317 - accuracy: 0.8893 - val_loss: 0.7654 - val_accuracy: 0.7687\n",
"Epoch 5/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.3556 - accuracy: 0.9005 - val_loss: 0.7859 - val_accuracy: 0.7948\n",
"Epoch 6/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.3218 - accuracy: 0.9092 - val_loss: 0.6234 - val_accuracy: 0.8507\n",
"Epoch 7/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.1632 - accuracy: 0.9602 - val_loss: 0.6816 - val_accuracy: 0.8209\n",
"Epoch 8/50\n",
"7/7 [==============================] - 0s 7ms/step - loss: 0.1539 - accuracy: 0.9577 - val_loss: 0.6027 - val_accuracy: 0.8507\n",
"Epoch 9/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.1030 - accuracy: 0.9689 - val_loss: 0.5012 - val_accuracy: 0.8843\n",
"Epoch 10/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0893 - accuracy: 0.9714 - val_loss: 0.7267 - val_accuracy: 0.8582\n",
"Epoch 11/50\n",
"7/7 [==============================] - 0s 8ms/step - loss: 0.1057 - accuracy: 0.9726 - val_loss: 0.5629 - val_accuracy: 0.8731\n",
"Epoch 12/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0560 - accuracy: 0.9813 - val_loss: 0.5371 - val_accuracy: 0.8918\n",
"Epoch 13/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0813 - accuracy: 0.9764 - val_loss: 0.6410 - val_accuracy: 0.8582\n",
"Epoch 14/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0440 - accuracy: 0.9876 - val_loss: 0.5747 - val_accuracy: 0.8806\n",
"Epoch 15/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0402 - accuracy: 0.9876 - val_loss: 0.5797 - val_accuracy: 0.8881\n",
"Epoch 16/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0388 - accuracy: 0.9863 - val_loss: 0.8386 - val_accuracy: 0.8507\n",
"\n",
"Epoch 00016: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.\n",
"Epoch 17/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0703 - accuracy: 0.9813 - val_loss: 0.6895 - val_accuracy: 0.8731\n",
"Epoch 18/50\n",
"7/7 [==============================] - 0s 7ms/step - loss: 0.0381 - accuracy: 0.9900 - val_loss: 0.6352 - val_accuracy: 0.8843\n",
"Epoch 19/50\n",
"7/7 [==============================] - 0s 7ms/step - loss: 0.0269 - accuracy: 0.9900 - val_loss: 0.6109 - val_accuracy: 0.8843\n",
"Epoch 20/50\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.0217 - accuracy: 0.9950 - val_loss: 0.6005 - val_accuracy: 0.8843\n",
"Epoch 21/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0183 - accuracy: 0.9963 - val_loss: 0.5953 - val_accuracy: 0.8918\n",
"Epoch 22/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0171 - accuracy: 0.9963 - val_loss: 0.5989 - val_accuracy: 0.8881\n",
"Epoch 23/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0160 - accuracy: 0.9963 - val_loss: 0.6034 - val_accuracy: 0.8881\n",
"\n",
"Epoch 00023: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.\n",
"Epoch 24/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0145 - accuracy: 0.9963 - val_loss: 0.6040 - val_accuracy: 0.8881\n",
"Epoch 25/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0144 - accuracy: 0.9963 - val_loss: 0.6043 - val_accuracy: 0.8881\n",
"Epoch 26/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0143 - accuracy: 0.9963 - val_loss: 0.6048 - val_accuracy: 0.8881\n",
"Epoch 27/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0141 - accuracy: 0.9963 - val_loss: 0.6053 - val_accuracy: 0.8881\n",
"Epoch 28/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0139 - accuracy: 0.9963 - val_loss: 0.6058 - val_accuracy: 0.8881\n",
"Epoch 29/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0138 - accuracy: 0.9963 - val_loss: 0.6064 - val_accuracy: 0.8881\n",
"Epoch 30/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0136 - accuracy: 0.9963 - val_loss: 0.6067 - val_accuracy: 0.8881\n",
"\n",
"Epoch 00030: ReduceLROnPlateau reducing learning rate to 1.0000000656873453e-06.\n",
"Epoch 31/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0134 - accuracy: 0.9963 - val_loss: 0.6068 - val_accuracy: 0.8881\n",
"Epoch 32/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0134 - accuracy: 0.9963 - val_loss: 0.6069 - val_accuracy: 0.8881\n",
"Epoch 33/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0133 - accuracy: 0.9963 - val_loss: 0.6070 - val_accuracy: 0.8881\n",
"Epoch 34/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0133 - accuracy: 0.9963 - val_loss: 0.6070 - val_accuracy: 0.8881\n",
"Epoch 35/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0133 - accuracy: 0.9963 - val_loss: 0.6071 - val_accuracy: 0.8881\n",
"Epoch 36/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0133 - accuracy: 0.9963 - val_loss: 0.6071 - val_accuracy: 0.8881\n",
"Epoch 37/50\n",
"7/7 [==============================] - 0s 8ms/step - loss: 0.0133 - accuracy: 0.9963 - val_loss: 0.6072 - val_accuracy: 0.8881\n",
"\n",
"Epoch 00037: ReduceLROnPlateau reducing learning rate to 1.0000001111620805e-07.\n",
"Epoch 38/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0133 - accuracy: 0.9963 - val_loss: 0.6072 - val_accuracy: 0.8881\n",
"Epoch 39/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0133 - accuracy: 0.9963 - val_loss: 0.6073 - val_accuracy: 0.8881\n",
"Epoch 40/50\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.0133 - accuracy: 0.9963 - val_loss: 0.6073 - val_accuracy: 0.8881\n",
"Epoch 41/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0133 - accuracy: 0.9963 - val_loss: 0.6073 - val_accuracy: 0.8881\n",
"Epoch 42/50\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.0133 - accuracy: 0.9963 - val_loss: 0.6073 - val_accuracy: 0.8881\n",
"Epoch 43/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0133 - accuracy: 0.9963 - val_loss: 0.6073 - val_accuracy: 0.8881\n",
"Epoch 44/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0133 - accuracy: 0.9963 - val_loss: 0.6073 - val_accuracy: 0.8881\n",
"\n",
"Epoch 00044: ReduceLROnPlateau reducing learning rate to 1.000000082740371e-08.\n",
"Epoch 45/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0132 - accuracy: 0.9963 - val_loss: 0.6073 - val_accuracy: 0.8881\n",
"Epoch 46/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0132 - accuracy: 0.9963 - val_loss: 0.6073 - val_accuracy: 0.8881\n",
"Epoch 47/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0132 - accuracy: 0.9963 - val_loss: 0.6073 - val_accuracy: 0.8881\n",
"Epoch 48/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0132 - accuracy: 0.9963 - val_loss: 0.6073 - val_accuracy: 0.8881\n",
"Epoch 49/50\n",
"7/7 [==============================] - 0s 6ms/step - loss: 0.0132 - accuracy: 0.9963 - val_loss: 0.6073 - val_accuracy: 0.8881\n",
"Epoch 50/50\n",
"7/7 [==============================] - 0s 5ms/step - loss: 0.0132 - accuracy: 0.9963 - val_loss: 0.6073 - val_accuracy: 0.8881\n",
"Model: \"sequential_16\"\n",
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"dense_27 (Dense) (None, 256) 183040 \n",
"_________________________________________________________________\n",
"dense_28 (Dense) (None, 512) 131584 \n",
"_________________________________________________________________\n",
"dense_29 (Dense) (None, 512) 262656 \n",
"_________________________________________________________________\n",
"dense_30 (Dense) (None, 26) 13338 \n",
"=================================================================\n",
"Total params: 590,618\n",
"Trainable params: 590,618\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
],
"name": "stdout"
}
]
},
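{
"cell_type": "markdown",
"metadata": {},
"source": [
"The held-out test split is not scored in this notebook. A hedged sketch of how the dense model just trained above could be evaluated on it, reusing the same flattening that was applied to the training data."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Sketch: flatten the test tensors exactly like X_train_ and score the dense model.\n",
"X_test_ = X_test.reshape(X_test.shape[0], X_test.shape[1] * X_test.shape[2]).astype(\"float32\")\n",
"test_loss, test_acc = model.evaluate(X_test_, y_test.astype(\"float32\"), verbose=0)\n",
"print(f\"dense model test accuracy: {test_acc:.3f}\")"
],
"execution_count": null,
"outputs": []
},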
{
"cell_type": "markdown",
"metadata": {
"id": "CvxEE7R3er_H"
},
"source": [
"# Convolutional nn"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "5Bvvh0rhRqWb",
"outputId": "cd0e318d-f4e0-4ca4-9ef9-9823df356498"
},
"source": [
"%tensorflow_version 2.x\n",
"import tensorflow as tf\n",
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"\n",
"print(tf.test.gpu_device_name())\n",
"\n",
"\n",
"model = keras.Sequential([\n",
" layers.Conv1D(128, activation='relu', kernel_size=10, input_shape=(34, 21)),\n",
" layers.MaxPool1D(),\n",
" layers.Conv1D(256, activation='relu', kernel_size=5),\n",
" layers.Conv1D(512, activation='relu', kernel_size=4),\n",
" layers.MaxPool1D(),\n",
" layers.Conv1D(512, activation='relu', kernel_size=2),\n",
" layers.GlobalAveragePooling1D(),\n",
" layers.Dense(classes_number, activation=\"softmax\")\n",
"])\n",
"\n",
"model.summary()\n",
"\n",
"model.compile(optimizer=\"rmsprop\",\n",
" loss='sparse_categorical_crossentropy',\n",
" metrics=[\"accuracy\"])\n",
"\n",
"batch_size = 128\n",
"\n",
"X_train_ = X_train.reshape(X_train.shape[0], X_train.shape[1]*X_train.shape[2]).astype(\"float32\")\n",
"y_train = y_train.astype(\"float32\")\n",
"\n",
"X_train_ = X_train\n",
"\n",
"from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\n",
"\n",
"early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')\n",
"mcp_save = ModelCheckpoint('model.hdf5', save_best_only=True, monitor='val_loss', mode='min')\n",
"reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min')\n",
"\n",
"print(X_train_.shape)\n",
"print(y_train.shape)\n",
"\n",
"model.fit(X_train_, y_train, batch_size=batch_size, epochs=50, validation_split=0.25, callbacks=[mcp_save, reduce_lr_loss])"
],
"execution_count": 50,
"outputs": [
{
"output_type": "stream",
"text": [
"/device:GPU:0\n",
"Model: \"sequential_14\"\n",
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"conv1d_105 (Conv1D) (None, 25, 128) 27008 \n",
"_________________________________________________________________\n",
"max_pooling1d_40 (MaxPooling (None, 12, 128) 0 \n",
"_________________________________________________________________\n",
"conv1d_106 (Conv1D) (None, 8, 256) 164096 \n",
"_________________________________________________________________\n",
"conv1d_107 (Conv1D) (None, 5, 512) 524800 \n",
"_________________________________________________________________\n",
"max_pooling1d_41 (MaxPooling (None, 2, 512) 0 \n",
"_________________________________________________________________\n",
"conv1d_108 (Conv1D) (None, 1, 512) 524800 \n",
"_________________________________________________________________\n",
"global_average_pooling1d_20 (None, 512) 0 \n",
"_________________________________________________________________\n",
"dense_19 (Dense) (None, 26) 13338 \n",
"=================================================================\n",
"Total params: 1,254,042\n",
"Trainable params: 1,254,042\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n",
"WARNING:tensorflow:`epsilon` argument is deprecated and will be removed, use `min_delta` instead.\n",
"(1072, 34, 21)\n",
"(1072,)\n",
"Epoch 1/50\n",
"7/7 [==============================] - 1s 36ms/step - loss: 2.2526 - accuracy: 0.3856 - val_loss: 1.2849 - val_accuracy: 0.6269\n",
"Epoch 2/50\n",
"7/7 [==============================] - 0s 16ms/step - loss: 1.3260 - accuracy: 0.6244 - val_loss: 1.0986 - val_accuracy: 0.7015\n",
"Epoch 3/50\n",
"7/7 [==============================] - 0s 14ms/step - loss: 1.1415 - accuracy: 0.6965 - val_loss: 0.9445 - val_accuracy: 0.7649\n",
"Epoch 4/50\n",
"7/7 [==============================] - 0s 15ms/step - loss: 0.9460 - accuracy: 0.7463 - val_loss: 1.0591 - val_accuracy: 0.7127\n",
"Epoch 5/50\n",
"7/7 [==============================] - 0s 15ms/step - loss: 0.8050 - accuracy: 0.7749 - val_loss: 0.8163 - val_accuracy: 0.7985\n",
"Epoch 6/50\n",
"7/7 [==============================] - 0s 14ms/step - loss: 0.6725 - accuracy: 0.8109 - val_loss: 0.7179 - val_accuracy: 0.8209\n",
"Epoch 7/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.5198 - accuracy: 0.8582 - val_loss: 0.7542 - val_accuracy: 0.8060\n",
"Epoch 8/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.4189 - accuracy: 0.8769 - val_loss: 0.9666 - val_accuracy: 0.7425\n",
"Epoch 9/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.4091 - accuracy: 0.8694 - val_loss: 0.6694 - val_accuracy: 0.8209\n",
"Epoch 10/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.3291 - accuracy: 0.8968 - val_loss: 0.5962 - val_accuracy: 0.8507\n",
"Epoch 11/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.2175 - accuracy: 0.9353 - val_loss: 0.7922 - val_accuracy: 0.7724\n",
"Epoch 12/50\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.1928 - accuracy: 0.9453 - val_loss: 2.5033 - val_accuracy: 0.6157\n",
"Epoch 13/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.4233 - accuracy: 0.9328 - val_loss: 0.8186 - val_accuracy: 0.8396\n",
"Epoch 14/50\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.1050 - accuracy: 0.9726 - val_loss: 1.1154 - val_accuracy: 0.7649\n",
"Epoch 15/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.1583 - accuracy: 0.9590 - val_loss: 0.7237 - val_accuracy: 0.8582\n",
"Epoch 16/50\n",
"7/7 [==============================] - 0s 14ms/step - loss: 0.0690 - accuracy: 0.9801 - val_loss: 1.3215 - val_accuracy: 0.7649\n",
"Epoch 17/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.1856 - accuracy: 0.9527 - val_loss: 0.7215 - val_accuracy: 0.8619\n",
"\n",
"Epoch 00017: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.\n",
"Epoch 18/50\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.0498 - accuracy: 0.9851 - val_loss: 0.7041 - val_accuracy: 0.8694\n",
"Epoch 19/50\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.0399 - accuracy: 0.9888 - val_loss: 0.7032 - val_accuracy: 0.8769\n",
"Epoch 20/50\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.0346 - accuracy: 0.9888 - val_loss: 0.7141 - val_accuracy: 0.8769\n",
"Epoch 21/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0325 - accuracy: 0.9900 - val_loss: 0.7256 - val_accuracy: 0.8657\n",
"Epoch 22/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0305 - accuracy: 0.9900 - val_loss: 0.7423 - val_accuracy: 0.8769\n",
"Epoch 23/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0312 - accuracy: 0.9900 - val_loss: 0.7509 - val_accuracy: 0.8619\n",
"Epoch 24/50\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.0324 - accuracy: 0.9876 - val_loss: 0.7571 - val_accuracy: 0.8731\n",
"\n",
"Epoch 00024: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.\n",
"Epoch 25/50\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.0251 - accuracy: 0.9913 - val_loss: 0.7586 - val_accuracy: 0.8731\n",
"Epoch 26/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0248 - accuracy: 0.9900 - val_loss: 0.7609 - val_accuracy: 0.8731\n",
"Epoch 27/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0246 - accuracy: 0.9900 - val_loss: 0.7626 - val_accuracy: 0.8731\n",
"Epoch 28/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0244 - accuracy: 0.9913 - val_loss: 0.7637 - val_accuracy: 0.8731\n",
"Epoch 29/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0241 - accuracy: 0.9913 - val_loss: 0.7654 - val_accuracy: 0.8769\n",
"Epoch 30/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0240 - accuracy: 0.9900 - val_loss: 0.7673 - val_accuracy: 0.8769\n",
"Epoch 31/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0239 - accuracy: 0.9900 - val_loss: 0.7693 - val_accuracy: 0.8769\n",
"\n",
"Epoch 00031: ReduceLROnPlateau reducing learning rate to 1.0000000656873453e-06.\n",
"Epoch 32/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0233 - accuracy: 0.9913 - val_loss: 0.7695 - val_accuracy: 0.8769\n",
"Epoch 33/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0233 - accuracy: 0.9913 - val_loss: 0.7697 - val_accuracy: 0.8769\n",
"Epoch 34/50\n",
"7/7 [==============================] - 0s 15ms/step - loss: 0.0233 - accuracy: 0.9913 - val_loss: 0.7699 - val_accuracy: 0.8769\n",
"Epoch 35/50\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.0232 - accuracy: 0.9913 - val_loss: 0.7701 - val_accuracy: 0.8769\n",
"Epoch 36/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0232 - accuracy: 0.9913 - val_loss: 0.7703 - val_accuracy: 0.8769\n",
"Epoch 37/50\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.0232 - accuracy: 0.9913 - val_loss: 0.7704 - val_accuracy: 0.8769\n",
"Epoch 38/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0232 - accuracy: 0.9913 - val_loss: 0.7706 - val_accuracy: 0.8769\n",
"\n",
"Epoch 00038: ReduceLROnPlateau reducing learning rate to 1.0000001111620805e-07.\n",
"Epoch 39/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0231 - accuracy: 0.9913 - val_loss: 0.7707 - val_accuracy: 0.8769\n",
"Epoch 40/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0231 - accuracy: 0.9913 - val_loss: 0.7707 - val_accuracy: 0.8769\n",
"Epoch 41/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0231 - accuracy: 0.9913 - val_loss: 0.7707 - val_accuracy: 0.8769\n",
"Epoch 42/50\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.0231 - accuracy: 0.9913 - val_loss: 0.7707 - val_accuracy: 0.8769\n",
"Epoch 43/50\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.0231 - accuracy: 0.9913 - val_loss: 0.7708 - val_accuracy: 0.8769\n",
"Epoch 44/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0231 - accuracy: 0.9913 - val_loss: 0.7708 - val_accuracy: 0.8769\n",
"Epoch 45/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0231 - accuracy: 0.9913 - val_loss: 0.7708 - val_accuracy: 0.8769\n",
"\n",
"Epoch 00045: ReduceLROnPlateau reducing learning rate to 1.000000082740371e-08.\n",
"Epoch 46/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0231 - accuracy: 0.9913 - val_loss: 0.7708 - val_accuracy: 0.8769\n",
"Epoch 47/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0231 - accuracy: 0.9913 - val_loss: 0.7708 - val_accuracy: 0.8769\n",
"Epoch 48/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0231 - accuracy: 0.9913 - val_loss: 0.7708 - val_accuracy: 0.8769\n",
"Epoch 49/50\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.0231 - accuracy: 0.9913 - val_loss: 0.7708 - val_accuracy: 0.8769\n",
"Epoch 50/50\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.0231 - accuracy: 0.9913 - val_loss: 0.7708 - val_accuracy: 0.8769\n"
],
"name": "stdout"
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
"<tensorflow.python.keras.callbacks.History at 0x7f37932e7690>"
]
},
"metadata": {
"tags": []
},
"execution_count": 50
}
]
},
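{
"cell_type": "markdown",
"metadata": {},
"source": [
"`ModelCheckpoint` keeps the weights with the lowest validation loss in `model.hdf5` (note that every training cell writes to the same path, so the file holds the most recently trained model's best state). A sketch, assuming that file is present in the working directory, of reloading it and scoring the convolutional model on the untouched 3D test tensors."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Sketch: restore the best checkpoint saved during the fit above and evaluate it.\n",
"# The Conv1D model consumes the (samples, 34, 21) one-hot tensors directly.\n",
"best_model = keras.models.load_model('model.hdf5')\n",
"loss, acc = best_model.evaluate(X_test.astype(\"float32\"), y_test.astype(\"float32\"), verbose=0)\n",
"print(f\"best-checkpoint test accuracy: {acc:.3f}\")"
],
"execution_count": null,
"outputs": []
},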
{
"cell_type": "markdown",
"metadata": {
"id": "lbN1qYYOqhgY"
},
"source": [
"# Residual CNN"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "dT_r8C2SoRT_",
"outputId": "f8d58d63-d4f2-42ab-f585-d1b469d27d89"
},
"source": [
"%tensorflow_version 2.x\n",
"import tensorflow as tf\n",
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"\n",
"print(tf.test.gpu_device_name())\n",
"\n",
"inputs = keras.Input(shape=(34, 21))\n",
"\n",
"def residual_block(x, filters, pooling=False):\n",
" residual = x\n",
" x = layers.Conv1D(filters, 3, activation=\"relu\", padding=\"same\")(x)\n",
" x = layers.Conv1D(filters, 3, activation=\"relu\", padding=\"same\")(x)\n",
" if pooling:\n",
" x = layers.MaxPooling1D(2, padding=\"same\")(x)\n",
" residual = layers.Conv1D(filters, 1, strides=2)(residual)\n",
" elif filters != residual.shape[-1]:\n",
" residual = layers.Conv1D(filters, 1)(residual)\n",
" x = layers.add([x, residual])\n",
" return x\n",
"\n",
"x = residual_block(inputs, filters=64, pooling=True)\n",
"x = residual_block(x, filters=64, pooling=True)\n",
"x = residual_block(x, filters=128, pooling=False)\n",
"\n",
"x = layers.GlobalAveragePooling1D()(x)\n",
"outputs = layers.Dense(classes_number, activation=\"softmax\")(x)\n",
"model = keras.Model(inputs=inputs, outputs=outputs)\n",
"\n",
"model.summary()\n",
"\n",
"model.compile(optimizer=\"adam\",\n",
" loss='sparse_categorical_crossentropy',\n",
" metrics=[\"accuracy\"])\n",
"\n",
"batch_size = 128\n",
"\n",
"X_train_ = X_train.reshape(X_train.shape[0], X_train.shape[1]*X_train.shape[2]).astype(\"float32\")\n",
"y_train = y_train.astype(\"float32\")\n",
"\n",
"X_train_ = X_train / 255\n",
"\n",
"from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\n",
"\n",
"early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')\n",
"mcp_save = ModelCheckpoint('model.hdf5', save_best_only=True, monitor='val_loss', mode='min')\n",
"reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min')\n",
"\n",
"print(X_train_.shape)\n",
"print(y_train.shape)\n",
"\n",
"model.fit(X_train_, y_train, batch_size=batch_size, epochs=250, validation_split=0.25, callbacks=[mcp_save])"
],
"execution_count": 53,
"outputs": [
{
"output_type": "stream",
"text": [
"/device:GPU:0\n",
"Model: \"model_7\"\n",
"__________________________________________________________________________________________________\n",
"Layer (type) Output Shape Param # Connected to \n",
"==================================================================================================\n",
"input_10 (InputLayer) [(None, 34, 21)] 0 \n",
"__________________________________________________________________________________________________\n",
"conv1d_127 (Conv1D) (None, 34, 64) 4096 input_10[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv1d_128 (Conv1D) (None, 34, 64) 12352 conv1d_127[0][0] \n",
"__________________________________________________________________________________________________\n",
"max_pooling1d_46 (MaxPooling1D) (None, 17, 64) 0 conv1d_128[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv1d_129 (Conv1D) (None, 17, 64) 1408 input_10[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_21 (Add) (None, 17, 64) 0 max_pooling1d_46[0][0] \n",
" conv1d_129[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv1d_130 (Conv1D) (None, 17, 64) 12352 add_21[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv1d_131 (Conv1D) (None, 17, 64) 12352 conv1d_130[0][0] \n",
"__________________________________________________________________________________________________\n",
"max_pooling1d_47 (MaxPooling1D) (None, 9, 64) 0 conv1d_131[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv1d_132 (Conv1D) (None, 9, 64) 4160 add_21[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_22 (Add) (None, 9, 64) 0 max_pooling1d_47[0][0] \n",
" conv1d_132[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv1d_133 (Conv1D) (None, 9, 128) 24704 add_22[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv1d_134 (Conv1D) (None, 9, 128) 49280 conv1d_133[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv1d_135 (Conv1D) (None, 9, 128) 8320 add_22[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_23 (Add) (None, 9, 128) 0 conv1d_134[0][0] \n",
" conv1d_135[0][0] \n",
"__________________________________________________________________________________________________\n",
"global_average_pooling1d_23 (Gl (None, 128) 0 add_23[0][0] \n",
"__________________________________________________________________________________________________\n",
"dense_22 (Dense) (None, 26) 3354 global_average_pooling1d_23[0][0]\n",
"==================================================================================================\n",
"Total params: 132,378\n",
"Trainable params: 132,378\n",
"Non-trainable params: 0\n",
"__________________________________________________________________________________________________\n",
"WARNING:tensorflow:`epsilon` argument is deprecated and will be removed, use `min_delta` instead.\n",
"(1072, 34, 21)\n",
"(1072,)\n",
"Epoch 1/250\n",
"7/7 [==============================] - 1s 53ms/step - loss: 3.1703 - accuracy: 0.3408 - val_loss: 2.8268 - val_accuracy: 0.4328\n",
"Epoch 2/250\n",
"7/7 [==============================] - 0s 12ms/step - loss: 2.5885 - accuracy: 0.4067 - val_loss: 2.2501 - val_accuracy: 0.4328\n",
"Epoch 3/250\n",
"1/7 [===>..........................] - ETA: 0s - loss: 2.0226 - accuracy: 0.5234"
],
"name": "stdout"
},
{
"output_type": "stream",
"text": [
"/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/generic_utils.py:497: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.\n",
" category=CustomMaskWarning)\n"
],
"name": "stderr"
},
{
"output_type": "stream",
"text": [
"7/7 [==============================] - 0s 10ms/step - loss: 2.3149 - accuracy: 0.4067 - val_loss: 2.2238 - val_accuracy: 0.4328\n",
"Epoch 4/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.3189 - accuracy: 0.4067 - val_loss: 2.1210 - val_accuracy: 0.4328\n",
"Epoch 5/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2922 - accuracy: 0.4067 - val_loss: 2.1342 - val_accuracy: 0.4328\n",
"Epoch 6/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2937 - accuracy: 0.4067 - val_loss: 2.1413 - val_accuracy: 0.4328\n",
"Epoch 7/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2752 - accuracy: 0.4067 - val_loss: 2.1182 - val_accuracy: 0.4328\n",
"Epoch 8/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2745 - accuracy: 0.4067 - val_loss: 2.1400 - val_accuracy: 0.4328\n",
"Epoch 9/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2862 - accuracy: 0.4067 - val_loss: 2.1216 - val_accuracy: 0.4328\n",
"Epoch 10/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 2.2722 - accuracy: 0.4067 - val_loss: 2.1184 - val_accuracy: 0.4328\n",
"Epoch 11/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 2.2715 - accuracy: 0.4067 - val_loss: 2.1284 - val_accuracy: 0.4328\n",
"Epoch 12/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2694 - accuracy: 0.4067 - val_loss: 2.1204 - val_accuracy: 0.4328\n",
"Epoch 13/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2714 - accuracy: 0.4067 - val_loss: 2.1182 - val_accuracy: 0.4328\n",
"Epoch 14/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 2.2682 - accuracy: 0.4067 - val_loss: 2.1118 - val_accuracy: 0.4328\n",
"Epoch 15/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2763 - accuracy: 0.4067 - val_loss: 2.1236 - val_accuracy: 0.4328\n",
"Epoch 16/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2707 - accuracy: 0.4067 - val_loss: 2.1176 - val_accuracy: 0.4328\n",
"Epoch 17/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 2.2688 - accuracy: 0.4067 - val_loss: 2.1224 - val_accuracy: 0.4328\n",
"Epoch 18/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 2.2654 - accuracy: 0.4067 - val_loss: 2.1122 - val_accuracy: 0.4328\n",
"Epoch 19/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 2.2678 - accuracy: 0.4067 - val_loss: 2.1258 - val_accuracy: 0.4328\n",
"Epoch 20/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2661 - accuracy: 0.4067 - val_loss: 2.1111 - val_accuracy: 0.4328\n",
"Epoch 21/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2747 - accuracy: 0.4067 - val_loss: 2.1365 - val_accuracy: 0.4328\n",
"Epoch 22/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2681 - accuracy: 0.4067 - val_loss: 2.1031 - val_accuracy: 0.4328\n",
"Epoch 23/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 2.2616 - accuracy: 0.4067 - val_loss: 2.1159 - val_accuracy: 0.4328\n",
"Epoch 24/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 2.2610 - accuracy: 0.4067 - val_loss: 2.1089 - val_accuracy: 0.4328\n",
"Epoch 25/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 2.2572 - accuracy: 0.4067 - val_loss: 2.1015 - val_accuracy: 0.4328\n",
"Epoch 26/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2669 - accuracy: 0.4067 - val_loss: 2.1404 - val_accuracy: 0.4328\n",
"Epoch 27/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2671 - accuracy: 0.4067 - val_loss: 2.0954 - val_accuracy: 0.4328\n",
"Epoch 28/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2482 - accuracy: 0.4067 - val_loss: 2.1024 - val_accuracy: 0.4328\n",
"Epoch 29/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2461 - accuracy: 0.4067 - val_loss: 2.0950 - val_accuracy: 0.4328\n",
"Epoch 30/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2371 - accuracy: 0.4067 - val_loss: 2.0827 - val_accuracy: 0.4328\n",
"Epoch 31/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2310 - accuracy: 0.4067 - val_loss: 2.0803 - val_accuracy: 0.4328\n",
"Epoch 32/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.2142 - accuracy: 0.4067 - val_loss: 2.1013 - val_accuracy: 0.4328\n",
"Epoch 33/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 2.2208 - accuracy: 0.4067 - val_loss: 2.0879 - val_accuracy: 0.4328\n",
"Epoch 34/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 2.1723 - accuracy: 0.4067 - val_loss: 1.9891 - val_accuracy: 0.4328\n",
"Epoch 35/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 2.1229 - accuracy: 0.4067 - val_loss: 1.9332 - val_accuracy: 0.4328\n",
"Epoch 36/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 2.0257 - accuracy: 0.4067 - val_loss: 1.8248 - val_accuracy: 0.4328\n",
"Epoch 37/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 1.9249 - accuracy: 0.4341 - val_loss: 1.7082 - val_accuracy: 0.4440\n",
"Epoch 38/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.8162 - accuracy: 0.5087 - val_loss: 1.6315 - val_accuracy: 0.5933\n",
"Epoch 39/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.7252 - accuracy: 0.5647 - val_loss: 1.5770 - val_accuracy: 0.5933\n",
"Epoch 40/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.6807 - accuracy: 0.5659 - val_loss: 1.5660 - val_accuracy: 0.5933\n",
"Epoch 41/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.6548 - accuracy: 0.5659 - val_loss: 1.5675 - val_accuracy: 0.5896\n",
"Epoch 42/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.6377 - accuracy: 0.5659 - val_loss: 1.5483 - val_accuracy: 0.5933\n",
"Epoch 43/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.6127 - accuracy: 0.5672 - val_loss: 1.5910 - val_accuracy: 0.5858\n",
"Epoch 44/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 1.6132 - accuracy: 0.5672 - val_loss: 1.4978 - val_accuracy: 0.5933\n",
"Epoch 45/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.5726 - accuracy: 0.5659 - val_loss: 1.4518 - val_accuracy: 0.6007\n",
"Epoch 46/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.5232 - accuracy: 0.5659 - val_loss: 1.4259 - val_accuracy: 0.5970\n",
"Epoch 47/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.4740 - accuracy: 0.5771 - val_loss: 1.5106 - val_accuracy: 0.5933\n",
"Epoch 48/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 1.5859 - accuracy: 0.5920 - val_loss: 1.3783 - val_accuracy: 0.6530\n",
"Epoch 49/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.4752 - accuracy: 0.6057 - val_loss: 1.3954 - val_accuracy: 0.6381\n",
"Epoch 50/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 1.4139 - accuracy: 0.5871 - val_loss: 1.3425 - val_accuracy: 0.6530\n",
"Epoch 51/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.3574 - accuracy: 0.6144 - val_loss: 1.2772 - val_accuracy: 0.6157\n",
"Epoch 52/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.3052 - accuracy: 0.6169 - val_loss: 1.2639 - val_accuracy: 0.6567\n",
"Epoch 53/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.3172 - accuracy: 0.6244 - val_loss: 1.2532 - val_accuracy: 0.6231\n",
"Epoch 54/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.2668 - accuracy: 0.6219 - val_loss: 1.2160 - val_accuracy: 0.6604\n",
"Epoch 55/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 1.2533 - accuracy: 0.6281 - val_loss: 1.1824 - val_accuracy: 0.6418\n",
"Epoch 56/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.2278 - accuracy: 0.6269 - val_loss: 1.1773 - val_accuracy: 0.6455\n",
"Epoch 57/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.2125 - accuracy: 0.6368 - val_loss: 1.2155 - val_accuracy: 0.6306\n",
"Epoch 58/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 1.2224 - accuracy: 0.6343 - val_loss: 1.1587 - val_accuracy: 0.7015\n",
"Epoch 59/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.2003 - accuracy: 0.6356 - val_loss: 1.1687 - val_accuracy: 0.6604\n",
"Epoch 60/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.1938 - accuracy: 0.6530 - val_loss: 1.2210 - val_accuracy: 0.6754\n",
"Epoch 61/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 1.1635 - accuracy: 0.6716 - val_loss: 1.1252 - val_accuracy: 0.7090\n",
"Epoch 62/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.1645 - accuracy: 0.6580 - val_loss: 1.1887 - val_accuracy: 0.6567\n",
"Epoch 63/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 1.1531 - accuracy: 0.6505 - val_loss: 1.2178 - val_accuracy: 0.6866\n",
"Epoch 64/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.3816 - accuracy: 0.6206 - val_loss: 1.3285 - val_accuracy: 0.6381\n",
"Epoch 65/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.2023 - accuracy: 0.6393 - val_loss: 1.1723 - val_accuracy: 0.6567\n",
"Epoch 66/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 1.1564 - accuracy: 0.6716 - val_loss: 1.2311 - val_accuracy: 0.6567\n",
"Epoch 67/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 1.1407 - accuracy: 0.6667 - val_loss: 1.1467 - val_accuracy: 0.6679\n",
"Epoch 68/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.1302 - accuracy: 0.6580 - val_loss: 1.0884 - val_accuracy: 0.6978\n",
"Epoch 69/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.1018 - accuracy: 0.6642 - val_loss: 1.1089 - val_accuracy: 0.6940\n",
"Epoch 70/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 1.0835 - accuracy: 0.6965 - val_loss: 1.0897 - val_accuracy: 0.7127\n",
"Epoch 71/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.0802 - accuracy: 0.6915 - val_loss: 1.0844 - val_accuracy: 0.7090\n",
"Epoch 72/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.0679 - accuracy: 0.6903 - val_loss: 1.1029 - val_accuracy: 0.7015\n",
"Epoch 73/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.0736 - accuracy: 0.7027 - val_loss: 1.1063 - val_accuracy: 0.6940\n",
"Epoch 74/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.0450 - accuracy: 0.6965 - val_loss: 1.0875 - val_accuracy: 0.6903\n",
"Epoch 75/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 1.0417 - accuracy: 0.6915 - val_loss: 1.0894 - val_accuracy: 0.6791\n",
"Epoch 76/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.0493 - accuracy: 0.6853 - val_loss: 1.1358 - val_accuracy: 0.7090\n",
"Epoch 77/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.1367 - accuracy: 0.6754 - val_loss: 1.0502 - val_accuracy: 0.7052\n",
"Epoch 78/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.0996 - accuracy: 0.6903 - val_loss: 1.0568 - val_accuracy: 0.6903\n",
"Epoch 79/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 1.0344 - accuracy: 0.6903 - val_loss: 1.0500 - val_accuracy: 0.6940\n",
"Epoch 80/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.0226 - accuracy: 0.6953 - val_loss: 1.0583 - val_accuracy: 0.7090\n",
"Epoch 81/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 1.0467 - accuracy: 0.7090 - val_loss: 1.0603 - val_accuracy: 0.7052\n",
"Epoch 82/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.0408 - accuracy: 0.7002 - val_loss: 1.0484 - val_accuracy: 0.6866\n",
"Epoch 83/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 1.0092 - accuracy: 0.7002 - val_loss: 1.0532 - val_accuracy: 0.7052\n",
"Epoch 84/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.9685 - accuracy: 0.7338 - val_loss: 1.0466 - val_accuracy: 0.6903\n",
"Epoch 85/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.9620 - accuracy: 0.7376 - val_loss: 1.0274 - val_accuracy: 0.7015\n",
"Epoch 86/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.9471 - accuracy: 0.7313 - val_loss: 1.0267 - val_accuracy: 0.6978\n",
"Epoch 87/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.9334 - accuracy: 0.7301 - val_loss: 1.0420 - val_accuracy: 0.6940\n",
"Epoch 88/250\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.9499 - accuracy: 0.7313 - val_loss: 1.0480 - val_accuracy: 0.7127\n",
"Epoch 89/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.9057 - accuracy: 0.7438 - val_loss: 1.0302 - val_accuracy: 0.6978\n",
"Epoch 90/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.9124 - accuracy: 0.7226 - val_loss: 1.0502 - val_accuracy: 0.6978\n",
"Epoch 91/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.9325 - accuracy: 0.7338 - val_loss: 1.0313 - val_accuracy: 0.6978\n",
"Epoch 92/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.9021 - accuracy: 0.7425 - val_loss: 1.0224 - val_accuracy: 0.7015\n",
"Epoch 93/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.8849 - accuracy: 0.7500 - val_loss: 1.0381 - val_accuracy: 0.7127\n",
"Epoch 94/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.8790 - accuracy: 0.7512 - val_loss: 1.1085 - val_accuracy: 0.7201\n",
"Epoch 95/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.8772 - accuracy: 0.7475 - val_loss: 1.0966 - val_accuracy: 0.7015\n",
"Epoch 96/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.8723 - accuracy: 0.7512 - val_loss: 1.0288 - val_accuracy: 0.7201\n",
"Epoch 97/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.8809 - accuracy: 0.7562 - val_loss: 1.0011 - val_accuracy: 0.7164\n",
"Epoch 98/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.8409 - accuracy: 0.7587 - val_loss: 1.0174 - val_accuracy: 0.7201\n",
"Epoch 99/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.8256 - accuracy: 0.7662 - val_loss: 0.9956 - val_accuracy: 0.7201\n",
"Epoch 100/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.8269 - accuracy: 0.7624 - val_loss: 1.0560 - val_accuracy: 0.7201\n",
"Epoch 101/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.8233 - accuracy: 0.7624 - val_loss: 1.0270 - val_accuracy: 0.7425\n",
"Epoch 102/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.8391 - accuracy: 0.7562 - val_loss: 1.0348 - val_accuracy: 0.7164\n",
"Epoch 103/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.8569 - accuracy: 0.7450 - val_loss: 1.0854 - val_accuracy: 0.7239\n",
"Epoch 104/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.8184 - accuracy: 0.7674 - val_loss: 1.0754 - val_accuracy: 0.7090\n",
"Epoch 105/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.8205 - accuracy: 0.7674 - val_loss: 1.0118 - val_accuracy: 0.7164\n",
"Epoch 106/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.7973 - accuracy: 0.7724 - val_loss: 0.9771 - val_accuracy: 0.7351\n",
"Epoch 107/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.8052 - accuracy: 0.7761 - val_loss: 1.0050 - val_accuracy: 0.7425\n",
"Epoch 108/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.8108 - accuracy: 0.7736 - val_loss: 1.0799 - val_accuracy: 0.7090\n",
"Epoch 109/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.7768 - accuracy: 0.7861 - val_loss: 0.9900 - val_accuracy: 0.7463\n",
"Epoch 110/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.7491 - accuracy: 0.7898 - val_loss: 0.9663 - val_accuracy: 0.7425\n",
"Epoch 111/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.7319 - accuracy: 0.7886 - val_loss: 1.0362 - val_accuracy: 0.7276\n",
"Epoch 112/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.7283 - accuracy: 0.7873 - val_loss: 0.9720 - val_accuracy: 0.7537\n",
"Epoch 113/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.6890 - accuracy: 0.8072 - val_loss: 0.9683 - val_accuracy: 0.7463\n",
"Epoch 114/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.7220 - accuracy: 0.7811 - val_loss: 0.9976 - val_accuracy: 0.7239\n",
"Epoch 115/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.7085 - accuracy: 0.8072 - val_loss: 0.9679 - val_accuracy: 0.7425\n",
"Epoch 116/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.6826 - accuracy: 0.8035 - val_loss: 0.9457 - val_accuracy: 0.7500\n",
"Epoch 117/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.6570 - accuracy: 0.8097 - val_loss: 0.9567 - val_accuracy: 0.7463\n",
"Epoch 118/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.6550 - accuracy: 0.8047 - val_loss: 0.9756 - val_accuracy: 0.7313\n",
"Epoch 119/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.6408 - accuracy: 0.8072 - val_loss: 0.9664 - val_accuracy: 0.7537\n",
"Epoch 120/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.6220 - accuracy: 0.8147 - val_loss: 0.9512 - val_accuracy: 0.7612\n",
"Epoch 121/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.6214 - accuracy: 0.8246 - val_loss: 1.0454 - val_accuracy: 0.7313\n",
"Epoch 122/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.6339 - accuracy: 0.8047 - val_loss: 1.0158 - val_accuracy: 0.7276\n",
"Epoch 123/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.6848 - accuracy: 0.7985 - val_loss: 1.1598 - val_accuracy: 0.7276\n",
"Epoch 124/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.7251 - accuracy: 0.7886 - val_loss: 1.1269 - val_accuracy: 0.7388\n",
"Epoch 125/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.6636 - accuracy: 0.7873 - val_loss: 1.0390 - val_accuracy: 0.7612\n",
"Epoch 126/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.6417 - accuracy: 0.8172 - val_loss: 0.9734 - val_accuracy: 0.7500\n",
"Epoch 127/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.6157 - accuracy: 0.8147 - val_loss: 0.9974 - val_accuracy: 0.7351\n",
"Epoch 128/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.5936 - accuracy: 0.8383 - val_loss: 0.9867 - val_accuracy: 0.7351\n",
"Epoch 129/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.5744 - accuracy: 0.8321 - val_loss: 0.9450 - val_accuracy: 0.7612\n",
"Epoch 130/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.5737 - accuracy: 0.8383 - val_loss: 0.9716 - val_accuracy: 0.7463\n",
"Epoch 131/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.5517 - accuracy: 0.8221 - val_loss: 0.9528 - val_accuracy: 0.7463\n",
"Epoch 132/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.5462 - accuracy: 0.8321 - val_loss: 0.9706 - val_accuracy: 0.7612\n",
"Epoch 133/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.5316 - accuracy: 0.8557 - val_loss: 0.9848 - val_accuracy: 0.7425\n",
"Epoch 134/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.5427 - accuracy: 0.8420 - val_loss: 1.0018 - val_accuracy: 0.7687\n",
"Epoch 135/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.5183 - accuracy: 0.8607 - val_loss: 0.9655 - val_accuracy: 0.7612\n",
"Epoch 136/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.5368 - accuracy: 0.8371 - val_loss: 0.9610 - val_accuracy: 0.7649\n",
"Epoch 137/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.5166 - accuracy: 0.8483 - val_loss: 1.0139 - val_accuracy: 0.7388\n",
"Epoch 138/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.5188 - accuracy: 0.8396 - val_loss: 1.1429 - val_accuracy: 0.7313\n",
"Epoch 139/250\n",
"7/7 [==============================] - 0s 15ms/step - loss: 0.5905 - accuracy: 0.8246 - val_loss: 1.0936 - val_accuracy: 0.7276\n",
"Epoch 140/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.5315 - accuracy: 0.8433 - val_loss: 0.9785 - val_accuracy: 0.7687\n",
"Epoch 141/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.5114 - accuracy: 0.8433 - val_loss: 0.9849 - val_accuracy: 0.7537\n",
"Epoch 142/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4984 - accuracy: 0.8470 - val_loss: 1.0120 - val_accuracy: 0.7687\n",
"Epoch 143/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4836 - accuracy: 0.8582 - val_loss: 0.9982 - val_accuracy: 0.7836\n",
"Epoch 144/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4908 - accuracy: 0.8458 - val_loss: 0.9907 - val_accuracy: 0.7612\n",
"Epoch 145/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4617 - accuracy: 0.8632 - val_loss: 1.0252 - val_accuracy: 0.7388\n",
"Epoch 146/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4696 - accuracy: 0.8483 - val_loss: 0.9492 - val_accuracy: 0.7687\n",
"Epoch 147/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4401 - accuracy: 0.8682 - val_loss: 0.9599 - val_accuracy: 0.7575\n",
"Epoch 148/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4373 - accuracy: 0.8706 - val_loss: 1.0003 - val_accuracy: 0.7500\n",
"Epoch 149/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4624 - accuracy: 0.8545 - val_loss: 1.1764 - val_accuracy: 0.7239\n",
"Epoch 150/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4976 - accuracy: 0.8346 - val_loss: 0.9875 - val_accuracy: 0.7649\n",
"Epoch 151/250\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.4619 - accuracy: 0.8570 - val_loss: 0.9878 - val_accuracy: 0.7463\n",
"Epoch 152/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.4376 - accuracy: 0.8570 - val_loss: 0.9957 - val_accuracy: 0.7687\n",
"Epoch 153/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4211 - accuracy: 0.8831 - val_loss: 1.0094 - val_accuracy: 0.7649\n",
"Epoch 154/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4164 - accuracy: 0.8719 - val_loss: 0.9937 - val_accuracy: 0.7649\n",
"Epoch 155/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4301 - accuracy: 0.8682 - val_loss: 0.9869 - val_accuracy: 0.7761\n",
"Epoch 156/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4158 - accuracy: 0.8744 - val_loss: 1.0057 - val_accuracy: 0.7687\n",
"Epoch 157/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.3919 - accuracy: 0.8831 - val_loss: 0.9989 - val_accuracy: 0.7687\n",
"Epoch 158/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.3986 - accuracy: 0.8818 - val_loss: 1.0166 - val_accuracy: 0.7799\n",
"Epoch 159/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.3856 - accuracy: 0.8818 - val_loss: 0.9931 - val_accuracy: 0.7612\n",
"Epoch 160/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.3803 - accuracy: 0.8856 - val_loss: 1.0425 - val_accuracy: 0.7649\n",
"Epoch 161/250\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.4044 - accuracy: 0.8781 - val_loss: 1.0459 - val_accuracy: 0.7687\n",
"Epoch 162/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.4046 - accuracy: 0.8843 - val_loss: 1.0579 - val_accuracy: 0.7612\n",
"Epoch 163/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.3709 - accuracy: 0.8856 - val_loss: 1.0208 - val_accuracy: 0.7948\n",
"Epoch 164/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.3546 - accuracy: 0.8980 - val_loss: 1.0279 - val_accuracy: 0.7910\n",
"Epoch 165/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.3502 - accuracy: 0.9017 - val_loss: 1.0175 - val_accuracy: 0.7687\n",
"Epoch 166/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.3472 - accuracy: 0.8918 - val_loss: 1.0121 - val_accuracy: 0.7910\n",
"Epoch 167/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.3632 - accuracy: 0.8781 - val_loss: 1.0419 - val_accuracy: 0.7761\n",
"Epoch 168/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.3374 - accuracy: 0.8930 - val_loss: 1.1226 - val_accuracy: 0.7612\n",
"Epoch 169/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4112 - accuracy: 0.8856 - val_loss: 1.0612 - val_accuracy: 0.7910\n",
"Epoch 170/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.4082 - accuracy: 0.8706 - val_loss: 1.1180 - val_accuracy: 0.7724\n",
"Epoch 171/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.3815 - accuracy: 0.8769 - val_loss: 1.0186 - val_accuracy: 0.7761\n",
"Epoch 172/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.3398 - accuracy: 0.8918 - val_loss: 1.0469 - val_accuracy: 0.7873\n",
"Epoch 173/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.3285 - accuracy: 0.9005 - val_loss: 1.0466 - val_accuracy: 0.7724\n",
"Epoch 174/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.3244 - accuracy: 0.9017 - val_loss: 1.0449 - val_accuracy: 0.7799\n",
"Epoch 175/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.3197 - accuracy: 0.9042 - val_loss: 1.0683 - val_accuracy: 0.7649\n",
"Epoch 176/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.3187 - accuracy: 0.9067 - val_loss: 1.0353 - val_accuracy: 0.7836\n",
"Epoch 177/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.3358 - accuracy: 0.9067 - val_loss: 1.1395 - val_accuracy: 0.7873\n",
"Epoch 178/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.3361 - accuracy: 0.8943 - val_loss: 1.0449 - val_accuracy: 0.7799\n",
"Epoch 179/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.2972 - accuracy: 0.9142 - val_loss: 1.0420 - val_accuracy: 0.7873\n",
"Epoch 180/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2845 - accuracy: 0.9142 - val_loss: 1.0803 - val_accuracy: 0.7985\n",
"Epoch 181/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2964 - accuracy: 0.9092 - val_loss: 1.0137 - val_accuracy: 0.7948\n",
"Epoch 182/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2769 - accuracy: 0.9216 - val_loss: 1.0810 - val_accuracy: 0.7687\n",
"Epoch 183/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.2871 - accuracy: 0.9117 - val_loss: 1.0658 - val_accuracy: 0.8060\n",
"Epoch 184/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.2880 - accuracy: 0.9204 - val_loss: 1.0991 - val_accuracy: 0.7724\n",
"Epoch 185/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2719 - accuracy: 0.9167 - val_loss: 1.1672 - val_accuracy: 0.7761\n",
"Epoch 186/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.3113 - accuracy: 0.9129 - val_loss: 1.0980 - val_accuracy: 0.7761\n",
"Epoch 187/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2645 - accuracy: 0.9204 - val_loss: 1.0914 - val_accuracy: 0.7836\n",
"Epoch 188/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2419 - accuracy: 0.9291 - val_loss: 1.0959 - val_accuracy: 0.7799\n",
"Epoch 189/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.2549 - accuracy: 0.9254 - val_loss: 1.2023 - val_accuracy: 0.7500\n",
"Epoch 190/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.2680 - accuracy: 0.9179 - val_loss: 1.0851 - val_accuracy: 0.8134\n",
"Epoch 191/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2478 - accuracy: 0.9279 - val_loss: 1.1375 - val_accuracy: 0.8134\n",
"Epoch 192/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.2477 - accuracy: 0.9229 - val_loss: 1.0884 - val_accuracy: 0.7948\n",
"Epoch 193/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.2192 - accuracy: 0.9316 - val_loss: 1.1348 - val_accuracy: 0.7761\n",
"Epoch 194/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2218 - accuracy: 0.9353 - val_loss: 1.1336 - val_accuracy: 0.7836\n",
"Epoch 195/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2204 - accuracy: 0.9366 - val_loss: 1.1398 - val_accuracy: 0.8022\n",
"Epoch 196/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2243 - accuracy: 0.9303 - val_loss: 1.1652 - val_accuracy: 0.7910\n",
"Epoch 197/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2170 - accuracy: 0.9353 - val_loss: 1.1580 - val_accuracy: 0.7910\n",
"Epoch 198/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2075 - accuracy: 0.9366 - val_loss: 1.2053 - val_accuracy: 0.7799\n",
"Epoch 199/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2281 - accuracy: 0.9328 - val_loss: 1.1707 - val_accuracy: 0.8022\n",
"Epoch 200/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1893 - accuracy: 0.9440 - val_loss: 1.2088 - val_accuracy: 0.7724\n",
"Epoch 201/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.2557 - accuracy: 0.9080 - val_loss: 1.1752 - val_accuracy: 0.8060\n",
"Epoch 202/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.3336 - accuracy: 0.9030 - val_loss: 1.2355 - val_accuracy: 0.8022\n",
"Epoch 203/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2370 - accuracy: 0.9266 - val_loss: 1.1870 - val_accuracy: 0.7799\n",
"Epoch 204/250\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.2189 - accuracy: 0.9316 - val_loss: 1.1379 - val_accuracy: 0.7910\n",
"Epoch 205/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2029 - accuracy: 0.9440 - val_loss: 1.1484 - val_accuracy: 0.8097\n",
"Epoch 206/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.2217 - accuracy: 0.9415 - val_loss: 1.2517 - val_accuracy: 0.7761\n",
"Epoch 207/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2251 - accuracy: 0.9291 - val_loss: 1.2151 - val_accuracy: 0.8022\n",
"Epoch 208/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.2145 - accuracy: 0.9291 - val_loss: 1.1811 - val_accuracy: 0.8022\n",
"Epoch 209/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1950 - accuracy: 0.9378 - val_loss: 1.1993 - val_accuracy: 0.7910\n",
"Epoch 210/250\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.1800 - accuracy: 0.9453 - val_loss: 1.1938 - val_accuracy: 0.7761\n",
"Epoch 211/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1979 - accuracy: 0.9403 - val_loss: 1.2585 - val_accuracy: 0.7873\n",
"Epoch 212/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.2040 - accuracy: 0.9279 - val_loss: 1.1801 - val_accuracy: 0.7948\n",
"Epoch 213/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2546 - accuracy: 0.9092 - val_loss: 1.2982 - val_accuracy: 0.8134\n",
"Epoch 214/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.2107 - accuracy: 0.9328 - val_loss: 1.3976 - val_accuracy: 0.7761\n",
"Epoch 215/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2367 - accuracy: 0.9229 - val_loss: 1.1944 - val_accuracy: 0.8060\n",
"Epoch 216/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.2032 - accuracy: 0.9453 - val_loss: 1.2179 - val_accuracy: 0.8172\n",
"Epoch 217/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.1976 - accuracy: 0.9415 - val_loss: 1.2112 - val_accuracy: 0.7799\n",
"Epoch 218/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1809 - accuracy: 0.9403 - val_loss: 1.2693 - val_accuracy: 0.7761\n",
"Epoch 219/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1930 - accuracy: 0.9378 - val_loss: 1.2859 - val_accuracy: 0.7761\n",
"Epoch 220/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.1830 - accuracy: 0.9403 - val_loss: 1.3077 - val_accuracy: 0.7836\n",
"Epoch 221/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1642 - accuracy: 0.9490 - val_loss: 1.2286 - val_accuracy: 0.7649\n",
"Epoch 222/250\n",
"7/7 [==============================] - 0s 9ms/step - loss: 0.1496 - accuracy: 0.9577 - val_loss: 1.2461 - val_accuracy: 0.7836\n",
"Epoch 223/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1401 - accuracy: 0.9614 - val_loss: 1.2605 - val_accuracy: 0.7761\n",
"Epoch 224/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1354 - accuracy: 0.9639 - val_loss: 1.2660 - val_accuracy: 0.8060\n",
"Epoch 225/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1306 - accuracy: 0.9614 - val_loss: 1.2466 - val_accuracy: 0.7799\n",
"Epoch 226/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1232 - accuracy: 0.9677 - val_loss: 1.3141 - val_accuracy: 0.8022\n",
"Epoch 227/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1216 - accuracy: 0.9726 - val_loss: 1.3261 - val_accuracy: 0.7761\n",
"Epoch 228/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1192 - accuracy: 0.9689 - val_loss: 1.2907 - val_accuracy: 0.8172\n",
"Epoch 229/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1266 - accuracy: 0.9590 - val_loss: 1.3225 - val_accuracy: 0.7910\n",
"Epoch 230/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1251 - accuracy: 0.9689 - val_loss: 1.3868 - val_accuracy: 0.8060\n",
"Epoch 231/250\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.1235 - accuracy: 0.9614 - val_loss: 1.3235 - val_accuracy: 0.7985\n",
"Epoch 232/250\n",
"7/7 [==============================] - 0s 12ms/step - loss: 0.1106 - accuracy: 0.9701 - val_loss: 1.3611 - val_accuracy: 0.7799\n",
"Epoch 233/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.1314 - accuracy: 0.9639 - val_loss: 1.3306 - val_accuracy: 0.7873\n",
"Epoch 234/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1107 - accuracy: 0.9739 - val_loss: 1.3603 - val_accuracy: 0.8022\n",
"Epoch 235/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1120 - accuracy: 0.9701 - val_loss: 1.4043 - val_accuracy: 0.7873\n",
"Epoch 236/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1098 - accuracy: 0.9726 - val_loss: 1.3612 - val_accuracy: 0.7836\n",
"Epoch 237/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.1057 - accuracy: 0.9776 - val_loss: 1.3887 - val_accuracy: 0.8060\n",
"Epoch 238/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1045 - accuracy: 0.9677 - val_loss: 1.3443 - val_accuracy: 0.8097\n",
"Epoch 239/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.0906 - accuracy: 0.9838 - val_loss: 1.3957 - val_accuracy: 0.8022\n",
"Epoch 240/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.0956 - accuracy: 0.9701 - val_loss: 1.3979 - val_accuracy: 0.8022\n",
"Epoch 241/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.0928 - accuracy: 0.9776 - val_loss: 1.4348 - val_accuracy: 0.7910\n",
"Epoch 242/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.0986 - accuracy: 0.9751 - val_loss: 1.4549 - val_accuracy: 0.7910\n",
"Epoch 243/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1077 - accuracy: 0.9689 - val_loss: 1.6090 - val_accuracy: 0.8022\n",
"Epoch 244/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1139 - accuracy: 0.9714 - val_loss: 1.4873 - val_accuracy: 0.8097\n",
"Epoch 245/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1073 - accuracy: 0.9714 - val_loss: 1.4669 - val_accuracy: 0.7910\n",
"Epoch 246/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.1435 - accuracy: 0.9565 - val_loss: 1.5558 - val_accuracy: 0.7724\n",
"Epoch 247/250\n",
"7/7 [==============================] - 0s 13ms/step - loss: 0.1482 - accuracy: 0.9502 - val_loss: 1.4823 - val_accuracy: 0.7724\n",
"Epoch 248/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.0974 - accuracy: 0.9751 - val_loss: 1.4016 - val_accuracy: 0.8022\n",
"Epoch 249/250\n",
"7/7 [==============================] - 0s 10ms/step - loss: 0.0804 - accuracy: 0.9838 - val_loss: 1.4795 - val_accuracy: 0.8022\n",
"Epoch 250/250\n",
"7/7 [==============================] - 0s 11ms/step - loss: 0.0758 - accuracy: 0.9863 - val_loss: 1.4763 - val_accuracy: 0.8022\n"
],
"name": "stdout"
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
"<tensorflow.python.keras.callbacks.History at 0x7f37910e87d0>"
]
},
"metadata": {
"tags": []
},
"execution_count": 53
}
]
}
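,
{
"cell_type": "markdown",
"metadata": {
"id": "early_stopping_note"
},
"source": [
"The log above shows training accuracy climbing to about 0.99 while validation accuracy plateaus near 0.80 and validation loss starts rising after roughly epoch 130, i.e. the model overfits in the later epochs. Below is a minimal sketch of how early stopping could cap this; it assumes the compiled model is called `model` and the split arrays are `X_train`, `y_train`, `X_val`, `y_val` (the names actually used earlier in this notebook may differ, so adjust accordingly)."
]
},
{
"cell_type": "code",
"metadata": {
"id": "early_stopping_sketch"
},
"source": [
"# Sketch only (not executed here): stop training once val_loss stops improving\n",
"# and roll back to the best weights, instead of running all 250 epochs.\n",
"# Assumes `model`, `X_train`, `y_train`, `X_val`, `y_val` from the cells above.\n",
"early_stop = tf.keras.callbacks.EarlyStopping(\n",
"    monitor=\"val_loss\",        # validation loss starts climbing after ~epoch 130\n",
"    patience=20,               # tolerate 20 epochs without improvement\n",
"    restore_best_weights=True  # keep the weights from the best epoch\n",
")\n",
"\n",
"# history = model.fit(\n",
"#     X_train, y_train,\n",
"#     validation_data=(X_val, y_val),\n",
"#     epochs=250,\n",
"#     callbacks=[early_stop],\n",
"# )"
],
"execution_count": null,
"outputs": []
}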
]
}