@meyerzinn
Created April 17, 2019 16:22
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\n",
"For more information, please see:\n",
" * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n",
" * https://github.com/tensorflow/addons\n",
"If you depend on functionality not listed there, please file an issue.\n",
"\n"
]
}
],
"source": [
"import matplotlib.pyplot as plt\n",
"import matplotlib.image as mpimg\n",
"import matplotlib\n",
"import os\n",
"import numpy as np\n",
"import tensorflow as tf\n",
"import tensorflow_datasets as tfds\n",
"%matplotlib inline\n",
"tf.enable_eager_execution()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"num_cores = 4 # Used for parallel data processing\n",
"batch_size = 32\n",
"img_rows = 28\n",
"img_cols = 28"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def scale(x, min_val=0.0, max_val=255.0):\n",
" \"\"\"\n",
" Utility function to normalize pixel values.\n",
" \"\"\"\n",
" x = tf.to_float(x)\n",
" return tf.div(tf.subtract(x, min_val), tf.subtract(max_val, min_val))\n",
"\n",
"# Augmentations from https://www.wouterbulten.nl/blog/tech/data-augmentation-using-tensorflow-data-dataset/#rotation-and-flipping\n",
"def flip(x: tf.Tensor) -> tf.Tensor:\n",
" x = tf.image.random_flip_left_right(x)\n",
" x = tf.image.random_flip_up_down(x)\n",
" return x\n",
"\n",
"def rotate(x: tf.Tensor) -> tf.Tensor:\n",
" return tf.image.rot90(x, tf.random_uniform(shape=[], minval=0, maxval=4, dtype=tf.int32))\n",
"\n",
"def zoom(x: tf.Tensor) -> tf.Tensor:\n",
" # Generate 20 crop settings, ranging from a 1% to 20% crop.\n",
" scales = list(np.arange(0.8, 1.0, 0.01))\n",
" boxes = np.zeros((len(scales), 4))\n",
" for i, scale in enumerate(scales):\n",
" x1 = y1 = 0.5 - (0.5 * scale)\n",
" x2 = y2 = 0.5 + (0.5 * scale)\n",
" boxes[i] = [x1, y1, x2, y2]\n",
" def random_crop(img):\n",
" # Create different crops for an image\n",
" crops = tf.image.crop_and_resize([img], boxes=boxes, box_ind=np.zeros(len(scales)), crop_size=(img_rows, img_cols))\n",
" # Return a random crop\n",
" return crops[tf.random_uniform(shape=[], minval=0, maxval=len(scales), dtype=tf.int32)]\n",
" choice = tf.random_uniform(shape=[], minval=0., maxval=1., dtype=tf.float32)\n",
" # Only apply cropping 50% of the time\n",
" return tf.cond(choice < 0.5, lambda: x, lambda: random_crop(x))"
]
},
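{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check, sketched here as a hypothetical extra cell (not part of the original run): apply each augmentation once to a random dummy image and plot the results, to confirm the functions behave as expected before wiring them into the input pipeline. `dummy` is a made-up placeholder tensor, not real EMNIST data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical preview of the augmentations on a random dummy image (assumes eager execution).\n",
"dummy = tf.random_uniform([img_rows, img_cols, 1])\n",
"previews = [('original', dummy), ('flip', flip(dummy)), ('rotate', rotate(dummy)), ('zoom', zoom(dummy))]\n",
"fig, axes = plt.subplots(1, len(previews), figsize=(8, 2))\n",
"for ax, (name, img) in zip(axes, previews):\n",
"    ax.imshow(tf.squeeze(img).numpy(), cmap='gray')\n",
"    ax.set_title(name)\n",
"    ax.axis('off')\n",
"plt.show()"
]
},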
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From C:\\Users\\meyer\\Anaconda3\\envs\\ml\\lib\\site-packages\\tensorflow\\python\\ops\\control_flow_ops.py:423: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Colocations handled automatically by placer.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING: Logging before flag parsing goes to stderr.\n",
"W0417 11:21:17.151838 22520 deprecation.py:323] From C:\\Users\\meyer\\Anaconda3\\envs\\ml\\lib\\site-packages\\tensorflow\\python\\ops\\control_flow_ops.py:423: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Colocations handled automatically by placer.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Number of samples: 697932.\n",
"<DatasetV1Adapter shapes: ((28, 28, 1), ()), types: (tf.uint8, tf.int64)>\n"
]
}
],
"source": [
"train_ds, ds_info = tfds.load(\"emnist/byclass\",\n",
" split=\"train\",\n",
" as_supervised=True,\n",
" with_info=True)\n",
"num_examples = ds_info.splits['train'].num_examples\n",
"print(f\"Number of samples: {num_examples}.\")\n",
"print(train_ds)"
]
},
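{
"cell_type": "markdown",
"metadata": {},
"source": [
"A hypothetical peek (not part of the original run) at one raw sample straight out of `tfds`, before any scaling or augmentation. With eager execution enabled, the dataset can be iterated directly; the raw EMNIST images may appear transposed when plotted."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical inspection of a single raw (image, label) pair.\n",
"for image, label in train_ds.take(1):\n",
"    print('dtype:', image.dtype, 'shape:', image.shape, 'label:', int(label))\n",
"    plt.imshow(tf.squeeze(image).numpy(), cmap='gray')\n",
"    plt.show()"
]
},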
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From <ipython-input-3-722d97e23cd2>:5: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use tf.cast instead.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"W0417 11:21:17.224838 22520 deprecation.py:323] From <ipython-input-3-722d97e23cd2>:5: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use tf.cast instead.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From <ipython-input-3-722d97e23cd2>:6: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Deprecated in favor of operator or tf.math.divide.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"W0417 11:21:17.231839 22520 deprecation.py:323] From <ipython-input-3-722d97e23cd2>:6: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Deprecated in favor of operator or tf.math.divide.\n"
]
}
],
"source": [
"num_classes = ds_info.features['label'].num_classes\n",
"\n",
"train_ds = train_ds.map(lambda x, y: (scale(x), tf.one_hot(y, num_classes)), num_parallel_calls=num_cores)\n",
"for f in [flip, rotate, zoom]:\n",
" train_ds = train_ds.map(lambda x, y: (tf.cond(tf.random_uniform([], 0, 1) > 0.75, lambda: f(x), lambda: x), y), num_parallel_calls=num_cores)\n",
"train_ds = train_ds.map(lambda x, y: (tf.clip_by_value(x, 0, 1), y), num_parallel_calls=num_cores)\n",
"train_ds = train_ds.apply(tf.data.experimental.shuffle_and_repeat(10000)).batch(batch_size)"
]
},
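{
"cell_type": "markdown",
"metadata": {},
"source": [
"A hypothetical check (not part of the original run) that the finished pipeline emits what the model expects: batches of float images scaled into [0, 1] and one-hot labels of length `num_classes`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical: pull one batch and confirm shapes and value range.\n",
"for images, labels in train_ds.take(1):\n",
"    print('images:', images.shape, 'min:', float(tf.reduce_min(images)), 'max:', float(tf.reduce_max(images)))\n",
"    print('labels:', labels.shape)  # expected (batch_size, num_classes)"
]
},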
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def build_model():\n",
" model = tf.keras.Sequential([\n",
" tf.keras.layers.Conv2D(64, (2,2), input_shape=(28,28,1), activation=tf.nn.relu),\n",
" tf.keras.layers.Conv2D(128, (2,2), activation=tf.nn.relu),\n",
" tf.keras.layers.MaxPooling2D((2,2), strides=2, padding='valid'),\n",
" tf.keras.layers.Dropout(rate=.25),\n",
" tf.keras.layers.Flatten(),\n",
" tf.keras.layers.Dense(128, activation=tf.nn.relu),\n",
" tf.keras.layers.Dense(128, activation=tf.nn.relu),\n",
" tf.keras.layers.Dense(num_classes, activation=tf.nn.softmax)\n",
" ])\n",
" return model"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From C:\\Users\\meyer\\Anaconda3\\envs\\ml\\lib\\site-packages\\tensorflow\\python\\keras\\layers\\core.py:143: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"W0417 11:21:17.706149 22520 deprecation.py:506] From C:\\Users\\meyer\\Anaconda3\\envs\\ml\\lib\\site-packages\\tensorflow\\python\\keras\\layers\\core.py:143: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n"
]
}
],
"source": [
"model = build_model()\n",
"model.compile(loss = tf.keras.metrics.categorical_crossentropy,\n",
" optimizer = tf.train.AdamOptimizer(),\n",
" metrics=['accuracy'])"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"conv2d (Conv2D) (None, 27, 27, 64) 320 \n",
"_________________________________________________________________\n",
"conv2d_1 (Conv2D) (None, 26, 26, 128) 32896 \n",
"_________________________________________________________________\n",
"max_pooling2d (MaxPooling2D) (None, 13, 13, 128) 0 \n",
"_________________________________________________________________\n",
"dropout (Dropout) (None, 13, 13, 128) 0 \n",
"_________________________________________________________________\n",
"flatten (Flatten) (None, 21632) 0 \n",
"_________________________________________________________________\n",
"dense (Dense) (None, 128) 2769024 \n",
"_________________________________________________________________\n",
"dense_1 (Dense) (None, 128) 16512 \n",
"_________________________________________________________________\n",
"dense_2 (Dense) (None, 62) 7998 \n",
"=================================================================\n",
"Total params: 2,826,750\n",
"Trainable params: 2,826,750\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
"model.summary()"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"checkpoint_path = \"training_emnist/cp.ckpt\"\n",
"checkpoint_dir = os.path.dirname(checkpoint_path)\n",
"\n",
"# Create checkpoint callback\n",
"cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, \n",
" save_weights_only=True,\n",
" save_best_only=True,\n",
" monitor='loss',\n",
" mode='max',\n",
" verbose=1)\n",
"\n",
"tb_callback = tf.keras.callbacks.TensorBoard(log_dir='./logs', batch_size=batch_size, write_images=True)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"last_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\n",
"if last_checkpoint is not None:\n",
" model.load_weights(last_checkpoint)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/100\n",
"WARNING:tensorflow:From C:\\Users\\meyer\\Anaconda3\\envs\\ml\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use tf.cast instead.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"W0417 11:21:19.099714 22520 deprecation.py:323] From C:\\Users\\meyer\\Anaconda3\\envs\\ml\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use tf.cast instead.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
" 841/21810 [>.............................] - ETA: 19:37:03 - loss: 4.1096 - acc: 0.093 - ETA: 3:19:20 - loss: 4.0076 - acc: 0.046 - ETA: 1:50:21 - loss: 3.8474 - acc: 0.08 - ETA: 1:12:35 - loss: 3.7789 - acc: 0.11 - ETA: 54:30 - loss: 3.7141 - acc: 0.1291 - ETA: 43:54 - loss: 3.6454 - acc: 0.15 - ETA: 36:54 - loss: 3.5753 - acc: 0.17 - ETA: 31:57 - loss: 3.4857 - acc: 0.18 - ETA: 28:17 - loss: 3.4089 - acc: 0.20 - ETA: 25:26 - loss: 3.3558 - acc: 0.21 - ETA: 23:10 - loss: 3.2955 - acc: 0.22 - ETA: 21:19 - loss: 3.2600 - acc: 0.23 - ETA: 19:47 - loss: 3.1897 - acc: 0.24 - ETA: 18:29 - loss: 3.1504 - acc: 0.25 - ETA: 17:22 - loss: 3.1308 - acc: 0.25 - ETA: 16:15 - loss: 3.1025 - acc: 0.26 - ETA: 15:25 - loss: 3.0604 - acc: 0.27 - ETA: 14:41 - loss: 3.0363 - acc: 0.27 - ETA: 14:03 - loss: 3.0059 - acc: 0.28 - ETA: 13:28 - loss: 2.9757 - acc: 0.29 - ETA: 12:57 - loss: 2.9609 - acc: 0.29 - ETA: 12:28 - loss: 2.9389 - acc: 0.30 - ETA: 12:02 - loss: 2.9142 - acc: 0.30 - ETA: 11:35 - loss: 2.8806 - acc: 0.31 - ETA: 11:14 - loss: 2.8575 - acc: 0.31 - ETA: 10:55 - loss: 2.8374 - acc: 0.32 - ETA: 10:36 - loss: 2.8115 - acc: 0.32 - ETA: 10:17 - loss: 2.7915 - acc: 0.32 - ETA: 10:01 - loss: 2.7733 - acc: 0.33 - ETA: 9:47 - loss: 2.7551 - acc: 0.3350 - ETA: 9:34 - loss: 2.7329 - acc: 0.340 - ETA: 9:19 - loss: 2.7089 - acc: 0.345 - ETA: 9:08 - loss: 2.6923 - acc: 0.349 - ETA: 8:54 - loss: 2.6716 - acc: 0.353 - ETA: 8:42 - loss: 2.6529 - acc: 0.357 - ETA: 8:32 - loss: 2.6393 - acc: 0.361 - ETA: 8:21 - loss: 2.6197 - acc: 0.364 - ETA: 8:13 - loss: 2.6034 - acc: 0.367 - ETA: 8:05 - loss: 2.5846 - acc: 0.370 - ETA: 7:56 - loss: 2.5720 - acc: 0.373 - ETA: 7:49 - loss: 2.5555 - acc: 0.376 - ETA: 7:40 - loss: 2.5376 - acc: 0.380 - ETA: 7:34 - loss: 2.5238 - acc: 0.383 - ETA: 7:26 - loss: 2.5068 - acc: 0.387 - ETA: 7:19 - loss: 2.4948 - acc: 0.389 - ETA: 7:12 - loss: 2.4839 - acc: 0.391 - ETA: 7:07 - loss: 2.4723 - acc: 0.393 - ETA: 7:01 - loss: 2.4585 - acc: 0.396 - ETA: 6:55 - loss: 2.4484 - acc: 0.397 - ETA: 6:50 - loss: 2.4381 - acc: 0.399 - ETA: 6:45 - loss: 2.4262 - acc: 0.402 - ETA: 6:41 - loss: 2.4160 - acc: 0.405 - ETA: 6:37 - loss: 2.4049 - acc: 0.407 - ETA: 6:32 - loss: 2.3925 - acc: 0.409 - ETA: 6:27 - loss: 2.3816 - acc: 0.411 - ETA: 6:24 - loss: 2.3716 - acc: 0.414 - ETA: 6:20 - loss: 2.3615 - acc: 0.415 - ETA: 6:17 - loss: 2.3586 - acc: 0.416 - ETA: 6:12 - loss: 2.3492 - acc: 0.418 - ETA: 6:09 - loss: 2.3352 - acc: 0.421 - ETA: 6:05 - loss: 2.3204 - acc: 0.424 - ETA: 6:02 - loss: 2.3104 - acc: 0.426 - ETA: 5:59 - loss: 2.2991 - acc: 0.428 - ETA: 5:56 - loss: 2.2887 - acc: 0.430 - ETA: 5:53 - loss: 2.2804 - acc: 0.431 - ETA: 5:50 - loss: 2.2730 - acc: 0.433 - ETA: 5:48 - loss: 2.2640 - acc: 0.434 - ETA: 5:45 - loss: 2.2518 - acc: 0.437 - ETA: 5:42 - loss: 2.2413 - acc: 0.439 - ETA: 5:40 - loss: 2.2331 - acc: 0.440 - ETA: 5:37 - loss: 2.2271 - acc: 0.441 - ETA: 5:34 - loss: 2.2178 - acc: 0.443 - ETA: 5:32 - loss: 2.2118 - acc: 0.444 - ETA: 5:29 - loss: 2.2030 - acc: 0.446 - ETA: 5:27 - loss: 2.1964 - acc: 0.447 - ETA: 5:25 - loss: 2.1870 - acc: 0.449 - ETA: 5:23 - loss: 2.1778 - acc: 0.451 - ETA: 5:21 - loss: 2.1705 - acc: 0.452 - ETA: 5:19 - loss: 2.1631 - acc: 0.454 - ETA: 5:17 - loss: 2.1566 - acc: 0.455 - ETA: 5:15 - loss: 2.1524 - acc: 0.456 - ETA: 5:13 - loss: 2.1445 - acc: 0.457 - ETA: 5:11 - loss: 2.1366 - acc: 0.460 - ETA: 5:09 - loss: 2.1296 - acc: 0.461 - ETA: 5:08 - loss: 2.1235 - acc: 0.463 - ETA: 5:06 - loss: 2.1150 - acc: 0.464 - ETA: 5:05 - loss: 2.1061 - acc: 0.466 - ETA: 5:03 - 
loss: 2.0991 - acc: 0.467 - ETA: 5:02 - loss: 2.0933 - acc: 0.468 - ETA: 5:00 - loss: 2.0888 - acc: 0.469 - ETA: 4:59 - loss: 2.0856 - acc: 0.471 - ETA: 4:57 - loss: 2.0817 - acc: 0.471 - ETA: 4:56 - loss: 2.0762 - acc: 0.472 - ETA: 4:54 - loss: 2.0693 - acc: 0.473 - ETA: 4:53 - loss: 2.0625 - acc: 0.475 - ETA: 4:51 - loss: 2.0564 - acc: 0.476 - ETA: 4:50 - loss: 2.0493 - acc: 0.477 - ETA: 4:49 - loss: 2.0418 - acc: 0.478 - ETA: 4:48 - loss: 2.0362 - acc: 0.480 - ETA: 4:46 - loss: 2.0306 - acc: 0.481 - ETA: 4:45 - loss: 2.0249 - acc: 0.482 - ETA: 4:44 - loss: 2.0176 - acc: 0.484 - ETA: 4:43 - loss: 2.0123 - acc: 0.485 - ETA: 4:42 - loss: 2.0076 - acc: 0.486 - ETA: 4:41 - loss: 2.0049 - acc: 0.486 - ETA: 4:39 - loss: 2.0009 - acc: 0.487 - ETA: 4:38 - loss: 1.9945 - acc: 0.489 - ETA: 4:37 - loss: 1.9885 - acc: 0.490 - ETA: 4:36 - loss: 1.9839 - acc: 0.491 - ETA: 4:35 - loss: 1.9796 - acc: 0.491 - ETA: 4:34 - loss: 1.9755 - acc: 0.492 - ETA: 4:33 - loss: 1.9727 - acc: 0.492 - ETA: 4:32 - loss: 1.9680 - acc: 0.493 - ETA: 4:31 - loss: 1.9665 - acc: 0.493 - ETA: 4:30 - loss: 1.9605 - acc: 0.495 - ETA: 4:30 - loss: 1.9555 - acc: 0.496 - ETA: 4:29 - loss: 1.9530 - acc: 0.497 - ETA: 4:28 - loss: 1.9504 - acc: 0.498 - ETA: 4:27 - loss: 1.9461 - acc: 0.498 - ETA: 4:26 - loss: 1.9422 - acc: 0.499 - ETA: 4:25 - loss: 1.9385 - acc: 0.500 - ETA: 4:25 - loss: 1.9348 - acc: 0.501 - ETA: 4:24 - loss: 1.9287 - acc: 0.502 - ETA: 4:23 - loss: 1.9242 - acc: 0.503 - ETA: 4:22 - loss: 1.9211 - acc: 0.504 - ETA: 4:21 - loss: 1.9172 - acc: 0.505 - ETA: 4:21 - loss: 1.9147 - acc: 0.505 - ETA: 4:20 - loss: 1.9106 - acc: 0.506 - ETA: 4:19 - loss: 1.9076 - acc: 0.506 - ETA: 4:19 - loss: 1.9044 - acc: 0.507 - ETA: 4:18 - loss: 1.9003 - acc: 0.508 - ETA: 4:17 - loss: 1.8971 - acc: 0.508 - ETA: 4:17 - loss: 1.8942 - acc: 0.509\r"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-11-af65a7ca48d9>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[0mepochs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m100\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;31m# How many epochs\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mcp_callback\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtb_callback\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 5\u001b[1;33m verbose=1)\n\u001b[0m",
"\u001b[1;32m~\\Anaconda3\\envs\\ml\\lib\\site-packages\\tensorflow\\python\\keras\\engine\\training.py\u001b[0m in \u001b[0;36mfit\u001b[1;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, max_queue_size, workers, use_multiprocessing, **kwargs)\u001b[0m\n\u001b[0;32m 849\u001b[0m \u001b[0mvalidation_steps\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mvalidation_steps\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 850\u001b[0m \u001b[0mworkers\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 851\u001b[1;33m initial_epoch=initial_epoch)\n\u001b[0m\u001b[0;32m 852\u001b[0m elif distributed_training_utils.is_tpu_strategy(\n\u001b[0;32m 853\u001b[0m self._distribution_strategy):\n",
"\u001b[1;32m~\\Anaconda3\\envs\\ml\\lib\\site-packages\\tensorflow\\python\\keras\\engine\\training_generator.py\u001b[0m in \u001b[0;36mmodel_iteration\u001b[1;34m(model, data, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch, mode, batch_size, **kwargs)\u001b[0m\n\u001b[0;32m 201\u001b[0m \u001b[0mbatch_logs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtraining_utils\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmake_logs\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbatch_outs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 202\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_call_batch_hook\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmode\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'end'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mstep\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbatch_logs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 203\u001b[1;33m \u001b[0mprogbar\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mon_batch_end\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstep\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbatch_logs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 204\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 205\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstop_training\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\envs\\ml\\lib\\site-packages\\tensorflow\\python\\keras\\callbacks.py\u001b[0m in \u001b[0;36mon_batch_end\u001b[1;34m(self, batch, logs)\u001b[0m\n\u001b[0;32m 490\u001b[0m \u001b[1;31m# will be handled by on_epoch_end.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 491\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mverbose\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mseen\u001b[0m \u001b[1;33m<\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtarget\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 492\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mprogbar\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mseen\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlog_values\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 493\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 494\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mon_epoch_end\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mepoch\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlogs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\envs\\ml\\lib\\site-packages\\tensorflow\\python\\keras\\utils\\generic_utils.py\u001b[0m in \u001b[0;36mupdate\u001b[1;34m(self, current, values)\u001b[0m\n\u001b[0;32m 387\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_dynamic_display\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 388\u001b[0m \u001b[0msys\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstdout\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'\\b'\u001b[0m \u001b[1;33m*\u001b[0m \u001b[0mprev_total_width\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 389\u001b[1;33m \u001b[0msys\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstdout\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'\\r'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 390\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 391\u001b[0m \u001b[0msys\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstdout\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'\\n'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\AppData\\Roaming\\Python\\Python36\\site-packages\\colorama\\ansitowin32.py\u001b[0m in \u001b[0;36mwrite\u001b[1;34m(self, text)\u001b[0m\n\u001b[0;32m 39\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 40\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mwrite\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtext\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 41\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__convertor\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtext\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 42\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 43\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0misatty\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\AppData\\Roaming\\Python\\Python36\\site-packages\\colorama\\ansitowin32.py\u001b[0m in \u001b[0;36mwrite\u001b[1;34m(self, text)\u001b[0m\n\u001b[0;32m 160\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mwrite\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtext\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 161\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstrip\u001b[0m \u001b[1;32mor\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconvert\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 162\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrite_and_convert\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtext\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 163\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 164\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrapped\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtext\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\AppData\\Roaming\\Python\\Python36\\site-packages\\colorama\\ansitowin32.py\u001b[0m in \u001b[0;36mwrite_and_convert\u001b[1;34m(self, text)\u001b[0m\n\u001b[0;32m 188\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconvert_ansi\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mmatch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgroups\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 189\u001b[0m \u001b[0mcursor\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mend\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 190\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrite_plain_text\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtext\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcursor\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtext\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 191\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 192\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\AppData\\Roaming\\Python\\Python36\\site-packages\\colorama\\ansitowin32.py\u001b[0m in \u001b[0;36mwrite_plain_text\u001b[1;34m(self, text, start, end)\u001b[0m\n\u001b[0;32m 194\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mstart\u001b[0m \u001b[1;33m<\u001b[0m \u001b[0mend\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 195\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrapped\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtext\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mstart\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mend\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 196\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrapped\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mflush\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 197\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 198\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\AppData\\Roaming\\Python\\Python36\\site-packages\\ipykernel\\iostream.py\u001b[0m in \u001b[0;36mflush\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 347\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpub_thread\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mschedule\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mevt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mset\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 348\u001b[0m \u001b[1;31m# and give a timeout to avoid\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 349\u001b[1;33m \u001b[1;32mif\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mevt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwait\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mflush_timeout\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 350\u001b[0m \u001b[1;31m# write directly to __stderr__ instead of warning because\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 351\u001b[0m \u001b[1;31m# if this is happening sys.stderr may be the problem.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\envs\\ml\\lib\\threading.py\u001b[0m in \u001b[0;36mwait\u001b[1;34m(self, timeout)\u001b[0m\n\u001b[0;32m 549\u001b[0m \u001b[0msignaled\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_flag\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 550\u001b[0m \u001b[1;32mif\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0msignaled\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 551\u001b[1;33m \u001b[0msignaled\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_cond\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwait\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 552\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0msignaled\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 553\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\envs\\ml\\lib\\threading.py\u001b[0m in \u001b[0;36mwait\u001b[1;34m(self, timeout)\u001b[0m\n\u001b[0;32m 297\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 298\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mtimeout\u001b[0m \u001b[1;33m>\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 299\u001b[1;33m \u001b[0mgotit\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mwaiter\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0macquire\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtimeout\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 300\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 301\u001b[0m \u001b[0mgotit\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mwaiter\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0macquire\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"model.fit(train_ds,\n",
" steps_per_epoch=num_examples // batch_size, # How many batches per epoch\n",
" epochs=100, # How many epochs\n",
" callbacks=[cp_callback, tb_callback],\n",
" verbose=1)"
]
},
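{
"cell_type": "markdown",
"metadata": {},
"source": [
"A possible follow-up, sketched here rather than taken from the original notebook: evaluate the checkpointed weights on the EMNIST `test` split, applying the same `scale`/one-hot preprocessing but no augmentation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical evaluation sketch on the held-out test split (no augmentation).\n",
"test_ds = tfds.load(\"emnist/byclass\", split=\"test\", as_supervised=True)\n",
"test_ds = test_ds.map(lambda x, y: (scale(x), tf.one_hot(y, num_classes)),\n",
"                      num_parallel_calls=num_cores).batch(batch_size)\n",
"num_test = ds_info.splits['test'].num_examples\n",
"loss, acc = model.evaluate(test_ds, steps=num_test // batch_size, verbose=1)\n",
"print(f'Test loss: {loss:.4f}, accuracy: {acc:.4f}')"
]
},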
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python (ml)",
"language": "python",
"name": "ml"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}