colab_tutorial.ipynb (gist by @okwrtdsh, created May 14, 2019 18:44)
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "colab_tutorial.ipynb",
"version": "0.3.2",
"provenance": [],
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/okwrtdsh/2936adcf3691b98e750f8e7b75396db1/colab_tutorial.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"metadata": {
"id": "CMCw7iuaZeG5",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 221
},
"outputId": "0008064e-9b1b-45c7-828e-14db2b15f11f"
},
"source": [
"!cat /etc/os-release"
],
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"text": [
"NAME=\"Ubuntu\"\n",
"VERSION=\"18.04.2 LTS (Bionic Beaver)\"\n",
"ID=ubuntu\n",
"ID_LIKE=debian\n",
"PRETTY_NAME=\"Ubuntu 18.04.2 LTS\"\n",
"VERSION_ID=\"18.04\"\n",
"HOME_URL=\"https://www.ubuntu.com/\"\n",
"SUPPORT_URL=\"https://help.ubuntu.com/\"\n",
"BUG_REPORT_URL=\"https://bugs.launchpad.net/ubuntu/\"\n",
"PRIVACY_POLICY_URL=\"https://www.ubuntu.com/legal/terms-and-policies/privacy-policy\"\n",
"VERSION_CODENAME=bionic\n",
"UBUNTU_CODENAME=bionic\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "nEd3SFZcZgi7",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "e96500b2-9b99-45d8-a4cf-9a154aa6413a"
},
"source": [
"!pwd"
],
"execution_count": 2,
"outputs": [
{
"output_type": "stream",
"text": [
"/content\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "nVy0hEGlak1i",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "2d2fb1c7-258a-48f2-e810-68f9af13d020"
},
"source": [
"!ls"
],
"execution_count": 3,
"outputs": [
{
"output_type": "stream",
"text": [
"gdrive\tmnist_cnn.py sample_data\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "d3BHoSLlaqT6",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 68
},
"outputId": "50511942-e46c-44e5-f0b8-88b8b0577f4f"
},
"source": [
"!ls sample_data/"
],
"execution_count": 4,
"outputs": [
{
"output_type": "stream",
"text": [
"anscombe.json\t\t mnist_test.csv\n",
"california_housing_test.csv mnist_train_small.csv\n",
"california_housing_train.csv README.md\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "92Q-KZYqYpKy",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "81003cfd-c37f-40ee-81e2-cf69130b78a1"
},
"source": [
"import tensorflow as tf\n",
"import keras\n",
"import torch"
],
"execution_count": 5,
"outputs": [
{
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
],
"name": "stderr"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "d15uBkb7YxuH",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "71683116-c943-41da-d29a-fbde416a9cc2"
},
"source": [
"tf.__version__"
],
"execution_count": 6,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"'1.13.1'"
]
},
"metadata": {
"tags": []
},
"execution_count": 6
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "30zjH2WYYuQd",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "97688d9d-ce60-4910-dcaf-4379d291f378"
},
"source": [
"keras.__version__"
],
"execution_count": 7,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"'2.2.4'"
]
},
"metadata": {
"tags": []
},
"execution_count": 7
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "NtalNj-UZQCj",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "b9faf1d9-61d9-4712-af96-43d5f96ada26"
},
"source": [
"torch.__version__"
],
"execution_count": 8,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"'1.1.0'"
]
},
"metadata": {
"tags": []
},
"execution_count": 8
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "0Lh7pmxtawzs",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 306
},
"outputId": "356aee02-5273-40ce-b6a6-074478440188"
},
"source": [
"!nvidia-smi"
],
"execution_count": 9,
"outputs": [
{
"output_type": "stream",
"text": [
"Tue May 14 18:35:15 2019 \n",
"+-----------------------------------------------------------------------------+\n",
"| NVIDIA-SMI 418.56 Driver Version: 410.79 CUDA Version: 10.0 |\n",
"|-------------------------------+----------------------+----------------------+\n",
"| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
"| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
"|===============================+======================+======================|\n",
"| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n",
"| N/A 56C P8 17W / 70W | 0MiB / 15079MiB | 0% Default |\n",
"+-------------------------------+----------------------+----------------------+\n",
" \n",
"+-----------------------------------------------------------------------------+\n",
"| Processes: GPU Memory |\n",
"| GPU PID Type Process name Usage |\n",
"|=============================================================================|\n",
"| No running processes found |\n",
"+-----------------------------------------------------------------------------+\n"
],
"name": "stdout"
}
]
},
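{
"cell_type": "code",
"metadata": {
"colab_type": "code",
"colab": {}
},
"source": [
"# Added sketch, not part of the original run: confirm the GPU that\n",
"# nvidia-smi reported above is also visible from Python. Assumes the\n",
"# `tensorflow as tf` and `torch` imports from the earlier cell.\n",
"print(tf.test.gpu_device_name())      # e.g. '/device:GPU:0' on a GPU runtime\n",
"print(torch.cuda.is_available())      # True when CUDA is usable\n",
"print(torch.cuda.get_device_name(0))  # e.g. 'Tesla T4'"
],
"execution_count": 0,
"outputs": []
},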
{
"cell_type": "code",
"metadata": {
"id": "hVDzkpSybGJ-",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 221
},
"outputId": "6a32c7b6-bc83-4005-8a52-84d1d5bd9f8c"
},
"source": [
"!wget -N https://raw.githubusercontent.com/keras-team/keras/master/examples/mnist_cnn.py"
],
"execution_count": 10,
"outputs": [
{
"output_type": "stream",
"text": [
"--2019-05-14 18:35:16-- https://raw.githubusercontent.com/keras-team/keras/master/examples/mnist_cnn.py\n",
"Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\n",
"Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 2257 (2.2K) [text/plain]\n",
"Saving to: ‘mnist_cnn.py’\n",
"\n",
"\rmnist_cnn.py 0%[ ] 0 --.-KB/s \rmnist_cnn.py 100%[===================>] 2.20K --.-KB/s in 0s \n",
"\n",
"Last-modified header missing -- time-stamps turned off.\n",
"2019-05-14 18:35:16 (52.7 MB/s) - ‘mnist_cnn.py’ saved [2257/2257]\n",
"\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "txSfXQYCcLe5",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1006
},
"outputId": "d7fdbf16-7287-4af9-c1a9-e21565b880cb"
},
"source": [
"!python mnist_cnn.py"
],
"execution_count": 11,
"outputs": [
{
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n",
"x_train shape: (60000, 28, 28, 1)\n",
"60000 train samples\n",
"10000 test samples\n",
"WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Colocations handled automatically by placer.\n",
"WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
"WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use tf.cast instead.\n",
"Train on 60000 samples, validate on 10000 samples\n",
"Epoch 1/12\n",
"2019-05-14 18:35:21.186226: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2300000000 Hz\n",
"2019-05-14 18:35:21.186547: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x322de40 executing computations on platform Host. Devices:\n",
"2019-05-14 18:35:21.186586: I tensorflow/compiler/xla/service/service.cc:158] StreamExecutor device (0): <undefined>, <undefined>\n",
"2019-05-14 18:35:21.393815: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2019-05-14 18:35:21.394454: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x322d8c0 executing computations on platform CUDA. Devices:\n",
"2019-05-14 18:35:21.394510: I tensorflow/compiler/xla/service/service.cc:158] StreamExecutor device (0): Tesla T4, Compute Capability 7.5\n",
"2019-05-14 18:35:21.394919: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1433] Found device 0 with properties: \n",
"name: Tesla T4 major: 7 minor: 5 memoryClockRate(GHz): 1.59\n",
"pciBusID: 0000:00:04.0\n",
"totalMemory: 14.73GiB freeMemory: 14.60GiB\n",
"2019-05-14 18:35:21.394947: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0\n",
"2019-05-14 18:35:21.966399: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2019-05-14 18:35:21.966466: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0 \n",
"2019-05-14 18:35:21.966481: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0: N \n",
"2019-05-14 18:35:21.966803: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:42] Overriding allow_growth setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.\n",
"2019-05-14 18:35:21.966880: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 14115 MB memory) -> physical GPU (device: 0, name: Tesla T4, pci bus id: 0000:00:04.0, compute capability: 7.5)\n",
"2019-05-14 18:35:22.299302: I tensorflow/stream_executor/dso_loader.cc:152] successfully opened CUDA library libcublas.so.10.0 locally\n",
"60000/60000 [==============================] - 8s 128us/step - loss: 0.2633 - acc: 0.9181 - val_loss: 0.0608 - val_acc: 0.9805\n",
"Epoch 2/12\n",
"60000/60000 [==============================] - 4s 74us/step - loss: 0.0877 - acc: 0.9744 - val_loss: 0.0418 - val_acc: 0.9856\n",
"Epoch 3/12\n",
"60000/60000 [==============================] - 4s 73us/step - loss: 0.0643 - acc: 0.9808 - val_loss: 0.0321 - val_acc: 0.9891\n",
"Epoch 4/12\n",
"60000/60000 [==============================] - 4s 74us/step - loss: 0.0548 - acc: 0.9838 - val_loss: 0.0325 - val_acc: 0.9882\n",
"Epoch 5/12\n",
"60000/60000 [==============================] - 4s 74us/step - loss: 0.0448 - acc: 0.9866 - val_loss: 0.0314 - val_acc: 0.9893\n",
"Epoch 6/12\n",
"60000/60000 [==============================] - 4s 74us/step - loss: 0.0410 - acc: 0.9876 - val_loss: 0.0282 - val_acc: 0.9914\n",
"Epoch 7/12\n",
"60000/60000 [==============================] - 4s 74us/step - loss: 0.0371 - acc: 0.9884 - val_loss: 0.0291 - val_acc: 0.9900\n",
"Epoch 8/12\n",
"60000/60000 [==============================] - 4s 74us/step - loss: 0.0350 - acc: 0.9895 - val_loss: 0.0319 - val_acc: 0.9900\n",
"Epoch 9/12\n",
"60000/60000 [==============================] - 4s 74us/step - loss: 0.0332 - acc: 0.9901 - val_loss: 0.0269 - val_acc: 0.9906\n",
"Epoch 10/12\n",
"60000/60000 [==============================] - 4s 74us/step - loss: 0.0303 - acc: 0.9909 - val_loss: 0.0260 - val_acc: 0.9916\n",
"Epoch 11/12\n",
"60000/60000 [==============================] - 4s 74us/step - loss: 0.0280 - acc: 0.9911 - val_loss: 0.0281 - val_acc: 0.9913\n",
"Epoch 12/12\n",
"60000/60000 [==============================] - 4s 75us/step - loss: 0.0269 - acc: 0.9917 - val_loss: 0.0261 - val_acc: 0.9919\n",
"Test loss: 0.026122948979064857\n",
"Test accuracy: 0.9919\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "s2XBfPv4ceZg",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "a8be3786-57a9-41e2-883e-8be077d57a5a"
},
"source": [
"from google.colab import drive\n",
"drive.mount('./gdrive')"
],
"execution_count": 12,
"outputs": [
{
"output_type": "stream",
"text": [
"Drive already mounted at ./gdrive; to attempt to forcibly remount, call drive.mount(\"./gdrive\", force_remount=True).\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "oma0FOPNmgb8",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "fc143195-fe81-4e51-ba3e-e162539c7fbf"
},
"source": [
"!ls gdrive/My\\ Drive"
],
"execution_count": 13,
"outputs": [
{
"output_type": "stream",
"text": [
"colab_tutorial.ipynb\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "5Wkrwmv1m4_q",
"colab_type": "code",
"colab": {}
},
"source": [
"with open(\"./gdrive/My Drive/hello.txt\", \"w\") as f:\n",
" f.write(\"hello world\")"
],
"execution_count": 0,
"outputs": []
},
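{
"cell_type": "code",
"metadata": {
"colab_type": "code",
"colab": {}
},
"source": [
"# Added sketch, not part of the original gist: read the file back to\n",
"# verify that the write above went through the Drive mount.\n",
"with open(\"./gdrive/My Drive/hello.txt\") as f:\n",
"    print(f.read())  # expected output: hello world"
],
"execution_count": 0,
"outputs": []
},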
{
"cell_type": "code",
"metadata": {
"id": "t4Cgz_gcnjDT",
"colab_type": "code",
"colab": {}
},
"source": [
""
],
"execution_count": 0,
"outputs": []
}
]
}