@poutyface
Created January 8, 2019 23:25
Untitled0.ipynb
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "Untitled0.ipynb",
"version": "0.3.2",
"provenance": [],
"collapsed_sections": [],
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/poutyface/56be21f96f41807ad32ecfa76d8695a0/untitled0.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"metadata": {
"id": "z_fYBadnW4iM",
"colab_type": "code",
"outputId": "45fe7bfe-daa0-4230-83dc-09e3ecf0670a",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 326
}
},
"cell_type": "code",
"source": [
"!pip install torchvision\n"
],
"execution_count": 0,
"outputs": [
{
"output_type": "stream",
"text": [
"Collecting torchvision\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/ca/0d/f00b2885711e08bd71242ebe7b96561e6f6d01fdb4b9dcf4d37e2e13c5e1/torchvision-0.2.1-py2.py3-none-any.whl (54kB)\n",
"\u001b[K 100% |████████████████████████████████| 61kB 5.6MB/s \n",
"\u001b[?25hCollecting torch (from torchvision)\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/7e/60/66415660aa46b23b5e1b72bc762e816736ce8d7260213e22365af51e8f9c/torch-1.0.0-cp36-cp36m-manylinux1_x86_64.whl (591.8MB)\n",
"\u001b[K 100% |████████████████████████████████| 591.8MB 25kB/s \n",
"tcmalloc: large alloc 1073750016 bytes == 0x61ece000 @ 0x7fbdb08d02a4 0x591a07 0x5b5d56 0x502e9a 0x506859 0x502209 0x502f3d 0x506859 0x504c28 0x502540 0x502f3d 0x506859 0x504c28 0x502540 0x502f3d 0x506859 0x504c28 0x502540 0x502f3d 0x507641 0x502209 0x502f3d 0x506859 0x504c28 0x502540 0x502f3d 0x507641 0x504c28 0x502540 0x502f3d 0x507641\n",
"\u001b[?25hRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from torchvision) (1.11.0)\n",
"Collecting pillow>=4.1.1 (from torchvision)\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/85/5e/e91792f198bbc5a0d7d3055ad552bc4062942d27eaf75c3e2783cf64eae5/Pillow-5.4.1-cp36-cp36m-manylinux1_x86_64.whl (2.0MB)\n",
"\u001b[K 100% |████████████████████████████████| 2.0MB 7.1MB/s \n",
"\u001b[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from torchvision) (1.14.6)\n",
"Installing collected packages: torch, pillow, torchvision\n",
" Found existing installation: Pillow 4.0.0\n",
" Uninstalling Pillow-4.0.0:\n",
" Successfully uninstalled Pillow-4.0.0\n",
"Successfully installed pillow-5.4.1 torch-1.0.0 torchvision-0.2.1\n"
],
"name": "stdout"
}
]
},
{
"metadata": {
"id": "URyeFBseXkJv",
"colab_type": "code",
"outputId": "b7017942-a2cd-4663-cf84-3473ebc4fbc9",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 10404
}
},
"cell_type": "code",
"source": [
"import numpy as np \n",
"import torch\n",
"import torch.nn as nn\n",
"import torch.optim as optim\n",
"import torch.nn.functional as F\n",
"import torchvision\n",
"import torchvision.transforms as transforms\n",
"import math\n",
"import random\n",
"\n",
"print(torch.__version__)\n",
"\n",
"device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
"\n",
"CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]\n",
"CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]\n",
"\n",
"class Invert(object):\n",
" def __init__(self):\n",
" pass\n",
"\n",
" def __call__(self, img):\n",
" #print(img)\n",
" if random.random() > 0.5:\n",
" img = 1.0 - img\n",
" #print(img)\n",
" return img\n",
" \n",
"transform_train = transforms.Compose([\n",
" #Invert(),\n",
" transforms.RandomCrop(32, padding=9),\n",
" transforms.RandomHorizontalFlip(),\n",
" #transforms.RandomVerticalFlip(),\n",
" transforms.ToTensor(),\n",
" #Invert(),\n",
" transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n",
"])\n",
"\n",
"transform_test = transforms.Compose([\n",
" #transforms.CenterCrop(28),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n",
" \n",
"])\n",
"\n",
"\n",
"\n",
"\n",
"class Cutout(object):\n",
" def __init__(self, length):\n",
" self.length = length\n",
"\n",
" def __call__(self, img):\n",
" h, w = img.size(1), img.size(2)\n",
" mask = np.ones((h, w), np.float32)\n",
" y = np.random.randint(h)\n",
" x = np.random.randint(w)\n",
"\n",
" y1 = np.clip(y - self.length // 2, 0, h)\n",
" y2 = np.clip(y + self.length // 2, 0, h)\n",
" x1 = np.clip(x - self.length // 2, 0, w)\n",
" x2 = np.clip(x + self.length // 2, 0, w)\n",
"\n",
" mask[y1: y2, x1: x2] = 0.\n",
" mask = torch.from_numpy(mask)\n",
" mask = mask.expand_as(img)\n",
" img *= mask\n",
" return img \n",
"\n",
"transform_train.transforms.append(Cutout(14))\n",
"\n",
"train_dataset = torchvision.datasets.CIFAR10(root=\"./data\", train=True, download=True, transform=transform_train)\n",
"train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=96, shuffle=True, num_workers=4)\n",
"\n",
"test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)\n",
"test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False, num_workers=4)\n",
"\n",
"class Noise(nn.Module):\n",
" def __init__(self):\n",
" super(Noise, self).__init__()\n",
" \n",
" def forward(self, x):\n",
" n, c, h, w = x.shape\n",
" noise = torch.zeros(n, c, h, w).normal_(0, 0.1).cuda()\n",
" return x + noise\n",
"\n",
"class FactorizedReduce(nn.Module):\n",
"\n",
" def __init__(self, C_in, C_out, affine=True):\n",
" super(FactorizedReduce, self).__init__()\n",
" assert C_out % 2 == 0\n",
" self.relu = nn.ReLU(inplace=False)\n",
" self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)\n",
" self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) \n",
" self.bn = nn.BatchNorm2d(C_out, affine=affine)\n",
"\n",
" def forward(self, x):\n",
" x = self.relu(x)\n",
" out = torch.cat([self.conv_1(x), self.conv_2(x[:,:,1:,1:])], dim=1)\n",
" out = self.bn(out)\n",
" return out\n",
"\n",
"\n",
"class Reduce(nn.Module):\n",
"\n",
" def __init__(self, inp, out):\n",
" super(Reduce, self).__init__()\n",
" self.relu = nn.ReLU()\n",
" self.conv_1 = nn.Conv2d(inp, out, 1, bias=False)\n",
" self.bn = nn.BatchNorm2d(out)\n",
"\n",
" def forward(self, x):\n",
" h = self.relu(x)\n",
" h = F.interpolate(h, scale_factor=0.5, mode='bilinear')\n",
" h = self.conv_1(h)\n",
" h = self.bn(h)\n",
" return h\n",
"\n",
"\n",
"\n",
"class HCell(nn.Module):\n",
" def __init__(self, inp, out, ksize=3, stride=1):\n",
" super(HCell, self).__init__()\n",
"\n",
" if ksize == 3:\n",
" padding = 1\n",
" if ksize == 5:\n",
" padding = 2\n",
" if ksize == 7:\n",
" padding = 3\n",
" \n",
" self.skip = True\n",
" if stride == 2 or inp != out:\n",
" self.skip = False\n",
" \n",
"\n",
" self.layer0 = nn.Sequential(\n",
" nn.Conv2d(inp, inp*3, kernel_size=1, bias=False),\n",
" nn.BatchNorm2d(inp*3),\n",
" nn.ReLU6(),\n",
" )\n",
" \n",
" self.layer1 = nn.Sequential(\n",
" nn.Conv2d(inp*3, inp*3, kernel_size=ksize, stride=stride, padding=padding, groups=inp*3, bias=False),\n",
" nn.BatchNorm2d(inp*3),\n",
" nn.ReLU6(),\n",
" nn.Conv2d(inp*3, out, kernel_size=1, bias=False),\n",
" nn.BatchNorm2d(out),\n",
" ) \n",
" \n",
" def forward(self, x):\n",
" h = self.layer0(x) \n",
" h = self.layer1(h)\n",
" if self.skip:\n",
" h = h + x\n",
" \n",
" return h \n",
"\n",
"\n",
"class HBlock(nn.Module):\n",
" def __init__(self, inp, out, ksize=3, stride=1, num=1):\n",
" super(HBlock, self).__init__()\n",
" \n",
" layers = []\n",
" for i in range(num):\n",
" layers.append(HCell(inp, out, ksize=ksize, stride=stride)) \n",
" inp = out\n",
" if stride == 2:\n",
" stride = 1\n",
" \n",
" self.layers = nn.ModuleList(layers)\n",
" \n",
" \n",
" def forward(self, x):\n",
" for layer in self.layers:\n",
" x = layer(x)\n",
" \n",
" return x \n",
" \n",
"\n",
"class HNet(nn.Module):\n",
" def __init__(self, num_classes=10):\n",
" super(HNet, self).__init__()\n",
" self.inp= 32\n",
"\n",
" self.layer0 = nn.Sequential(\n",
" nn.Conv2d(3, self.inp, kernel_size=3, stride=1, padding=1, bias=False),\n",
" nn.BatchNorm2d(self.inp),\n",
" nn.ReLU6()\n",
" )\n",
"\n",
" \n",
" self.layer1 = HBlock(32, 64, ksize=3, stride=1, num=2)\n",
" self.layer2_0 = HBlock(64, 128, ksize=3, stride=2, num=1)\n",
" self.layer2 = HBlock(128, 128, ksize=3, stride=1, num=3)\n",
" self.layer3_0 = HBlock(128, 192, ksize=7, stride=2, num=1)\n",
" self.layer3 = HBlock(192, 192, ksize=3, stride=1, num=3)\n",
" self.layer4 = HBlock(192, 320, ksize=3, stride=1, num=1)\n",
" \n",
" self.drop = nn.Dropout(p=0.2)\n",
" #self.drop2d = nn.Dropout2d(p=0.2)\n",
" \n",
" last_out = 768\n",
" self.layer5 = nn.Sequential(\n",
" nn.Conv2d(320, last_out, kernel_size=1, bias=False),\n",
" nn.BatchNorm2d(last_out),\n",
" nn.ReLU6()\n",
" )\n",
" \n",
" \n",
" self.linear = nn.Linear(last_out, num_classes)\n",
" \n",
" for m in self.modules():\n",
" if isinstance(m, nn.Conv2d):\n",
" nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n",
" elif isinstance(m, nn.BatchNorm2d):\n",
" m.weight.data.fill_(1)\n",
" m.bias.data.zero_()\n",
" \n",
" \n",
"\n",
" def forward(self, x):\n",
" out = self.layer0(x)\n",
" out = self.layer1(out)\n",
" out = self.layer2_0(out)\n",
" out = self.layer2(out)\n",
" out = self.layer3_0(out)\n",
" out = self.layer3(out)\n",
" out = self.layer4(out)\n",
" out = self.layer5(out)\n",
" out = F.avg_pool2d(out, 8)\n",
" out = out.view(out.size(0), -1)\n",
" #out = self.drop(out)\n",
" h1 = self.linear(out)\n",
" #h1 = self.post_bn(h1) + out\n",
" \n",
" return h1\n",
" \n",
"def resnet18(pretrained=False, **kwargs):\n",
" model = HNet()\n",
" return model\n",
"\n",
"\n",
"net = resnet18()\n",
"net = net.to(device)\n",
"\n",
"criterion = nn.CrossEntropyLoss()\n",
"\n",
"biases = []\n",
"weights = []\n",
"\n",
"for name, param in net.named_parameters():\n",
" #print(name)\n",
" if \"weight\" in name:\n",
" weights.append(param)\n",
" else:\n",
" biases.append(param)\n",
" \n",
"\n",
"optimizer = optim.SGD([\n",
" {\"params\": weights},\n",
" #{\"params\": biases}\n",
" {\"params\": biases, \"weight_decay\": 0.0}\n",
"], lr=1e-1, momentum=0.9, weight_decay=1e-4, nesterov=True)\n",
"\n",
"#scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[30,60], gamma=0.1)\n",
"scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.97)\n",
"\n",
"\n",
"DROP = 0.1\n",
"\n",
"def train(epoch, optimizer, scheduler):\n",
" print('\\nEpoch: %d' % epoch)\n",
" print(\"Drop:\", DROP)\n",
" net.train()\n",
" if scheduler.get_lr()[0] > 1e-6:\n",
" scheduler.step()\n",
" print(scheduler.get_lr())\n",
" train_loss = 0\n",
" correct = 0\n",
" total = 0\n",
" for batch_idx, (inputs, targets) in enumerate(train_loader):\n",
" #targets_seg = targets.view(-1, 1).repeat(1, 8*8).view(-1, 8, 8).to(device)\n",
" \n",
" inputs, targets = inputs.to(device), targets.to(device)\n",
" \n",
" #inputs = 0.0\n",
" optimizer.zero_grad()\n",
" outputs = net(inputs)\n",
" \n",
" loss = F.cross_entropy(outputs, targets)\n",
" #print(outputs.transpose().shape)\n",
" #loss += torch.matmul(outputs.view(outputs.size(0), 1, 10), outputs.view(outputs.size(0), 10, 1)).mean() * 0.1\n",
" #print(outputs.t().shape)\n",
" #print(a)\n",
" #loss += F.cross_entropy(seg, targets_seg)\n",
" \n",
" loss.backward()\n",
" optimizer.step()\n",
"\n",
" train_loss += loss.item()\n",
" _, predicted = outputs.max(1)\n",
" total += targets.size(0)\n",
" correct += predicted.eq(targets).sum().item()\n",
"\n",
" if batch_idx % 200 == 0:\n",
" print('Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n",
" \n",
" print('Loss: | Acc: %.3f%% (%d/%d)' % (100.*correct/total, correct, total))\n",
" \n",
"def test(epoch):\n",
" net.eval()\n",
" test_loss = 0\n",
" correct = 0\n",
" total = 0\n",
" with torch.no_grad():\n",
" for batch_idx, (inputs, targets) in enumerate(test_loader):\n",
" inputs, targets = inputs.to(device), targets.to(device)\n",
" outputs= net(inputs)\n",
" #loss = criterion(outputs, targets)\n",
"\n",
" #test_loss += loss.item()\n",
" _, predicted = outputs.max(1)\n",
" total += targets.size(0)\n",
" correct += predicted.eq(targets).sum().item()\n",
" \n",
" print('Eval Acc: %.3f%% (%d/%d)' % (100.*correct/total, correct, total))\n",
" \n",
" \n",
"\n",
"for epoch in range(0, 300): \n",
" train(epoch, optimizer, scheduler)\n",
" test(epoch)\n",
"\n"
],
"execution_count": 0,
"outputs": [
{
"output_type": "stream",
"text": [
"1.0.0\n",
"Files already downloaded and verified\n",
"Files already downloaded and verified\n",
"\n",
"Epoch: 0\n",
"Drop: 0.1\n",
"[0.1, 0.1]\n",
"Loss: 2.308 | Acc: 10.417% (10/96)\n",
"Loss: 1.849 | Acc: 32.447% (6261/19296)\n",
"Loss: 1.637 | Acc: 40.544% (15608/38496)\n",
"Loss: | Acc: 43.702% (21851/50000)\n",
"Eval Acc: 53.980% (5398/10000)\n",
"\n",
"Epoch: 1\n",
"Drop: 0.1\n",
"[0.097, 0.097]\n",
"Loss: 1.218 | Acc: 53.125% (51/96)\n",
"Loss: 1.152 | Acc: 58.774% (11341/19296)\n",
"Loss: 1.105 | Acc: 60.775% (23396/38496)\n",
"Loss: | Acc: 61.594% (30797/50000)\n",
"Eval Acc: 61.410% (6141/10000)\n",
"\n",
"Epoch: 2\n",
"Drop: 0.1\n",
"[0.09409000000000001, 0.09409000000000001]\n",
"Loss: 0.818 | Acc: 67.708% (65/96)\n",
"Loss: 0.947 | Acc: 66.345% (12802/19296)\n",
"Loss: 0.913 | Acc: 67.752% (26082/38496)\n",
"Loss: | Acc: 68.318% (34159/50000)\n",
"Eval Acc: 71.120% (7112/10000)\n",
"\n",
"Epoch: 3\n",
"Drop: 0.1\n",
"[0.0912673, 0.0912673]\n",
"Loss: 0.752 | Acc: 73.958% (71/96)\n",
"Loss: 0.818 | Acc: 71.460% (13789/19296)\n",
"Loss: 0.793 | Acc: 72.371% (27860/38496)\n",
"Loss: | Acc: 72.602% (36301/50000)\n",
"Eval Acc: 73.250% (7325/10000)\n",
"\n",
"Epoch: 4\n",
"Drop: 0.1\n",
"[0.088529281, 0.088529281]\n",
"Loss: 0.595 | Acc: 76.042% (73/96)\n",
"Loss: 0.713 | Acc: 75.181% (14507/19296)\n",
"Loss: 0.714 | Acc: 75.117% (28917/38496)\n",
"Loss: | Acc: 75.246% (37623/50000)\n",
"Eval Acc: 78.870% (7887/10000)\n",
"\n",
"Epoch: 5\n",
"Drop: 0.1\n",
"[0.08587340257, 0.08587340257]\n",
"Loss: 0.713 | Acc: 69.792% (67/96)\n",
"Loss: 0.675 | Acc: 76.280% (14719/19296)\n",
"Loss: 0.666 | Acc: 76.826% (29575/38496)\n",
"Loss: | Acc: 76.886% (38443/50000)\n",
"Eval Acc: 78.530% (7853/10000)\n",
"\n",
"Epoch: 6\n",
"Drop: 0.1\n",
"[0.08329720049289999, 0.08329720049289999]\n",
"Loss: 0.697 | Acc: 77.083% (74/96)\n",
"Loss: 0.633 | Acc: 78.239% (15097/19296)\n",
"Loss: 0.623 | Acc: 78.548% (30238/38496)\n",
"Loss: | Acc: 78.504% (39252/50000)\n",
"Eval Acc: 82.830% (8283/10000)\n",
"\n",
"Epoch: 7\n",
"Drop: 0.1\n",
"[0.08079828447811299, 0.08079828447811299]\n",
"Loss: 0.486 | Acc: 80.208% (77/96)\n",
"Loss: 0.584 | Acc: 79.897% (15417/19296)\n",
"Loss: 0.584 | Acc: 79.782% (30713/38496)\n",
"Loss: | Acc: 79.908% (39954/50000)\n",
"Eval Acc: 82.240% (8224/10000)\n",
"\n",
"Epoch: 8\n",
"Drop: 0.1\n",
"[0.0783743359437696, 0.0783743359437696]\n",
"Loss: 0.645 | Acc: 72.917% (70/96)\n",
"Loss: 0.552 | Acc: 81.006% (15631/19296)\n",
"Loss: 0.553 | Acc: 80.871% (31132/38496)\n",
"Loss: | Acc: 80.842% (40421/50000)\n",
"Eval Acc: 82.440% (8244/10000)\n",
"\n",
"Epoch: 9\n",
"Drop: 0.1\n",
"[0.07602310586545652, 0.07602310586545652]\n",
"Loss: 0.451 | Acc: 83.333% (80/96)\n",
"Loss: 0.529 | Acc: 81.784% (15781/19296)\n",
"Loss: 0.527 | Acc: 81.746% (31469/38496)\n",
"Loss: | Acc: 81.618% (40809/50000)\n",
"Eval Acc: 82.830% (8283/10000)\n",
"\n",
"Epoch: 10\n",
"Drop: 0.1\n",
"[0.07374241268949282, 0.07374241268949282]\n",
"Loss: 0.483 | Acc: 82.292% (79/96)\n",
"Loss: 0.506 | Acc: 82.369% (15894/19296)\n",
"Loss: 0.508 | Acc: 82.411% (31725/38496)\n",
"Loss: | Acc: 82.376% (41188/50000)\n",
"Eval Acc: 84.110% (8411/10000)\n",
"\n",
"Epoch: 11\n",
"Drop: 0.1\n",
"[0.07153014030880803, 0.07153014030880803]\n",
"Loss: 0.399 | Acc: 86.458% (83/96)\n",
"Loss: 0.486 | Acc: 83.136% (16042/19296)\n",
"Loss: 0.487 | Acc: 83.141% (32006/38496)\n",
"Loss: | Acc: 83.156% (41578/50000)\n",
"Eval Acc: 87.550% (8755/10000)\n",
"\n",
"Epoch: 12\n",
"Drop: 0.1\n",
"[0.06938423609954378, 0.06938423609954378]\n",
"Loss: 0.355 | Acc: 87.500% (84/96)\n",
"Loss: 0.459 | Acc: 84.080% (16224/19296)\n",
"Loss: 0.464 | Acc: 83.845% (32277/38496)\n",
"Loss: | Acc: 83.832% (41916/50000)\n",
"Eval Acc: 86.630% (8663/10000)\n",
"\n",
"Epoch: 13\n",
"Drop: 0.1\n",
"[0.06730270901655747, 0.06730270901655747]\n",
"Loss: 0.301 | Acc: 92.708% (89/96)\n",
"Loss: 0.444 | Acc: 84.743% (16352/19296)\n",
"Loss: 0.450 | Acc: 84.502% (32530/38496)\n",
"Loss: | Acc: 84.504% (42252/50000)\n",
"Eval Acc: 86.110% (8611/10000)\n",
"\n",
"Epoch: 14\n",
"Drop: 0.1\n",
"[0.06528362774606074, 0.06528362774606074]\n",
"Loss: 0.360 | Acc: 88.542% (85/96)\n",
"Loss: 0.447 | Acc: 84.380% (16282/19296)\n",
"Loss: 0.443 | Acc: 84.630% (32579/38496)\n",
"Loss: | Acc: 84.674% (42337/50000)\n",
"Eval Acc: 87.280% (8728/10000)\n",
"\n",
"Epoch: 15\n",
"Drop: 0.1\n",
"[0.06332511891367891, 0.06332511891367891]\n",
"Loss: 0.524 | Acc: 83.333% (80/96)\n",
"Loss: 0.422 | Acc: 85.479% (16494/19296)\n",
"Loss: 0.425 | Acc: 85.237% (32813/38496)\n",
"Loss: | Acc: 85.130% (42565/50000)\n",
"Eval Acc: 86.850% (8685/10000)\n",
"\n",
"Epoch: 16\n",
"Drop: 0.1\n",
"[0.06142536534626855, 0.06142536534626855]\n",
"Loss: 0.475 | Acc: 83.333% (80/96)\n",
"Loss: 0.406 | Acc: 85.966% (16588/19296)\n",
"Loss: 0.406 | Acc: 85.869% (33056/38496)\n",
"Loss: | Acc: 85.720% (42860/50000)\n",
"Eval Acc: 88.720% (8872/10000)\n",
"\n",
"Epoch: 17\n",
"Drop: 0.1\n",
"[0.05958260438588049, 0.05958260438588049]\n",
"Loss: 0.286 | Acc: 84.375% (81/96)\n",
"Loss: 0.397 | Acc: 86.184% (16630/19296)\n",
"Loss: 0.397 | Acc: 86.113% (33150/38496)\n",
"Loss: | Acc: 86.008% (43004/50000)\n",
"Eval Acc: 88.430% (8843/10000)\n",
"\n",
"Epoch: 18\n",
"Drop: 0.1\n",
"[0.05779512625430408, 0.05779512625430408]\n",
"Loss: 0.347 | Acc: 87.500% (84/96)\n",
"Loss: 0.379 | Acc: 86.816% (16752/19296)\n",
"Loss: 0.385 | Acc: 86.674% (33366/38496)\n",
"Loss: | Acc: 86.528% (43264/50000)\n",
"Eval Acc: 88.830% (8883/10000)\n",
"\n",
"Epoch: 19\n",
"Drop: 0.1\n",
"[0.05606127246667495, 0.05606127246667495]\n",
"Loss: 0.427 | Acc: 86.458% (83/96)\n",
"Loss: 0.368 | Acc: 86.909% (16770/19296)\n",
"Loss: 0.376 | Acc: 86.619% (33345/38496)\n",
"Loss: | Acc: 86.510% (43255/50000)\n",
"Eval Acc: 89.650% (8965/10000)\n",
"\n",
"Epoch: 20\n",
"Drop: 0.1\n",
"[0.0543794342926747, 0.0543794342926747]\n",
"Loss: 0.260 | Acc: 93.750% (90/96)\n",
"Loss: 0.368 | Acc: 87.360% (16857/19296)\n",
"Loss: 0.371 | Acc: 87.038% (33506/38496)\n",
"Loss: | Acc: 87.008% (43504/50000)\n",
"Eval Acc: 90.090% (9009/10000)\n",
"\n",
"Epoch: 21\n",
"Drop: 0.1\n",
"[0.05274805126389446, 0.05274805126389446]\n",
"Loss: 0.378 | Acc: 86.458% (83/96)\n",
"Loss: 0.357 | Acc: 87.448% (16874/19296)\n",
"Loss: 0.355 | Acc: 87.510% (33688/38496)\n",
"Loss: | Acc: 87.476% (43738/50000)\n",
"Eval Acc: 89.540% (8954/10000)\n",
"\n",
"Epoch: 22\n",
"Drop: 0.1\n",
"[0.051165609725977626, 0.051165609725977626]\n",
"Loss: 0.434 | Acc: 86.458% (83/96)\n",
"Loss: 0.336 | Acc: 88.137% (17007/19296)\n",
"Loss: 0.348 | Acc: 87.783% (33793/38496)\n",
"Loss: | Acc: 87.790% (43895/50000)\n",
"Eval Acc: 89.780% (8978/10000)\n",
"\n",
"Epoch: 23\n",
"Drop: 0.1\n",
"[0.04963064143419829, 0.04963064143419829]\n",
"Loss: 0.483 | Acc: 81.250% (78/96)\n",
"Loss: 0.332 | Acc: 88.552% (17087/19296)\n",
"Loss: 0.338 | Acc: 88.269% (33980/38496)\n",
"Loss: | Acc: 88.122% (44061/50000)\n",
"Eval Acc: 90.540% (9054/10000)\n",
"\n",
"Epoch: 24\n",
"Drop: 0.1\n",
"[0.04814172219117234, 0.04814172219117234]\n",
"Loss: 0.304 | Acc: 92.708% (89/96)\n",
"Loss: 0.324 | Acc: 88.609% (17098/19296)\n",
"Loss: 0.331 | Acc: 88.417% (34037/38496)\n",
"Loss: | Acc: 88.230% (44115/50000)\n",
"Eval Acc: 90.790% (9079/10000)\n",
"\n",
"Epoch: 25\n",
"Drop: 0.1\n",
"[0.04669747052543717, 0.04669747052543717]\n",
"Loss: 0.420 | Acc: 86.458% (83/96)\n",
"Loss: 0.314 | Acc: 89.158% (17204/19296)\n",
"Loss: 0.319 | Acc: 88.843% (34201/38496)\n",
"Loss: | Acc: 88.706% (44353/50000)\n",
"Eval Acc: 89.900% (8990/10000)\n",
"\n",
"Epoch: 26\n",
"Drop: 0.1\n",
"[0.04529654640967405, 0.04529654640967405]\n",
"Loss: 0.315 | Acc: 89.583% (86/96)\n",
"Loss: 0.316 | Acc: 89.060% (17185/19296)\n",
"Loss: 0.314 | Acc: 89.155% (34321/38496)\n",
"Loss: | Acc: 88.974% (44487/50000)\n",
"Eval Acc: 90.310% (9031/10000)\n",
"\n",
"Epoch: 27\n",
"Drop: 0.1\n",
"[0.04393765001738383, 0.04393765001738383]\n",
"Loss: 0.273 | Acc: 91.667% (88/96)\n",
"Loss: 0.316 | Acc: 89.039% (17181/19296)\n",
"Loss: 0.312 | Acc: 89.103% (34301/38496)\n",
"Loss: | Acc: 89.060% (44530/50000)\n",
"Eval Acc: 91.010% (9101/10000)\n",
"\n",
"Epoch: 28\n",
"Drop: 0.1\n",
"[0.04261952051686232, 0.04261952051686232]\n",
"Loss: 0.203 | Acc: 92.708% (89/96)\n",
"Loss: 0.296 | Acc: 89.682% (17305/19296)\n",
"Loss: 0.305 | Acc: 89.529% (34465/38496)\n",
"Loss: | Acc: 89.400% (44700/50000)\n",
"Eval Acc: 91.060% (9106/10000)\n",
"\n",
"Epoch: 29\n",
"Drop: 0.1\n",
"[0.041340934901356444, 0.041340934901356444]\n",
"Loss: 0.435 | Acc: 86.458% (83/96)\n",
"Loss: 0.295 | Acc: 89.532% (17276/19296)\n",
"Loss: 0.296 | Acc: 89.594% (34490/38496)\n",
"Loss: | Acc: 89.550% (44775/50000)\n",
"Eval Acc: 91.200% (9120/10000)\n",
"\n",
"Epoch: 30\n",
"Drop: 0.1\n",
"[0.04010070685431575, 0.04010070685431575]\n",
"Loss: 0.304 | Acc: 83.333% (80/96)\n",
"Loss: 0.293 | Acc: 89.646% (17298/19296)\n",
"Loss: 0.292 | Acc: 89.768% (34557/38496)\n",
"Loss: | Acc: 89.748% (44874/50000)\n",
"Eval Acc: 90.810% (9081/10000)\n",
"\n",
"Epoch: 31\n",
"Drop: 0.1\n",
"[0.03889768564868628, 0.03889768564868628]\n",
"Loss: 0.294 | Acc: 89.583% (86/96)\n",
"Loss: 0.281 | Acc: 90.148% (17395/19296)\n",
"Loss: 0.284 | Acc: 90.056% (34668/38496)\n",
"Loss: | Acc: 90.044% (45022/50000)\n",
"Eval Acc: 92.130% (9213/10000)\n",
"\n",
"Epoch: 32\n",
"Drop: 0.1\n",
"[0.037730755079225686, 0.037730755079225686]\n",
"Loss: 0.161 | Acc: 92.708% (89/96)\n",
"Loss: 0.274 | Acc: 90.428% (17449/19296)\n",
"Loss: 0.277 | Acc: 90.311% (34766/38496)\n",
"Loss: | Acc: 90.106% (45053/50000)\n",
"Eval Acc: 92.120% (9212/10000)\n",
"\n",
"Epoch: 33\n",
"Drop: 0.1\n",
"[0.036598832426848915, 0.036598832426848915]\n",
"Loss: 0.325 | Acc: 88.542% (85/96)\n",
"Loss: 0.266 | Acc: 90.801% (17521/19296)\n",
"Loss: 0.272 | Acc: 90.368% (34788/38496)\n",
"Loss: | Acc: 90.300% (45150/50000)\n",
"Eval Acc: 92.110% (9211/10000)\n",
"\n",
"Epoch: 34\n",
"Drop: 0.1\n",
"[0.03550086745404345, 0.03550086745404345]\n",
"Loss: 0.314 | Acc: 88.542% (85/96)\n",
"Loss: 0.259 | Acc: 90.817% (17524/19296)\n",
"Loss: 0.265 | Acc: 90.521% (34847/38496)\n",
"Loss: | Acc: 90.494% (45247/50000)\n",
"Eval Acc: 91.890% (9189/10000)\n",
"\n",
"Epoch: 35\n",
"Drop: 0.1\n",
"[0.034435841430422144, 0.034435841430422144]\n",
"Loss: 0.365 | Acc: 89.583% (86/96)\n",
"Loss: 0.243 | Acc: 91.366% (17630/19296)\n",
"Loss: 0.256 | Acc: 90.924% (35002/38496)\n",
"Loss: | Acc: 90.874% (45437/50000)\n",
"Eval Acc: 91.470% (9147/10000)\n",
"\n",
"Epoch: 36\n",
"Drop: 0.1\n",
"[0.033402766187509474, 0.033402766187509474]\n",
"Loss: 0.237 | Acc: 91.667% (88/96)\n",
"Loss: 0.242 | Acc: 91.651% (17685/19296)\n",
"Loss: 0.248 | Acc: 91.451% (35205/38496)\n",
"Loss: | Acc: 91.192% (45596/50000)\n",
"Eval Acc: 92.080% (9208/10000)\n",
"\n",
"Epoch: 37\n",
"Drop: 0.1\n",
"[0.03240068320188419, 0.03240068320188419]\n",
"Loss: 0.249 | Acc: 89.583% (86/96)\n",
"Loss: 0.248 | Acc: 91.066% (17572/19296)\n",
"Loss: 0.253 | Acc: 90.924% (35002/38496)\n",
"Loss: | Acc: 90.794% (45397/50000)\n",
"Eval Acc: 91.960% (9196/10000)\n",
"\n",
"Epoch: 38\n",
"Drop: 0.1\n",
"[0.03142866270582766, 0.03142866270582766]\n",
"Loss: 0.211 | Acc: 91.667% (88/96)\n",
"Loss: 0.246 | Acc: 91.340% (17625/19296)\n",
"Loss: 0.244 | Acc: 91.441% (35201/38496)\n",
"Loss: | Acc: 91.348% (45674/50000)\n",
"Eval Acc: 91.970% (9197/10000)\n",
"\n",
"Epoch: 39\n",
"Drop: 0.1\n",
"[0.03048580282465283, 0.03048580282465283]\n",
"Loss: 0.150 | Acc: 95.833% (92/96)\n",
"Loss: 0.234 | Acc: 91.915% (17736/19296)\n",
"Loss: 0.236 | Acc: 91.680% (35293/38496)\n",
"Loss: | Acc: 91.574% (45787/50000)\n",
"Eval Acc: 92.280% (9228/10000)\n",
"\n",
"Epoch: 40\n",
"Drop: 0.1\n",
"[0.029571228739913247, 0.029571228739913247]\n",
"Loss: 0.331 | Acc: 87.500% (84/96)\n",
"Loss: 0.225 | Acc: 92.097% (17771/19296)\n",
"Loss: 0.230 | Acc: 91.911% (35382/38496)\n",
"Loss: | Acc: 91.796% (45898/50000)\n",
"Eval Acc: 92.710% (9271/10000)\n",
"\n",
"Epoch: 41\n",
"Drop: 0.1\n",
"[0.028684091877715853, 0.028684091877715853]\n",
"Loss: 0.110 | Acc: 95.833% (92/96)\n",
"Loss: 0.216 | Acc: 92.418% (17833/19296)\n",
"Loss: 0.226 | Acc: 92.056% (35438/38496)\n",
"Loss: | Acc: 91.974% (45987/50000)\n",
"Eval Acc: 92.510% (9251/10000)\n",
"\n",
"Epoch: 42\n",
"Drop: 0.1\n",
"[0.027823569121384374, 0.027823569121384374]\n",
"Loss: 0.282 | Acc: 94.792% (91/96)\n",
"Loss: 0.206 | Acc: 92.885% (17923/19296)\n",
"Loss: 0.216 | Acc: 92.529% (35620/38496)\n",
"Loss: | Acc: 92.334% (46167/50000)\n",
"Eval Acc: 92.600% (9260/10000)\n",
"\n",
"Epoch: 43\n",
"Drop: 0.1\n",
"[0.02698886204774284, 0.02698886204774284]\n",
"Loss: 0.257 | Acc: 88.542% (85/96)\n",
"Loss: 0.222 | Acc: 92.133% (17778/19296)\n",
"Loss: 0.221 | Acc: 92.236% (35507/38496)\n",
"Loss: | Acc: 92.106% (46053/50000)\n",
"Eval Acc: 92.670% (9267/10000)\n",
"\n",
"Epoch: 44\n",
"Drop: 0.1\n",
"[0.026179196186310556, 0.026179196186310556]\n",
"Loss: 0.206 | Acc: 91.667% (88/96)\n",
"Loss: 0.209 | Acc: 92.501% (17849/19296)\n",
"Loss: 0.211 | Acc: 92.360% (35555/38496)\n",
"Loss: | Acc: 92.300% (46150/50000)\n",
"Eval Acc: 92.170% (9217/10000)\n",
"\n",
"Epoch: 45\n",
"Drop: 0.1\n",
"[0.025393820300721237, 0.025393820300721237]\n",
"Loss: 0.267 | Acc: 90.625% (87/96)\n",
"Loss: 0.202 | Acc: 92.745% (17896/19296)\n",
"Loss: 0.212 | Acc: 92.462% (35594/38496)\n",
"Loss: | Acc: 92.422% (46211/50000)\n",
"Eval Acc: 92.180% (9218/10000)\n",
"\n",
"Epoch: 46\n",
"Drop: 0.1\n",
"[0.0246320056916996, 0.0246320056916996]\n",
"Loss: 0.184 | Acc: 93.750% (90/96)\n",
"Loss: 0.204 | Acc: 92.765% (17900/19296)\n",
"Loss: 0.204 | Acc: 92.817% (35731/38496)\n",
"Loss: | Acc: 92.672% (46336/50000)\n",
"Eval Acc: 92.300% (9230/10000)\n",
"\n",
"Epoch: 47\n",
"Drop: 0.1\n",
"[0.023893045520948612, 0.023893045520948612]\n",
"Loss: 0.188 | Acc: 93.750% (90/96)\n",
"Loss: 0.196 | Acc: 92.947% (17935/19296)\n",
"Loss: 0.200 | Acc: 92.869% (35751/38496)\n",
"Loss: | Acc: 92.844% (46422/50000)\n",
"Eval Acc: 93.120% (9312/10000)\n",
"\n",
"Epoch: 48\n",
"Drop: 0.1\n",
"[0.023176254155320153, 0.023176254155320153]\n",
"Loss: 0.190 | Acc: 94.792% (91/96)\n",
"Loss: 0.192 | Acc: 93.247% (17993/19296)\n",
"Loss: 0.196 | Acc: 93.082% (35833/38496)\n",
"Loss: | Acc: 92.976% (46488/50000)\n",
"Eval Acc: 92.640% (9264/10000)\n",
"\n",
"Epoch: 49\n",
"Drop: 0.1\n",
"[0.02248096653066055, 0.02248096653066055]\n",
"Loss: 0.181 | Acc: 96.875% (93/96)\n",
"Loss: 0.183 | Acc: 93.558% (18053/19296)\n",
"Loss: 0.191 | Acc: 93.204% (35880/38496)\n",
"Loss: | Acc: 93.196% (46598/50000)\n",
"Eval Acc: 93.200% (9320/10000)\n",
"\n",
"Epoch: 50\n",
"Drop: 0.1\n",
"[0.02180653753474073, 0.02180653753474073]\n",
"Loss: 0.125 | Acc: 96.875% (93/96)\n",
"Loss: 0.181 | Acc: 93.610% (18063/19296)\n",
"Loss: 0.187 | Acc: 93.358% (35939/38496)\n",
"Loss: | Acc: 93.236% (46618/50000)\n",
"Eval Acc: 93.280% (9328/10000)\n",
"\n",
"Epoch: 51\n",
"Drop: 0.1\n",
"[0.02115234140869851, 0.02115234140869851]\n",
"Loss: 0.191 | Acc: 91.667% (88/96)\n",
"Loss: 0.176 | Acc: 93.776% (18095/19296)\n",
"Loss: 0.180 | Acc: 93.607% (36035/38496)\n",
"Loss: | Acc: 93.470% (46735/50000)\n",
"Eval Acc: 92.810% (9281/10000)\n",
"\n",
"Epoch: 52\n",
"Drop: 0.1\n",
"[0.020517771166437552, 0.020517771166437552]\n",
"Loss: 0.222 | Acc: 92.708% (89/96)\n",
"Loss: 0.181 | Acc: 93.610% (18063/19296)\n",
"Loss: 0.181 | Acc: 93.607% (36035/38496)\n",
"Loss: | Acc: 93.592% (46796/50000)\n",
"Eval Acc: 92.900% (9290/10000)\n",
"\n",
"Epoch: 53\n",
"Drop: 0.1\n",
"[0.019902238031444426, 0.019902238031444426]\n",
"Loss: 0.210 | Acc: 93.750% (90/96)\n",
"Loss: 0.171 | Acc: 93.978% (18134/19296)\n",
"Loss: 0.172 | Acc: 93.940% (36163/38496)\n",
"Loss: | Acc: 93.912% (46956/50000)\n",
"Eval Acc: 93.470% (9347/10000)\n",
"\n",
"Epoch: 54\n",
"Drop: 0.1\n",
"[0.019305170890501093, 0.019305170890501093]\n",
"Loss: 0.078 | Acc: 97.917% (94/96)\n",
"Loss: 0.169 | Acc: 94.154% (18168/19296)\n",
"Loss: 0.174 | Acc: 93.895% (36146/38496)\n",
"Loss: | Acc: 93.904% (46952/50000)\n",
"Eval Acc: 93.680% (9368/10000)\n",
"\n",
"Epoch: 55\n",
"Drop: 0.1\n",
"[0.018726015763786057, 0.018726015763786057]\n",
"Loss: 0.133 | Acc: 97.917% (94/96)\n",
"Loss: 0.164 | Acc: 94.232% (18183/19296)\n",
"Loss: 0.166 | Acc: 94.197% (36262/38496)\n",
"Loss: | Acc: 94.182% (47091/50000)\n",
"Eval Acc: 93.650% (9365/10000)\n",
"\n",
"Epoch: 56\n",
"Drop: 0.1\n",
"[0.018164235290872476, 0.018164235290872476]\n",
"Loss: 0.114 | Acc: 95.833% (92/96)\n",
"Loss: 0.162 | Acc: 94.387% (18213/19296)\n",
"Loss: 0.163 | Acc: 94.347% (36320/38496)\n",
"Loss: | Acc: 94.228% (47114/50000)\n",
"Eval Acc: 93.370% (9337/10000)\n",
"\n",
"Epoch: 57\n",
"Drop: 0.1\n",
"[0.0176193082321463, 0.0176193082321463]\n",
"Loss: 0.174 | Acc: 93.750% (90/96)\n",
"Loss: 0.160 | Acc: 94.533% (18241/19296)\n",
"Loss: 0.161 | Acc: 94.376% (36331/38496)\n",
"Loss: | Acc: 94.414% (47207/50000)\n",
"Eval Acc: 93.300% (9330/10000)\n",
"\n",
"Epoch: 58\n",
"Drop: 0.1\n",
"[0.01709072898518191, 0.01709072898518191]\n",
"Loss: 0.108 | Acc: 96.875% (93/96)\n",
"Loss: 0.156 | Acc: 94.626% (18259/19296)\n",
"Loss: 0.158 | Acc: 94.475% (36369/38496)\n",
"Loss: | Acc: 94.360% (47180/50000)\n",
"Eval Acc: 93.270% (9327/10000)\n",
"\n",
"Epoch: 59\n",
"Drop: 0.1\n",
"[0.016578007115626455, 0.016578007115626455]\n",
"Loss: 0.118 | Acc: 95.833% (92/96)\n",
"Loss: 0.153 | Acc: 94.714% (18276/19296)\n",
"Loss: 0.155 | Acc: 94.654% (36438/38496)\n",
"Loss: | Acc: 94.560% (47280/50000)\n",
"Eval Acc: 93.380% (9338/10000)\n",
"\n",
"Epoch: 60\n",
"Drop: 0.1\n",
"[0.016080666902157658, 0.016080666902157658]\n",
"Loss: 0.140 | Acc: 92.708% (89/96)\n",
"Loss: 0.149 | Acc: 95.066% (18344/19296)\n",
"Loss: 0.148 | Acc: 94.927% (36543/38496)\n",
"Loss: | Acc: 94.828% (47414/50000)\n",
"Eval Acc: 93.350% (9335/10000)\n",
"\n",
"Epoch: 61\n",
"Drop: 0.1\n",
"[0.01559824689509293, 0.01559824689509293]\n",
"Loss: 0.104 | Acc: 94.792% (91/96)\n",
"Loss: 0.150 | Acc: 94.750% (18283/19296)\n",
"Loss: 0.153 | Acc: 94.625% (36427/38496)\n",
"Loss: | Acc: 94.642% (47321/50000)\n",
"Eval Acc: 93.610% (9361/10000)\n",
"\n",
"Epoch: 62\n",
"Drop: 0.1\n",
"[0.01513029948824014, 0.01513029948824014]\n",
"Loss: 0.160 | Acc: 95.833% (92/96)\n",
"Loss: 0.144 | Acc: 94.828% (18298/19296)\n",
"Loss: 0.144 | Acc: 94.823% (36503/38496)\n",
"Loss: | Acc: 94.870% (47435/50000)\n",
"Eval Acc: 93.750% (9375/10000)\n",
"\n",
"Epoch: 63\n",
"Drop: 0.1\n",
"[0.014676390503592937, 0.014676390503592937]\n",
"Loss: 0.145 | Acc: 96.875% (93/96)\n",
"Loss: 0.140 | Acc: 95.066% (18344/19296)\n",
"Loss: 0.141 | Acc: 94.950% (36552/38496)\n",
"Loss: | Acc: 94.862% (47431/50000)\n",
"Eval Acc: 93.530% (9353/10000)\n",
"\n",
"Epoch: 64\n",
"Drop: 0.1\n",
"[0.01423609878848515, 0.01423609878848515]\n",
"Loss: 0.182 | Acc: 95.833% (92/96)\n",
"Loss: 0.132 | Acc: 95.300% (18389/19296)\n",
"Loss: 0.134 | Acc: 95.225% (36658/38496)\n",
"Loss: | Acc: 95.160% (47580/50000)\n",
"Eval Acc: 93.800% (9380/10000)\n",
"\n",
"Epoch: 65\n",
"Drop: 0.1\n",
"[0.013809015824830594, 0.013809015824830594]\n",
"Loss: 0.114 | Acc: 95.833% (92/96)\n",
"Loss: 0.131 | Acc: 95.408% (18410/19296)\n",
"Loss: 0.135 | Acc: 95.267% (36674/38496)\n",
"Loss: | Acc: 95.222% (47611/50000)\n",
"Eval Acc: 93.680% (9368/10000)\n",
"\n",
"Epoch: 66\n",
"Drop: 0.1\n",
"[0.013394745350085675, 0.013394745350085675]\n",
"Loss: 0.079 | Acc: 96.875% (93/96)\n",
"Loss: 0.131 | Acc: 95.377% (18404/19296)\n",
"Loss: 0.132 | Acc: 95.384% (36719/38496)\n",
"Loss: | Acc: 95.342% (47671/50000)\n",
"Eval Acc: 93.850% (9385/10000)\n",
"\n",
"Epoch: 67\n",
"Drop: 0.1\n",
"[0.012992902989583105, 0.012992902989583105]\n",
"Loss: 0.112 | Acc: 93.750% (90/96)\n"
],
"name": "stdout"
}
]
}
]
}