Skip to content

Instantly share code, notes, and snippets.

@kahartma
Created July 10, 2017 14:45
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save kahartma/0739a98e42e7bb0987120b156665ef61 to your computer and use it in GitHub Desktop.
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"%load_ext autoreload\n",
"%autoreload 2\n",
"\n",
"import numpy as np\n",
"import torch\n",
"import torch.nn as nn\n",
"from torch.autograd import Variable\n",
"from torch import optim\n",
"\n",
"import torch.autograd as autograd\n",
"\n",
"torch.cuda.set_device(0)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"class WGAN_I_Discriminator(nn.Module):\n",
"\tdef __init__(self):\n",
"\t\tsuper(WGAN_I_Discriminator, self).__init__()\n",
"\n",
"\t\tself.did_init_train = False\n",
"\n",
"\tdef train_init(self,alpha=0.001,betas=(0,0.9)):\n",
"\t\tself.loss = torch.nn.L1Loss()\n",
"\t\tself.optimizer = optim.Adam(self.parameters(),lr=alpha,betas=betas)\n",
"\t\tself.did_init_train = True\n",
"\n",
"\tdef train_batch(self, batch_real, batch_fake, lambd=10):\n",
"\t\tif not self.did_init_train:\n",
"\t\t\tself.train_init()\n",
"\n",
"\t\t# Reset gradients\n",
"\t\tself.optimizer.zero_grad()\n",
"\n",
"\t\t# Compute output and loss\n",
"\t\tfx_real = self.forward(batch_real)\n",
"\t\tloss_real = fx_real.mean()\n",
"\n",
"\t\tfx_fake = self.forward(batch_fake)\n",
"\t\tloss_fake = fx_fake.mean()\n",
"\n",
"\n",
"\t\t#dreist geklaut von\n",
"\t\t# https://github.com/caogang/wgan-gp/blob/master/gan_toy.py\n",
"\t\t# gradients = autograd.grad(outputs=fx_comb, inputs=interpolates,\n",
"\t\t# \t\t\t\t\t grad_outputs=grad_ones,\n",
"\t\t# \t\t\t\t\t create_graph=True, retain_graph=True, only_inputs=True)[0]\n",
"\t\t# gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * lambd\n",
"\t\tloss_penalty = self.calc_gradient_penalty(batch_real, batch_fake,lambd)\n",
"\n",
"\t\tloss = loss_fake - loss_real + loss_penalty\n",
"\n",
"\t\t# Backprop gradient\n",
"\t\tloss.backward()\n",
"\n",
"\t\t# Update parameters\n",
"\t\tself.optimizer.step()\n",
"\n",
"\t\tself.optimizer.zero_grad()\n",
"\n",
"\t\treturn loss.data[0] # return loss\n",
"\n",
"\n",
"\tdef calc_gradient_penalty(self, real_data, fake_data,lambd):\n",
"\t\talpha = torch.rand(real_data.size(0), 1,1,1)\n",
"\t\talpha = alpha.expand(real_data.size())\n",
"\t\talpha = alpha.cuda()\n",
"\n",
"\t\tinterpolates = alpha * real_data.data + ((1 - alpha) * fake_data.data)\n",
"\n",
"\t\tinterpolates = interpolates\n",
"\t\tinterpolates = Variable(interpolates, requires_grad=True)\n",
"\n",
"\t\tdisc_interpolates = self(interpolates)\n",
"\n",
"\t\tgradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,\n",
"\t\t\t\t\t\t\t\t grad_outputs=torch.ones(disc_interpolates.size()).cuda(),\n",
"\t\t\t\t\t\t\t\t create_graph=True, retain_graph=True, only_inputs=True)[0]\n",
"\n",
"\t\tgradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * lambd \n",
"\t\t#for grad in gradients[1:]:\n",
"\t\t#\tgradient_penalty += 0 * grad.view(-1)[0]\n",
"\n",
"\t\treturn gradient_penalty\n",
"\n",
"\n",
"class WGAN_I_Generator(nn.Module):\n",
"\t# Base class for a WGAN generator: maximizes the critic score on fakes.\n",
"\tdef __init__(self):\n",
"\t\tsuper(WGAN_I_Generator, self).__init__()\n",
"\n",
"\t\tself.did_init_train = False\n",
"\n",
"\tdef train_init(self,alpha=0.001,betas=(0,0.9)):\n",
"\t\t# Lazily build the optimizer; self.loss is unused (WGAN objective).\n",
"\t\tself.loss = None\n",
"\t\tself.optimizer = optim.Adam(self.parameters(),lr=alpha,betas=betas)\n",
"\t\tself.did_init_train = True\n",
"\n",
"\tdef train_batch(self, batch_noise, discriminator):\n",
"\t\t# One generator update: loss = -E[D(G(noise))].\n",
"\t\tif not self.did_init_train:\n",
"\t\t\tself.train_init()\n",
"\n",
"\t\t# Reset gradients\n",
"\t\tself.optimizer.zero_grad()\n",
"\n",
"\t\t# Generate and discriminate (leftover debug print removed)\n",
"\t\tgen = self.forward(batch_noise)\n",
"\t\tdisc = discriminator(gen)\n",
"\t\tloss = -disc.mean()\n",
"\n",
"\t\t# Backprop gradient\n",
"\t\tloss.backward()\n",
"\n",
"\t\t# Update parameters\n",
"\t\tself.optimizer.step()\n",
"\t\tself.optimizer.zero_grad()\n",
"\n",
"\t\treturn loss.data[0] # scalar loss value"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"class G(WGAN_I_Generator):\n",
"\t# Toy generator: a single tanh-activated linear map from a scalar\n",
"\t# latent code to a 1x28x28 image.\n",
"\tdef __init__(self):\n",
"\t\tsuper(G, self).__init__()\n",
"\t\tself.project = nn.Sequential(\n",
"\t\t\tnn.Linear(1,28*28),\n",
"\t\t\tnn.Tanh()\n",
"\t\t\t)\n",
"\n",
"\t\tself.did_init_train = False\n",
"\n",
"\tdef forward(self, inp):\n",
"\t\t# Project the latent code, then reshape the flat vector to image form.\n",
"\t\tprojected = self.project(inp)\n",
"\t\treturn projected.view(projected.size(0),1,28,28)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"class D1(WGAN_I_Discriminator):\n",
"\t# Critic with a strided conv stack: 1x28x28 -> 3x13x13 features -> scalar.\n",
"\tdef __init__(self):\n",
"\t\tsuper(D1, self).__init__()\n",
"\t\tself.features = nn.Sequential(\n",
"\t\t\tnn.Conv2d(1,3,3),\n",
"\t\t\tnn.Conv2d(3,3,2,stride=2),\n",
"\t\t\tnn.LeakyReLU(negative_slope=0.2),\n",
"\t\t\tnn.Conv2d(3,3,1),\n",
"\t\t\tnn.LeakyReLU(negative_slope=0.2)\n",
"\t\t\t)\n",
"\t\tself.classification = nn.Sequential(\n",
"\t\t\tnn.Linear(3*13*13,1)\n",
"\t\t\t)\n",
"\n",
"\t\tself.did_init_train = False\n",
"\n",
"\tdef forward(self, inp):\n",
"\t\t# Debug prints of full tensors removed; they flooded stdout every pass.\n",
"\t\tinp = inp.view(inp.size(0),1,28,28)\n",
"\t\tx = self.features(inp)\n",
"\t\tx = x.view(x.size(0),3*13*13)\n",
"\t\treturn self.classification(x)\n",
" \n",
"class D2(WGAN_I_Discriminator):\n",
"\t# Critic without stride: 1x28x28 -> 3x25x25 features -> scalar.\n",
"\tdef __init__(self):\n",
"\t\tsuper(D2, self).__init__()\n",
"\t\tself.features = nn.Sequential(\n",
"\t\t\tnn.Conv2d(1,3,3),\n",
"\t\t\tnn.Conv2d(3,3,2),\n",
"\t\t\tnn.LeakyReLU(negative_slope=0.2),\n",
"\t\t\tnn.Conv2d(3,3,1),\n",
"\t\t\tnn.LeakyReLU(negative_slope=0.2)\n",
"\t\t\t)\n",
"\t\tself.classification = nn.Sequential(\n",
"\t\t\tnn.Linear(3*25*25,1)\n",
"\t\t\t)\n",
"\n",
"\t\tself.did_init_train = False\n",
"\n",
"\tdef forward(self, inp):\n",
"\t\t# Debug prints of full tensors removed; they flooded stdout every pass.\n",
"\t\tinp = inp.view(inp.size(0),1,28,28)\n",
"\t\tx = self.features(inp)\n",
"\t\tx = x.view(x.size(0),3*25*25)\n",
"\t\treturn self.classification(x)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def train(gen,disc):\n",
"    # One alternating WGAN step on toy data: each real sample is a 28x28\n",
"    # image with a single randomly chosen pixel set to 1.\n",
"    batch_size = 5\n",
"\n",
"    ind = np.random.randint(28*28,size=(batch_size,1))\n",
"    batch = np.zeros((batch_size,28*28))\n",
"    # BUG FIX: batch[:,ind] = 1 activated ALL sampled columns in every row,\n",
"    # so each sample had batch_size active pixels. Index row-wise so each\n",
"    # sample gets exactly one.\n",
"    batch[np.arange(batch_size),ind[:,0]] = 1\n",
"    batch = batch.reshape((batch_size,1,28,28))\n",
"    X_batch = Variable(torch.from_numpy(batch).float().cuda(),requires_grad=False)\n",
"\n",
"    # volatile noise: no autograd graph is needed for the critic's fake batch.\n",
"    noise = np.random.uniform(-1,1,(batch_size,1))\n",
"    noise = Variable(torch.from_numpy(noise).float().cuda(),volatile=True)\n",
"    fake_batch = Variable(gen.forward(noise).data,requires_grad=False)\n",
"\n",
"    loss_d = disc.train_batch(X_batch,fake_batch)\n",
"\n",
"    # Fresh, non-volatile noise for the generator update (gradients needed).\n",
"    noise = np.random.uniform(-1,1,(batch_size,1))\n",
"    noise = Variable(torch.from_numpy(noise).float().cuda(),requires_grad=False)\n",
"\n",
"    loss_g = gen.train_batch(noise,disc)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Variable containing:\n",
"(0 ,0 ,.,.) = \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ... ⋱ ... \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ⋮ \n",
"\n",
"(1 ,0 ,.,.) = \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ... ⋱ ... \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ⋮ \n",
"\n",
"(2 ,0 ,.,.) = \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ... ⋱ ... \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ⋮ \n",
"\n",
"(3 ,0 ,.,.) = \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ... ⋱ ... \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ⋮ \n",
"\n",
"(4 ,0 ,.,.) = \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ... ⋱ ... \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
"[torch.cuda.FloatTensor of size 5x1x28x28 (GPU 0)]\n",
"\n",
"torch.Size([5, 3, 13, 13])\n",
"Variable containing:\n",
"-0.0668 -0.0668 -0.0668 ... -0.0749 -0.0749 -0.0749\n",
"-0.0668 -0.0668 -0.0668 ... -0.0749 -0.0749 -0.0749\n",
"-0.0668 -0.0668 -0.0668 ... -0.0749 -0.0749 -0.0749\n",
"-0.0668 -0.0668 -0.0668 ... -0.0749 -0.0749 -0.0749\n",
"-0.0668 -0.0668 -0.0668 ... -0.0749 -0.0749 -0.0749\n",
"[torch.cuda.FloatTensor of size 5x507 (GPU 0)]\n",
"\n",
"Variable containing:\n",
"-0.1295\n",
"-0.1295\n",
"-0.1295\n",
"-0.1295\n",
"-0.1295\n",
"[torch.cuda.FloatTensor of size 5x1 (GPU 0)]\n",
"\n",
"Variable containing:\n",
"(0 ,0 ,.,.) = \n",
" -0.1958 -0.4163 0.5939 ... -0.4926 0.3769 0.6196\n",
" 0.7153 0.6333 0.2550 ... -0.1181 0.1808 0.6242\n",
" -0.3161 -0.0543 0.6444 ... 0.4870 0.8344 -0.4811\n",
" ... ⋱ ... \n",
" -0.6760 0.4894 -0.0617 ... -0.3862 -0.0224 -0.3669\n",
" -0.3044 0.6002 0.2350 ... 0.2946 -0.3209 0.6815\n",
" 0.6704 0.7079 0.2999 ... -0.6500 0.0257 -0.4367\n",
" ⋮ \n",
"\n",
"(1 ,0 ,.,.) = \n",
" 0.0157 -0.2397 0.5625 ... -0.1597 0.6268 0.7511\n",
" 0.7815 0.8239 0.5984 ... -0.4785 0.2691 0.6886\n",
" -0.2831 -0.0393 0.5071 ... 0.6924 0.9292 -0.2602\n",
" ... ⋱ ... \n",
" -0.8402 0.4696 0.3046 ... -0.6405 -0.2304 -0.6207\n",
" -0.1485 0.6258 0.0862 ... 0.1806 -0.1837 0.6308\n",
" 0.7518 0.6915 0.4895 ... -0.8362 0.1861 -0.1639\n",
" ⋮ \n",
"\n",
"(2 ,0 ,.,.) = \n",
" -0.5319 -0.6691 0.6474 ... -0.8444 -0.2251 0.2560\n",
" 0.5505 -0.0302 -0.4862 ... 0.5528 0.0112 0.4802\n",
" -0.3749 -0.0819 0.8166 ... -0.0579 0.3568 -0.7615\n",
" ... ⋱ ... \n",
" -0.0848 0.5246 -0.6381 ... 0.2360 0.3528 0.2389\n",
" -0.5495 0.5498 0.4788 ... 0.4828 -0.5392 0.7600\n",
" 0.4670 0.7362 -0.1063 ... 0.0223 -0.2671 -0.7721\n",
" ⋮ \n",
"\n",
"(3 ,0 ,.,.) = \n",
" -0.0858 -0.3265 0.5776 ... -0.3283 0.5188 0.6943\n",
" 0.7519 0.7480 0.4513 ... -0.3184 0.2276 0.6592\n",
" -0.2989 -0.0464 0.5764 ... 0.6046 0.8934 -0.3705\n",
" ... ⋱ ... \n",
" -0.7746 0.4791 0.1349 ... -0.5312 -0.1330 -0.5109\n",
" -0.2240 0.6138 0.1578 ... 0.2356 -0.2501 0.6557\n",
" 0.7155 0.6994 0.4036 ... -0.7627 0.1106 -0.2997\n",
" ⋮ \n",
"\n",
"(4 ,0 ,.,.) = \n",
" -0.3375 -0.5263 0.6153 ... -0.6694 0.1528 0.4968\n",
" 0.6583 0.4184 -0.0461 ... 0.1670 0.1158 0.5724\n",
" -0.3392 -0.0650 0.7227 ... 0.2945 0.7069 -0.6098\n",
" ... ⋱ ... \n",
" -0.4901 0.5032 -0.3189 ... -0.1549 0.1285 -0.1403\n",
" -0.4069 0.5812 0.3353 ... 0.3714 -0.4115 0.7142\n",
" 0.6000 0.7192 0.1471 ... -0.4351 -0.0901 -0.5942\n",
"[torch.cuda.FloatTensor of size 5x1x28x28 (GPU 0)]\n",
"\n",
"torch.Size([5, 3, 13, 13])\n",
"Variable containing:\n",
"-0.0629 -0.0691 -0.0678 ... -0.0701 -0.0885 -0.0763\n",
"-0.0625 -0.0703 -0.0701 ... -0.0644 -0.0900 -0.0694\n",
"-0.0642 -0.0671 -0.0635 ... -0.0680 -0.0833 -0.0869\n",
"-0.0627 -0.0697 -0.0690 ... -0.0666 -0.0894 -0.0725\n",
"-0.0633 -0.0683 -0.0661 ... -0.0690 -0.0870 -0.0816\n",
"[torch.cuda.FloatTensor of size 5x507 (GPU 0)]\n",
"\n",
"Variable containing:\n",
"-0.1323\n",
"-0.1355\n",
"-0.1232\n",
"-0.1336\n",
"-0.1282\n",
"[torch.cuda.FloatTensor of size 5x1 (GPU 0)]\n",
"\n",
"Variable containing:\n",
"(0 ,0 ,.,.) = \n",
" -0.0080 -0.0171 0.0244 ... -0.0202 0.0155 0.0254\n",
" 0.0293 0.0260 0.0105 ... -0.0048 0.0074 0.0256\n",
" -0.0130 -0.0022 0.0264 ... 0.0200 0.0342 -0.0197\n",
" ... ⋱ ... \n",
" -0.0277 0.0201 -0.0025 ... -0.0158 -0.0009 -0.0150\n",
" -0.0125 0.0246 0.0096 ... 0.0121 -0.0132 0.0279\n",
" 0.0275 0.0290 0.0123 ... -0.0267 0.0011 -0.0179\n",
" ⋮ \n",
"\n",
"(1 ,0 ,.,.) = \n",
" 0.0130 -0.1988 0.4665 ... -0.1325 0.5199 0.6230\n",
" 0.6482 0.6834 0.4963 ... -0.3969 0.2232 0.5711\n",
" -0.2348 -0.0326 0.4206 ... 0.5743 0.7707 -0.2158\n",
" ... ⋱ ... \n",
" -0.6969 0.3895 0.2526 ... -0.5313 -0.1911 -0.5148\n",
" -0.1232 0.5190 0.0715 ... 0.1498 -0.1523 0.5232\n",
" 0.6236 0.5736 0.4060 ... -0.6936 0.1544 -0.1359\n",
" ⋮ \n",
"\n",
"(2 ,0 ,.,.) = \n",
" -0.3141 -0.3951 0.3823 ... -0.4986 -0.1329 0.1512\n",
" 0.3251 -0.0179 -0.2871 ... 0.3264 0.0066 0.2836\n",
" -0.2214 -0.0483 0.4822 ... -0.0342 0.2107 -0.4497\n",
" ... ⋱ ... \n",
" -0.0501 0.3097 -0.3768 ... 0.1394 0.2083 0.1411\n",
" -0.3245 0.3247 0.2827 ... 0.2851 -0.3184 0.4488\n",
" 0.2758 0.4347 -0.0628 ... 0.0132 -0.1577 -0.4560\n",
" ⋮ \n",
"\n",
"(3 ,0 ,.,.) = \n",
" -0.0562 -0.2139 0.3784 ... -0.2151 0.3399 0.4549\n",
" 0.4926 0.4900 0.2957 ... -0.2086 0.1491 0.4319\n",
" -0.1958 -0.0304 0.3776 ... 0.3961 0.5853 -0.2427\n",
" ... ⋱ ... \n",
" -0.5075 0.3139 0.0884 ... -0.3480 -0.0871 -0.3347\n",
" -0.1468 0.4021 0.1034 ... 0.1544 -0.1639 0.4296\n",
" 0.4688 0.4582 0.2644 ... -0.4997 0.0724 -0.1964\n",
" ⋮ \n",
"\n",
"(4 ,0 ,.,.) = \n",
" -0.3221 -0.5024 0.5874 ... -0.6390 0.1459 0.4743\n",
" 0.6284 0.3994 -0.0440 ... 0.1594 0.1105 0.5464\n",
" -0.3238 -0.0620 0.6899 ... 0.2811 0.6748 -0.5821\n",
" ... ⋱ ... \n",
" -0.4679 0.4804 -0.3044 ... -0.1479 0.1226 -0.1340\n",
" -0.3884 0.5548 0.3201 ... 0.3545 -0.3928 0.6818\n",
" 0.5728 0.6865 0.1404 ... -0.4153 -0.0860 -0.5672\n",
"[torch.cuda.FloatTensor of size 5x1x28x28 (GPU 0)]\n",
"\n",
"torch.Size([5, 3, 13, 13])\n",
"Variable containing:\n",
"-0.0666 -0.0669 -0.0669 ... -0.0747 -0.0755 -0.0750\n",
"-0.0630 -0.0697 -0.0696 ... -0.0688 -0.0874 -0.0703\n",
"-0.0652 -0.0670 -0.0649 ... -0.0709 -0.0799 -0.0837\n",
"-0.0635 -0.0687 -0.0683 ... -0.0728 -0.0844 -0.0734\n",
"-0.0634 -0.0682 -0.0661 ... -0.0693 -0.0864 -0.0813\n",
"[torch.cuda.FloatTensor of size 5x507 (GPU 0)]\n",
"\n",
"Variable containing:\n",
"-0.1297\n",
"-0.1363\n",
"-0.1276\n",
"-0.1342\n",
"-0.1286\n",
"[torch.cuda.FloatTensor of size 5x1 (GPU 0)]\n",
"\n"
]
},
{
"ename": "RuntimeError",
"evalue": "CUDNN_STATUS_NOT_SUPPORTED. This error may appear if you passed in a non-contiguous input.",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-18-d123af906ae4>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdisc\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mD1\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m<ipython-input-17-3e28490bcfa7>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(gen, disc)\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[0mfake_batch\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mVariable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgen\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnoise\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mrequires_grad\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 14\u001b[0;31m \u001b[0mloss_d\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdisc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_batch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfake_batch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 15\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0mnoise\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muniform\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-7-66674e2a5694>\u001b[0m in \u001b[0;36mtrain_batch\u001b[0;34m(self, batch_real, batch_fake, lambd)\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 37\u001b[0m \u001b[0;31m# Backprop gradient\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 38\u001b[0;31m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 39\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;31m# Update parameters\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/home/hartmank/anaconda2/lib/python2.7/site-packages/torch/autograd/variable.pyc\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(self, gradient, retain_graph, create_graph, retain_variables)\u001b[0m\n\u001b[1;32m 150\u001b[0m \u001b[0mDefaults\u001b[0m \u001b[0mto\u001b[0m \u001b[0mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0munless\u001b[0m \u001b[0;34m`\u001b[0m\u001b[0;34m`\u001b[0m\u001b[0mgradient\u001b[0m\u001b[0;34m`\u001b[0m\u001b[0;34m`\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0ma\u001b[0m \u001b[0mvolatile\u001b[0m \u001b[0mVariable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 151\u001b[0m \"\"\"\n\u001b[0;32m--> 152\u001b[0;31m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_variables\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 153\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/home/hartmank/anaconda2/lib/python2.7/site-packages/torch/autograd/__init__.pyc\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(variables, grad_variables, retain_graph, create_graph, retain_variables)\u001b[0m\n\u001b[1;32m 96\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 97\u001b[0m Variable._execution_engine.run_backward(\n\u001b[0;32m---> 98\u001b[0;31m variables, grad_variables, retain_graph)\n\u001b[0m\u001b[1;32m 99\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 100\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mRuntimeError\u001b[0m: CUDNN_STATUS_NOT_SUPPORTED. This error may appear if you passed in a non-contiguous input."
]
}
],
"source": [
"train(G().cuda(),disc=D1().cuda())"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Variable containing:\n",
"(0 ,0 ,.,.) = \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ... ⋱ ... \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ⋮ \n",
"\n",
"(1 ,0 ,.,.) = \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ... ⋱ ... \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ⋮ \n",
"\n",
"(2 ,0 ,.,.) = \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ... ⋱ ... \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ⋮ \n",
"\n",
"(3 ,0 ,.,.) = \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ... ⋱ ... \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ⋮ \n",
"\n",
"(4 ,0 ,.,.) = \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" ... ⋱ ... \n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
" 0 0 0 ... 0 0 0\n",
"[torch.cuda.FloatTensor of size 5x1x28x28 (GPU 0)]\n",
"\n",
"torch.Size([5, 3, 25, 25])\n",
"Variable containing:\n",
" 0.0804 0.0804 0.0804 ... -0.0696 -0.0696 -0.0696\n",
" 0.0804 0.0804 0.0804 ... -0.0696 -0.0696 -0.0696\n",
" 0.0804 0.0804 0.0804 ... -0.0696 -0.0696 -0.0696\n",
" 0.0804 0.0804 0.0804 ... -0.0696 -0.0696 -0.0696\n",
" 0.0804 0.0804 0.0804 ... -0.0696 -0.0696 -0.0696\n",
"[torch.cuda.FloatTensor of size 5x1875 (GPU 0)]\n",
"\n",
"Variable containing:\n",
"-0.4963\n",
"-0.4963\n",
"-0.4963\n",
"-0.4963\n",
"-0.4963\n",
"[torch.cuda.FloatTensor of size 5x1 (GPU 0)]\n",
"\n",
"Variable containing:\n",
"(0 ,0 ,.,.) = \n",
" 0.2163 0.2712 -0.5388 ... 0.1016 0.1319 -0.7765\n",
" -0.2396 0.3341 0.1018 ... 0.0321 0.1586 0.8582\n",
" -0.7088 0.3103 -0.2281 ... 0.2821 0.6902 -0.4314\n",
" ... ⋱ ... \n",
" 0.4806 -0.4742 -0.4602 ... 0.4931 0.0236 -0.6836\n",
" 0.5622 -0.5086 0.0364 ... 0.5409 -0.0629 -0.8709\n",
" 0.1554 0.6485 0.0679 ... -0.8441 0.3008 0.1449\n",
" ⋮ \n",
"\n",
"(1 ,0 ,.,.) = \n",
" 0.6023 -0.4631 -0.7817 ... -0.3702 0.6361 -0.7132\n",
" 0.7423 -0.2395 -0.3482 ... -0.8013 0.5516 0.3007\n",
" 0.0022 0.9104 0.7386 ... -0.7178 -0.0261 -0.8918\n",
" ... ⋱ ... \n",
" 0.5131 -0.9410 -0.9118 ... 0.9518 0.8079 -0.1313\n",
" 0.4689 -0.8386 0.6691 ... 0.8681 -0.6922 -0.1799\n",
" 0.2036 0.6383 -0.7276 ... 0.0555 0.3782 0.2425\n",
" ⋮ \n",
"\n",
"(2 ,0 ,.,.) = \n",
" 0.3373 0.0635 -0.6203 ... -0.0331 0.2941 -0.7604\n",
" 0.0857 0.1826 -0.0260 ... -0.2730 0.2792 0.7690\n",
" -0.5654 0.5741 0.0922 ... -0.0384 0.5425 -0.6220\n",
" ... ⋱ ... \n",
" 0.4897 -0.6933 -0.6550 ... 0.7167 0.3146 -0.5664\n",
" 0.5377 -0.6299 0.2440 ... 0.6661 -0.2731 -0.7695\n",
" 0.1687 0.6457 -0.2021 ... -0.7064 0.3226 0.1721\n",
" ⋮ \n",
"\n",
"(3 ,0 ,.,.) = \n",
" 0.1827 0.3234 -0.5150 ... 0.1371 0.0870 -0.7806\n",
" -0.3207 0.3722 0.1355 ... 0.1149 0.1255 0.8760\n",
" -0.7398 0.2280 -0.3085 ... 0.3607 0.7224 -0.3717\n",
" ... ⋱ ... \n",
" 0.4782 -0.4012 -0.3979 ... 0.4168 -0.0569 -0.7102\n",
" 0.5685 -0.4720 -0.0204 ... 0.5024 -0.0050 -0.8900\n",
" 0.1518 0.6492 0.1400 ... -0.8692 0.2950 0.1376\n",
" ⋮ \n",
"\n",
"(4 ,0 ,.,.) = \n",
" 0.3509 0.0385 -0.6291 ... -0.0489 0.3122 -0.7584\n",
" 0.1239 0.1641 -0.0410 ... -0.3065 0.2928 0.7558\n",
" -0.5456 0.5996 0.1297 ... -0.0767 0.5223 -0.6408\n",
" ... ⋱ ... \n",
" 0.4907 -0.7133 -0.6737 ... 0.7366 0.3461 -0.5508\n",
" 0.5348 -0.6425 0.2673 ... 0.6788 -0.2964 -0.7539\n",
" 0.1703 0.6454 -0.2325 ... -0.6849 0.3251 0.1753\n",
"[torch.cuda.FloatTensor of size 5x1x28x28 (GPU 0)]\n",
"\n",
"torch.Size([5, 3, 25, 25])\n",
"Variable containing:\n",
" 1.4478e-01 1.9833e-01 3.0619e-02 ... -1.0594e-01 -7.3415e-02 -3.0691e-02\n",
" 3.1871e-02 2.6757e-01 1.5021e-01 ... -8.3525e-02 -2.9183e-02 -4.1255e-02\n",
" 1.1688e-01 2.5826e-01 6.6122e-02 ... -1.0628e-01 -6.4362e-02 -3.7949e-02\n",
" 1.5052e-01 1.8185e-01 2.0938e-02 ... -1.0543e-01 -7.6082e-02 -2.8985e-02\n",
" 1.1308e-01 2.6468e-01 7.0170e-02 ... -1.0617e-01 -6.3426e-02 -3.8896e-02\n",
"[torch.cuda.FloatTensor of size 5x1875 (GPU 0)]\n",
"\n",
"Variable containing:\n",
"-0.5566\n",
"-0.5740\n",
"-0.5772\n",
"-0.5487\n",
"-0.5788\n",
"[torch.cuda.FloatTensor of size 5x1 (GPU 0)]\n",
"\n",
"Variable containing:\n",
"(0 ,0 ,.,.) = \n",
" 0.1456 0.1825 -0.3625 ... 0.0684 0.0887 -0.5225\n",
" -0.1612 0.2248 0.0685 ... 0.0216 0.1067 0.5775\n",
" -0.4769 0.2088 -0.1535 ... 0.1898 0.4644 -0.2903\n",
" ... ⋱ ... \n",
" 0.3234 -0.3191 -0.3097 ... 0.3318 0.0159 -0.4600\n",
" 0.3783 -0.3422 0.0245 ... 0.3640 -0.0423 -0.5860\n",
" 0.1045 0.4363 0.0457 ... -0.5679 0.2024 0.0975\n",
" ⋮ \n",
"\n",
"(1 ,0 ,.,.) = \n",
" 0.1422 -0.1094 -0.1846 ... -0.0874 0.1502 -0.1684\n",
" 0.1753 -0.0565 -0.0822 ... -0.1892 0.1303 0.0710\n",
" 0.0005 0.2150 0.1744 ... -0.1695 -0.0062 -0.2106\n",
" ... ⋱ ... \n",
" 0.1211 -0.2222 -0.2153 ... 0.2247 0.1908 -0.0310\n",
" 0.1107 -0.1980 0.1580 ... 0.2050 -0.1634 -0.0425\n",
" 0.0481 0.1507 -0.1718 ... 0.0131 0.0893 0.0573\n",
" ⋮ \n",
"\n",
"(2 ,0 ,.,.) = \n",
" 0.1420 0.0267 -0.2612 ... -0.0139 0.1238 -0.3202\n",
" 0.0361 0.0769 -0.0109 ... -0.1149 0.1176 0.3238\n",
" -0.2381 0.2417 0.0388 ... -0.0161 0.2284 -0.2619\n",
" ... ⋱ ... \n",
" 0.2062 -0.2919 -0.2758 ... 0.3018 0.1325 -0.2385\n",
" 0.2264 -0.2652 0.1028 ... 0.2805 -0.1150 -0.3240\n",
" 0.0710 0.2719 -0.0851 ... -0.2974 0.1358 0.0725\n",
" ⋮ \n",
"\n",
"(3 ,0 ,.,.) = \n",
" 0.0985 0.1742 -0.2775 ... 0.0739 0.0469 -0.4207\n",
" -0.1728 0.2005 0.0730 ... 0.0619 0.0676 0.4720\n",
" -0.3986 0.1229 -0.1662 ... 0.1944 0.3893 -0.2003\n",
" ... ⋱ ... \n",
" 0.2577 -0.2162 -0.2144 ... 0.2246 -0.0307 -0.3827\n",
" 0.3064 -0.2544 -0.0110 ... 0.2707 -0.0027 -0.4796\n",
" 0.0818 0.3498 0.0754 ... -0.4684 0.1589 0.0741\n",
" ⋮ \n",
"\n",
"(4 ,0 ,.,.) = \n",
" 0.1726 0.0189 -0.3095 ... -0.0240 0.1536 -0.3732\n",
" 0.0610 0.0807 -0.0202 ... -0.1508 0.1441 0.3719\n",
" -0.2684 0.2950 0.0638 ... -0.0377 0.2570 -0.3153\n",
" ... ⋱ ... \n",
" 0.2414 -0.3510 -0.3315 ... 0.3624 0.1703 -0.2710\n",
" 0.2631 -0.3161 0.1315 ... 0.3340 -0.1458 -0.3709\n",
" 0.0838 0.3175 -0.1144 ... -0.3370 0.1600 0.0862\n",
"[torch.cuda.FloatTensor of size 5x1x28x28 (GPU 0)]\n",
"\n",
"torch.Size([5, 3, 25, 25])\n",
"Variable containing:\n",
" 1.2371e-01 1.6503e-01 5.2178e-02 ... -9.5766e-02 -7.4679e-02 -4.3415e-02\n",
" 6.8919e-02 1.4988e-01 1.0920e-01 ... -7.2876e-02 -6.0045e-02 -6.6540e-02\n",
" 9.5743e-02 1.6562e-01 8.3720e-02 ... -8.7235e-02 -7.0104e-02 -5.6264e-02\n",
" 1.1817e-01 1.4250e-01 5.5791e-02 ... -9.1282e-02 -7.6634e-02 -4.7707e-02\n",
" 9.6464e-02 1.8081e-01 8.3556e-02 ... -8.9513e-02 -6.9214e-02 -5.4486e-02\n",
"[torch.cuda.FloatTensor of size 5x1875 (GPU 0)]\n",
"\n",
"Variable containing:\n",
"-0.5294\n",
"-0.5017\n",
"-0.5167\n",
"-0.5166\n",
"-0.5232\n",
"[torch.cuda.FloatTensor of size 5x1 (GPU 0)]\n",
"\n",
"generated\n",
"Variable containing:\n",
"(0 ,0 ,.,.) = \n",
" 0.6058 -0.4702 -0.7837 ... -0.3751 0.6403 -0.7123\n",
" 0.7484 -0.2459 -0.3530 ... -0.8059 0.5553 0.2904\n",
" 0.0125 0.9128 0.7447 ... -0.7245 -0.0362 -0.8941\n",
" ... ⋱ ... \n",
" 0.5134 -0.9426 -0.9138 ... 0.9532 0.8123 -0.1233\n",
" 0.4678 -0.8408 0.6740 ... 0.8702 -0.6969 -0.1670\n",
" 0.2041 0.6382 -0.7330 ... 0.0704 0.3791 0.2436\n",
" ⋮ \n",
"\n",
"(1 ,0 ,.,.) = \n",
" 0.5560 -0.3691 -0.7550 ... -0.3067 0.5791 -0.7233\n",
" 0.6528 -0.1564 -0.2871 ... -0.7336 0.5030 0.4244\n",
" -0.1268 0.8748 0.6498 ... -0.6223 0.1014 -0.8589\n",
" ... ⋱ ... \n",
" 0.5084 -0.9165 -0.8823 ... 0.9301 0.7445 -0.2307\n",
" 0.4833 -0.8078 0.6019 ... 0.8397 -0.6272 -0.3370\n",
" 0.1966 0.6398 -0.6519 ... -0.1323 0.3672 0.2285\n",
" ⋮ \n",
"\n",
"(2 ,0 ,.,.) = \n",
" 0.4932 -0.2407 -0.7178 ... -0.2238 0.4994 -0.7354\n",
" 0.5093 -0.0500 -0.2076 ... -0.6231 0.4378 0.5585\n",
" -0.2811 0.8121 0.5082 ... -0.4712 0.2551 -0.8050\n",
" ... ⋱ ... \n",
" 0.5026 -0.8723 -0.8327 ... 0.8897 0.6416 -0.3479\n",
" 0.5009 -0.7622 0.5047 ... 0.7966 -0.5322 -0.5087\n",
" 0.1878 0.6416 -0.5358 ... -0.3522 0.3534 0.2109\n",
" ⋮ \n",
"\n",
"(3 ,0 ,.,.) = \n",
" 0.5807 -0.4195 -0.7693 ... -0.3404 0.6097 -0.7180\n",
" 0.7024 -0.2003 -0.3195 ... -0.7712 0.5289 0.3612\n",
" -0.0594 0.8948 0.6990 ... -0.6751 0.0347 -0.8771\n",
" ... ⋱ ... \n",
" 0.5108 -0.9303 -0.8988 ... 0.9424 0.7797 -0.1791\n",
" 0.4758 -0.8245 0.6383 ... 0.8552 -0.6625 -0.2564\n",
" 0.2002 0.6390 -0.6935 ... -0.0343 0.3730 0.2358\n",
" ⋮ \n",
"\n",
"(4 ,0 ,.,.) = \n",
" 0.1684 0.3448 -0.5048 ... 0.1520 0.0679 -0.7824\n",
" -0.3537 0.3878 0.1496 ... 0.1494 0.1114 0.8828\n",
" -0.7520 0.1922 -0.3411 ... 0.3924 0.7351 -0.3456\n",
" ... ⋱ ... \n",
" 0.4771 -0.3687 -0.3704 ... 0.3827 -0.0907 -0.7208\n",
" 0.5712 -0.4561 -0.0443 ... 0.4856 0.0195 -0.8972\n",
" 0.1503 0.6495 0.1699 ... -0.8787 0.2925 0.1345\n",
"[torch.cuda.FloatTensor of size 5x1x28x28 (GPU 0)]\n",
"\n",
"torch.Size([5, 3, 25, 25])\n",
"Variable containing:\n",
" 0.0335 0.2673 0.1530 ... -0.0822 -0.0277 -0.0409\n",
" 0.0498 0.2710 0.1357 ... -0.0913 -0.0359 -0.0401\n",
" 0.0710 0.2693 0.1150 ... -0.1014 -0.0448 -0.0401\n",
" 0.0416 0.2698 0.1442 ... -0.0869 -0.0318 -0.0404\n",
" 0.1563 0.1771 0.0166 ... -0.1049 -0.0770 -0.0279\n",
"[torch.cuda.FloatTensor of size 5x1875 (GPU 0)]\n",
"\n",
"Variable containing:\n",
"-0.6924\n",
"-0.7014\n",
"-0.7136\n",
"-0.6967\n",
"-0.6753\n",
"[torch.cuda.FloatTensor of size 5x1 (GPU 0)]\n",
"\n"
]
}
],
"source": [
"train(G().cuda(),disc=D2().cuda())"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.13"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment