{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%load_ext autoreload\n",
"%autoreload 2\n",
"\n",
"import numpy as np\n",
"import torch\n",
"import torch.nn as nn\n",
"from torch.autograd import Variable\n",
"from torch import optim\n",
"\n",
"import torch.autograd as autograd\n",
"\n",
"#torch.cuda.set_device(0)"
]
},
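{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note: this notebook uses the pre-0.4 PyTorch API (Python 2, `torch.autograd.Variable`, `volatile=True`, `loss.data[0]`), matching the environment recorded in the tracebacks further down."
]
},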
{
"cell_type": "code",
"execution_count": 31,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"class WGAN_I_Discriminator(nn.Module):\n", | |
"\tdef __init__(self):\n", | |
"\t\tsuper(WGAN_I_Discriminator, self).__init__()\n", | |
"\n", | |
"\t\tself.did_init_train = False\n", | |
"\n", | |
"\tdef train_init(self,alpha=0.0001,betas=(0,0.9)):\n", | |
"\t\tself.loss = torch.nn.L1Loss()\n", | |
"\t\tself.optimizer = optim.Adam(self.parameters(),lr=alpha,betas=betas)\n", | |
"\t\tself.did_init_train = True\n", | |
"\n", | |
"\tdef train_batch(self, batch_real, batch_fake, lambd=10):\n", | |
"\t\tif not self.did_init_train:\n", | |
"\t\t\tself.train_init()\n", | |
"\n", | |
"\t\t# Reset gradients\n", | |
"\t\tself.optimizer.zero_grad()\n", | |
"\n", | |
"\t\t# Compute output and loss\n", | |
"\t\tfx_real = self.forward(batch_real)\n", | |
"\t\tloss_real = fx_real.mean()\n", | |
"\n", | |
"\t\tfx_fake = self.forward(batch_fake)\n", | |
"\t\tloss_fake = fx_fake.mean()\n", | |
"\n", | |
"\n", | |
"\t\t#dreist geklaut von\n", | |
"\t\t# https://github.com/caogang/wgan-gp/blob/master/gan_toy.py\n", | |
"\t\t# gradients = autograd.grad(outputs=fx_comb, inputs=interpolates,\n", | |
"\t\t# \t\t\t\t\t grad_outputs=grad_ones,\n", | |
"\t\t# \t\t\t\t\t create_graph=True, retain_graph=True, only_inputs=True)[0]\n", | |
"\t\t# gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * lambd\n", | |
"\t\tloss_penalty = self.calc_gradient_penalty(batch_real, batch_fake,lambd)\n", | |
"\n", | |
"\t\tloss = loss_fake - loss_real + loss_penalty\n", | |
"\n", | |
"\t\t# Backprop gradient\n", | |
"\t\tloss.backward()\n", | |
"\n", | |
"\t\t# Update parameters\n", | |
"\t\tself.optimizer.step()\n", | |
"\n", | |
"\t\tself.optimizer.zero_grad()\n", | |
"\n", | |
"\t\treturn loss.data[0] # return loss\n", | |
"\n", | |
"\n", | |
"\tdef calc_gradient_penalty(self, real_data, fake_data,lambd):\n", | |
"\t\talpha = torch.rand(real_data.size(0), 1,1,1)\n", | |
"\t\talpha = alpha.expand(real_data.size())\n", | |
"\t\talpha = alpha\n", | |
"\n", | |
"\t\tinterpolates = alpha * real_data.data + ((1 - alpha) * fake_data.data)\n", | |
"\n", | |
"\t\tinterpolates = interpolates\n", | |
"\t\tinterpolates = Variable(interpolates, requires_grad=True)\n", | |
"\n", | |
"\t\tdisc_interpolates = self(interpolates)\n", | |
"\n", | |
"\t\tgradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,\n", | |
"\t\t\t\t\t\t\t\t grad_outputs=torch.ones(disc_interpolates.size()),\n", | |
"\t\t\t\t\t\t\t\t create_graph=True, retain_graph=True, only_inputs=True)[0]\n", | |
"\n", | |
"\t\tgradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * lambd\n", | |
"\n", | |
"\t\treturn gradient_penalty\n", | |
"\n", | |
"\n", | |
"class WGAN_I_Generator(nn.Module):\n", | |
"\tdef __init__(self):\n", | |
"\t\tsuper(WGAN_I_Generator, self).__init__()\n", | |
"\n", | |
"\t\tself.did_init_train = False\n", | |
"\n", | |
"\tdef train_init(self,alpha=0.0001,betas=(0,0.9)):\n", | |
"\t\tself.loss = None\n", | |
"\t\tself.optimizer = optim.Adam(self.parameters(),lr=alpha,betas=betas)\n", | |
"\t\tself.did_init_train = True\n", | |
"\n", | |
"\tdef train_batch(self, batch_noise, discriminator):\n", | |
"\t\tif not self.did_init_train:\n", | |
"\t\t\tself.train_init()\n", | |
"\n", | |
"\t\tnoise = batch_noise\n", | |
"\t\tzeros_label = Variable(torch.ones(noise.size(0)))\n", | |
"\n", | |
"\t\t# Reset gradients\n", | |
"\t\tself.optimizer.zero_grad()\n", | |
"\n", | |
"\t\t# Generate and discriminate\n", | |
"\t\tgen = self.forward(noise)\n", | |
"\t\tdisc = discriminator(gen)\n", | |
"\t\tloss = -disc.mean()\n", | |
"\n", | |
"\t\t# Backprop gradient\n", | |
"\t\tloss.backward()\n", | |
"\n", | |
"\t\t# Update parameters\n", | |
"\t\tself.optimizer.step()\n", | |
"\n", | |
"\t\tself.optimizer.zero_grad()\n", | |
"\n", | |
"\t\treturn loss.data[0] # return loss" | |
]
},
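{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check for `calc_gradient_penalty` (a minimal sketch; the `LinearCritic` below is a hypothetical helper, not part of the experiment, written against the same pre-0.4 API as above): for a bias-free linear critic, the gradient of the output with respect to the input is the weight vector for every sample, so the penalty should come out as `lambd * (||w|| - 1)**2` regardless of the interpolation points."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical helper: a critic whose input gradient is constant,\n",
"# so the expected gradient penalty is known in closed form.\n",
"class LinearCritic(WGAN_I_Discriminator):\n",
"\tdef __init__(self):\n",
"\t\tsuper(LinearCritic, self).__init__()\n",
"\t\tself.fc = nn.Linear(4, 1, bias=False)\n",
"\n",
"\tdef forward(self, inp):\n",
"\t\treturn self.fc(inp.view(inp.size(0), 4))\n",
"\n",
"crit = LinearCritic()\n",
"real = Variable(torch.rand(5, 1, 2, 2))\n",
"fake = Variable(torch.rand(5, 1, 2, 2))\n",
"gp = crit.calc_gradient_penalty(real, fake, lambd=10)\n",
"w_norm = crit.fc.weight.data.norm(2)\n",
"print 'penalty: ', gp.data[0]\n",
"print 'expected:', 10 * (w_norm - 1) ** 2"
]
},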
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [],
"source": [
"class G(WGAN_I_Generator):\n",
"\tdef __init__(self):\n",
"\t\tsuper(G, self).__init__()\n",
"\t\tself.project = nn.Sequential(\n",
"\t\t\tnn.Linear(1,4),\n",
"\t\t\tnn.Tanh()\n",
"\t\t\t)\n",
"\n",
"\t\tself.did_init_train = False\n",
"\n",
"\tdef forward(self, inp):\n",
"\t\tx = self.project(inp)\n",
"\t\tx = x.view(x.size(0),1,2,2)\n",
"\t\treturn x"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [],
"source": [
"class D1(WGAN_I_Discriminator):\n",
"\tdef __init__(self):\n",
"\t\tsuper(D1, self).__init__()\n",
"\t\tself.features = nn.Sequential(\n",
"\t\t\tnn.Conv2d(1,3,(2,1)),\n",
"\t\t\tnn.LeakyReLU(negative_slope=0.2),\n",
"\t\t\t)\n",
"\t\tself.classification = nn.Sequential(\n",
"\t\t\tnn.Linear(3*2,1)\n",
"\t\t\t)\n",
"\n",
"\t\tself.did_init_train = False\n",
"\n",
"\tdef forward(self, inp):\n",
"\t\tinp = inp.view(inp.size(0),1,2,2)\n",
"\t\tx = self.features(inp)\n",
"\t\tx = x.view(x.size(0),3*2)\n",
"\t\tx = self.classification(x)\n",
"\t\tprint x\n",
"\t\treturn x\n",
" \n",
"class D2(WGAN_I_Discriminator):\n",
"\tdef __init__(self):\n",
"\t\tsuper(D2, self).__init__()\n",
"\t\tself.features = nn.Sequential(\n",
"\t\t\tnn.Conv2d(1,3,(2,1)),\n",
"\t\t\tnn.Conv2d(3,3,1),\n",
"\t\t\tnn.LeakyReLU(negative_slope=0.2),\n",
"\t\t\t)\n",
"\t\tself.classification = nn.Sequential(\n",
"\t\t\tnn.Linear(3*2,1)\n",
"\t\t\t)\n",
"\n",
"\t\tself.did_init_train = False\n",
"\n",
"\tdef forward(self, inp):\n",
"\t\tinp = inp.view(inp.size(0),1,2,2)\n",
"\t\tx = self.features(inp)\n",
"\t\tx = x.view(x.size(0),3*2)\n",
"\t\tx = self.classification(x)\n",
"\t\tprint x\n",
"\t\treturn x\n",
" \n",
"class D3(WGAN_I_Discriminator):\n",
"\tdef __init__(self):\n",
"\t\tsuper(D3, self).__init__()\n",
"\t\tself.features = nn.Sequential(\n",
"\t\t\tnn.Conv2d(1,3,(2,1)),\n",
"\t\t\tnn.Conv2d(3,3,1),\n",
"\t\t\tnn.LeakyReLU(negative_slope=0.2),\n",
"\t\t\tnn.Conv2d(3,3,1),\n",
"\t\t\tnn.LeakyReLU(negative_slope=0.2)\n",
"\t\t\t)\n",
"\t\tself.classification = nn.Sequential(\n",
"\t\t\tnn.Linear(3*2,1)\n",
"\t\t\t)\n",
"\n",
"\t\tself.did_init_train = False\n",
"\n",
"\tdef forward(self, inp):\n",
"\t\tinp = inp.view(inp.size(0),1,2,2)\n",
"\t\tx = self.features(inp)\n",
"\t\tx = x.view(x.size(0),3*2)\n",
"\t\tx = self.classification(x)\n",
"\t\tprint x\n",
"\t\treturn x\n",
" \n",
"class D4(WGAN_I_Discriminator):\n",
"\tdef __init__(self):\n",
"\t\tsuper(D4, self).__init__()\n",
"\t\tself.features = nn.Sequential(\n",
"\t\t\tnn.Conv2d(1,3,(2,1)),\n",
"\t\t\tnn.Conv2d(3,3,1),\n",
"\t\t\tnn.LeakyReLU(negative_slope=0.2),\n",
"\t\t\tnn.Conv2d(3,3,1),\n",
"\t\t\tnn.Conv2d(3,3,1),\n",
"\t\t\tnn.LeakyReLU(negative_slope=0.2)\n",
"\t\t\t)\n",
"\t\tself.classification = nn.Sequential(\n",
"\t\t\tnn.Linear(3*2,1)\n",
"\t\t\t)\n",
"\n",
"\t\tself.did_init_train = False\n",
"\n",
"\tdef forward(self, inp):\n",
"\t\tinp = inp.view(inp.size(0),1,2,2)\n",
"\t\tx = self.features(inp)\n",
"\t\tx = x.view(x.size(0),3*2)\n",
"\t\tx = self.classification(x)\n",
"\t\tprint x\n",
"\t\treturn x\n",
" \n",
"class D5(WGAN_I_Discriminator):\n",
"\tdef __init__(self):\n",
"\t\tsuper(D5, self).__init__()\n",
"\t\tself.features = nn.Sequential(\n",
"\t\t\tnn.Conv2d(1,3,(2,1)),\n",
"\t\t\tnn.Conv2d(3,3,1),\n",
"\t\t\tnn.LeakyReLU(negative_slope=0.2),\n",
"\t\t\tnn.Conv2d(3,3,1),\n",
"\t\t\tnn.Conv2d(3,3,1),\n",
"\t\t\tnn.LeakyReLU(negative_slope=0.2)\n",
"\t\t\t)\n",
"\t\tself.classification = nn.Sequential(\n",
"\t\t\tnn.Linear(3*2,1)\n",
"\t\t\t)\n",
"\n",
"\t\tself.did_init_train = False\n",
"\n",
"\tdef forward(self, inp):\n",
"\t\tinp = inp.view(inp.size(0),1,2,2)\n",
"\t\tx = self.features(inp)\n",
"\t\tx = x.view(x.size(0),3*2)\n",
"\t\tx = self.classification(x)\n",
"\t\tx = 1/x\n",
"\t\tprint x\n",
"\t\treturn x"
]
},
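{
"cell_type": "markdown",
"metadata": {},
"source": [
"`D1`-`D5` are critics for the 2x2 one-hot inputs that differ only in their feature stacks: `D1` uses a single `Conv2d`; `D2` adds a second, back-to-back `Conv2d`; `D3` and `D4` stack further conv layers; `D5` is `D4` with the output additionally inverted (`1/x`). The `print x` in each `forward` (Python 2 syntax) logs the critic scores of every call."
]
},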
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [],
"source": [
"def train(gen,disc):\n",
"    batch_size = 5\n",
"\n",
"    # Build one random one-hot 2x2 map per sample\n",
"    ind = np.random.randint(4,size=batch_size)\n",
"    batch = np.zeros((batch_size,4))\n",
"    batch[np.arange(batch_size),ind] = 1\n",
"    batch = batch.reshape((batch_size,1,2,2))\n",
"    X_batch = Variable(torch.from_numpy(batch).float(),requires_grad=False)\n",
"    noise = np.random.uniform(-1,1,(batch_size,1))\n",
"    noise = Variable(torch.from_numpy(noise).float(),volatile=True)\n",
"\n",
"    # Re-wrap the generator output so no gradients flow into the generator\n",
"    fake_batch = Variable(gen.forward(noise).data,requires_grad=False)\n",
"\n",
"    loss_d = disc.train_batch(X_batch,fake_batch)\n",
"\n",
"    noise = np.random.uniform(-1,1,(batch_size,1))\n",
"    noise = Variable(torch.from_numpy(noise).float(),requires_grad=False)\n",
"\n",
"    loss_g = gen.train_batch(noise,disc)"
]
},
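{
"cell_type": "markdown",
"metadata": {},
"source": [
"The cells below run one training step with each critic. With `D2` and `D4`, `loss.backward()` in the critic update fails with `RuntimeError: ConvNdBackward: expected Variable at argument 0 (got None)` while backpropagating through the gradient penalty; the other variants complete the step."
]
},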
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Variable containing:\n",
" 0.1076\n",
" 0.1076\n",
" 0.1076\n",
" 0.1076\n",
" 0.1076\n",
"[torch.FloatTensor of size 5x1]\n",
"\n",
"Variable containing:\n",
"-0.1358\n",
" 0.1392\n",
"-0.0589\n",
" 0.0915\n",
"-0.0997\n",
"[torch.FloatTensor of size 5x1]\n",
"\n",
"Variable containing:\n",
"-0.0801\n",
" 0.1170\n",
" 0.0927\n",
" 0.0967\n",
"-0.0515\n",
"[torch.FloatTensor of size 5x1]\n",
"\n",
"Variable containing:\n",
" 0.1460\n",
"-0.0738\n",
" 0.0821\n",
" 0.1472\n",
" 0.1261\n",
"[torch.FloatTensor of size 5x1]\n",
"\n"
]
}
],
"source": [
"train(G(),disc=D1())"
]
},
{
"cell_type": "code",
"execution_count": 36,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Variable containing:\n",
" 0.2192\n",
" 0.2192\n",
" 0.2192\n",
" 0.2192\n",
" 0.2192\n",
"[torch.FloatTensor of size 5x1]\n",
"\n",
"Variable containing:\n",
" 0.4432\n",
" 0.3932\n",
" 0.4277\n",
" 0.4354\n",
" 0.3680\n",
"[torch.FloatTensor of size 5x1]\n",
"\n",
"Variable containing:\n",
" 0.3128\n",
" 0.3124\n",
" 0.2965\n",
" 0.4105\n",
" 0.2265\n",
"[torch.FloatTensor of size 5x1]\n",
"\n"
]
},
{
"ename": "RuntimeError",
"evalue": "ConvNdBackward: expected Variable at argument 0 (got None)",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-36-a33a28fe4e3e>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdisc\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mD2\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m<ipython-input-34-0bc717f851b5>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(gen, disc)\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[0mfake_batch\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mVariable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgen\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnoise\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mrequires_grad\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 14\u001b[0;31m \u001b[0mloss_d\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdisc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_batch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfake_batch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 15\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0mnoise\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muniform\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-31-2f8373282c33>\u001b[0m in \u001b[0;36mtrain_batch\u001b[0;34m(self, batch_real, batch_fake, lambd)\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 37\u001b[0m \u001b[0;31m# Backprop gradient\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 38\u001b[0;31m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 39\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;31m# Update parameters\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/home/hartmank/anaconda2/lib/python2.7/site-packages/torch/autograd/variable.pyc\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(self, gradient, retain_graph, create_graph, retain_variables)\u001b[0m\n\u001b[1;32m 150\u001b[0m \u001b[0mDefaults\u001b[0m \u001b[0mto\u001b[0m \u001b[0mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0munless\u001b[0m \u001b[0;34m`\u001b[0m\u001b[0;34m`\u001b[0m\u001b[0mgradient\u001b[0m\u001b[0;34m`\u001b[0m\u001b[0;34m`\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0ma\u001b[0m \u001b[0mvolatile\u001b[0m \u001b[0mVariable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 151\u001b[0m \"\"\"\n\u001b[0;32m--> 152\u001b[0;31m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_variables\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 153\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/home/hartmank/anaconda2/lib/python2.7/site-packages/torch/autograd/__init__.pyc\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(variables, grad_variables, retain_graph, create_graph, retain_variables)\u001b[0m\n\u001b[1;32m 96\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 97\u001b[0m Variable._execution_engine.run_backward(\n\u001b[0;32m--> 98\u001b[0;31m variables, grad_variables, retain_graph)\n\u001b[0m\u001b[1;32m 99\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 100\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mRuntimeError\u001b[0m: ConvNdBackward: expected Variable at argument 0 (got None)"
]
}
],
"source": [
"train(G(),disc=D2())"
]
},
{
"cell_type": "code",
"execution_count": 37,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Variable containing:\n",
"-0.3431\n",
"-0.3431\n",
"-0.3431\n",
"-0.3431\n",
"-0.3431\n",
"[torch.FloatTensor of size 5x1]\n",
"\n",
"Variable containing:\n",
"-0.3279\n",
"-0.3279\n",
"-0.3423\n",
"-0.3313\n",
"-0.3366\n",
"[torch.FloatTensor of size 5x1]\n",
"\n",
"Variable containing:\n",
"-0.3133\n",
"-0.3415\n",
"-0.3116\n",
"-0.3241\n",
"-0.3228\n",
"[torch.FloatTensor of size 5x1]\n",
"\n",
"Variable containing:\n",
"-0.3338\n",
"-0.3368\n",
"-0.3323\n",
"-0.3308\n",
"-0.3231\n",
"[torch.FloatTensor of size 5x1]\n",
"\n"
]
}
],
"source": [
"train(G(),disc=D3())"
]
},
{
"cell_type": "code",
"execution_count": 38,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Variable containing:\n",
"-0.4007\n",
"-0.4007\n",
"-0.4007\n",
"-0.4007\n",
"-0.4007\n",
"[torch.FloatTensor of size 5x1]\n",
"\n",
"Variable containing:\n",
"-0.3828\n",
"-0.3870\n",
"-0.3827\n",
"-0.3826\n",
"-0.3836\n",
"[torch.FloatTensor of size 5x1]\n",
"\n",
"Variable containing:\n",
"-0.3872\n",
"-0.3930\n",
"-0.3956\n",
"-0.3818\n",
"-0.3829\n",
"[torch.FloatTensor of size 5x1]\n",
"\n"
]
},
{
"ename": "RuntimeError",
"evalue": "ConvNdBackward: expected Variable at argument 0 (got None)",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-38-617c87cc7824>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdisc\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mD4\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m<ipython-input-34-0bc717f851b5>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(gen, disc)\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[0mfake_batch\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mVariable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgen\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnoise\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mrequires_grad\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 14\u001b[0;31m \u001b[0mloss_d\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdisc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_batch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfake_batch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 15\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0mnoise\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muniform\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-31-2f8373282c33>\u001b[0m in \u001b[0;36mtrain_batch\u001b[0;34m(self, batch_real, batch_fake, lambd)\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 37\u001b[0m \u001b[0;31m# Backprop gradient\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 38\u001b[0;31m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 39\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;31m# Update parameters\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/home/hartmank/anaconda2/lib/python2.7/site-packages/torch/autograd/variable.pyc\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(self, gradient, retain_graph, create_graph, retain_variables)\u001b[0m\n\u001b[1;32m 150\u001b[0m \u001b[0mDefaults\u001b[0m \u001b[0mto\u001b[0m \u001b[0mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0munless\u001b[0m \u001b[0;34m`\u001b[0m\u001b[0;34m`\u001b[0m\u001b[0mgradient\u001b[0m\u001b[0;34m`\u001b[0m\u001b[0;34m`\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0ma\u001b[0m \u001b[0mvolatile\u001b[0m \u001b[0mVariable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 151\u001b[0m \"\"\"\n\u001b[0;32m--> 152\u001b[0;31m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_variables\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 153\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/home/hartmank/anaconda2/lib/python2.7/site-packages/torch/autograd/__init__.pyc\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(variables, grad_variables, retain_graph, create_graph, retain_variables)\u001b[0m\n\u001b[1;32m 96\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 97\u001b[0m Variable._execution_engine.run_backward(\n\u001b[0;32m--> 98\u001b[0;31m variables, grad_variables, retain_graph)\n\u001b[0m\u001b[1;32m 99\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 100\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mRuntimeError\u001b[0m: ConvNdBackward: expected Variable at argument 0 (got None)"
]
}
],
"source": [
"train(G(),disc=D4())"
]
},
{
"cell_type": "code",
"execution_count": 39,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Variable containing:\n",
"-3.6873\n",
"-3.6873\n",
"-3.6873\n",
"-3.6873\n",
"-3.6873\n",
"[torch.FloatTensor of size 5x1]\n",
"\n",
"Variable containing:\n",
"-4.2763\n",
"-4.2791\n",
"-4.2800\n",
"-4.2772\n",
"-4.2787\n",
"[torch.FloatTensor of size 5x1]\n",
"\n",
"Variable containing:\n",
"-4.0579\n",
"-3.9405\n",
"-3.7507\n",
"-4.1994\n",
"-4.0711\n",
"[torch.FloatTensor of size 5x1]\n",
"\n",
"Variable containing:\n",
"-4.2828\n",
"-4.2846\n",
"-4.2845\n",
"-4.2837\n",
"-4.2790\n",
"[torch.FloatTensor of size 5x1]\n",
"\n"
]
}
],
"source": [
"train(G(),disc=D5())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.13"
}
},
"nbformat": 4,
"nbformat_minor": 2
} |