@jinglescode
Created January 10, 2021 06:15
import torch.nn as nn

class Discriminator(nn.Module):
    '''
    Discriminator Class

    Parameters:
        in_dim: int, default: 784
            the dimension of the input (MNIST images are 28x28, so 784 is the default)
        hidden_dim: int, default: 128
            the inner dimension
        out_dim: int, default: 1
            default 1 because we return a one-dimensional tensor representing real/fake
    '''
    def __init__(self, in_dim=784, hidden_dim=128, out_dim=1):
        super(Discriminator, self).__init__()
        # Shrink the representation in three steps: 512 -> 256 -> 128 by default
        dims = [hidden_dim * 4, hidden_dim * 2, hidden_dim]
        self.layers = nn.Sequential(
            self.discriminator_block(in_dim, dims[0]),
            self.discriminator_block(dims[0], dims[1]),
            self.discriminator_block(dims[1], dims[2]),
            nn.Linear(dims[2], out_dim)
        )

    def forward(self, x):
        return self.layers(x)

    def discriminator_block(self, input_dim, output_dim):
        '''
        A discriminator neural network layer: a linear transformation
        followed by an nn.LeakyReLU activation with negative slope of 0.2
        '''
        return nn.Sequential(
            nn.Linear(input_dim, output_dim),
            nn.LeakyReLU(negative_slope=0.2)
        )
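For reference, a minimal usage sketch (not part of the original gist): it instantiates the discriminator with its defaults and passes a batch of flattened 28x28 inputs through it. The random tensor here is just a stand-in for real or generated MNIST images.

import torch

disc = Discriminator()              # in_dim=784, hidden_dim=128, out_dim=1
batch = torch.randn(16, 784)        # 16 flattened 28x28 "images" (placeholder data)
logits = disc(batch)                # raw scores, shape (16, 1)
print(logits.shape)                 # torch.Size([16, 1])

Note that the final nn.Linear layer outputs raw logits (no sigmoid), so during training this discriminator would typically be paired with nn.BCEWithLogitsLoss.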