@jmbjr
Created February 11, 2019 21:16
{
"description": "Crossing the curl with finite differences",
"publication": "https://arxiv.org/pdf/1808.01531.pdf",
"discriminator":
{
"class": "class:hypergan.discriminators.configurable_discriminator.ConfigurableDiscriminator",
"defaults":{
"class": "class:hypergan.discriminators.dcgan_discriminator.DCGANDiscriminator",
"activation": "lrelu",
"initializer": "random_normal",
"random_stddev": 0.02,
"filter": [3,3],
"stride": [1,1],
"avg_pool": [2,2]
},
"layers":[
"conv 32",
"conv 64",
"conv 128",
"conv 256",
"linear 1 activation=null"
]
},
"latent":
{
"class": "function:hypergan.distributions.uniform_distribution.UniformDistribution",
"projections": [
"function:hypergan.distributions.uniform_distribution.identity"
],
"min": -1,
"max": 1,
"z": 100
},
"generator": {
"class": "class:hypergan.discriminators.configurable_discriminator.ConfigurableDiscriminator",
"defaults": {
"initializer": "random_normal",
"random_stddev": 0.02,
"activation": "relu",
"filter": [5,5],
"stride": [2,2],
"avg_pool": [1,1]
},
"layers": [
"linear 4*4*512",
"deconv 256",
"deconv 128",
"deconv 64",
"deconv 3 activation=tanh",
"resize_images 32 32"
]
},
"loss":
{
"class": "function:hypergan.losses.standard_loss.StandardLoss",
"reduce": "reduce_mean"
},
"trainer": {
"class": "function:hypergan.trainers.simultaneous_trainer.SimultaneousTrainer",
"optimizer": {
"class": "function:hypergan.optimizers.curl_optimizer.CurlOptimizer",
"learn_rate": 0.00001,
"optimizer": {
"class": "function:hypergan.optimizers.elastic_weight_consolidation_optimizer.ElasticWeightConsolidationOptimizer",
"f_decay": 0.5,
"add_ewc_loss_gradients": true,
"optimizer": {
"class": "function:hypergan.optimizers.giga_wolf_optimizer.GigaWolfOptimizer",
"optimizer": {
"class": "function:tensorflow.python.training.adam.AdamOptimizer",
"learn_rate": 1e-4,
"beta1":0.0,
"beta2":0.999
},
"optimizer2": {
"class": "function:tensorflow.python.training.adam.AdamOptimizer",
"learn_rate": 3e-5,
"beta1":0.0,
"beta2":0.999
}
}
}
}
},
"class": "class:hypergan.gans.standard_gan.StandardGAN"
}
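
For quick inspection, here is a minimal sketch (standard library only; it assumes the JSON above has been saved under the hypothetical filename curl.json) that loads the config and prints its main components:

import json

# Assumes the config above was saved as "curl.json" (hypothetical filename).
with open("curl.json") as f:
    config = json.load(f)

print("discriminator layers:", config["discriminator"]["layers"])
print("generator layers:", config["generator"]["layers"])
print("loss:", config["loss"]["class"])
print("gan class:", config["class"])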
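
The latent block declares a 100-dimensional uniform prior on [-1, 1] with an identity projection. Outside of HyperGAN, that sampling amounts to the following (a sketch; NumPy is used here purely for illustration and is not part of the config):

import numpy as np

def sample_latent(batch_size, z_dim=100, z_min=-1.0, z_max=1.0):
    # Mirrors the "latent" block: a 100-dimensional uniform prior on [-1, 1]
    # with an identity projection (the sample is used as-is).
    return np.random.uniform(z_min, z_max, size=(batch_size, z_dim)).astype(np.float32)

z = sample_latent(batch_size=32)  # shape (32, 100)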
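
The trainer block nests four optimizers: CurlOptimizer (learn_rate 1e-5) wraps ElasticWeightConsolidationOptimizer, which wraps GigaWolfOptimizer, which in turn holds two TensorFlow Adam optimizers at learn rates 1e-4 and 3e-5. A minimal sketch (standard library only, same hypothetical curl.json filename) that walks and prints that chain:

import json

with open("curl.json") as f:  # same hypothetical filename as above
    config = json.load(f)

def walk_optimizers(node, depth=0):
    # Recursively print each optimizer class and its learn_rate, if any.
    print("  " * depth + node["class"], "learn_rate:", node.get("learn_rate", "-"))
    for key in ("optimizer", "optimizer2"):
        if key in node:
            walk_optimizers(node[key], depth + 1)

walk_optimizers(config["trainer"]["optimizer"])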