John Doe (IFeelBloated)

  • Vacancy in the cosmos
import torch
import torch.nn as nn

def ZeroCenteredGradientPenalty(Samples, Critics):
    # Gradient of the critic outputs with respect to the input samples.
    Gradient, = torch.autograd.grad(outputs=Critics.sum(), inputs=Samples, create_graph=True, only_inputs=True)
    # Per-sample 0.5 * ||∇D(x)||², summed over channel and spatial dims.
    return 0.5 * Gradient.square().sum([1, 2, 3])

class AdversarialTraining:
    def __init__(self, Generator, Discriminator):
        self.Generator = Generator
        self.Discriminator = Discriminator
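
A hedged note on usage: unlike the later Loss.py gists, this variant returns a per-sample penalty, so the caller is expected to reduce it. A minimal sketch with stand-in tensors (the critic below is a dummy, not part of the gist):

# Illustrative only: reduce the per-sample R1 penalty in the caller.
RealSamples = torch.randn(8, 3, 64, 64, requires_grad=True)
Critics = RealSamples.mul(0.1).sum(dim=[1, 2, 3])  # stand-in for Discriminator(RealSamples)
Penalty = ZeroCenteredGradientPenalty(RealSamples, Critics).mean()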
import torch
import torch.nn as nn
import numpy
from torch_utils.ops import upfirdn2d

# 1D low-pass filter taps of increasing smoothness; each set sums to ~1,
# so filtering preserves the DC level.
Box = [1.0]
Linear = [0.25, 0.50, 0.25]
Quadratic = [0.128, 0.235, 0.276, 0.235, 0.128]
Cubic = [0.058, 0.128, 0.199, 0.231, 0.199, 0.128, 0.058]
Gaussian = [0.008, 0.036, 0.110, 0.213, 0.267, 0.213, 0.110, 0.036, 0.008]
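
A hedged sketch of how these taps would typically be consumed, assuming the torch_utils.ops.upfirdn2d module from the StyleGAN2-ADA/StyleGAN3 codebase (the input tensor and scale factors are illustrative):

# Illustrative usage; setup_filter/downsample2d/upsample2d are the
# StyleGAN2-ADA/StyleGAN3 helpers this import suggests.
Kernel = upfirdn2d.setup_filter(Linear)           # normalized separable filter
x = torch.randn(1, 3, 64, 64)                     # dummy NCHW batch
Down = upfirdn2d.downsample2d(x, Kernel, down=2)  # 64x64 -> 32x32, filtered
Up = upfirdn2d.upsample2d(Down, Kernel, up=2)     # 32x32 -> 64x64, filtered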
import math
import numpy
import torch
import torch.nn as nn

CompressionFactor = 4
SiLUGain = math.sqrt(2)

def MSRInitializer(Layer, ActivationGain=1):
    # He (MSR) initialization: weight std = gain / sqrt(fan-in).
    FanIn = Layer.weight.data.size(1) * Layer.weight.data[0][0].numel()
    # The preview cuts off here; the standard completion draws the weights
    # from a zero-mean normal with that std.
    Layer.weight.data.normal_(0, ActivationGain / math.sqrt(FanIn))
    return Layer
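
For context, a hedged usage sketch (the layer shapes are illustrative, not from the gist): convolutions feeding a SiLU use the matching sqrt(2) gain, while layers with no nonlinearity afterwards keep the default gain of 1.

# Illustrative: He-initialize a 3x3 conv that precedes a SiLU nonlinearity.
Conv = MSRInitializer(nn.Conv2d(64, 64, kernel_size=3, padding=1), ActivationGain=SiLUGain)
# A linear projection with no activation keeps ActivationGain=1.
ToRGB = MSRInitializer(nn.Conv2d(64, 3, kernel_size=1))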
auto x = 42;        // deduced as int
auto y = 2.71;      // deduced as double
auto z = "hello";   // deduced as const char*

auto f(auto&& x) {  // abbreviated function template (C++20)
    return x + x;
}

auto x = f(21);       // x == 42
auto y = f("hello"s); // y == "hellohello" (std::string via the s literal)
@IFeelBloated
IFeelBloated / Loss.py
Created April 1, 2022 16:21
very wide GAN
import torch
import torch.nn as nn

def ZeroCenteredGradientPenalty(Samples, Critics):
    # Batch mean of 0.5 * ||∇D(x)||² (zero-centered R1/R2-style penalty).
    Gradient, = torch.autograd.grad(outputs=Critics.sum(), inputs=Samples, create_graph=True, only_inputs=True)
    return 0.5 * Gradient.square().sum([1, 2, 3]).mean()

def RelativisticLoss(PositiveCritics, NegativeCritics):
    # Pushes the positive critics above the negative ones; equivalent to
    # softplus(-(positive - negative)) averaged over the batch.
    return nn.functional.binary_cross_entropy_with_logits(PositiveCritics - NegativeCritics, torch.ones_like(PositiveCritics))
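
A hedged sketch of how these two losses typically combine in a training step (G, D, RealSamples, Noise, and the penalty weight Gamma are illustrative assumptions; the gist itself shows only the loss functions):

# Illustrative discriminator step, not from the gist.
RealSamples = RealSamples.detach().requires_grad_(True)
FakeSamples = G(Noise).detach().requires_grad_(True)
RealCritics, FakeCritics = D(RealSamples), D(FakeSamples)

DiscriminatorLoss = RelativisticLoss(RealCritics, FakeCritics)
DiscriminatorLoss = DiscriminatorLoss + Gamma * ZeroCenteredGradientPenalty(RealSamples, RealCritics)  # R1 on reals
DiscriminatorLoss = DiscriminatorLoss + Gamma * ZeroCenteredGradientPenalty(FakeSamples, FakeCritics)  # R2 on fakes
DiscriminatorLoss.backward()

# Generator step: swap the roles of real and fake critics.
GeneratorLoss = RelativisticLoss(D(G(Noise)), D(RealSamples.detach()))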
@IFeelBloated
IFeelBloated / Loss.py
Created March 31, 2022 14:50
1200-layer GAN 128x128
import torch
import torch.nn as nn

def ZeroCenteredGradientPenalty(Samples, Critics):
    Gradient, = torch.autograd.grad(outputs=Critics.sum(), inputs=Samples, create_graph=True, only_inputs=True)
    return 0.5 * Gradient.square().sum([1, 2, 3]).mean()

def RelativisticLoss(PositiveCritics, NegativeCritics):
    return nn.functional.binary_cross_entropy_with_logits(PositiveCritics - NegativeCritics, torch.ones_like(PositiveCritics))
@IFeelBloated
IFeelBloated / Loss.py
Created March 30, 2022 14:31
1200-layer GAN
import torch
import torch.nn as nn

def ZeroCenteredGradientPenalty(Samples, Critics):
    Gradient, = torch.autograd.grad(outputs=Critics.sum(), inputs=Samples, create_graph=True, only_inputs=True)
    return 0.5 * Gradient.square().sum([1, 2, 3]).mean()

def RelativisticLoss(PositiveCritics, NegativeCritics):
    return nn.functional.binary_cross_entropy_with_logits(PositiveCritics - NegativeCritics, torch.ones_like(PositiveCritics))
#include "PluginInstantiator.vxx"

auto Main() {
    auto Configurations = PluginInfo{
        .Namespace = "test",
        .Identifier = "com.dontcare.test",
        .Description = "whatever"
    };
    PluginInstantiator::Initialize(Configurations);
    PluginInstantiator::RegisterFunction("Test(clip: vnode)", [](auto Arguments, auto Core) {
import numpy as np
from keras import layers, initializers, models, optimizers, backend, utils
import tensorflow as tf

def Mirror(x, Padding=1):
    # Reflection-pad the two spatial dims of an NCHW tensor.
    return layers.Lambda(lambda x: tf.pad(x, [[0, 0], [0, 0], [Padding, Padding], [Padding, Padding]], mode="REFLECT"))(x)

def BilinearUpsample(x, Scale=2, Permute=False):
    ChannelCount = int(x.shape[1])
    # Frozen (trainable=False) transpose-conv settings for a fixed upsampler.
    Settings = dict(filters=ChannelCount, kernel_size=Scale * 2, strides=Scale, padding='same', use_bias=False, trainable=False)
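
The preview cuts off before the upsampling layer is built; as a hedged usage example of Mirror alone (the shapes and channels_first data format are assumptions):

# Illustrative: reflection padding + 'valid' conv preserves spatial size.
Inputs = layers.Input(shape=(16, 32, 32))  # CHW, channels_first
Padded = Mirror(Inputs, Padding=1)         # -> (16, 34, 34)
Output = layers.Conv2D(16, 3, padding='valid', data_format='channels_first')(Padded)
Network = models.Model(Inputs, Output)     # output shape: (16, 32, 32)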
import IO
import numpy as np
from keras import models, layers, optimizers, initializers
import keras.backend as K
batch_size = 32
r = IO.LoadRawBinaryGrayscaleSequence('r.bin', 128, 128, 6442) # range: [0.0, 1.0]
g = IO.LoadRawBinaryGrayscaleSequence('g.bin', 128, 128, 6442) # range: [0.0, 1.0]
b = IO.LoadRawBinaryGrayscaleSequence('b.bin', 128, 128, 6442) # range: [0.0, 1.0]
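
A hedged sketch of the step one would expect next (IO is the author's own loader; each plane is assumed to come back as a (6442, 128, 128) array): stacking the planes into a channels-first RGB tensor.

# Assumption: each plane is (6442, 128, 128), float in [0.0, 1.0].
x = np.stack([r, g, b], axis=1)  # -> (6442, 3, 128, 128), NCHW
print(x.shape)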