This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import torch | |
import torch.nn as nn | |
def ZeroCenteredGradientPenalty(Samples, Critics):
    """Return the per-sample zero-centered (R1/R2-style) gradient penalty.

    Differentiates the critic outputs with respect to the input samples and
    returns 0.5 * ||grad||^2 reduced over the non-batch axes, giving one
    penalty value per sample.

    Samples: input tensor with requires_grad=True; assumed 4-D (N, C, H, W)
             since the reduction is over dims [1, 2, 3] — TODO confirm.
    Critics: critic outputs computed from Samples (any shape; summed here).
    """
    # create_graph=True keeps the backward graph so the penalty itself can be
    # differentiated again during the training step.
    Gradient, = torch.autograd.grad(outputs=Critics.sum(), inputs=Samples, create_graph=True, only_inputs=True)
    return 0.5 * Gradient.square().sum([1, 2, 3])
class AdversarialTraining: | |
def __init__(self, Generator, Discriminator): | |
self.Generator = Generator |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import torch | |
import torch.nn as nn | |
import numpy | |
from torch_utils.ops import upfirdn2d | |
# Symmetric 1-D filter taps of increasing smoothness, each summing to ~1.
# Presumably used as separable resampling kernels with upfirdn2d — TODO
# confirm against the call sites (not visible here).
Box = [1.0]
Linear = [0.25, 0.50, 0.25]
Quadratic = [0.128, 0.235, 0.276, 0.235, 0.128]
Cubic = [0.058, 0.128, 0.199, 0.231, 0.199, 0.128, 0.058]
Gaussian = [0.008, 0.036, 0.110, 0.213, 0.267, 0.213, 0.110, 0.036, 0.008]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import math | |
import numpy | |
import torch | |
import torch.nn as nn | |
# Channel-compression ratio; meaning inferred from the name only — TODO
# confirm how it is applied at the use sites (not visible in this chunk).
CompressionFactor = 4

# Initialization gain paired with SiLU activations: sqrt(2), the standard
# He/MSR gain for ReLU-family nonlinearities.
SiLUGain = math.sqrt(2)
def MSRInitializer(Layer, ActivationGain=1): | |
FanIn = Layer.weight.data.size(1) * Layer.weight.data[0][0].numel() |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Examples of auto type deduction from the initializer.
auto x = 42;      // deduced as int
auto y = 2.71;    // deduced as double
auto z = "hello"; // deduced as const char*
// "Doubles" its argument via operator+, so it works for any type where
// x + x is defined (ints add, std::string concatenates). C++20 abbreviated
// function template taking a forwarding reference.
auto f(auto&& x) {
    return x + x;
}
auto x = f(21);       // x == 42 — int + int
auto y = f("hello"s); // y == "hellohello" — std::string concatenation
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import torch | |
import torch.nn as nn | |
def ZeroCenteredGradientPenalty(Samples, Critics):
    """Return the batch-mean zero-centered (R1/R2-style) gradient penalty.

    Differentiates the critic outputs with respect to the input samples,
    computes 0.5 * ||grad||^2 per sample (reducing over the non-batch axes),
    and averages over the batch into a scalar.

    Samples: input tensor with requires_grad=True; assumed 4-D (N, C, H, W)
             since the reduction is over dims [1, 2, 3] — TODO confirm.
    Critics: critic outputs computed from Samples (any shape; summed here).
    """
    # create_graph=True keeps the backward graph so the penalty itself can be
    # differentiated again during the training step.
    Gradient, = torch.autograd.grad(outputs=Critics.sum(), inputs=Samples, create_graph=True, only_inputs=True)
    return 0.5 * Gradient.square().sum([1, 2, 3]).mean()
def RelativisticLoss(PositiveCritics, NegativeCritics):
    """Relativistic adversarial loss on the critic gap.

    Applies binary cross-entropy with logits to (PositiveCritics -
    NegativeCritics) against an all-ones target, i.e. the batch mean of
    softplus(-(positive - negative)) — minimized when the positive critic
    scores exceed the negative ones.
    """
    return nn.functional.binary_cross_entropy_with_logits(PositiveCritics - NegativeCritics, torch.ones_like(PositiveCritics))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import torch | |
import torch.nn as nn | |
def ZeroCenteredGradientPenalty(Samples, Critics):
    """Batch-mean zero-centered gradient penalty (R1/R2 style).

    Takes the gradient of the summed critic outputs with respect to the
    samples, forms 0.5 * ||grad||^2 per sample over the non-batch axes, and
    returns the mean across the batch as a scalar.

    Samples: tensor with requires_grad=True, assumed 4-D (N, C, H, W) given
             the sum over dims [1, 2, 3] — TODO confirm with callers.
    Critics: critic outputs derived from Samples.
    """
    # Keep the graph (create_graph=True) so training can backprop through
    # the penalty term itself.
    Gradient, = torch.autograd.grad(outputs=Critics.sum(), inputs=Samples, create_graph=True, only_inputs=True)
    return 0.5 * Gradient.square().sum([1, 2, 3]).mean()
def RelativisticLoss(PositiveCritics, NegativeCritics):
    """Relativistic pairing loss: BCE-with-logits of the critic difference
    (PositiveCritics - NegativeCritics) against a target of ones, which
    equals mean softplus(negative - positive) over the batch.
    """
    return nn.functional.binary_cross_entropy_with_logits(PositiveCritics - NegativeCritics, torch.ones_like(PositiveCritics))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import torch | |
import torch.nn as nn | |
def ZeroCenteredGradientPenalty(Samples, Critics):
    """Scalar zero-centered gradient penalty, averaged over the batch.

    grad = d(sum(Critics)) / d(Samples); the result is the batch mean of
    0.5 * ||grad||^2 where the squared norm is taken over dims [1, 2, 3]
    (so Samples is assumed 4-D, (N, C, H, W) — TODO confirm).
    """
    # create_graph=True so the penalty remains differentiable for the
    # optimizer step that follows.
    Gradient, = torch.autograd.grad(outputs=Critics.sum(), inputs=Samples, create_graph=True, only_inputs=True)
    return 0.5 * Gradient.square().sum([1, 2, 3]).mean()
def RelativisticLoss(PositiveCritics, NegativeCritics):
    """Relativistic loss on critic pairs: binary cross-entropy with logits
    of (PositiveCritics - NegativeCritics) versus all-ones labels; smaller
    when positive critics outscore negative ones.
    """
    return nn.functional.binary_cross_entropy_with_logits(PositiveCritics - NegativeCritics, torch.ones_like(PositiveCritics))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#include "PluginInstantiator.vxx" | |
auto Main() { | |
auto Configurations = PluginInfo{ | |
.Namespace = "test", | |
.Identifier = "com.dontcare.test", | |
.Description = "whatever" | |
}; | |
PluginInstantiator::Initialize(Configurations); | |
PluginInstantiator::RegisterFunction("Test(clip: vnode)", [](auto Arguments, auto Core) { |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import numpy as np | |
from keras import layers, initializers, models, optimizers, backend, utils | |
import tensorflow as tf | |
def Mirror(x, Padding=1):
    """Reflection-pad the last two axes of a 4-D tensor by `Padding` on each
    side (the first two axes — presumably batch and channels — are left
    unpadded; confirm the layout against callers).

    Wrapped in a Keras Lambda layer so it composes inside a model graph.
    NOTE(review): tf.pad matches mode case-insensitively, so "Reflect" acts
    as "REFLECT" — verify for the TF version in use.
    """
    # Inner lambda parameter renamed from `x` to `t` to avoid shadowing the
    # function argument; `Padding` is captured by closure.
    return layers.Lambda(lambda t: tf.pad(t, [[0, 0], [0, 0], [Padding, Padding], [Padding, Padding]], mode="Reflect"))(x)
def BilinearUpsample(x, Scale=2, Permute=False): | |
ChannelCount = int(x.shape[1]) | |
Settings = dict(filters=ChannelCount, kernel_size=Scale * 2, strides=Scale, padding='same', use_bias=False, trainable=False) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import IO | |
import numpy as np | |
from keras import models, layers, optimizers, initializers | |
import keras.backend as K | |
# Mini-batch size for the training below (not visible in this chunk).
batch_size = 32

# Load the three color planes from raw binary files: each is a sequence of
# 6442 grayscale frames of 128x128, already scaled to [0.0, 1.0] according
# to the loader's contract — see project module IO.
r = IO.LoadRawBinaryGrayscaleSequence('r.bin', 128, 128, 6442)  # range: [0.0, 1.0]
g = IO.LoadRawBinaryGrayscaleSequence('g.bin', 128, 128, 6442)  # range: [0.0, 1.0]
b = IO.LoadRawBinaryGrayscaleSequence('b.bin', 128, 128, 6442)  # range: [0.0, 1.0]