@Quasimondo
Last active December 26, 2020 14:08
A self-adjusting blend between a sine and a LeakyReLU activation. No idea if this makes any sense.
import torch
import torch.nn as nn
import torch.nn.functional as F


class LeakySine(nn.Module):
    def __init__(self, w0=30.0, blend=0.75, slope=0.2):
        super().__init__()
        self.blend = nn.Parameter(torch.ones(1, 1) * blend)
        self.slope = nn.Parameter(torch.zeros(1, 1) + slope)
        self.w0 = w0

    def forward(self, x):
        # Using self.slope.item() detaches the slope from the autograd graph,
        # so the model never learns a different slope; the masked multiply below
        # is slower but keeps the gradient flowing into self.slope.
        y = x * (1.0 - self.blend)
        mask = x < 0
        # squeeze() reduces the (1, 1) parameter to a scalar so the product
        # broadcasts back to the shape of the masked selection.
        y[mask] = y[mask] * self.slope.squeeze()
        return y + torch.sin(self.w0 * x) * self.blend

        # Old version that does not work as intended (slope.item() is a plain
        # Python float, so no gradient reaches self.slope):
        # return torch.sin(self.w0 * x) * self.blend + F.leaky_relu(x, negative_slope=self.slope.item()) * (1.0 - self.blend)
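
For context, a minimal usage sketch (my addition, not part of the gist; layer sizes are arbitrary). It shows that LeakySine drops into a model like any other activation, with blend and slope registered as trainable parameters:

# Minimal usage sketch; reuses the imports and LeakySine definition above.
model = nn.Sequential(
    nn.Linear(2, 64),
    LeakySine(),   # w0=30.0, blend=0.75, slope=0.2 by default
    nn.Linear(64, 1),
)

x = torch.randn(8, 2)   # batched input; blend/slope have shape (1, 1), so x needs a batch dim
out = model(x)          # shape: (8, 1)

# blend and slope are nn.Parameters, so an optimizer updates them alongside the weights:
print([name for name, _ in model.named_parameters() if "blend" in name or "slope" in name])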
@Quasimondo (Author) commented:

activation = LeakySine(blend=0.0, slope=0.0)
is equivalent to
activation = nn.ReLU()

activation = LeakySine(blend=0.0, slope=0.2)
is equivalent to
activation = nn.LeakyReLU(negative_slope=0.2)
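
A quick numerical check of these equivalences (my addition; assumes the LeakySine definition above is in scope, and gives the input a leading batch dimension because blend and slope have shape (1, 1)):

x = torch.linspace(-2.0, 2.0, steps=9).unsqueeze(0)  # add a batch dimension

relu_like = LeakySine(blend=0.0, slope=0.0)
assert torch.allclose(relu_like(x), nn.ReLU()(x))

leaky_like = LeakySine(blend=0.0, slope=0.2)
assert torch.allclose(leaky_like(x), nn.LeakyReLU(negative_slope=0.2)(x))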
