Abien Fred Agarap (AFAgarap)

@AFAgarap
AFAgarap / autoencoder-pytorch.ipynb
Last active May 8, 2023 20:35
PyTorch implementation of an autoencoder.
@AFAgarap
AFAgarap / train_ae.py
Created January 7, 2020 12:55
Training a model in PyTorch.
for epoch in range(epochs):
    loss = 0
    for batch_features, _ in train_loader:
        # reshape the mini-batch to an [N, 784] matrix and load it to the active device
        batch_features = batch_features.view(-1, 784).to(device)
        # reset the gradients back to zero;
        # PyTorch accumulates gradients on subsequent backward passes
        optimizer.zero_grad()
        # compute reconstructions and the reconstruction loss
        outputs = model(batch_features)
        train_loss = criterion(outputs, batch_features)
        # compute gradients and update the parameters
        train_loss.backward()
        optimizer.step()
        # accumulate the mini-batch loss into the epoch loss
        loss += train_loss.item()
    # report the mean epoch loss
    print("epoch : {}/{}, loss = {:.6f}".format(epoch + 1, epochs, loss / len(train_loader)))
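After training, the model can be sanity-checked by reconstructing a held-out batch. A minimal sketch, assuming the test_dataset from the load_mnist.py gist below (the loader settings here are illustrative, not part of the gist):

# reconstruct one test batch with the trained model
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False)
model.eval()
with torch.no_grad():
    batch_features, _ = next(iter(test_loader))
    reconstructions = model(batch_features.view(-1, 784).to(device))
print(reconstructions.shape)  # torch.Size([32, 784])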
@AFAgarap
AFAgarap / instantiate_objects.py
Last active January 14, 2020 12:59
Object instantiation for training an autoencoder written in PyTorch.
import torch
import torch.nn as nn
import torch.optim as optim

# use gpu if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# create a model from the `AE` autoencoder class
# and load it to the specified device, either gpu or cpu
model = AE(input_shape=784).to(device)

# create an optimizer object: Adam with learning rate 1e-3
optimizer = optim.Adam(model.parameters(), lr=1e-3)

# mean-squared error reconstruction loss
criterion = nn.MSELoss()
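A one-line check that the parameters actually landed on the chosen device (illustrative):

# confirm device placement of the model parameters
print(next(model.parameters()).device)  # e.g. cuda:0 when a GPU is available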
@AFAgarap
AFAgarap / load_mnist.py
Last active March 6, 2024 03:07
Loading the MNIST dataset and creating a torch.utils.data.DataLoader object for it.
import torch
import torchvision

transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])

train_dataset = torchvision.datasets.MNIST(
    root="~/torch_datasets", train=True, transform=transform, download=True
)
test_dataset = torchvision.datasets.MNIST(
    root="~/torch_datasets", train=False, transform=transform, download=True
)

# wrap the training set in a DataLoader (batch size here is illustrative)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True)
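A quick sketch for confirming the loader's output (the shapes follow the batch size above):

# inspect one mini-batch: MNIST images arrive as [N, 1, 28, 28] tensors
batch_features, batch_labels = next(iter(train_loader))
print(batch_features.shape)  # torch.Size([128, 1, 28, 28])
print(batch_labels.shape)    # torch.Size([128])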
@AFAgarap
AFAgarap / autoencoder.py
Last active December 25, 2020 18:34
PyTorch implementation of a vanilla autoencoder model.
import torch
import torch.nn as nn

class AE(nn.Module):
    def __init__(self, **kwargs):
        super().__init__()
        self.encoder_hidden_layer = nn.Linear(
            in_features=kwargs["input_shape"], out_features=128
        )
        self.encoder_output_layer = nn.Linear(in_features=128, out_features=128)
        self.decoder_hidden_layer = nn.Linear(in_features=128, out_features=128)
        self.decoder_output_layer = nn.Linear(
            in_features=128, out_features=kwargs["input_shape"]
        )

    def forward(self, features):
        # encode to a 128-dimensional code, then decode back to the input shape
        activation = torch.relu(self.encoder_hidden_layer(features))
        code = torch.relu(self.encoder_output_layer(activation))
        activation = torch.relu(self.decoder_hidden_layer(code))
        return torch.relu(self.decoder_output_layer(activation))
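A minimal smoke test for the class, assuming 784-dimensional (flattened MNIST) inputs:

# a forward pass should return reconstructions with the same shape as the input
model = AE(input_shape=784)
reconstruction = model(torch.randn(4, 784))
print(reconstruction.shape)  # torch.Size([4, 784])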
@AFAgarap
AFAgarap / mini_vgg_decoder.py
Created November 19, 2019 16:03
TensorFlow 2.0 implementation of a mini VGG-based decoder for an autoencoder.
class Decoder(tf.keras.layers.Layer):
    def __init__(self, **kwargs):
        super(Decoder, self).__init__()
        self.convt_1_layer_1 = tf.keras.layers.Conv2DTranspose(
            filters=64,
            kernel_size=(3, 3),
            activation=tf.nn.relu
        )
        self.convt_1_layer_2 = tf.keras.layers.Conv2DTranspose(
            filters=64,
            kernel_size=(3, 3),
            activation=tf.nn.relu
        )
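Since the transposed convolutions above use the default "valid" padding and unit strides, each 3x3 layer grows the spatial dimensions by 2. A standalone check (the input shape is illustrative):

import tensorflow as tf

# a 3x3 Conv2DTranspose with "valid" padding maps (H, W) -> (H + 2, W + 2)
x = tf.random.normal((1, 7, 7, 128))
y = tf.keras.layers.Conv2DTranspose(filters=64, kernel_size=(3, 3), activation=tf.nn.relu)(x)
print(y.shape)  # (1, 9, 9, 64)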
@AFAgarap
AFAgarap / mini_vgg_encoder.py
Created November 19, 2019 16:02
TensorFlow 2.0 implementation of a mini VGG-based encoder for an autoencoder.
class Encoder(tf.keras.layers.Layer):
    def __init__(self, **kwargs):
        super(Encoder, self).__init__()
        self.input_layer = tf.keras.layers.InputLayer(
            input_shape=kwargs['input_shape']
        )
        self.conv_1_layer_1 = tf.keras.layers.Conv2D(
            filters=32,
            kernel_size=(3, 3),
            activation=tf.nn.relu
        )
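Mirroring the decoder above, each 3x3 "valid" convolution shrinks the spatial dimensions by 2. A standalone check with an MNIST-shaped input:

import tensorflow as tf

# a 3x3 Conv2D with "valid" padding maps (H, W) -> (H - 2, W - 2)
x = tf.random.normal((1, 28, 28, 1))
y = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation=tf.nn.relu)(x)
print(y.shape)  # (1, 26, 26, 32)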
@AFAgarap
AFAgarap / mini_vgg_ae.py
Created November 19, 2019 16:01
TensorFlow 2.0 implementation of a mini VGG-based autoencoder.
class Autoencoder(tf.keras.Model):
    def __init__(self, **kwargs):
        super(Autoencoder, self).__init__()
        self.encoder = Encoder(
            input_shape=kwargs['input_shape'],
            latent_dim=kwargs['latent_dim']
        )
        self.decoder = Decoder(latent_dim=kwargs['latent_dim'])

    def call(self, features):
        # encode the input into a latent code, then decode it into a reconstruction
        code = self.encoder(features)
        reconstructed = self.decoder(code)
        return reconstructed
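Fitting a trust score model on the autoencoder's encoded training features, then scoring the classifier's test predictions: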
from trustscore import TrustScore

ts = TrustScore(alpha=5e-2)
ts.fit(encoded_train_features, train_labels)
trust_score, closest_class_not_predicted = ts.score(
    encoded_test_features, predictions, k=5
)
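The trust score (Jiang et al., 2018) is the ratio between a test point's distance to its nearest class other than the predicted one and its distance to the predicted class, so higher values indicate predictions that agree with the feature-space neighborhood; closest_class_not_predicted reports which competing class each test point sits nearest to.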
epochs = 60

def loss(actual, predicted):
    crossentropy_loss = tf.losses.categorical_crossentropy(actual, predicted)
    average_loss = tf.reduce_mean(crossentropy_loss)
    return average_loss

def train(train_dataset, validation_dataset, epochs, learning_rate=1e-1, momentum=9e-1, decay=1e-6):
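The preview cuts off at the train signature. A minimal sketch of a TF 2.0 custom training loop consistent with that signature, assuming a globally defined Keras model and the loss function above (the SGD hyperparameters mirror the signature; the body itself is an assumption, not the gist's code):

def train(train_dataset, validation_dataset, epochs, learning_rate=1e-1, momentum=9e-1, decay=1e-6):
    # assumed body: SGD with momentum and learning-rate decay on a global Keras `model`
    optimizer = tf.keras.optimizers.SGD(
        learning_rate=learning_rate, momentum=momentum, decay=decay
    )
    for epoch in range(epochs):
        for batch_features, batch_labels in train_dataset:
            with tf.GradientTape() as tape:
                predictions = model(batch_features)
                train_loss = loss(batch_labels, predictions)
            # backpropagate and update the model parameters
            gradients = tape.gradient(train_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        # monitor validation loss after each epoch
        val_loss = tf.reduce_mean(
            [loss(labels, model(features)) for features, labels in validation_dataset]
        )
        print("epoch {}/{} : validation loss = {:.6f}".format(epoch + 1, epochs, val_loss))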