Skip to content

Instantly share code, notes, and snippets.

Avatar

Chris Hays johnchrishays

View GitHub Profile
View transformer_architecture.py
# NOTE(review): scraped gist fragment — original indentation was lost in the page
# capture, and the class is truncated here (forward() and any audio-branch layers
# implied by n_aud_features are not visible). Do not treat this as a complete class.
class FaceClassifier(nn.Module):
# Transformer-based classifier over per-frame video features.
# n_vid_features: feature dim of the video input (used as transformer d_model).
# n_aud_features: audio feature dim — accepted but not used in the visible code.
# n_head: number of attention heads; n_layers: number of encoder layers.
# n_linear_hidden: default 30 — not used in the visible code; presumably consumed
# by layers defined past the truncation point — confirm against the full file.
# dropout: default 0.1 — unused here (the Dropout layer below is commented out).
def __init__(self, n_vid_features, n_aud_features, n_head, n_layers, n_linear_hidden=30, dropout=0.1):
super(FaceClassifier, self).__init__()
# video
# PositionalEncoding is a project-local module (definition not visible here);
# presumably adds position information with d_model = n_vid_features — verify.
self.vid_pos_encoder = PositionalEncoding(d_model=n_vid_features)
vid_encoder_layer = nn.TransformerEncoderLayer(d_model=n_vid_features, nhead=n_head)
self.vid_transformer_encoder = nn.TransformerEncoder(vid_encoder_layer, num_layers=n_layers)
#self.dropout = nn.Dropout(p=dropout)
# Single-logit linear head over the transformer output (binary prediction,
# consistent with the BCE metric reported in the accompanying results table).
self.vid_pred = nn.Linear(n_vid_features, 1)
View face_autoencoder.py
# NOTE(review): scraped gist fragment — indentation was lost in the page capture
# and the definition is cut off mid-expression: the nn.Sequential(...) call below
# is unclosed, and the remaining encoder layers and the decoder are not visible.
class FaceAutoencoder(nn.Module):
# Convolutional autoencoder over face-crop images.
# n_out_channels1/2/3: output channels of the three conv stages (only the first
# stage is visible here); kernel_size1/2/3: the matching kernel sizes.
def __init__(self, n_out_channels1=4, n_out_channels2=4, n_out_channels3=1, \
kernel_size1=5, kernel_size2=5, kernel_size3=5):
super(FaceAutoencoder, self).__init__()
self.encoder = nn.Sequential(
# N_IN_CHANNELS is a module-level constant not visible in this fragment;
# the shape comment implies it is 3 (RGB) — confirm against the full file.
# stride=2 with padding=2 halves each spatial dim: 160x160 -> 80x80.
nn.Conv2d(N_IN_CHANNELS, out_channels=n_out_channels1, kernel_size=kernel_size1, stride=2, padding=2), # [3,160,160] -> [4, 80, 80]
nn.ReLU(),
nn.Dropout(0.3),
# ...truncated: later conv stages (n_out_channels2/3) and decoder not shown.
View transformer_stats.csv
Train BCE Test classification error
Classifier with face crop 0.693 49%
Classifier with face crop and aggregation 0.693 47%
Classifier without face crop 0.704 51%
Classifier without face crop with aggregation 0.697 50%
@johnchrishays
johnchrishays / cae.csv
Last active May 9, 2020
Deepfake Detection with Transformer-Based Architectures
View cae.csv
Train MSE Test MSE
Face cropped CAE (hidden dim = 1296) 0.039 0.045
Un-preprocessed CAE (hidden dim = 3600) 0.014 0.014
You can’t perform that action at this time.