Skip to content

Instantly share code, notes, and snippets.

View RITIK-12's full-sized avatar
💭
LockDown Learning!

Ritik Bompilwar RITIK-12

💭
LockDown Learning!
View GitHub Profile
import fnmatch
import os
from matplotlib import pyplot as plt
import cv2
from facenet_pytorch import MTCNN, InceptionResnetV1
# Pretrained FaceNet embedding model (VGGFace2 weights), switched to eval mode
# so dropout/batch-norm behave deterministically at inference time.
resnet = InceptionResnetV1(pretrained='vggface2').eval()
# Load the cascade
# NOTE(review): '/haarcascade_frontalface_default.xml' is a filesystem-root
# absolute path — cv2.CascadeClassifier fails silently (empty classifier) when
# the file is missing; verify the XML actually lives there.
face_cascade = cv2.CascadeClassifier('/haarcascade_frontalface_default.xml')
def face_match(img_path, data_path): # img_path= location of photo, data_path= location of data.pt
model_ft, FT_losses = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=200)
plt.figure(figsize=(10,5))
plt.title("FRT Loss During Training")
plt.plot(FT_losses, label="FT loss")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
torch.save(model, "/model.pt")
def train_model(model, criterion, optimizer, scheduler,
num_epochs=25):
since = time.time()
FT_losses = []
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
from models.inception_resnet_v1 import InceptionResnetV1
print('Running on device: {}'.format(device))
model_ft = InceptionResnetV1(pretrained='vggface2', classify=False, num_classes = len(class_names))
list(model_ft.children())[-6:]
layer_list = list(model_ft.children())[-5:] # all final layers
model_ft = nn.Sequential(*list(model_ft.children())[:-5])
def imshow(inp, title=None):
    """Display a normalized (C, H, W) image tensor with matplotlib.

    Undoes the ImageNet mean/std normalization applied by the data
    transforms, then plots the result; an optional *title* is drawn above.
    """
    # Tensor layout is channel-first; matplotlib expects channel-last (H, W, C).
    image = np.transpose(inp.numpy(), (1, 2, 0))
    # ImageNet normalization constants — must match the training transforms.
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])
    # De-normalize and clamp into the displayable [0, 1] range.
    image = np.clip(image * imagenet_std + imagenet_mean, 0, 1)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
data_transforms = {
'train': transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Scale((224,224)),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.4),
transforms.RandomRotation(5, resample=False,expand=False, center=None),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
from torch import nn, optim, as_tensor
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from torch.optim import lr_scheduler
from torch.nn.init import *
from torchvision import transforms, utils, datasets, models
import cv2
from PIL import Image
from pdb import set_trace
import time
import fnmatch
import os
from matplotlib import pyplot as plt
import cv2
# Load the cascade
# NOTE(review): same root-absolute cascade path as earlier in this gist —
# confirm '/haarcascade_frontalface_default.xml' exists, since OpenCV returns
# an empty classifier rather than raising when the file is absent.
face_cascade = cv2.CascadeClassifier('/haarcascade_frontalface_default.xml')
# Root directory that the dataset is read from — presumably one subfolder per
# identity; verify against the loader that consumes it.
paths="/data/"
import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import classification_report, confusion_matrix
#Start
# Location of the Keras training images (one subfolder per class, as expected
# by ImageDataGenerator.flow_from_directory).
# NOTE(review): 'F://data//Train' mixes drive-letter syntax with doubled
# forward slashes — Windows generally tolerates this, but 'F:/data/Train'
# would be the conventional spelling; confirm the path resolves.
train_data_path = 'F://data//Train'
.ORG 0000H
JMP MAIN
.ORG 0034H
JMP RST6.5
;RET
.ORG 003CH
JMP RST7.5
;RET
.ORG 1000H
MAIN: DI