import cv2
import numpy as np
import torch
import PIL.Image as PILImage
from fastai.vision import *  # load_learner, ImageList, open_image, Image, ResizeMethod

# Load the exported learner and attach the validation images as a test set
learner = load_learner('../data/cropped_faces', 'effnet_test.pkl',
                       test=ImageList.from_folder('/home/josh/git/kgl_deepfake/data/cropped_faces/valid'))
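# Not in the original gist: a quick look at what the exported learner expects,
# so the manual pipeline below can be compared against the same class order and transforms.
print(learner.data.classes)
print(learner.data.valid_ds.tfms)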
# Opening an image with fastai
fastai_img = open_image('/home/josh/git/kgl_deepfake/data/cropped_faces/valid/vylzsyazmx.mp4_1_REAL.jpg')
print(fastai_img.shape)
# Opening an image manually
manual_img = PILImage.open('/home/josh/git/kgl_deepfake/data/cropped_faces/valid/vylzsyazmx.mp4_1_REAL.jpg')
manual_img_array = np.array(manual_img, dtype=np.float32) / 255.
manual_img_array = manual_img_array.transpose(2,0,1)
print(manual_img_array.shape)
# They're the same
print(np.allclose(manual_img_array, np.array(fastai_img.data)))
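# Not in the original gist: also report the largest per-pixel gap, which is more
# informative than the boolean from allclose if the two pipelines ever start to drift.
print(np.abs(manual_img_array - np.array(fastai_img.data)).max())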
# Resize with fastai
resized_fastai = fastai_img.apply_tfms(learner.data.valid_ds.tfms, size=256,
                                       resize_method=ResizeMethod.PAD, padding_mode='zeros').data
resized_fastai = Image(resized_fastai)
# Resize with cv2 (note: cv2.resize takes dsize as (width, height))
resized_cv2_0 = cv2.resize(manual_img_array.transpose(1, 2, 0), (256, 254), interpolation=cv2.INTER_AREA)
resized_cv2_1 = cv2.resize(manual_img_array.transpose(1, 2, 0), (256, 254), interpolation=cv2.INTER_CUBIC)
resized_cv2_2 = cv2.resize(manual_img_array.transpose(1, 2, 0), (256, 254), interpolation=cv2.INTER_LINEAR)
# Pad the missing 2 rows with zeros to reach 256x256, then move channels first (HWC -> CHW)
resized_cv2_0 = cv2.copyMakeBorder(resized_cv2_0, 0, 2, 0, 0, cv2.BORDER_CONSTANT).transpose(2, 0, 1)
resized_cv2_1 = cv2.copyMakeBorder(resized_cv2_1, 0, 2, 0, 0, cv2.BORDER_CONSTANT).transpose(2, 0, 1)
resized_cv2_2 = cv2.copyMakeBorder(resized_cv2_2, 0, 2, 0, 0, cv2.BORDER_CONSTANT).transpose(2, 0, 1)
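# Not in the original gist: sanity-check that the cv2 pipeline now matches fastai's
# resized output shape, and measure how far each interpolation mode drifts from it.
fastai_resized_array = np.array(resized_fastai.data)
for name, arr in [('INTER_AREA', resized_cv2_0), ('INTER_CUBIC', resized_cv2_1), ('INTER_LINEAR', resized_cv2_2)]:
    print(name, arr.shape, np.abs(arr - fastai_resized_array).max())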
def process_image(input_image):
    # Normalize with ImageNet statistics; broadcasting against the [1, 3, 1, 1]-shaped
    # stats also promotes the (3, 256, 256) input to a batch of one.
    imagenet_mean = torch.Tensor([0.485, 0.456, 0.406])
    imagenet_std = torch.Tensor([0.229, 0.224, 0.225])
    input_image = input_image.sub(imagenet_mean[None, :, None, None]).div(imagenet_std[None, :, None, None])
    # Run the model on the GPU and convert logits to probabilities
    return torch.softmax(learner.model(input_image.cuda()), dim=-1)
res1 = process_image(torch.from_numpy(resized_cv2_0))
res2 = process_image(torch.from_numpy(resized_cv2_1))
res2_1 = process_image(torch.from_numpy(resized_cv2_1.clip(0,1)))
res3 = process_image(torch.from_numpy(resized_cv2_2))
print(res1)
print(res2)
print(res2_1)
print(res3)
fastai_res = learner.predict(resized_fastai)
print(fastai_res)
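# Not in the original gist: compare each cv2 pipeline's probabilities against fastai's
# prediction, assuming fastai v1's predict() return format (category, index, probabilities).
fastai_probs = fastai_res[2]
for name, res in [('INTER_AREA', res1), ('INTER_CUBIC', res2), ('INTER_CUBIC clipped', res2_1), ('INTER_LINEAR', res3)]:
    diff = (res.detach().cpu().squeeze() - fastai_probs).abs().max().item()
    print(name, 'max probability difference vs fastai:', diff)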