Thimira Amaratunga (Thimira)
@Thimira
Thimira / resnet50_predict.py
Created September 3, 2017 08:35
How to use the ResNet50 model from Keras Applications trained on ImageNet to make a prediction on an image.
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
model = ResNet50(weights='imagenet')
img_path = 'Data/Jellyfish.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
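The preview cuts off before the prediction step. A minimal sketch of the likely remaining lines, following the standard Keras Applications workflow (batch axis, model-specific preprocessing, decoding):

x = np.expand_dims(x, axis=0)    # add the batch axis: (1, 224, 224, 3)
x = preprocess_input(x)          # ResNet50-specific ImageNet preprocessing
preds = model.predict(x)
# decode_predictions returns (class_id, class_name, probability) tuples
print('Predicted:', decode_predictions(preds, top=3)[0])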
@Thimira
Thimira / vgg16_predict.py
Created September 3, 2017 08:34
How to use the VGG16 model from Keras Applications trained on ImageNet to make a prediction on an image.
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input, decode_predictions
import numpy as np
model = VGG16(weights='imagenet')
img_path = 'Data/Jellyfish.jpg'
img = image.load_img(img_path, target_size=(224, 224))
img_data = image.img_to_array(img)
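As in the ResNet50 gist above, the preview ends before inference; the completion is presumably the same pattern with VGG16's preprocess_input:

img_data = np.expand_dims(img_data, axis=0)
img_data = preprocess_input(img_data)    # VGG16-specific preprocessing
preds = model.predict(img_data)
print('Predicted:', decode_predictions(preds, top=3)[0])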
@Thimira
Thimira / ObjectTracker.py
Last active November 11, 2021 06:07
Track any object in a video with Dlib Correlation Trackers. Tutorial: https://www.codesofinterest.com/2018/02/track-any-object-in-video-with-dlib.html
'''
Using Correlation Trackers in Dlib, you can track any object in a video stream without needing to train a custom object detector.
Check out the tutorial at: http://www.codesofinterest.com/2018/02/track-any-object-in-video-with-dlib.html
'''
import numpy as np
import cv2
import dlib
# This list will hold the coordinates of mouse click events.
mousePoints = []
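The preview stops at the mouse-click buffer. A hedged sketch of the core dlib correlation-tracker loop the tutorial describes; the camera index and the initial bounding-box coordinates are assumptions, since in the full script they come from the mouse clicks:

tracker = dlib.correlation_tracker()
video_capture = cv2.VideoCapture(0)    # assumed camera index
ret, frame = video_capture.read()
# start tracking inside the selected rectangle (assumed coordinates here)
tracker.start_track(frame, dlib.rectangle(100, 100, 300, 300))
while True:
    ret, frame = video_capture.read()
    if not ret:
        break
    tracker.update(frame)          # re-locate the object in the new frame
    pos = tracker.get_position()   # dlib.drectangle with float coordinates
    cv2.rectangle(frame, (int(pos.left()), int(pos.top())),
                  (int(pos.right()), int(pos.bottom())), (0, 255, 0), 2)
    cv2.imshow("Tracking", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break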
import numpy as np
import cv2
import dlib
from scipy.spatial import distance as dist
from scipy.spatial import ConvexHull
PREDICTOR_PATH = "../data/dlib_models/shape_predictor_68_face_landmarks.dat"
FULL_POINTS = list(range(0, 68))
FACE_POINTS = list(range(17, 68))
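This snippet's gist header is not shown in the listing, but the imports and the 68-point index ranges set up dlib's facial landmark predictor. A minimal sketch of how these pieces are typically wired together; the input image path is an assumption:

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)
frame = cv2.imread("face.jpg")    # assumed input
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
for rect in detector(gray, 0):
    shape = predictor(gray, rect)
    # indices 17-67 are the face points defined above
    for i in FACE_POINTS:
        part = shape.part(i)
        cv2.circle(frame, (part.x, part.y), 2, (0, 255, 0), -1)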
@Thimira
Thimira / keras_bottleneck_multiclass.py
Last active April 13, 2021 05:52
Learn how to build a multi-class image classification system using bottleneck features from a pre-trained model in Keras to achieve transfer learning. Tutorial: https://www.codesofinterest.com/2017/08/bottleneck-features-multi-class-classification-keras.html
'''
Using Bottleneck Features for Multi-Class Classification in Keras
We use this technique to build powerful (high accuracy without overfitting) image classification systems with a small
amount of training data.
The full tutorial to get this code working can be found on the "Codes of Interest" blog at the following link:
https://www.codesofinterest.com/2017/08/bottleneck-features-multi-class-classification-keras.html
Please go through the tutorial before attempting to run this code, as it explains how to set up your training data.
'''
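The preview shows only the docstring, so here is a hedged sketch of the bottleneck-feature idea it describes: run the images once through VGG16 without its classifier head, cache the output, and train a small dense model on top. The paths, batch size, and class count are placeholder assumptions:

import numpy as np
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Flatten, Dense, Dropout

# run images through the convolutional base once and cache the features
vgg16 = applications.VGG16(include_top=False, weights='imagenet')
datagen = ImageDataGenerator(rescale=1. / 255)
generator = datagen.flow_from_directory('data/train',       # assumed path
                                        target_size=(224, 224),
                                        batch_size=16,
                                        class_mode=None,    # features only
                                        shuffle=False)
bottleneck_features = vgg16.predict_generator(generator)

# train a small fully-connected model on the cached features
num_classes = 3    # assumed number of classes
top_model = Sequential()
top_model.add(Flatten(input_shape=bottleneck_features.shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(num_classes, activation='softmax'))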
# Blur Your Face Automatically with OpenCV and Dlib
# See tutorial at https://www.youtube.com/watch?v=QKggnWdCTNY
import numpy as np
import cv2
import dlib
video_capture = cv2.VideoCapture(1)
detector = dlib.get_frontal_face_detector()
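The preview ends right after the face detector is created; a hedged sketch of the blur loop the video tutorial describes:

while True:
    ret, frame = video_capture.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for face in detector(gray, 0):
        x1, y1 = max(face.left(), 0), max(face.top(), 0)
        x2, y2 = face.right(), face.bottom()
        # blur only the detected face region in place
        frame[y1:y2, x1:x2] = cv2.GaussianBlur(frame[y1:y2, x1:x2], (99, 99), 30)
    cv2.imshow("Blurred", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break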
@Thimira
Thimira / DCGAN.py
Created May 20, 2020 10:58
Handwritten digit generation with a Deep Convolutional Generative Adversarial Network (DCGAN). Requires TensorFlow 2, OpenCV, Matplotlib, and imageio.
import tensorflow as tf
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import time
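The preview stops at the imports. A minimal sketch of a DCGAN generator in the style of the TensorFlow 2 tutorial this gist follows; the exact layer sizes here are assumptions:

def make_generator_model():
    model = tf.keras.Sequential([
        # project a 100-dim noise vector to a 7x7x256 feature map
        layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Reshape((7, 7, 256)),
        layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        # upsample 7x7 -> 14x14 -> 28x28
        layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same',
                               use_bias=False, activation='tanh'),
    ])
    return model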
# Using a circular mask as the blur area to automatically blur faces in a video, giving the result a cleaner look.
# We're using OpenCV, and Dlib with Python.
# See tutorial here: https://youtu.be/1p1lUyLGB2E
import numpy as np
import cv2
import dlib
video_capture = cv2.VideoCapture(1)
detector = dlib.get_frontal_face_detector()
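Again the preview ends after the detector; a hedged sketch of the circular-mask blend, where in the full script the circle's centre and radius would come from the detected face (assumed values here):

ret, frame = video_capture.read()
blurred = cv2.GaussianBlur(frame, (99, 99), 30)
# single-channel mask: a filled white circle over the face region
mask = np.zeros(frame.shape[:2], dtype=np.uint8)
cv2.circle(mask, (320, 240), 120, 255, -1)    # assumed centre and radius
# take blurred pixels inside the circle, original pixels outside
frame = np.where(mask[:, :, None] == 255, blurred, frame)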
@Thimira
Thimira / bird_watch_train.py
Last active December 1, 2019 15:09
The main model training code from the Bird Watch project: https://github.com/Thimira/bird_watch . The project is currently live at https://www.birdwatch.photo/
import numpy as np
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D, Input
from keras.applications.inception_v3 import InceptionV3
from keras.utils.np_utils import to_categorical
from keras import optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
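The preview shows only the imports; a minimal sketch of the InceptionV3 transfer-learning setup they imply, with the input size and class count as assumptions:

num_classes = 200    # assumed number of bird species
base_model = InceptionV3(weights='imagenet', include_top=False,
                         input_tensor=Input(shape=(299, 299, 3)))
x = GlobalAveragePooling2D()(base_model.output)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
# freeze the convolutional base and train only the new head first
for layer in base_model.layers:
    layer.trainable = False
model.compile(optimizer=optimizers.Adam(lr=0.001),
              loss='categorical_crossentropy', metrics=['accuracy'])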
@Thimira
Thimira / vgg16_sequential.py
Created September 3, 2017 08:28
The VGG16 Deep Learning model created using the Sequential model of Keras v2
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
import numpy as np
def VGG_16(weights_path=None):
    input_shape = (224, 224, 3)
    model = Sequential()
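    # The listing is truncated here. A hedged sketch of how the first
    # convolutional block likely continues; the remaining blocks repeat the
    # pattern with 128, 256, 512, and 512 filters before the dense head.
    model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))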