#!/usr/bin/env python
# -*- coding: utf-8 -*-
import caffe
import numpy as np
# Reference: https://prateekvjoshi.com/2016/04/26/how-to-extract-feature-vectors-from-deep-neural-networks-in-python-caffe/
# ---------------------------------
# Load Model
# ---------------------------------
model_file = '/model/bvlc_alexnet/bvlc_alexnet.caffemodel'
# Specify the corresponding deploy prototxt file:
deploy_prototxt = '/model/bvlc_alexnet/deploy.prototxt'
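# (Optional) Choose the execution mode before building the net. Caffe runs
# on the CPU by default; uncomment the GPU lines if a CUDA device is
# available (the device id 0 is just an example).
caffe.set_mode_cpu()
# caffe.set_device(0)
# caffe.set_mode_gpu()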
# We are now ready to initialize the convolutional neural network:
net = caffe.Net(deploy_prototxt, model_file, caffe.TEST)
# Let's say we want to extract the feature vector from the layer 'fc7' in
# the network.
layer = 'fc7'
if layer not in net.blobs:
    raise TypeError("Invalid layer name: " + layer)
# We need to specify the image mean file for the image transformer:
imagemean_file = '/opt/caffe/python/caffe/imagenet/ilsvrc_2012_mean.npy'
# Define the transformer in order to preprocess the input image
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_mean('data', np.load(imagemean_file).mean(1).mean(1))  # subtract the per-channel ImageNet mean
transformer.set_transpose('data', (2, 0, 1))  # H x W x C -> C x H x W
transformer.set_raw_scale('data', 255.0)      # rescale the [0, 1] floats from load_image to [0, 255]
# Reshape the network blob (if needed) to the shape needed for the current
# CNN architecture being used:
net.blobs['data'].reshape(1, 3, 227, 227)
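# (batch size 1, 3 color channels, 227 x 227 pixels -- the input size
# expected by the AlexNet deploy prototxt)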
# ---------------------------------
# Load Image
# ---------------------------------
input_image_file = 'data/1.jpg'
output_file = 'features/1.vec'
# Load the input image:
img = caffe.io.load_image(input_image_file)
# Run the image through the preprocessor:
net.blobs['data'].data[...] = transformer.preprocess('data', img)
# Run the image through the network:
output = net.forward()
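# Note: net.forward() returns a dict of the network's declared output blobs
# (the softmax probabilities for this AlexNet deploy file). We don't use it
# here; the intermediate activations stay accessible through net.blobs.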
# Extract the feature vector from the layer of interest:
with open(output_file, 'w') as f:
    np.savetxt(f, net.blobs[layer].data[0], fmt='%.4f', delimiter='\n')
# Go ahead and open the output text file. You will see 4096 lines, each
# containing a single floating-point value.
# This is the 4096-dimensional feature vector!
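# Quick sanity check (a minimal sketch, reusing the output_file path above):
# reload the saved vector with NumPy and confirm it has the 4096 dimensions
# of AlexNet's fc7 layer.
features = np.loadtxt(output_file)
print(features.shape)  # expected: (4096,)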