@jeongukjae · Created August 22, 2017 06:55
Image Classifier using Linear Classification method
# Image Classifier using the Linear Classification method with Softmax and the CIFAR-10 dataset
import numpy as np
import pickle
import matplotlib.pyplot as plt
# load a pickled CIFAR-10 batch from file
def get_batches(filename):
    with open(filename, 'rb') as file:
        data = pickle.load(file, encoding='bytes')
    return data
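# Each batch is a dict whose keys are bytes: b'data' is a uint8 array of
# shape (10000, 3072) (1024 red, then 1024 green, then 1024 blue values
# per image) and b'labels' is a list of 10000 ints in 0-9.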
# activation function
def softmax(values):
    # subtract the max to prevent overflow in exp
    values = values - np.max(values)
    return np.exp(values) / np.sum(np.exp(values))
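# e.g. softmax(np.array([1.0, 2.0, 3.0])) ≈ [0.090, 0.245, 0.665],
# which sums to 1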
# cost function: binary cross entropy, averaged over the 10 outputs
def cross_entropy_loss(target_output, estimated_output):
    # clip to avoid log(0)
    estimated_output = np.clip(estimated_output, 0.00001, 0.99999)
    return -np.mean(target_output * np.log(estimated_output) +
                    (1 - target_output) * np.log(1 - estimated_output))
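# e.g. cross_entropy_loss(np.array([1., 0.]), np.array([0.9, 0.1]))
# = -(log(0.9) + log(0.9)) / 2 ≈ 0.105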
# train over the five CIFAR-10 training batches
def train():
    # initialize weight and bias randomly
    W = np.random.rand(10, 3072)
    b = np.random.rand(10)
    # file name format of the training batches
    file_prefix = 'cifar-10-batches-py/data_batch_{0}'
    for i in range(1, 6):
        # load one training batch
        batches = get_batches(file_prefix.format(i))
        for k in range(10000):
            # get a single image (3072 raw pixel values)
            x = batches[b'data'][k]
            # forward pass: apply weight and bias, then softmax
            output = softmax(np.matmul(W, x) + b)
            # one-hot encode the label as the target answer
            answer = np.zeros(10)
            answer[batches[b'labels'][k]] = 1
            # loss between answer and output (computed for reference;
            # only its derivative is used in the update below)
            loss = cross_entropy_loss(answer, output)
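            # backward pass (chain rule): dC/dW = dC/do * do/dz * dz/dW,
            # with z = Wx + b and o = activation(z); the terms are
            # computed one by one below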
            # update weight & bias
            # clip to prevent division by zero
            output_dc = np.clip(output, 0.00001, 0.99999)
            # dc/do : derivative of the cross entropy loss function
            dc = (-answer / output_dc + (1 - answer) / (1 - output_dc)) / 10
            # do/dz : element-wise derivative of the activation,
            # o * (1 - o) (the diagonal of the softmax Jacobian)
            do = output_dc * (1 - output_dc)
            # dz/dw : derivative of z with respect to w;
            # z1 = w11 * x1 + w12 * x2 + ..., so dz1/dw11 = x1
            dz = np.copy(x)
            # broadcast dc, do, dz to the (10, 3072) shape of W
            do_w = np.repeat(do, 3072).reshape(10, 3072)
            dc_w = np.repeat(dc, 3072).reshape(10, 3072)
            dz_w = np.tile(dz, 10).reshape(10, 3072)
            # update weight (partial derivative of the cost with respect to W)
            W -= dc_w * do_w * dz_w
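            # note: the repeat/tile trio above builds an outer product by
            # hand; an equivalent, more idiomatic NumPy form of the same
            # update would be W -= np.outer(dc * do, x)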
            # update bias (partial derivative of the cost with respect to b;
            # dz/db = 1)
            b -= dc * do * 1
    # return the learned weight and bias
    return W, b
# predict the class of a single image
def predict(W, x, b):
    # forward pass through the linear classifier
    output = softmax(np.matmul(W, x) + b)
    # the index of the largest output is the predicted class
    return np.argmax(output)
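# e.g. for a loaded batch, predict(W, batches[b'data'][0], b) returns an
# int in 0-9 (an index into meta[b'label_names'])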
if __name__ == "__main__":
    # get the trained weight and bias
    W, b = train()
    # evaluate the predictions on the test batch
    result = []
    batches = get_batches('cifar-10-batches-py/test_batch')
    for i in range(10000):
        # get a single test image and its label
        x = batches[b'data'][i]
        y = batches[b'labels'][i]
        # predicted class for this image
        y_ = predict(W, x, b)
        # record whether the prediction was correct
        result.append(y_ == y)
    # accuracy = fraction of correct predictions
    print("Accuracy : %f" % np.mean(np.array(result, dtype='float32')))
    # visualize the learned weight (plus bias) for each class
    meta = get_batches('cifar-10-batches-py/batches.meta')
    for i in range(10):
        a = plt.subplot(2, 5, i + 1)
        a.set_title(meta[b'label_names'][i])
        # split the 3072 weights into the three 1024-value color planes
        I = np.copy(W[i]).reshape([-1, 1024]) + b[i]
        # pack the three planes into one value per pixel, show as 32x32
        plt.imshow((I[0] * (2**16) + I[1] * (2**8) + I[2]).reshape(32, 32))
    plt.show()
# result
# Accuracy : 0.237800
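# note (an observation, not part of the original gist): the update above
# uses raw 0-255 pixel values and no learning rate, so the steps are very
# large; scaling the input (e.g. x / 255.0) and multiplying the update by
# a small learning rate is a common way to stabilize this kind of training.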