Created
October 2, 2014 22:34
-
-
Save palango/e2eba839cde69fe55b0e to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
using DelimitedFiles   # readdlm — moved out of Base in Julia 1.0
using Statistics       # mean, std — moved out of Base in Julia 1.0

# constants
const beta = 0.5 | |
const eta = 0.01 | |
"""
    activation(b, gain = beta)

Elementwise tanh squashing `tanh(gain * b)`. Broadcasts internally, so `b`
may be a scalar, vector, or matrix (modern `tanh` on a Matrix would compute
the *matrix* tanh, which is not what the network wants). `gain` defaults to
the global `const beta`, preserving the original behavior.
"""
function activation(b, gain = beta)
    return tanh.(gain .* b)
end
"""
    activation_prime(b, gain = beta)

Derivative of the activation: d/db tanh(gain*b) = gain * sech(gain*b)^2,
applied elementwise. Expects `b` to be the *weighted input* (pre-activation),
not the activation value. `gain` defaults to the global `const beta`.
"""
function activation_prime(b, gain = beta)
    return gain .* sech.(gain .* b) .^ 2
end
"""
    normalize_data(train)

Load the training split (`train == true`) or validation split of the
Pima Indians diabetes data, z-score every column (features and label alike,
as the original did), and return a tuple:

  * features as an inputs × samples matrix (transposed),
  * labels mapped to ±1 via `sign` of the standardized label column, as a row.

Requires `DelimitedFiles` (readdlm) and `Statistics` (mean, std).
"""
function normalize_data(train)
    if train
        data = readdlm("pima-indians-diabetes_train.txt", ' ')
    else
        data = readdlm("pima-indians-diabetes_valid.txt", ' ')
    end
    dims = size(data)
    avg = mean(data, dims = 1)
    stde = std(data, dims = 1)
    # Standardize every column in one broadcast (replaces the per-row loop).
    data = (data .- avg) ./ stde
    # Last column is the label; sign of a standardized value is ±1 (or 0 at the mean).
    return data[:, 1:dims[2]-1]', sign.(data[:, dims[2]])'
end
# Load and standardize the training split: trainIn is features × samples,
# trainOut is a row vector of ±1 labels.
trainIn, trainOut = normalize_data(true)

# set network size: 8 inputs, two hidden layers (20 and 10 units), 1 output
const networkSize = [8, 20, 10, 1]
const nLayers = length(networkSize)
# initialize weights and biases
# weights[i] maps layer i activations to layer i+1; entries uniform in (-0.2, 0.2).
# Biases are uniform in (-1, 1). (Julia 1.0 removed scalar-array `-`; use broadcasts.)
weights = [(rand(networkSize[i], networkSize[i-1]) .- 0.5) ./ 2.5 for i in 2:nLayers]
biases = [(rand(n) .- 0.5) .* 2 for n in networkSize[2:end]]

# Per-layer caches filled by backprop: activations a_l and weighted inputs z_l.
activations = [zeros(n) for n in networkSize]
weightedInputs = [zeros(n) for n in networkSize[2:end]]
"""
    feed_forward(input = trainIn, ws = weights, bs = biases, act = activation)

Propagate `input` (a column vector or an inputs × samples matrix) through the
network given by weight matrices `ws` and bias vectors `bs`, applying `act` at
every layer. Defaults reproduce the original zero-argument behavior of feeding
the global training set through the global network.

Note: biases are *subtracted* (`z = W*a - b`), matching the rest of this script.
"""
function feed_forward(input = trainIn, ws = weights, bs = biases, act = activation)
    output = input
    for l in eachindex(ws)
        # Broadcast subtracts the bias vector from every sample column.
        output = act(ws[l] * output .- bs[l])
    end
    return output
end
"""
    backprop(weights, biases, idx = 1)

Run one forward pass on training sample `idx` (default 1, the original
hard-coded choice), then backpropagate the quadratic-cost error. Fills the
global `activations` / `weightedInputs` caches as a side effect and returns
`(delta_bias, delta_weights)`, the per-layer error terms and weight gradients.

The caller is responsible for the learning-rate update: the original trailing
`weights += delta_weights; biases -= delta_bias` only rebound local names
(the caller's arrays were never mutated) and omitted `eta`, so those lines
were dead code and have been removed.

NOTE(review): since the forward pass subtracts biases (`z = W*a - b`), the
true gradient w.r.t. `b` is `-delta`; the returned `delta_bias` keeps the
original's sign convention — confirm the sign at the call site.
"""
function backprop(weights, biases, idx = 1)
    # ---- forward pass, caching z_l (weightedInputs) and a_l (activations) ----
    output = trainIn[:, idx]
    activations[1] = output
    for l = 1:nLayers-1
        b = weights[l] * output .- biases[l]
        weightedInputs[l] = b
        output = activation(b)
        activations[l+1] = output
    end

    delta_bias = [zeros(size(b)) for b in biases]
    delta_weights = [zeros(size(w)) for w in weights]

    # ---- output layer: delta_L = (a_L - y) .* sigma'(z_L) ----
    # Indices were hard-coded to 3; generalized to nLayers-1 (same value here).
    expected = trainOut[idx]
    delta = (activations[nLayers] .- expected) .* activation_prime(weightedInputs[nLayers-1])
    delta_bias[nLayers-1] = delta
    delta_weights[nLayers-1] = delta * activations[nLayers-1]'

    # ---- hidden layers, walking backwards ----
    for l = nLayers-1:-1:2
        # BUG FIX: sigma' must be evaluated at the weighted input z (cached in
        # weightedInputs[l-1]), not at the activation a as the original did.
        delta = (weights[l]' * delta) .* activation_prime(weightedInputs[l-1])
        delta_bias[l-1] = delta
        delta_weights[l-1] = delta * activations[l-1]'
    end

    return delta_bias, delta_weights
end
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment