@codekansas
Created July 13, 2016 07:23
from __future__ import print_function

import time

import numpy as np
import theano
import theano.tensor as T

# Toy dataset stored in shared variables, so the compiled functions need no inputs.
# Casting to floatX keeps the graph's dtypes consistent when floatX is float32.
X = theano.shared(value=np.asarray([[0, 1], [1, 0], [0, 0], [1, 1]],
                                   dtype=theano.config.floatX), name='X')
y = theano.shared(value=np.asarray([[0], [0], [1], [1]],
                                   dtype=theano.config.floatX), name='y')

rng = np.random.RandomState(1234)
LEARNING_RATE = 0.1

def layer(*shape):
    """Create a shared parameter with Glorot-style uniform initialization."""
    mag = 4. * np.sqrt(6. / sum(shape))
    value = np.asarray(rng.uniform(low=-mag, high=mag, size=shape),
                       dtype=theano.config.floatX)
    return theano.shared(value=value, name='W', borrow=True, strict=False)

# Two-layer network: 2 inputs -> 5 hidden units (ReLU) -> 1 output (sigmoid).
W1 = layer(2, 5)
W2 = layer(5, 1)
b1 = layer(5)
b2 = layer(1)

output = T.nnet.sigmoid(T.dot(T.nnet.relu(T.dot(X, W1) + b1), W2) + b2)
cost = T.mean((y - output) ** 2)

# Vanilla gradient descent: each parameter steps against its own gradient.
updates = [(W1, W1 - LEARNING_RATE * T.grad(cost, W1)),
           (W2, W2 - LEARNING_RATE * T.grad(cost, W2)),
           (b1, b1 - LEARNING_RATE * T.grad(cost, b1)),
           (b2, b2 - LEARNING_RATE * T.grad(cost, b2))]

train = theano.function(inputs=[], outputs=[], updates=updates)
test = theano.function(inputs=[], outputs=cost)

print('Error before:', test())
start = time.time()
for i in range(10000):
    train()
end = time.time()
print('Error after:', test())
print('Time (s):', end - start)
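As a quick sanity check (not part of the original gist, just a sketch using the same shared variables defined above), the trained network's raw predictions can be read back with one more compiled function, following the same pattern as `test`; the name `predict` is hypothetical:

predict = theano.function(inputs=[], outputs=output)
print('Predictions:', predict().ravel())  # should approach the targets [0, 0, 1, 1]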
@codekansas (Author) commented:

Error before: 0.372680536564
Error after: 0.000388681638475
Time (s): 0.184696912766
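One possible tidy-up, offered here as an aside rather than anything from the original gist: `T.grad` also accepts a list of variables, so the four separate gradient calls in `updates` could be collapsed into a single call and a list comprehension:

params = [W1, W2, b1, b2]
grads = T.grad(cost, params)  # one call returns one gradient per parameter
updates = [(p, p - LEARNING_RATE * g) for p, g in zip(params, grads)]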
