
@nasimrahaman
Created January 22, 2016 16:52
Basic Linear Regression with Theano
# (CC-NC-SA) Nasim Rahaman
import theano as th
import theano.tensor as T
import numpy as np
import time
# Weights
W = th.shared(value=np.random.uniform(size=(3, 3)))
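# (Note: th.shared allocates a variable whose value persists across compiled
# function calls and lives on the device; the updates below modify it in place.)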
# Input
x = T.matrix()
# Output
y = T.dot(x, W)
# Target
yt = T.matrix()
# Loss: squared error summed over output dimensions, averaged over the batch
L = T.mean(T.sum((y - yt) ** 2, axis=1))
# Compute Symbolic Gradient
dLdW = T.grad(cost=L, wrt=W)
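# For reference (derivation not spelled out in the original gist): with
#   L = (1/N) * sum_i ||x_i.W - yt_i||^2,
# the gradient that T.grad derives symbolically is
#   dL/dW = (2/N) * x^T.(x.W - yt).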
# Gradient Descent Updates (Learning Rate = 1 works fine)
upd = [(W, W - dLdW)]
# Compile Training Function
train = th.function(inputs=[x, yt], outputs=L, updates=upd, allow_input_downcast=True)
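# Illustrative addition (not in the original gist): the same graph can also be
# compiled into a prediction function that evaluates y = x.W without updates.
predict = th.function(inputs=[x], outputs=y, allow_input_downcast=True)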
# Generate Synthetic Data (numerical)
numW = np.random.uniform(size=(3, 3))
numx = np.random.uniform(size=(100, 3))
numyt = np.dot(numx, numW)
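# Illustrative sanity check (not in the original gist): the targets are an exact
# linear map of the inputs, so ordinary least squares recovers numW directly.
assert np.allclose(np.linalg.lstsq(numx, numyt)[0], numW)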
print("Initial W: \n {}".format(W.get_value()))
print("Target W: \n {}".format(numW))
programstart = time.time()
# Batch gradient descent for 500 epochs
for _ in range(500):
    train(numx, numyt)
programstop = time.time()
print("Fitted W: \n {}".format(W.get_value()))
print("Elapsed Time: {} seconds on a GPU (GT 750M)".format(programstop - programstart))
# Using gpu device 0: GeForce GT 750M
# Initial W:
# [[ 0.36780546 0.70396992 0.36098496]
# [ 0.04568252 0.60477838 0.47389695]
# [ 0.69876337 0.92058904 0.57620348]]
# Target W:
# [[ 0.28928668 0.67040456 0.20592429]
# [ 0.98262113 0.37950274 0.50209176]
# [ 0.27904913 0.88948569 0.38525592]]
# Fitted W:
# [[ 0.28928668 0.67040456 0.20592429]
# [ 0.98262113 0.37950274 0.50209176]
# [ 0.27904913 0.88948569 0.38525592]]
# Elapsed Time: 0.154557228088 seconds on a GPU (GT 750M)
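# Illustrative comparison (assumption, not part of the original gist): the same
# fit can be reproduced in plain numpy by applying the update W <- W - dL/dW,
# using the gradient derived above and the same implicit learning rate of 1.
W_np = np.random.uniform(size=(3, 3))
for _ in range(500):
    grad = (2.0 / numx.shape[0]) * np.dot(numx.T, np.dot(numx, W_np) - numyt)
    W_np -= grad
assert np.allclose(W_np, numW)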