import tensorflow as tf
import numpy as np
# takes an (x, y, r) point, returns the distorted (x, y) pair
# (Brown-Conrady radial + tangential distortion model)
def distort_func(p):
    x = p[0]
    y = p[1]
    r = p[2]
    # distortion coefficients picked out of a hat
    K1 = 0.1
    K2 = -0.1
    K3 = -0.05
    P1 = 0.01
    P2 = -0.01
    P3 = 0.02
    r2 = r*r
    r4 = r2*r2
    r6 = r4*r2
    radial = 1 + K1*r2 + K2*r4 + K3*r6
    nx = x*radial + (2*P1*x*y + P2*(r2 + 2*x*x))*(1 + P3*r2)
    ny = y*radial + (P1*(r2 + 2*y*y) + 2*P2*x*y)*(1 + P3*r2)
    return np.array([nx, ny])
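# Quick usage check (an added example, not part of the original gist):
# distort one sample normalized point and print the result.
sample = np.array([0.5, -0.25, 0.0])
sample[2] = np.sqrt(sample[0]*sample[0] + sample[1]*sample[1])
print 'distort_func({0}) -> {1}'.format(sample[:2], distort_func(sample))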
# Create some sample data in [-1.0, 1.0]
x_data = 2*np.random.rand(1000,3).astype(np.float32) - 1.0
y_data = 2*np.random.rand(1000,2).astype(np.float32) - 1.0
for xp, yp in zip(x_data, y_data):
    # add radius as a third input feature
    xp[2] = np.sqrt(xp[0]*xp[0] + xp[1]*xp[1])
    # assign into the row slice; plain `yp = ...` would only rebind the
    # loop variable and leave y_data untouched
    yp[:] = distort_func(xp)
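# Equivalent vectorized construction (an added sketch): distort_func broadcasts
# over NumPy arrays, so the loop above could be replaced with two lines:
#   x_data[:, 2] = np.sqrt(x_data[:, 0]**2 + x_data[:, 1]**2)
#   y_data = distort_func(x_data.T).T.astype(np.float32)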
# leaky ReLU: max(alpha*x, x), assuming alpha < 1
def leaky(x, alpha):
    return tf.maximum(alpha*x, x)
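# Note (added): this is the same activation that newer TensorFlow releases
# ship as tf.nn.leaky_relu; that helper did not exist in the 0.x-era API
# this script targets (hence tf.initialize_all_variables below).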
class NN_Model:
    def fit(self, xd, yd):
        sess = tf.InteractiveSession()
        self.x_in = tf.placeholder(tf.float32, shape=[None, 3])
        self.y_in = tf.placeholder(tf.float32, shape=[None, 2])
        # three leaky-ReLU hidden layers of 128 units, linear output layer
        self.W1 = tf.Variable(tf.truncated_normal([3,128], stddev=0.1))
        self.b1 = tf.Variable(tf.constant(0.0, shape=[128]))
        self.W2 = tf.Variable(tf.truncated_normal([128,128], stddev=0.1))
        self.b2 = tf.Variable(tf.constant(0.0, shape=[128]))
        self.W3 = tf.Variable(tf.truncated_normal([128,128], stddev=0.1))
        self.b3 = tf.Variable(tf.constant(0.0, shape=[128]))
        self.W4 = tf.Variable(tf.truncated_normal([128,2], stddev=0.1))
        self.b4 = tf.Variable(tf.constant(0.0, shape=[2]))
        self.h1 = leaky(tf.matmul(self.x_in, self.W1) + self.b1, 0.01)
        self.h2 = leaky(tf.matmul(self.h1, self.W2) + self.b2, 0.01)
        self.h3 = leaky(tf.matmul(self.h2, self.W3) + self.b3, 0.01)
        self.pred = tf.matmul(self.h3, self.W4) + self.b4
        # Minimize the squared error; note tf.nn.l2_loss is sum(t**2)/2, not a mean
        loss = tf.nn.l2_loss(self.pred - self.y_in)
        global_step = tf.Variable(0, trainable=False)
        starter_learning_rate = 4e-3
        # decay the learning rate by 10% every 1000 optimizer steps
        learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
                                                   1000, 0.9, staircase=True)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train = optimizer.minimize(loss, global_step=global_step)
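        # With staircase decay, lr = 4e-3 * 0.9 ** (global_step // 1000).
        # One optimizer step runs per minibatch: 1000 samples / batches of 100
        # = 10 steps per epoch, so 2000 epochs = 20000 steps and a final
        # learning rate of roughly 4e-3 * 0.9**20 ~= 4.9e-4.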
        # Launch the graph.
        sess.run(tf.initialize_all_variables())
        for epoch in xrange(2000):
            bs = 100
            print 'epoch {0}, loss {1}'.format(epoch, round(sess.run(loss, feed_dict={self.x_in: xd[0:bs,:], self.y_in: yd[0:bs,:]}), 2))
            # i already steps by bs, so slice with i:i+bs directly
            # (the original i*bs:i*bs+bs indexing skipped most of the data)
            for i in xrange(0, xd.shape[0], bs):
                sess.run(train,
                         feed_dict={
                             self.x_in: xd[i:i+bs,:],
                             self.y_in: yd[i:i+bs,:]
                         })
mod = NN_Model()
mod.fit(x_data,y_data)
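# Compare the fitted network against the analytic model on a fresh point
# (an added sketch; it relies on the InteractiveSession opened inside fit()
# still being installed as the default session, since fit() never closes it).
test_p = np.zeros((1, 3), dtype=np.float32)
test_p[0, :2] = [0.3, -0.2]
test_p[0, 2] = np.sqrt(test_p[0, 0]**2 + test_p[0, 1]**2)
nn_out = mod.pred.eval(feed_dict={mod.x_in: test_p})
print 'NN: {0} analytic: {1}'.format(nn_out[0], distort_func(test_p[0]))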