@jackhftang
Created June 7, 2017 07:26
Given the known recurrent structure f(x) = w0 * f(x-1) + w1 * f(x-2), learn w0, w1 and the two initial values by gradient descent.
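For the Fibonacci sequence this recurrence holds with w0 = w1 = 1 and f(0) = f(1) = 1, e.g. f(2) = 1*1 + 1*1 = 2 and f(3) = 1*2 + 1*1 = 3, so fitting the weights to a few known terms should drive w toward [1, 1].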
import torch as th
from torch.autograd import Variable
## helpers (fromlist and const are defined but not used below)
def fromlist(xs):
    return th.FloatTensor(xs)
def const(t):
    return Variable(t)
def var(t):
    return Variable(t, requires_grad=True)
## training data: the first 10 Fibonacci numbers [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
fibs = [1, 1]
for i in range(8):
    fibs.append(fibs[-1] + fibs[-2])
## variables
a0 = var(th.randn(1))
a1 = var(th.randn(1))
w = var(th.randn(2)) # optimal value = [1,1]
## some computation: f(x) = w[0]*f(x-1) + w[1]*f(x-2), with f(0) = a0 and f(1) = a1
def recurrent(x):
    if x == 0: return a0
    if x == 1: return a1
    a = recurrent(x-1)
    b = recurrent(x-2)
    return w[0] * a + w[1] * b
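## note (not in the original gist): recurrent() above recomputes every
## subproblem on each call; an equivalent bottom-up variant (hypothetical
## name recurrent_iter) avoids the repeated recursion while building the
## same computation graph, which matters for larger x.
def recurrent_iter(x):
    if x == 0: return a0
    prev, cur = a0, a1            # f(0), f(1)
    for _ in range(x - 1):
        prev, cur = cur, w[0] * cur + w[1] * prev
    return cur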
## training
print('initial', a0, a1, w)
rate = 0.001
for i in range(5000):
    ## fit against the last 5 values of the sequence
    for j in range(len(fibs)-5, len(fibs)):
        ## absolute difference as loss
        loss = (recurrent(j) - fibs[j]).abs()
        ## calculate gradient
        loss.backward()
        ## take a plain SGD step and reset the accumulated gradients
        for v in [a0, a1, w]:
            v.data -= rate * v.grad.data
            v.grad.data.zero_()
print('final', a0, a1, w)
## absolute error at every position of the sequence
print([ (recurrent(i) - fibs[i]).abs().data[0] for i in range(len(fibs)) ])
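The code above uses the pre-0.4 Variable API. Below is a minimal sketch of the same idea on current PyTorch, where autograd works directly on tensors created with requires_grad=True; the torch.no_grad() manual SGD step and the variable names are my own, not part of the original gist.

import torch

## training data: the first 10 Fibonacci numbers
fibs = [1.0, 1.0]
for _ in range(8):
    fibs.append(fibs[-1] + fibs[-2])

## parameters to learn
a0 = torch.randn(1, requires_grad=True)
a1 = torch.randn(1, requires_grad=True)
w = torch.randn(2, requires_grad=True)   # optimal value = [1, 1]

## f(x) = w[0]*f(x-1) + w[1]*f(x-2), with f(0) = a0 and f(1) = a1
def recurrent(x):
    if x == 0: return a0
    if x == 1: return a1
    return w[0] * recurrent(x - 1) + w[1] * recurrent(x - 2)

rate = 0.001
for _ in range(5000):
    for j in range(len(fibs) - 5, len(fibs)):
        loss = (recurrent(j) - fibs[j]).abs()
        loss.backward()
        with torch.no_grad():            # manual SGD step
            for v in (a0, a1, w):
                v -= rate * v.grad
                v.grad.zero_()

print('final', a0, a1, w)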