# Authors: Kyle Kastner
# License: BSD 3-clause
import numpy as np
import theano
import theano.tensor as T


class rmsprop(object):
"""
RMSProp with nesterov momentum and gradient rescaling
"""
def __init__(self, params):
self.running_square_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
self.running_avg_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
self.memory_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
def updates(self, params, grads, learning_rate, momentum, rescale=5.):
        # Global L2 norm of all gradients, used for rescaling and for
        # detecting non-finite (nan/inf) gradients.
        grad_norm = T.sqrt(sum(map(lambda x: T.sqr(x).sum(), grads)))
        not_finite = T.or_(T.isnan(grad_norm), T.isinf(grad_norm))
        scaling_num = rescale
        scaling_den = T.maximum(rescale, grad_norm)
        # Magic constants
        combination_coeff = 0.9
        minimum_grad = 1E-4
        updates = []
        for n, (param, grad) in enumerate(zip(params, grads)):
            # If the gradient is nan/inf, fall back to a small step based on
            # the parameter itself; otherwise clip the global gradient norm
            # to `rescale`.
            grad = T.switch(not_finite, 0.1 * param,
                            grad * (scaling_num / scaling_den))
            # Exponential moving averages of the gradient and its square.
            old_square = self.running_square_[n]
            new_square = combination_coeff * old_square + (
                1. - combination_coeff) * T.sqr(grad)
            old_avg = self.running_avg_[n]
            new_avg = combination_coeff * old_avg + (
                1. - combination_coeff) * grad
            # RMS of the centered gradient, floored to avoid division by zero.
            rms_grad = T.sqrt(new_square - new_avg ** 2)
            rms_grad = T.maximum(rms_grad, minimum_grad)
            # Nesterov momentum step on the RMS-normalized gradient.
            memory = self.memory_[n]
            update = momentum * memory - learning_rate * grad / rms_grad
            update2 = momentum * momentum * memory - (
                1 + momentum) * learning_rate * grad / rms_grad
            updates.append((old_square, new_square))
            updates.append((old_avg, new_avg))
            updates.append((memory, update))
            updates.append((param, param + update2))
        return updates

class sgd_nesterov(object):
    """
    Stochastic gradient descent with Nesterov momentum.
    """
    def __init__(self, params):
        # Momentum "memory" (velocity), one per parameter.
        self.memory_ = [theano.shared(np.zeros_like(p.get_value()))
                        for p in params]

    def updates(self, params, grads, learning_rate, momentum):
        updates = []
        for n, (param, grad) in enumerate(zip(params, grads)):
            memory = self.memory_[n]
            # Velocity update plus the Nesterov look-ahead correction
            # applied directly to the parameter.
            update = momentum * memory - learning_rate * grad
            update2 = momentum * momentum * memory - (
                1 + momentum) * learning_rate * grad
            updates.append((memory, update))
            updates.append((param, param + update2))
        return updates

"""
Usage:
grads = T.grad(cost, self.params)
#opt = sgd_nesterov(self.params)
opt = rmsprop(self.params)
updates = opt.updates(self.params, grads,
learning_rate / np.cast['float32'](self.batch_size),
momentum)
"""