Adam OptimizerV2 implementation, weights frozen -- reference issue: https://github.com/tensorflow/tensorflow/issues/33227
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops, control_flow_ops, math_ops, state_ops
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.optimizers.Adam')
class Adam(optimizer_v2.OptimizerV2):
    """Optimizer that implements the Adam algorithm.

    Adam optimization is a stochastic gradient descent method that is based on
    adaptive estimation of first-order and second-order moments.

    According to the paper
    [Adam: A Method for Stochastic Optimization. Kingma et al.,
    2014](http://arxiv.org/abs/1412.6980), the method is "*computationally
    efficient, has little memory requirement, invariant to diagonal rescaling
    of gradients, and is well suited for problems that are large in terms of
    data/parameters*".

    For AMSGrad see [On The Convergence Of Adam And Beyond.
    Reddi et al., 5-8](https://openreview.net/pdf?id=ryQu7f-RZ).
    """
r"""Construct a new Adam optimizer. | |
The default value of 1e-7 for epsilon might not be a good default in | |
general. For example, when training an Inception network on ImageNet a | |
current good choice is 1.0 or 0.1. Note that since AdamOptimizer uses the | |
formulation just before Section 2.1 of the Kingma and Ba paper rather than | |
the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon | |
hat" in the paper. | |
The sparse implementation of this algorithm (used when the gradient is an | |
IndexedSlices object, typically because of `tf.gather` or an embedding | |
lookup in the forward pass) does apply momentum to variable slices even if | |
they were not used in the forward pass (meaning they have a gradient equal | |
to zero). Momentum decay (beta1) is also applied to the entire momentum | |
accumulator. This means that the sparse behavior is equivalent to the dense | |
behavior (in contrast to some momentum implementations which ignore momentum | |
unless a variable slice was actually used). | |
Args: | |
learning_rate: A Tensor or a floating point value. The learning rate. | |
beta_1: A float value or a constant float tensor. The exponential decay | |
rate for the 1st moment estimates. | |
beta_2: A float value or a constant float tensor. The exponential decay | |
rate for the 2nd moment estimates. | |
epsilon: A small constant for numerical stability. This epsilon is | |
"epsilon hat" in the Kingma and Ba paper (in the formula just before | |
Section 2.1), not the epsilon in Algorithm 1 of the paper. | |
amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from | |
the paper "On the Convergence of Adam and beyond". | |
name: Optional name for the operations created when applying gradients. | |
Defaults to "Adam". @compatibility(eager) When eager execution is | |
enabled, `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be | |
a callable that takes no arguments and returns the actual value to use. | |
This can be useful for changing these values across different | |
invocations of optimizer functions. @end_compatibility | |
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, | |
`decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip | |
gradients by value, `decay` is included for backward compatibility to | |
allow time inverse decay of learning rate. `lr` is included for backward | |
compatibility, recommended to use `learning_rate` instead. | |
""" | |
def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, | |
epsilon=None, decay=0., amsgrad=False, | |
name="Adam", **kwargs): | |
super(Adam, self).__init__(name, **kwargs) | |
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate)) | |
self._set_hyper('decay', self._initial_decay) | |
self._set_hyper('beta_1', beta_1) | |
self._set_hyper('beta_2', beta_2) | |
self.epsilon = epsilon or backend_config.epsilon() | |
self.amsgrad = amsgrad | |

    def _create_slots(self, var_list):
        # Create slots for the first and second moments.
        # Separate for-loops to respect the ordering of slot variables from v1.
        for var in var_list:
            self.add_slot(var, 'm')
        for var in var_list:
            self.add_slot(var, 'v')
        if self.amsgrad:
            for var in var_list:
                self.add_slot(var, 'vhat')

    def _resource_apply_dense(self, grad, var):
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        m = self.get_slot(var, 'm')
        v = self.get_slot(var, 'v')
        beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
        beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
        epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype)

        # m_t = beta1 * m + (1 - beta1) * g_t
        m_t = state_ops.assign(m,
                               beta_1_t * m + (1.0 - beta_1_t) * grad,
                               use_locking=self._use_locking)
        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v_t = state_ops.assign(v,
                               beta_2_t * v + (1.0 - beta_2_t) * math_ops.square(grad),
                               use_locking=self._use_locking)

        if self.amsgrad:
            vhat = self.get_slot(var, 'vhat')
            vhat_t = state_ops.assign(vhat, math_ops.maximum(vhat, v_t),
                                      use_locking=self._use_locking)
            var_delta = m_t / (K.sqrt(vhat_t) + epsilon_t)
        else:
            var_delta = m_t / (K.sqrt(v_t) + epsilon_t)

        # Debug probes for the referenced issue: print `dense_1/kernel`
        # before and after the update to check whether it actually changes.
        var_t = math_ops.subtract(var, lr_t * var_delta)
        if 'dense_1/kernel' in var.name:
            print(var)
            print(K.eval(var_t))
        var_update = state_ops.assign_sub(var, lr_t * var_delta,
                                          use_locking=self._use_locking)
        if 'dense_1/kernel' in var.name:
            print(var)

        updates = [var_update, m_t, v_t]
        if self.amsgrad:
            updates.append(vhat_t)
        return control_flow_ops.group(*updates)

    def _resource_apply_sparse(self, grad, var, indices):
        # Mirrors the dense path above: coefficients are recomputed locally
        # rather than read from `apply_state` (which this class never
        # populates via `_prepare_local`), matching the TF 2.0-era
        # OptimizerV2 calling convention used by `_resource_apply_dense`.
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
        beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
        epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype)

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, 'm')
        m_scaled_g_values = grad * (1 - beta_1_t)
        m_t = state_ops.assign(m, m * beta_1_t,
                               use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, 'v')
        v_scaled_g_values = (grad * grad) * (1 - beta_2_t)
        v_t = state_ops.assign(v, v * beta_2_t,
                               use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)

        if not self.amsgrad:
            v_sqrt = math_ops.sqrt(v_t)
            var_update = state_ops.assign_sub(
                var, lr_t * m_t / (v_sqrt + epsilon_t),
                use_locking=self._use_locking)
            return control_flow_ops.group(*[var_update, m_t, v_t])
        else:
            v_hat = self.get_slot(var, 'vhat')
            v_hat_t = math_ops.maximum(v_hat, v_t)
            with ops.control_dependencies([v_hat_t]):
                v_hat_t = state_ops.assign(
                    v_hat, v_hat_t, use_locking=self._use_locking)
            v_hat_sqrt = math_ops.sqrt(v_hat_t)
            var_update = state_ops.assign_sub(
                var,
                lr_t * m_t / (v_hat_sqrt + epsilon_t),
                use_locking=self._use_locking)
            return control_flow_ops.group(*[var_update, m_t, v_t, v_hat_t])

    def set_weights(self, weights):
        params = self.weights
        # If the weights were generated by the Keras V1 optimizer, they
        # include vhats even without amsgrad, i.e., the V1 optimizer has
        # 3x + 1 variables, while the V2 optimizer has 2x + 1 variables.
        # Filter vhats out for compatibility.
        num_vars = int((len(params) - 1) / 2)
        if len(weights) == 3 * num_vars + 1:
            weights = weights[:len(params)]
        super(Adam, self).set_weights(weights)

    def get_config(self):
        config = super(Adam, self).get_config()
        config.update({
            'learning_rate': self._serialize_hyperparameter('learning_rate'),
            'decay': self._serialize_hyperparameter('decay'),
            'beta_1': self._serialize_hyperparameter('beta_1'),
            'beta_2': self._serialize_hyperparameter('beta_2'),
            'epsilon': self.epsilon,
            'amsgrad': self.amsgrad,
        })
        return config
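
Not part of the gist itself: a minimal, hypothetical sketch of how the class above might be exercised to check whether `dense_1/kernel` updates, per the referenced issue. It assumes TensorFlow 2.0 with eager execution disabled (the `K.eval` debug prints need a session); the model shape and layer name are illustrative only, with `dense_1` chosen to match the debug filter in `_resource_apply_dense`.

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model

tf.compat.v1.disable_eager_execution()

ipt = Input((16,))
out = Dense(4, name='dense_1')(ipt)  # name matches the debug filter above
model = Model(ipt, out)
model.compile(Adam(learning_rate=1e-3), loss='mse')

x = np.random.randn(32, 16)
y = np.random.randn(32, 4)

w_before = model.get_layer('dense_1').get_weights()[0].copy()
model.train_on_batch(x, y)
w_after = model.get_layer('dense_1').get_weights()[0]
# A "frozen" kernel would print 0 here
print("max |dw|:", np.abs(w_after - w_before).max())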
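
Also not part of the gist: a small numeric check of the docstring's epsilon point. The formulation just before Section 2.1 of Kingma and Ba (with "epsilon hat") reproduces Algorithm 1 exactly when eps_hat = eps * sqrt(1 - beta2**t); the accumulator values below are arbitrary illustrations.

import numpy as np

beta1, beta2, alpha, eps, t = 0.9, 0.999, 1e-3, 1e-7, 10
m, v = 0.37, 0.0021  # arbitrary first/second moment values at step t

# Algorithm 1: bias-correct m and v, then divide by (sqrt(v_hat) + eps)
m_hat = m / (1 - beta1**t)
v_hat = v / (1 - beta2**t)
step_alg1 = alpha * m_hat / (np.sqrt(v_hat) + eps)

# Pre-Section-2.1 form: fold bias correction into the learning rate,
# divide by (sqrt(v) + eps_hat)
alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
eps_hat = eps * np.sqrt(1 - beta2**t)
step_pre21 = alpha_t * m / (np.sqrt(v) + eps_hat)

print(np.isclose(step_alg1, step_pre21))  # True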