import numpy as np
import tensorflow as tf


# Training step for the online DQN network; TargetNet is a second network of the
# same architecture used to compute bootstrapped targets.
def train(self, TargetNet):
    # Skip training until the replay buffer holds enough experience
    if len(self.experience['s']) < self.min_experiences:
        return 0
    # Sample a random mini-batch of transitions from the replay buffer
    ids = np.random.randint(low=0, high=len(self.experience['s']), size=self.batch_size)
    states = np.asarray([self.experience['s'][i] for i in ids])
    actions = np.asarray([self.experience['a'][i] for i in ids])
    rewards = np.asarray([self.experience['r'][i] for i in ids])
    states_next = np.asarray([self.experience['s2'][i] for i in ids])
    dones = np.asarray([self.experience['done'][i] for i in ids])
    # Bootstrapped TD targets: terminal transitions use the raw reward only
    value_next = np.max(TargetNet.predict(states_next), axis=1)
    actual_values = np.where(dones, rewards, rewards + self.gamma * value_next)
    with tf.GradientTape() as tape:
        # Q-values of the actions actually taken, selected with a one-hot mask
        selected_action_values = tf.math.reduce_sum(
            self.predict(states) * tf.one_hot(actions, self.num_actions), axis=1)
        # Mean squared TD error between targets and predicted Q-values
        loss = tf.math.reduce_mean(tf.square(actual_values - selected_action_values))
    # Backpropagate the loss and update the online network's weights
    variables = self.model.trainable_variables
    gradients = tape.gradient(loss, variables)
    self.optimizer.apply_gradients(zip(gradients, variables))
    return loss
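
For context, here is a minimal sketch of how this train step could be driven from an outer loop. The DQN wrapper class and its get_action, add_experience, and copy_weights helpers, as well as the sync interval, are assumptions for illustration and are not part of this gist; the environment uses the classic Gym step API.

import gym

# Hypothetical setup: DQN is assumed to be a wrapper class that owns the train
# method above along with get_action, add_experience, and copy_weights helpers.
env = gym.make('CartPole-v0')
num_states = len(env.observation_space.sample())
num_actions = env.action_space.n
TrainNet = DQN(num_states, num_actions)   # online network (assumed constructor)
TargetNet = DQN(num_states, num_actions)  # target network, periodically synced

observations = env.reset()
for step in range(10000):
    action = TrainNet.get_action(observations, epsilon=0.1)  # epsilon-greedy (assumed helper)
    prev_observations = observations
    observations, reward, done, _ = env.step(action)  # classic Gym API: (obs, reward, done, info)
    TrainNet.add_experience({'s': prev_observations, 'a': action, 'r': reward,
                             's2': observations, 'done': done})
    loss = TrainNet.train(TargetNet)      # one gradient step, as defined above
    if step % 25 == 0:
        TargetNet.copy_weights(TrainNet)  # keep the target network slowly in sync
    if done:
        observations = env.reset()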