Training a Neural Network ATARI Pong agent with Policy Gradients from raw pixels
"""from | |
https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5 | |
http://karpathy.github.io/2016/05/31/rl/ | |
I've converted to Python 3, added command-line args, added slightly | |
more informative episode status message, allowed reloading of running | |
reward, episode number, and elapsed time, and switched off the | |
per-point status message -- jmmcd. | |
""" | |
""" Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. """ | |
import sys | |
import time | |
import numpy as np | |
import pickle | |
import gym | |
# hyperparameters
H = 200            # number of hidden layer neurons
batch_size = 10    # every how many episodes to do a param update?
learning_rate = 1e-4
gamma = 0.99       # discount factor for reward
decay_rate = 0.99  # decay factor for RMSProp leaky sum of grad^2
resume = "resume" in sys.argv  # resume from previous checkpoint?
render = "render" in sys.argv  # show live animation: takes about 3-4 times longer per episode on my machine
# model initialization
D = 80 * 80  # input dimensionality: 80x80 grid
if resume:
    print("resuming from save.p")
    state = pickle.load(open('save.p', 'rb'))
    model, episode_number, elapsed, running_reward = state
else:
    model = {}
    model['W1'] = np.random.randn(H, D) / np.sqrt(D)  # "Xavier" initialization
    model['W2'] = np.random.randn(H) / np.sqrt(H)
    running_reward = None
    episode_number = 0
    elapsed = 0.0

grad_buffer = {k: np.zeros_like(v) for k, v in model.items()}    # update buffers that add up gradients over a batch
rmsprop_cache = {k: np.zeros_like(v) for k, v in model.items()}  # rmsprop memory
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))  # sigmoid "squashing" function to interval [0,1]
def prepro(I):
    """ prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector """
    I = I[35:195]       # crop
    I = I[::2, ::2, 0]  # downsample by factor of 2
    I[I == 144] = 0     # erase background (background type 1)
    I[I == 109] = 0     # erase background (background type 2)
    I[I != 0] = 1       # everything else (paddles, ball) just set to 1
    return I.astype(float).ravel()  # np.float was removed in NumPy 1.20+; plain float is equivalent
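
# Shape flow through prepro, for reference:
#   (210, 160, 3) uint8 frame -> crop -> (160, 160, 3)
#   -> every 2nd row/col of channel 0 -> (80, 80)
#   -> binarise -> flatten -> (6400,) float vector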
def discount_rewards(r):
    """ take 1D float array of rewards and compute discounted reward """
    discounted_r = np.zeros_like(r)
    running_add = 0
    for t in reversed(range(0, r.size)):
        if r[t] != 0: running_add = 0  # reset the sum, since this was a game boundary (pong specific!)
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r
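
# Worked example of the discounting above (gamma = 0.99): the reward sequence
# [0, 0, 1] -- two point-less frames, then a won point -- becomes
# [0.9801, 0.99, 1.0], so earlier actions get credit, attenuated by distance.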
def policy_forward(x):
    h = np.dot(model['W1'], x)
    h[h < 0] = 0  # ReLU nonlinearity
    logp = np.dot(model['W2'], h)
    p = sigmoid(logp)
    return p, h  # return probability of taking action 2, and hidden state
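
# Note: despite the name, logp is the logit (log-odds), not a log-probability;
# sigmoid maps it to p = P(action 2 | x). The whole policy is a 2-layer net,
# 6400 -> 200 (ReLU) -> 1 (sigmoid): 1,280,000 weights in W1 plus 200 in W2.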
def policy_backward(epx, eph, epdlogp):
    """ backward pass. (epx is the stacked inputs, eph the intermediate hidden states) """
    dW2 = np.dot(eph.T, epdlogp).ravel()
    dh = np.outer(epdlogp, model['W2'])
    dh[eph <= 0] = 0  # backprop through the ReLU
    dW1 = np.dot(dh.T, epx)
    return {'W1': dW1, 'W2': dW2}
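
# Sketch of the chain rule at work here: the per-frame score is logp = W2 . h,
# so d(logp)/dW2 = h, giving dW2 = eph^T . epdlogp; and d(logp)/dh = W2,
# masked to zero wherever the ReLU was inactive, after which
# dW1 = dh^T . epx, since h = W1 . x before the nonlinearity.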
env = gym.make("Pong-v0")
observation = env.reset()
prev_x = None  # used in computing the difference frame
xs, hs, dlogps, drs = [], [], [], []
reward_sum = 0
episode_start_time = time.time()
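
# Note: this uses the classic gym API (before gym 0.26), where reset() returns
# just the observation and step() returns a 4-tuple; newer gym/gymnasium
# versions return (obs, info) and a 5-tuple, so pin an older gym to run as-is.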
while True:
    if render: env.render()

    # preprocess the observation, set input to network to be difference image
    cur_x = prepro(observation)
    x = cur_x - prev_x if prev_x is not None else np.zeros(D)
    prev_x = cur_x

    # forward the policy network and sample an action from the returned probability
    aprob, h = policy_forward(x)
    action = 2 if np.random.uniform() < aprob else 3  # roll the dice!

    # record various intermediates (needed later for backprop)
    xs.append(x)  # observation
    hs.append(h)  # hidden state
    y = 1 if action == 2 else 0  # a "fake label"
    dlogps.append(y - aprob)  # grad that encourages the action that was taken to be taken (see http://cs231n.github.io/neural-networks-2/#losses if confused)
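
    # Why y - aprob works: the policy is Bernoulli with p = sigmoid(logit),
    # and d(log P(y))/d(logit) = y - sigmoid(logit) = y - aprob, so this
    # stores the log-likelihood gradient for the action actually sampled.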
    # step the environment and get new measurements
    observation, reward, done, info = env.step(action)
    reward_sum += reward
    drs.append(reward)  # record reward (has to be done after we call step() to get reward for previous action)
    if done:  # an episode finished
        episode_number += 1

        # stack together all inputs, hidden states, action gradients, and rewards for this episode
        epx = np.vstack(xs)
        eph = np.vstack(hs)
        epdlogp = np.vstack(dlogps)
        epr = np.vstack(drs)
        xs, hs, dlogps, drs = [], [], [], []  # reset array memory

        # compute the discounted reward backwards through time
        discounted_epr = discount_rewards(epr)
        # standardize the rewards to be unit normal (helps control the gradient estimator variance)
        discounted_epr -= np.mean(discounted_epr)
        discounted_epr /= np.std(discounted_epr)

        epdlogp *= discounted_epr  # modulate the gradient with advantage (PG magic happens right here.)
        grad = policy_backward(epx, eph, epdlogp)
        for k in model: grad_buffer[k] += grad[k]  # accumulate grad over batch
        # perform rmsprop parameter update every batch_size episodes
        if episode_number % batch_size == 0:
            for k, v in model.items():
                g = grad_buffer[k]  # gradient
                rmsprop_cache[k] = decay_rate * rmsprop_cache[k] + (1 - decay_rate) * g**2
                model[k] += learning_rate * g / (np.sqrt(rmsprop_cache[k]) + 1e-5)
                grad_buffer[k] = np.zeros_like(v)  # reset batch gradient buffer
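
        # Note the += above: this is gradient *ascent* on expected reward,
        # since epdlogp was built to point towards higher log-likelihood of
        # advantaged actions, with RMSProp scaling each weight's step size.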
        # boring book-keeping
        running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
        now = time.time()
        episode_time = now - episode_start_time
        elapsed += episode_time
        episode_start_time = now
        print('Episode %d. Reward total %d. Episode time %.1fs. Total time %ds. Running mean %.3f.' % (episode_number, reward_sum, episode_time, elapsed, running_reward))
        state = (model, episode_number, elapsed, running_reward)
        if episode_number % 100 == 0: pickle.dump(state, open('save.p', 'wb'))  # you can ctrl-c at any time and later resume from this saved state, losing at most 99 episodes of training!
        reward_sum = 0
        observation = env.reset()  # reset env
        prev_x = None
    # I'm switching off this printing which is a bit too fast
    # if reward != 0:  # Pong has either +1 or -1 reward exactly when game ends.
    #     print(('ep %d: game finished, reward: %f' % (episode_number, reward)) + ('' if reward == -1 else ' !!!!!!!!'))