@malzantot
Last active October 20, 2022 04:00
Solution of the MountainCar OpenAI Gym problem using Q-Learning.
"""
Q-Learning example using OpenAI gym MountainCar environment
Author: Moustafa Alzantot (malzantot@ucla.edu)
"""
import numpy as np
import gym
from gym import wrappers
n_states = 40
iter_max = 10000
initial_lr = 1.0 # Learning rate
min_lr = 0.003
gamma = 1.0
t_max = 10000
eps = 0.02
def run_episode(env, policy=None, render=False):
    """Run a single episode; follow `policy` if given, otherwise act randomly."""
    obs = env.reset()
    total_reward = 0
    step_idx = 0
    for _ in range(t_max):
        if render:
            env.render()
        if policy is None:
            action = env.action_space.sample()
        else:
            a, b = obs_to_state(env, obs)
            action = policy[a][b]
        obs, reward, done, _ = env.step(action)
        total_reward += gamma ** step_idx * reward
        step_idx += 1
        if done:
            break
    return total_reward
def obs_to_state(env, obs):
    """Map a continuous observation to a pair of discrete state indices."""
    env_low = env.observation_space.low
    env_high = env.observation_space.high
    env_dx = (env_high - env_low) / n_states
    a = int((obs[0] - env_low[0]) / env_dx[0])
    b = int((obs[1] - env_low[1]) / env_dx[1])
    return a, b
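# For MountainCar-v0 the observation space is roughly low = [-1.2, -0.07]
# (position, velocity) and high = [0.6, 0.07], so with n_states = 40 each
# position bin is about 0.045 wide and each velocity bin about 0.0035 wide.
# For example, a starting observation near [-0.5, 0.0] maps to roughly
# a = int((-0.5 + 1.2) / 0.045) = 15 and b = int((0.0 + 0.07) / 0.0035) = 20.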
if __name__ == '__main__':
    env_name = 'MountainCar-v0'
    env = gym.make(env_name)
    env.seed(0)
    np.random.seed(0)
    print('----- using Q Learning -----')
    q_table = np.zeros((n_states, n_states, 3))
    for i in range(iter_max):
        obs = env.reset()
        total_reward = 0
        # eta: learning rate, decayed every 100 episodes
        eta = max(min_lr, initial_lr * (0.85 ** (i // 100)))
        for j in range(t_max):
            a, b = obs_to_state(env, obs)
            # with probability eps act uniformly at random,
            # otherwise sample an action from a softmax over the Q-values
            if np.random.uniform(0, 1) < eps:
                action = np.random.choice(env.action_space.n)
            else:
                logits = q_table[a][b]
                logits_exp = np.exp(logits)
                probs = logits_exp / np.sum(logits_exp)
                action = np.random.choice(env.action_space.n, p=probs)
            obs, reward, done, _ = env.step(action)
            total_reward += (gamma ** j) * reward
            # update q table
            a_, b_ = obs_to_state(env, obs)
            q_table[a][b][action] = q_table[a][b][action] + eta * (reward + gamma * np.max(q_table[a_][b_]) - q_table[a][b][action])
            if done:
                break
        if i % 100 == 0:
            print('Iteration #%d -- Total reward = %d.' % (i + 1, total_reward))
    solution_policy = np.argmax(q_table, axis=2)
    solution_policy_scores = [run_episode(env, solution_policy, False) for _ in range(100)]
    print("Average score of solution = ", np.mean(solution_policy_scores))
    # Animate it
    run_episode(env, solution_policy, True)
@LinuxIsCool

Does this solution work? It never seems to randomly gain any reward, and thus never learns.

@semolnahali

semolnahali commented Apr 29, 2018

Iteration #9901 -- Total reward = -200.
Average score of solution = -129.96

@BlueDi

BlueDi commented Nov 4, 2018

Line 51 should be:

env = gym.make(env_name).env

Also close the environment at line 37:

if done:
    env.close()
    break
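Putting both suggestions together, a minimal self-contained sketch (using the classic pre-0.26 Gym reset/step API as in the gist above; MountainCar-v0 is normally wrapped in a 200-step TimeLimit, which .env removes, so a manual step cap is added and a random policy stands in for the learned one purely for illustration):

import gym

env = gym.make('MountainCar-v0').env   # .env unwraps the 200-step TimeLimit wrapper
obs = env.reset()
for _ in range(10000):                 # manual step cap, since the wrapper no longer enforces one
    obs, reward, done, _ = env.step(env.action_space.sample())  # random action, just to exercise the loop
    if done:
        env.close()                    # release the environment once the goal is reached
        break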
