Created by keon, February 18, 2017
DQN
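A minimal Deep Q-Network (DQN) agent for OpenAI Gym's CartPole-v0, written against the Keras 1.x and Gym APIs current in early 2017. The agent stores transitions in a bounded replay buffer, acts epsilon-greedily, and after each episode regresses sampled Q-values toward the one-step target r + gamma * max_a' Q(s', a').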
# -*- coding: utf-8 -*-
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import RMSprop

EPISODES = 5000
BATCH_SIZE = 16


class DQNAgent:
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        self.memory = deque(maxlen=10000)  # replay buffer of (s, a, r, s', done)
        self.gamma = 0.9          # discount rate
        self.epsilon = 1.0        # exploration rate
        self.e_decay = .996       # multiplicative epsilon decay per replay
        self.e_min = 0.05         # floor for epsilon
        self.learning_rate = 0.0001
        self.model = self._build_model()

    def _build_model(self):
        # Neural net for the Deep Q-learning model: maps a state to one
        # Q-value per action. Note: init= and lr= are Keras 1.x argument
        # names, matching the keras.* imports above.
        model = Sequential()
        model.add(Dense(64, input_dim=self.state_size, activation='tanh'))
        model.add(Dense(128, activation='tanh', init='uniform'))
        model.add(Dense(128, activation='tanh', init='uniform'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse',
                      optimizer=RMSprop(lr=self.learning_rate))
        return model

    def remember(self, state, action, reward, next_state, done):
        # Store a transition in the replay buffer.
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        # Epsilon-greedy policy: explore with probability epsilon,
        # otherwise take the action with the highest predicted Q-value.
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])  # returns action

    def replay(self, batch_size):
        # Train on a random minibatch of stored transitions. Each sample is
        # regressed toward the one-step Q-learning target
        # r + gamma * max_a' Q(s', a'), or just r at terminal states.
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = self.model.predict(state)
            if done:
                target[0][action] = reward
            else:
                target[0][action] = reward + self.gamma * \
                    np.max(self.model.predict(next_state)[0])
            self.model.fit(state, target, nb_epoch=1, verbose=0)  # nb_epoch: Keras 1.x name
        if self.epsilon > self.e_min:
            self.epsilon *= self.e_decay


if __name__ == "__main__":
    env = gym.make('CartPole-v0')
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n
    agent = DQNAgent(state_size, action_size)

    for e in range(EPISODES):
        state = env.reset()
        state = np.reshape(state, [1, state_size])
        for time in range(5000):
            env.render()
            action = agent.act(state)
            next_state, reward, done, _ = env.step(action)
            next_state = np.reshape(next_state, [1, state_size])
            # Penalize the terminal step so the agent learns to avoid failure.
            reward = -100 if done else reward
            agent.remember(state, action, reward, next_state, done)
            state = next_state
            if done or time == 4999:
                print("episode: {}/{}, score: {}, e: {:.2}"
                      .format(e, EPISODES, time, agent.epsilon))
                break
        # Only replay once the buffer holds at least a full minibatch;
        # random.sample raises ValueError otherwise.
        if len(agent.memory) > BATCH_SIZE:
            agent.replay(BATCH_SIZE)
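If you run this script under current libraries, a few call signatures have changed since 2017. The sketch below shows what I believe are the equivalents for Keras 2+ and Gymnasium; treat it as an assumption to check against your installed versions, not as part of the original gist.

# Hypothetical port of the version-sensitive calls to Keras 2+ / Gymnasium.
# The rest of the gist's logic can stay as written.
import gymnasium as gym  # maintained successor to the original gym package
from keras.layers import Dense
from keras.optimizers import RMSprop

# Keras 2 renamed init= to kernel_initializer= and nb_epoch= to epochs=.
layer = Dense(128, activation='tanh', kernel_initializer='random_uniform')
optimizer = RMSprop(learning_rate=0.0001)  # lr= became learning_rate=
# model.fit(state, target, epochs=1, verbose=0)

# Gymnasium's reset() returns (observation, info) and step() returns
# five values; rendering is requested at construction time instead of
# via per-step env.render() calls.
env = gym.make('CartPole-v0', render_mode='human')
state, info = env.reset()
next_state, reward, terminated, truncated, info = env.step(env.action_space.sample())
done = terminated or truncated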