Gist by @goncalossilva (last active April 17, 2018, 00:54).
//<![CDATA[
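// Variant 1: 52 patches ahead / 12 behind, hidden layers of 40-24-24-24 neurons,
// gamma 0.97, learning starts after 50,000 stored experiences.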
// Some assignments deliberately omit var: they update globals that the game already defines.
lanesSide = 3;            // lanes visible on each side of the agent
patchesAhead = 52;        // road patches visible ahead of the agent
patchesBehind = 12;       // road patches visible behind the agent
trainIterations = 500000; // number of training iterations to run
// the number of other autonomous vehicles controlled by your network
otherAgents = 0; // max of 10
var num_inputs = (lanesSide * 2 + 1) * (patchesAhead + patchesBehind); // one input per visible road patch
var num_actions = 5;     // the agent chooses among 5 discrete actions
var temporal_window = 0; // no past frames are fed to the network
var network_size = num_inputs * temporal_window + num_actions * temporal_window + num_inputs; // equals num_inputs when temporal_window is 0
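// layer_defs describes the ConvNetJS network: an input layer sized to the state,
// a stack of fully connected ('fc') hidden layers, and a regression output with
// one value per action (the predicted Q-values).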
var layer_defs = [];
layer_defs.push({
    type: 'input',
    out_sx: 1,
    out_sy: 1,
    out_depth: network_size
});
layer_defs.push({
    type: 'fc',
    num_neurons: 40,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 24,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 24,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 24,
    activation: 'tanh'
});
layer_defs.push({
    type: 'regression',
    num_neurons: num_actions
});
var tdtrainer_options = {
    learning_rate: 0.001, // SGD step size
    momentum: 0.0,        // plain SGD, no momentum
    batch_size: 128,      // experiences per gradient update
    l2_decay: 0.01        // L2 weight regularization
};
var opt = {};
opt.temporal_window = temporal_window;
opt.experience_size = 100000;      // replay memory capacity
opt.start_learn_threshold = 50000; // stored experiences required before learning begins
opt.gamma = 0.97;                  // discount factor for future rewards
opt.learning_steps_total = 500000; // steps over which epsilon is annealed
opt.learning_steps_burnin = 1000;  // initial steps with fully random actions
opt.epsilon_min = 0.0;             // exploration rate after annealing finishes
opt.epsilon_test_time = 0.0;       // exploration rate during evaluation
opt.layer_defs = layer_defs;
opt.tdtrainer_options = tdtrainer_options;
brain = new deepqlearn.Brain(num_inputs, num_actions, opt);
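// learn() is the callback the game invokes each simulation step: it rewards the
// previous action and then asks the network for the next one.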
learn = function (state, lastReward) {
    brain.backward(lastReward);        // reward the action taken on the previous step
    var action = brain.forward(state); // pick the next action from the current state
    draw_net();                        // refresh the network visualization
    draw_stats();                      // refresh the training statistics
    return action;
}
//]]>
//<![CDATA[
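// Variant 2: 50 patches ahead / 10 behind, hidden layers of 42-42-42-42-21-21 neurons,
// gamma 0.98, 600,000 learning steps, learning starts after 5,000 stored experiences.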
// Some assignments deliberately omit var: they update globals that the game already defines.
lanesSide = 3;
patchesAhead = 50;
patchesBehind = 10;
trainIterations = 500000;
// the number of other autonomous vehicles controlled by your network
otherAgents = 0; // max of 10
var num_inputs = (lanesSide * 2 + 1) * (patchesAhead + patchesBehind);
var num_actions = 5;
var temporal_window = 0;
var network_size = num_inputs * temporal_window + num_actions * temporal_window + num_inputs;
var layer_defs = [];
layer_defs.push({
    type: 'input',
    out_sx: 1,
    out_sy: 1,
    out_depth: network_size
});
layer_defs.push({
    type: 'fc',
    num_neurons: 42,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 42,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 42,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 42,
    activation: 'tanh'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 21,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 21,
    activation: 'tanh'
});
layer_defs.push({
    type: 'regression',
    num_neurons: num_actions
});
var tdtrainer_options = {
    learning_rate: 0.001,
    momentum: 0.0,
    batch_size: 128,
    l2_decay: 0.01
};
var opt = {};
opt.temporal_window = temporal_window;
opt.experience_size = 100000;
opt.start_learn_threshold = 5000;
opt.gamma = 0.98;
opt.learning_steps_total = 600000;
opt.learning_steps_burnin = 2000;
opt.epsilon_min = 0.0;
opt.epsilon_test_time = 0.0;
opt.layer_defs = layer_defs;
opt.tdtrainer_options = tdtrainer_options;
brain = new deepqlearn.Brain(num_inputs, num_actions, opt);
learn = function (state, lastReward) {
    brain.backward(lastReward);
    var action = brain.forward(state);
    draw_net();
    draw_stats();
    return action;
}
//]]>
//<![CDATA[
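// Variant 3: 50 patches ahead / 10 behind, hidden layers of 32-32-24-24-24-20 neurons,
// gamma 0.96, learning starts after 5,000 stored experiences.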
// Some assignments deliberately omit var: they update globals that the game already defines.
lanesSide = 3;
patchesAhead = 50;
patchesBehind = 10;
trainIterations = 500000;
// the number of other autonomous vehicles controlled by your network
otherAgents = 0; // max of 10
var num_inputs = (lanesSide * 2 + 1) * (patchesAhead + patchesBehind);
var num_actions = 5;
var temporal_window = 0;
var network_size = num_inputs * temporal_window + num_actions * temporal_window + num_inputs;
var layer_defs = [];
layer_defs.push({
    type: 'input',
    out_sx: 1,
    out_sy: 1,
    out_depth: network_size
});
layer_defs.push({
    type: 'fc',
    num_neurons: 32,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 32,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 24,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 24,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 24,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 20,
    activation: 'tanh'
});
layer_defs.push({
    type: 'regression',
    num_neurons: num_actions
});
var tdtrainer_options = {
    learning_rate: 0.001,
    momentum: 0.0,
    batch_size: 128,
    l2_decay: 0.01
};
var opt = {};
opt.temporal_window = temporal_window;
opt.experience_size = 100000;
opt.start_learn_threshold = 5000;
opt.gamma = 0.96;
opt.learning_steps_total = 500000;
opt.learning_steps_burnin = 1000;
opt.epsilon_min = 0.0;
opt.epsilon_test_time = 0.0;
opt.layer_defs = layer_defs;
opt.tdtrainer_options = tdtrainer_options;
brain = new deepqlearn.Brain(num_inputs, num_actions, opt);
learn = function (state, lastReward) {
    brain.backward(lastReward);
    var action = brain.forward(state);
    draw_net();
    draw_stats();
    return action;
}
//]]>
//<![CDATA[
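// Variant 4: 50 patches ahead / 10 behind, hidden layers of 48-48-32-32-24-24 neurons
// with alternating relu/tanh activations, gamma 0.98, learning starts after 5,000 stored experiences.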
// Some assignments deliberately omit var: they update globals that the game already defines.
lanesSide = 3;
patchesAhead = 50;
patchesBehind = 10;
trainIterations = 500000;
// the number of other autonomous vehicles controlled by your network
otherAgents = 0; // max of 10
var num_inputs = (lanesSide * 2 + 1) * (patchesAhead + patchesBehind);
var num_actions = 5;
var temporal_window = 0;
var network_size = num_inputs * temporal_window + num_actions * temporal_window + num_inputs;
var layer_defs = [];
layer_defs.push({
    type: 'input',
    out_sx: 1,
    out_sy: 1,
    out_depth: network_size
});
layer_defs.push({
    type: 'fc',
    num_neurons: 48,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 48,
    activation: 'tanh'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 32,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 32,
    activation: 'tanh'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 24,
    activation: 'relu'
});
layer_defs.push({
    type: 'fc',
    num_neurons: 24,
    activation: 'tanh'
});
layer_defs.push({
    type: 'regression',
    num_neurons: num_actions
});
var tdtrainer_options = {
    learning_rate: 0.001,
    momentum: 0.0,
    batch_size: 128,
    l2_decay: 0.01
};
var opt = {};
opt.temporal_window = temporal_window;
opt.experience_size = 100000;
opt.start_learn_threshold = 5000;
opt.gamma = 0.98;
opt.learning_steps_total = 500000;
opt.learning_steps_burnin = 1000;
opt.epsilon_min = 0.0;
opt.epsilon_test_time = 0.0;
opt.layer_defs = layer_defs;
opt.tdtrainer_options = tdtrainer_options;
brain = new deepqlearn.Brain(num_inputs, num_actions, opt);
learn = function (state, lastReward) {
    brain.backward(lastReward);
    var action = brain.forward(state);
    draw_net();
    draw_stats();
    return action;
}
//]]>
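// Note: the four blocks above are standalone configurations of the same script;
// presumably only one of them is pasted into the game's code editor at a time,
// replacing the default training script.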