@muupan
Created November 17, 2020 13:53
"""A training script of Soft Actor-Critic on OpenAI Gym Mujoco environments.
This script follows the settings of https://arxiv.org/abs/1812.05905 as much
as possible.
"""
import argparse
from distutils.version import LooseVersion
import functools
import logging
import sys
import torch
from torch import nn
from torch import distributions
import gym
import gym.wrappers
import numpy as np
import pfrl
from pfrl import experiments
from pfrl.nn.lmbda import Lambda
from pfrl import utils
from pfrl import replay_buffers
import torch_optimizer


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument(
        "--env",
        type=str,
        default="Hopper-v2",
        help="OpenAI Gym MuJoCo env to perform algorithm on.",
    )
    parser.add_argument(
        "--num-envs", type=int, default=1, help="Number of envs run in parallel."
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 32)")
    parser.add_argument(
        "--gpu", type=int, default=0, help="GPU to use, set to -1 if no GPU."
    )
    parser.add_argument(
        "--load", type=str, default="", help="Directory to load agent from."
    )
    parser.add_argument(
        "--steps",
        type=int,
        default=10 ** 6,
        help="Total number of timesteps to train the agent.",
    )
    parser.add_argument(
        "--eval-n-runs",
        type=int,
        default=10,
        help="Number of episodes run for each evaluation.",
    )
    parser.add_argument(
        "--eval-interval",
        type=int,
        default=5000,
        help="Interval in timesteps between evaluations.",
    )
    parser.add_argument(
        "--replay-start-size",
        type=int,
        default=10000,
        help="Minimum replay buffer size before performing gradient updates.",
    )
    parser.add_argument("--batch-size", type=int, default=256, help="Minibatch size")
    parser.add_argument(
        "--render", action="store_true", help="Render env states in a GUI window."
    )
    parser.add_argument(
        "--demo", action="store_true", help="Just run evaluation, not training."
    )
    parser.add_argument("--load-pretrained", action="store_true", default=False)
    parser.add_argument(
        "--pretrained-type", type=str, default="best", choices=["best", "final"]
    )
    parser.add_argument(
        "--monitor", action="store_true", help="Wrap env with gym.wrappers.Monitor."
    )
    parser.add_argument(
        "--log-interval",
        type=int,
        default=1000,
        help="Interval in timesteps between outputting log messages during training.",
    )
    parser.add_argument(
        "--log-level", type=int, default=logging.INFO, help="Level of the root logger."
    )
    parser.add_argument(
        "--policy-output-scale",
        type=float,
        default=1.0,
        help="Weight initialization scale of policy output.",
    )
    parser.add_argument(
        "--optimizer", type=str, default="AdaBelief",
    )
    args = parser.parse_args()

    logging.basicConfig(level=args.log_level)

    args.outdir = experiments.prepare_output_dir(args, args.outdir, argv=sys.argv)
    print("Output files are saved in {}".format(args.outdir))

    # Set a random seed used in PFRL
    utils.set_random_seed(args.seed)

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2 ** 32

    def make_env(process_idx, test):
        env = gym.make(args.env)
        # Unwrap the TimeLimit wrapper
        assert isinstance(env, gym.wrappers.TimeLimit)
        env = env.env
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[process_idx])
        env_seed = 2 ** 32 - 1 - process_seed if test else process_seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = pfrl.wrappers.CastObservationToFloat32(env)
        # Normalize action space to [-1, 1]^n
        env = pfrl.wrappers.NormalizeActionSpace(env)
        if args.monitor:
            env = gym.wrappers.Monitor(env, args.outdir)
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    def make_batch_env(test):
        return pfrl.envs.MultiprocessVectorEnv(
            [
                functools.partial(make_env, idx, test)
                for idx in range(args.num_envs)
            ]
        )
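
    # Build one non-vectorized env to read the observation/action spaces and
    # the episode time limit used by the training and evaluation loops below.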
    sample_env = make_env(process_idx=0, test=False)
    timestep_limit = sample_env.spec.max_episode_steps
    obs_space = sample_env.observation_space
    action_space = sample_env.action_space
    print("Observation space:", obs_space)
    print("Action space:", action_space)

    obs_size = obs_space.low.size
    action_size = action_space.low.size

    if LooseVersion(torch.__version__) < LooseVersion("1.5.0"):
        raise Exception("This script requires a PyTorch version >= 1.5.0")
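
    # The head below turns the network output (mean and log-scale per action
    # dimension) into a diagonal Gaussian squashed by tanh, which is the policy
    # parameterization used in the SAC papers.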
    def squashed_diagonal_gaussian_head(x):
        assert x.shape[-1] == action_size * 2
        mean, log_scale = torch.chunk(x, 2, dim=1)
        log_scale = torch.clamp(log_scale, -20.0, 2.0)
        var = torch.exp(log_scale * 2)
        base_distribution = distributions.Independent(
            distributions.Normal(loc=mean, scale=torch.sqrt(var)), 1
        )
        # cache_size=1 is required for numerical stability
        return distributions.transformed_distribution.TransformedDistribution(
            base_distribution, [distributions.transforms.TanhTransform(cache_size=1)]
        )
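
    # Resolve the optimizer class by name: "OfficialAdaBelief" uses the
    # adabelief_pytorch package; any other name is looked up in torch_optimizer
    # first and then in torch.optim.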
    def make_optimizer(parameters):
        if args.optimizer == "OfficialAdaBelief":
            import adabelief_pytorch

            optim_class = adabelief_pytorch.AdaBelief
        else:
            optim_class = getattr(
                torch_optimizer,
                args.optimizer,
                getattr(torch.optim, args.optimizer, None),
            )
        assert optim_class is not None
        print(str(optim_class), "with default hyperparameters")
        return optim_class(parameters)
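
    # Policy network: two 256-unit hidden layers; the last layer outputs
    # 2 * action_size values consumed by squashed_diagonal_gaussian_head.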
    policy = nn.Sequential(
        nn.Linear(obs_size, 256),
        nn.ReLU(),
        nn.Linear(256, 256),
        nn.ReLU(),
        nn.Linear(256, action_size * 2),
        Lambda(squashed_diagonal_gaussian_head),
    )
    torch.nn.init.xavier_uniform_(policy[0].weight)
    torch.nn.init.xavier_uniform_(policy[2].weight)
    torch.nn.init.xavier_uniform_(policy[4].weight, gain=args.policy_output_scale)
    policy_optimizer = make_optimizer(policy.parameters())
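
    # Each Q-function takes the concatenated observation and action and outputs
    # a scalar value; SAC trains two of them and backs up the smaller estimate.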
    def make_q_func_with_optimizer():
        q_func = nn.Sequential(
            pfrl.nn.ConcatObsAndAction(),
            nn.Linear(obs_size + action_size, 256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
        )
        torch.nn.init.xavier_uniform_(q_func[1].weight)
        torch.nn.init.xavier_uniform_(q_func[3].weight)
        torch.nn.init.xavier_uniform_(q_func[5].weight)
        q_func_optimizer = make_optimizer(q_func.parameters())
        return q_func, q_func_optimizer

    q_func1, q_func1_optimizer = make_q_func_with_optimizer()
    q_func2, q_func2_optimizer = make_q_func_with_optimizer()

    rbuf = replay_buffers.ReplayBuffer(10 ** 6)

    def burnin_action_func():
        """Select random actions until model is updated one or more times."""
        return np.random.uniform(action_space.low, action_space.high).astype(np.float32)

    # Hyperparameters in http://arxiv.org/abs/1802.09477
    agent = pfrl.agents.SoftActorCritic(
        policy,
        q_func1,
        q_func2,
        policy_optimizer,
        q_func1_optimizer,
        q_func2_optimizer,
        rbuf,
        gamma=0.99,
        replay_start_size=args.replay_start_size,
        gpu=args.gpu,
        minibatch_size=args.batch_size,
        burnin_action_func=burnin_action_func,
        entropy_target=-action_size,
        temperature_optimizer_lr=3e-4,
    )

    if len(args.load) > 0 or args.load_pretrained:
        if args.load_pretrained:
            raise Exception("Pretrained models are currently unsupported.")
        # either load or load_pretrained must be false
        assert not len(args.load) > 0 or not args.load_pretrained
        if len(args.load) > 0:
            agent.load(args.load)
        else:
            agent.load(
                utils.download_model("SAC", args.env, model_type=args.pretrained_type)[
                    0
                ]
            )
    if args.demo:
        eval_stats = experiments.eval_performance(
            env=make_batch_env(test=True),
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit,
        )
        print(
            "n_runs: {} mean: {} median: {} stdev {}".format(
                args.eval_n_runs,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(test=False),
            eval_env=make_batch_env(test=True),
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            log_interval=args.log_interval,
            max_episode_len=timestep_limit,
        )


if __name__ == "__main__":
    main()
muupan commented Nov 17, 2020

SAC with various optimizers

This script is based on https://github.com/pfnet/pfrl/blob/master/examples/mujoco/reproduction/soft_actor_critic/train_soft_actor_critic.py.

Dependencies

  • torch==1.5.0
  • pfrl==0.1.0
  • gym[mujoco]==0.15.4
  • torch-optimizer==0.0.1a16
  • adabelief_pytorch==0.1.0

Results

python3 train_sac_optim.py --optimizer [Adam|AdaBelief|OfficialAdaBelief] --steps 3000000 --eval-interval 50000 --env HalfCheetah-v2 --seed [0-9]

See the thread at https://twitter.com/mooopan/status/1319488112813166592 for the results.
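
Below is a minimal sketch of how the final evaluation scores could be aggregated across seeds for one optimizer. It assumes a hypothetical output layout of results/<optimizer>/<seed>/ and that PFRL's training loop writes a tab-separated scores.txt with a "mean" column into each output directory; verify both against your setup before relying on it.

import glob

import numpy as np

# Hypothetical layout: one output directory per seed for a given optimizer.
score_files = sorted(glob.glob("results/AdaBelief/*/scores.txt"))
final_means = []
for path in score_files:
    with open(path) as f:
        # Column names are assumed from PFRL's scores.txt format; adjust if needed.
        header = f.readline().rstrip("\n").split("\t")
        rows = [line.rstrip("\n").split("\t") for line in f if line.strip()]
    mean_col = header.index("mean")
    # Mean return of the last evaluation in this run
    final_means.append(float(rows[-1][mean_col]))

print("runs:", len(final_means))
print("final eval mean over seeds:", np.mean(final_means))
print("stdev over seeds:", np.std(final_means, ddof=1))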
