Skip to content

Instantly share code, notes, and snippets.

View araffin's full-sized avatar

Antonin RAFFIN araffin

View GitHub Profile
# Setup excerpt for controlling the MuJoCo Swimmer with open-loop
# oscillators (gist preview — the control loop continues past this excerpt).
import gymnasium as gym
import numpy as np
from gymnasium.envs.mujoco.mujoco_env import MujocoEnv
# Env initialization
env = gym.make("Swimmer-v4", render_mode="human")
# Wrap to have reward statistics
env = gym.wrappers.RecordEpisodeStatistics(env)
# Unwrap to the raw MujocoEnv so low-level simulator state is reachable.
mujoco_env = env.unwrapped
# Number of actuated joints driven by the oscillators (2 for Swimmer).
n_joints = 2
@araffin
araffin / halfcheetah_minimal.py
Last active March 27, 2024 11:55
Minimal implementation to solve the HalfCheetah env using open-loop oscillators
# Setup excerpt for solving HalfCheetah with open-loop oscillators
# (gist preview — the control loop continues past this excerpt).
import gymnasium as gym
import numpy as np
from gymnasium.envs.mujoco.mujoco_env import MujocoEnv
# Env initialization
env = gym.make("HalfCheetah-v4", render_mode="human")
# Wrap to have reward statistics
env = gym.wrappers.RecordEpisodeStatistics(env)
# Unwrap to the raw MujocoEnv so low-level simulator state is reachable.
mujoco_env = env.unwrapped
# Number of actuated joints driven by the oscillators (6 for HalfCheetah).
n_joints = 6
@araffin
araffin / color_mask.py
Created October 6, 2017 15:56
Find the center of a white line in an image using OpenCV
from __future__ import division
import cv2
import numpy as np
# Input Image
image = cv2.imread("my_image.jpg")
# Convert to HSV color space
hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
# Define range of white color in HSV
lower_white = np.array([0, 0, 212])
@araffin
araffin / example_serial.py
Last active March 3, 2023 16:12
Example Use of Robust Serial in Python
# From https://github.com/araffin/python-arduino-serial
# Example use of the robust_serial protocol to talk to an Arduino
# (gist preview — further orders/reads continue past this excerpt).
from robust_serial import Order, write_order, write_i8, write_i16
from robust_serial.utils import open_serial_port
# Open serial port with a baudrate of 9600 (bits/s)
serial_file = open_serial_port(baudrate=9600)
# Send the order "MOTOR", i.e. to change the speed of the car
# equivalent to write_i8(serial_file, Order.MOTOR.value)
write_order(serial_file, Order.MOTOR)
@araffin
araffin / download_local_google_fonts.mjs
Created November 3, 2022 14:35
Download Google fonts locally
// https://github.com/datalogix/google-fonts-helper
// npm install google-fonts-helper
// Download the listed Google fonts to a local folder and emit a CSS file
// (gist preview — the options object and download call continue past this excerpt).
import { download } from 'google-fonts-helper'
const downloader = download('https://fonts.googleapis.com/css?family=Montserrat:400,700%7CRoboto:400,400italic,700%7CRoboto+Mono&display=swap', {
base64: false,       // keep font files on disk instead of inlining them
overwriting: false,  // do not clobber previously downloaded files
outputDir: './',
stylePath: 'fonts.css',
fontsDir: 'fonts',
@araffin
araffin / a2c_lunar.py
Last active October 16, 2022 13:53
Training, Saving and Loading an A2C agent
# Training, saving and loading an A2C agent with Stable-Baselines (v2 API)
# (gist preview — model creation/training continues past this excerpt).
import gym
from stable_baselines import A2C
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
# Create and wrap the environment
env = gym.make('LunarLander-v2')
# A2C expects a vectorized env; wrap the single env in a DummyVecEnv.
env = DummyVecEnv([lambda: env])
@araffin
araffin / RL_CMAES.py
Last active April 19, 2021 08:54
Mixing Reinforcement Learning (RL) and Evolution Strategy (ES) using Stable-Baselines
# Mixing RL (Stable-Baselines A2C) with CMA-ES evolution strategy
# (gist preview — the function body continues past this excerpt).
import gym
import numpy as np
import cma
from collections import OrderedDict
from stable_baselines import A2C
# Presumably flattens a dict of parameter arrays into one vector for
# CMA-ES — TODO confirm, the body is cut off in this preview.
def flatten(params):
    """
@araffin
araffin / demo_baselines.py
Last active April 10, 2020 19:13
Getting Started With Stable Baselines
# from https://github.com/hill-a/stable-baselines
# Getting-started example: train PPO2 on CartPole
# (gist preview — the training call continues past this excerpt).
import gym
from stable_baselines.common.policies import MlpPolicy
from stable_baselines import PPO2
env = gym.make('CartPole-v1')
# verbose=1 prints training progress to stdout.
model = PPO2(MlpPolicy, env, verbose=1)
# Train the agent
# Pytest setup for testing Stable-Baselines algorithms on discrete-action
# environments (gist preview — the list and tests continue past this excerpt).
import pytest
import numpy as np
from stable_baselines import A2C, ACER, ACKTR, DQN, DDPG, PPO1, PPO2, TRPO
from stable_baselines.common import set_global_seeds
# Algorithms that support discrete action spaces (list truncated in preview).
MODEL_LIST_DISCRETE = [
A2C,
ACER,
ACKTR,
# Multiprocessed training with SubprocVecEnv and ACKTR
# (gist preview — the function body continues past this excerpt).
import gym
import numpy as np
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import SubprocVecEnv
from stable_baselines.common import set_global_seeds
from stable_baselines import ACKTR
# Presumably returns a thunk creating a seeded env for worker `rank`
# — TODO confirm, the body is cut off in this preview.
def make_env(env_id, rank, seed=0):
    """