ray yaml
# A unique identifier for the head node and workers of this cluster.
cluster_name: cluster_name

# The maximum number of worker nodes to launch in addition to the head
# node.
max_workers: 16

# The autoscaler will scale up the cluster faster with higher upscaling speed.
# E.g., if the task requires adding more nodes then the autoscaler will gradually
# scale up the cluster in chunks of upscaling_speed*currently_running_nodes.
# This number should be > 0.
upscaling_speed: 1.0
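# For example, with upscaling_speed 1.0 and 4 nodes currently running, the
# autoscaler may request up to 1.0 * 4 = 4 additional nodes in one scaling
# round (illustrative arithmetic, not a value taken from this cluster).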
# This executes all commands on all nodes in the docker container,
# and opens all the necessary ports to support the Ray cluster.
# Empty string means disabled.
docker:
    image: XXXXXX.dkr.ecr.us-east-1.amazonaws.com/image:0.0.1
    container_name: "image-name"
    # If true, pulls latest version of image. Otherwise, `docker run` will only pull the image
    # if no cached version is present.
    pull_before_run: True
    run_options:  # Extra options to pass into "docker run"
        - --ulimit nofile=65536:65536
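        # (Raising the open-file limit helps because Ray workers and the object
        # store can hold many sockets and file descriptors open at once; this
        # rationale is general background, not stated elsewhere in this file.)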
    # Example of running a GPU head with CPU workers
    # head_image: "rayproject/ray-ml:latest-gpu"
    # Allow Ray to automatically detect GPUs

    # worker_image: "rayproject/ray-ml:latest-cpu"
    # worker_run_options: []
# If a node is idle for this many minutes, it will be removed.
idle_timeout_minutes: 5

# Cloud-provider specific configuration.
provider:
    type: aws
    region: us-east-1
    # Availability zone(s), comma-separated, that nodes may be launched in.
    # Nodes will be launched in the first listed availability zone and will
    # be tried in the subsequent availability zones if launching fails.
    availability_zone: us-east-1a,us-east-1b,us-east-1c,us-east-1d,us-east-1e,us-east-1f
    # Whether to allow node reuse. If set to False, nodes will be terminated
    # instead of stopped.
    cache_stopped_nodes: False  # If not present, the default is True.
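    # (With False, idle nodes are terminated outright and every scale-up boots a
    # fresh instance; with True, nodes are stopped and can be restarted faster on
    # the next scale-up, but their EBS volumes stay allocated. This trade-off is
    # general AWS/Ray behavior, not configured elsewhere in this file.)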
    security_group:
        GroupName: group-name

# How Ray will authenticate with newly launched nodes.
auth:
    ssh_user: ubuntu
    # By default Ray creates a new private keypair, but you can also use your own.
    # If you do so, make sure to also set "KeyName" in the head and worker node
    # configurations below.
    ssh_private_key: ~/.ssh/key.pem
# Tell the autoscaler the allowed node types and the resources they provide.
# The key is the name of the node type, which is just for debugging purposes.
# The node config specifies the launch config and physical instance type.
available_node_types:
    ray.head.default:
        min_workers: 1
        max_workers: 1
        # The node type's CPU and GPU resources are auto-detected based on AWS instance type.
        # If desired, you can override the autodetected CPU and GPU resources advertised to the autoscaler.
        # You can also set custom resources.
        # For example, to mark a node type as having 1 CPU, 1 GPU, and 5 units of a resource called "custom", set
        # resources: {"CPU": 1, "GPU": 1, "custom": 5}
        # resources: {}
        # Provider-specific config for this node type, e.g. instance type. By default
        # Ray will auto-configure unspecified fields such as SubnetId and KeyName.
        # For more documentation on available fields, see:
        # http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances
        node_config:
            InstanceType: m5.large
            # Default AMI for us-east-1.
            # Check https://github.com/ray-project/ray/blob/master/python/ray/autoscaler/_private/aws/config.py
            # for default images for other zones.
            ImageId: ami-0dd6adfad4ad37eec
            KeyName: key
            EbsOptimized: True
            IamInstanceProfile:
                Name: IamInstance
            # You can provision additional disk space with a conf as follows:
            BlockDeviceMappings:
                - DeviceName: /dev/sda1
                  Ebs:
                      VolumeType: gp3
                      VolumeSize: 140
            TagSpecifications:
                - ResourceType: instance
                  Tags:
                      - Key: Environment
                        Value: 'development'
                      - Key: Product
                        Value: 'project'
                - ResourceType: volume
                  Tags:
                      - Key: Environment
                        Value: 'development'
                      - Key: Product
                        Value: 'project'
    ray.worker.default:
        # The minimum number of worker nodes of this type to launch.
        # This number should be >= 0.
        min_workers: 5
        # The maximum number of worker nodes of this type to launch.
        # This takes precedence over min_workers.
        # max_workers: 2
        # The node type's CPU and GPU resources are auto-detected based on AWS instance type.
        # If desired, you can override the autodetected CPU and GPU resources advertised to the autoscaler.
        # You can also set custom resources.
        # For example, to mark a node type as having 1 CPU, 1 GPU, and 5 units of a resource called "custom", set
        # resources: {"CPU": 1, "GPU": 1, "custom": 5}
        # resources: {}
        # Provider-specific config for this node type, e.g. instance type. By default
        # Ray will auto-configure unspecified fields such as SubnetId and KeyName.
        # For more documentation on available fields, see:
        # http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances
        node_config:
            InstanceType: m5.large
            # Default AMI for us-east-1.
            # Check https://github.com/ray-project/ray/blob/master/python/ray/autoscaler/_private/aws/config.py
            # for default images for other zones.
            # https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#Images:visibility=public-images;owner=898082745236;architecture=arm64;v=3;$case=tags:false%5C,client:false;$regex=tags:false%5C,client:false
            ImageId: ami-0dd6adfad4ad37eec
            KeyName: key
            EbsOptimized: True
            IamInstanceProfile:
                Name: IamInstanceProfile
            # Run workers on spot by default. Comment this out to use on-demand.
            # NOTE: If relying on spot instances, it is best to specify multiple different instance
            # types to avoid interruption when one instance type is experiencing heightened demand.
            # Demand information can be found at https://aws.amazon.com/ec2/spot/instance-advisor/
            InstanceMarketOptions:
                MarketType: spot
                # Additional options can be found in the boto docs, e.g.
                # SpotOptions:
                #     MaxPrice: MAX_HOURLY_PRICE
            TagSpecifications:
                - ResourceType: instance
                  Tags:
                      - Key: Environment
                        Value: 'development'
                      - Key: Product
                        Value: 'project'
                - ResourceType: volume
                  Tags:
                      - Key: Environment
                        Value: 'development'
                      - Key: Product
                        Value: 'project'
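    # To reduce exposure to spot interruptions, you could add further worker node
    # types with a different instance family, giving the autoscaler more than one
    # pool to launch from. Illustrative, commented-out sketch only; the type name
    # and instance type below are assumptions, not part of this cluster:
    # ray.worker.m5a:
    #     min_workers: 0
    #     max_workers: 8
    #     node_config:
    #         InstanceType: m5a.large
    #         ImageId: ami-0dd6adfad4ad37eec
    #         KeyName: key
    #         InstanceMarketOptions:
    #             MarketType: spot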
# Specify the node type of the head node (as configured above).
head_node_type: ray.head.default

# Files or directories to copy to the head and worker nodes. The format is a
# dictionary from REMOTE_PATH: LOCAL_PATH, e.g.
file_mounts: {
#    "/path1/on/remote/machine": "/path1/on/local/machine",
#    "/path2/on/remote/machine": "/path2/on/local/machine",
}

# Files or directories to copy from the head node to the worker nodes. The format is a
# list of paths. The same path on the head node will be copied to the worker node.
# This behavior is a subset of the file_mounts behavior. In the vast majority of cases
# you should just use file_mounts. Only use this if you know what you're doing!
cluster_synced_files: []

# Whether changes to directories in file_mounts or cluster_synced_files on the head node
# should sync continuously to the worker nodes.
file_mounts_sync_continuously: False

# Patterns for files to exclude when running rsync up or rsync down
rsync_exclude:
    - "**/.git"
    - "**/.git/**"

# Pattern files to use for filtering out files when running rsync up or rsync down. The file is searched for
# in the source directory and recursively through all subdirectories. For example, if .gitignore is provided
# as a value, the behavior will match git's behavior for finding and using .gitignore files.
rsync_filter:
    - ".gitignore"
# List of commands that will be run before `setup_commands`. If docker is
# enabled, these commands will run outside the container and before docker
# is set up.
initialization_commands: [
    'aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin XXXXXXX.dkr.ecr.us-east-1.amazonaws.com'
]
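# Note: the ECR login above runs on each node, so the AWS CLI there draws
# credentials from the node's IamInstanceProfile; that role needs
# ecr:GetAuthorizationToken plus the ECR read permissions that `docker pull`
# relies on. (General AWS requirement, not something set in this file.)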
# List of shell commands to run to set up nodes.
setup_commands: []
    # Note: if you're developing Ray, you probably want to create a Docker image that
    # has your Ray repo pre-cloned. Then, you can replace the pip installs
    # below with a git checkout <your_sha> (and possibly a recompile).
    # To run the nightly version of ray (as opposed to the latest), either use a rayproject docker image
    # that has the "nightly" tag (e.g. "rayproject/ray-ml:nightly-gpu") or uncomment the following line:
    # - pip install -U "ray[default] @ https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-3.0.0.dev0-cp37-cp37m-manylinux2014_x86_64.whl"

# Custom commands that will be run on the head node after common setup.
head_setup_commands: []

# Custom commands that will be run on worker nodes after common setup.
worker_setup_commands: []
# Command to start ray on the head node. You don't need to change this.
head_start_ray_commands:
    - ray stop
    - ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml

# Command to start ray on worker nodes. You don't need to change this.
worker_start_ray_commands:
    - ray stop
    - ray start --address=$RAY_HEAD_IP:6379 --object-manager-port=8076
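# A minimal usage sketch, assuming this file is saved locally as cluster.yaml
# (hypothetical name) and AWS credentials are configured where you run these:
#   ray up -y cluster.yaml                # launch or update the cluster
#   ray attach cluster.yaml               # SSH into the head node
#   ray exec cluster.yaml 'ray status'    # check autoscaler state on the head
#   ray down -y cluster.yaml              # tear the cluster down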