# A unique identifier for the head node and workers of this cluster.
cluster_name: <USER>_<IDENTIFIER>
## NOTE: Typically for local clusters, min_workers == initial_workers == max_workers.
# The minimum number of worker nodes to launch in addition to the head
# node. This number should be >= 0.
# Typically, min_workers == initial_workers == max_workers.
min_workers: 1
# The initial number of worker nodes to launch in addition to the head node.
# Typically, min_workers == initial_workers == max_workers.
initial_workers: 1
# The maximum number of worker nodes to launch in addition to the head node.
# This takes precedence over min_workers.
# Typically, min_workers == initial_workers == max_workers.
max_workers: 1
# Autoscaling parameters.
# Ignore this if min_workers == initial_workers == max_workers.
autoscaling_mode: default
target_utilization_fraction: 0.8
idle_timeout_minutes: 5
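# As an illustration only (the values below are hypothetical, not part of this
# template), an autoscaled variant would set the three worker counts apart:
#   min_workers: 1
#   initial_workers: 2
#   max_workers: 4
# Ray then adds workers when utilization rises above
# target_utilization_fraction and removes workers that have been idle
# for idle_timeout_minutes.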
# This executes all commands on all nodes in the docker container,
# and opens all the necessary ports to support the Ray cluster.
# Empty string means disabled. Assumes Docker is installed.
docker:
image: "" # e.g., tensorflow/tensorflow:1.5.0-py3
container_name: "" # e.g. ray_docker
# If true, pulls latest version of image. Otherwise, `docker run` will only pull the image
# if no cached version is present.
pull_before_run: True
run_options: [] # Extra options to pass into "docker run"
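# A filled-in sketch of an enabled Docker section (the image and container
# name below are examples, not requirements of this template):
# docker:
#     image: "tensorflow/tensorflow:2.4.1-gpu"
#     container_name: "ray_docker"
#     pull_before_run: True
#     run_options: []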
# Local specific configuration.
provider:
    type: local
    head_ip: <HEAD_IP>
    worker_ips:
        - <WORKER_IP_1>
        - <WORKER_IP_2>
        - <WORKER_IP_3>
# How Ray will authenticate with newly launched nodes.
auth:
    ssh_user: <USER>
    ssh_private_key: ~/.ssh/id_rsa
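# A filled-in sketch of the provider and auth sections (the addresses and
# user name below are hypothetical):
# provider:
#     type: local
#     head_ip: 192.168.0.10
#     worker_ips:
#         - 192.168.0.11
#         - 192.168.0.12
# auth:
#     ssh_user: rayuser
#     ssh_private_key: ~/.ssh/id_rsa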
# Leave this empty.
head_node: {}
# Leave this empty.
worker_nodes: {}
# Files or directories to copy to the head and worker nodes. The format is a
# dictionary from REMOTE_PATH: LOCAL_PATH, e.g.
file_mounts: {
#    "/path1/on/remote/machine": "/path1/on/local/machine",
#    "/path2/on/remote/machine": "/path2/on/local/machine",
}
# List of commands that will be run before `setup_commands`. If docker is
# enabled, these commands will run outside the container and before docker
# is set up.
initialization_commands: []
# List of shell commands to run to set up each node.
setup_commands: []
# Custom commands that will be run on the head node after common setup.
head_setup_commands: []
# Custom commands that will be run on worker nodes after common setup.
worker_setup_commands: []
# NOTE: The following commands have been modified to activate a conda
# environment (tf2-gpu in the original setup) and to use specific ports that
# were opened for this purpose by Andrew Sedler ([email protected])
# Commands to start ray on the head node. You don't need to change this.
head_start_ray_commands:
- conda activate <CONDA-ENV> && ray stop
- conda activate <CONDA-ENV> && ulimit -c unlimited && ray start --head --redis-port=10000 --redis-shard-ports=10001 --node-manager-port=10002 --object-manager-port=10003 --gcs-server-port=10004 --min-worker-port=10005 --max-worker-port=10099 --autoscaling-config=~/ray_bootstrap_config.yaml
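# The fixed ports above (10000-10004, plus the 10005-10099 worker range) must
# be reachable between the head and worker nodes. A minimal sketch, assuming
# a ufw-based firewall (adapt to whatever your hosts actually run):
#   sudo ufw allow 10000:10099/tcp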
# Commands to start ray on worker nodes. You don't need to change this.
worker_start_ray_commands:
- conda activate <CONDA-ENV> && ray stop
- conda activate <CONDA-ENV> && ray start --address=$RAY_HEAD_IP:10000 --node-manager-port=10002 --object-manager-port=10003 --min-worker-port=10005 --max-worker-port=10099
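# With the placeholders above filled in, the standard Ray cluster launcher
# drives this file (run from any machine with SSH access to the listed IPs):
#   ray up ray_cluster_template.yaml     # start the head node and workers
#   ray attach ray_cluster_template.yaml # open a shell on the head node
#   ray down ray_cluster_template.yaml   # tear the cluster down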