[RLlib] Don't add a cpu to bundle for learner when using gpu #35529

Merged
Don't add a cpu to bundle for learner when using gpu
Solves #35409

Prevent resource fragmentation by not placing GPUs together with CPUs
in the bundles for the learner workers. This ensures that an actor
that requires only a CPU cannot claim a bundle that also holds a GPU.

The long-term fix will be to allow specifying the placement group
bundle index via Tune and Ray Train.

Signed-off-by: avnishn <[email protected]>
avnishn committed May 18, 2023
commit 1a0c244043866b8fdb0bbc71bb9f428a07c7772f
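
For context, a minimal sketch of the idea behind the change (this is not code from the PR; the bundle counts and shapes below are hypothetical): when the learner bundles request only GPUs, no bundle mixes CPU and GPU, so a CPU-only actor can never be scheduled onto a bundle that also reserves a GPU.

# Illustrative sketch only; hypothetical bundle counts, not the PR's implementation.
import ray
from ray.util.placement_group import placement_group

ray.init()

# GPU-only bundles for the learner workers (no CPU attached), plus separate
# CPU-only bundles for rollout workers. Because no bundle mixes CPU and GPU,
# a CPU-only actor cannot end up occupying a bundle that also holds a GPU.
learner_bundles = [{"GPU": 1} for _ in range(2)]
rollout_bundles = [{"CPU": 1} for _ in range(3)]

pg = placement_group(learner_bundles + rollout_bundles, strategy="PACK")
ray.get(pg.ready())

# The long-term fix mentioned above would let Tune / Ray Train pin an actor to a
# specific bundle, e.g. with PlacementGroupSchedulingStrategy(
#     placement_group=pg, placement_group_bundle_index=0).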
31 changes: 31 additions & 0 deletions release/release_tests.yaml
@@ -3437,6 +3437,37 @@
    cluster_compute: multi_node_checkpointing_compute_config_gce.yaml


- name: rllib_multi_node_e2e_training_smoke_test
  group: RLlib tests
  working_dir: rllib_tests

  frequency: nightly
  team: rllib

  cluster:
    cluster_env: app_config.yaml
    cluster_compute: multi_node_checkpointing_compute_config.yaml

  run:
    timeout: 3600
    script: pytest smoke_tests/smoke_test_basic_multi_node_training_learner.py

    wait_for_nodes:
      num_nodes: 3

  alert: default

  variations:
    - __suffix__: aws
    - __suffix__: gce
      env: gce
      frequency: manual
      cluster:
        cluster_env: app_config.yaml
        cluster_compute: multi_node_checkpointing_compute_config_gce.yaml



- name: rllib_learning_tests_a2c_tf
  group: RLlib tests
  working_dir: rllib_tests
@@ -5,7 +5,7 @@ max_workers: 3

 head_node_type:
   name: head_node
-  instance_type: m5.2xlarge
+  instance_type: m5.xlarge

 worker_node_types:
   - name: worker_node
@@ -7,7 +7,7 @@ max_workers: 3

 head_node_type:
   name: head_node
-  instance_type: n2-standard-8 # m5.2xlarge
+  instance_type: n2-standard-4 # m5.xlarge

 worker_node_types:
   - name: worker_node
@@ -0,0 +1,115 @@
import ray
from ray import air, tune
from ray.rllib.algorithms.ppo import PPOConfig


def run_with_tuner_n_rollout_worker_2_gpu(config):
"""Run training with n rollout workers and 2 learner workers with gpu."""
config = config.rollouts(num_rollout_workers=5)
tuner = tune.Tuner(
"PPO",
param_space=config,
run_config=air.RunConfig(
stop={"timesteps_total": 128},
failure_config=air.FailureConfig(fail_fast=True),
),
)
tuner.fit()


def run_with_tuner_0_rollout_worker_2_gpu(config):
"""Run training with 0 rollout workers with 2 learner workers with gpu."""
config = config.rollouts(num_rollout_workers=0)
tuner = tune.Tuner(
"PPO",
param_space=config,
run_config=air.RunConfig(
stop={"timesteps_total": 128},
failure_config=air.FailureConfig(fail_fast=True),
),
)
tuner.fit()


def run_tuner_n_rollout_workers_0_gpu(config):
"""Run training with n rollout workers, multiple learner workers, and no gpu."""
config = config.rollouts(num_rollout_workers=5)
config = config.resources(
num_cpus_per_learner_worker=1,
num_learner_workers=4,
)

tuner = tune.Tuner(
"PPO",
param_space=config,
run_config=air.RunConfig(
stop={"timesteps_total": 128},
failure_config=air.FailureConfig(fail_fast=True),
),
)
tuner.fit()


def run_tuner_n_rollout_workers_1_gpu_local(config):
"""Run training with n rollout workers, local learner, and 1 gpu."""
config = config.rollouts(num_rollout_workers=5)
config = config.resources(
num_gpus_per_learner_worker=1,
num_learner_workers=0,
)

tuner = tune.Tuner(
"PPO",
param_space=config,
run_config=air.RunConfig(
stop={"timesteps_total": 128},
failure_config=air.FailureConfig(fail_fast=True),
),
)
tuner.fit()


def test_multi_node_training_smoke():
"""A smoke test to see if we can run multi node training without pg problems.

This test is run on a 3 node cluster. The head node is a m5.xlarge (4 cpu),
the worker nodes are 2 g4dn.xlarge (1 gpu, 4 cpu) machines.

"""

ray.init()

config = (
PPOConfig()
.training(
_enable_learner_api=True,
model={
"fcnet_hiddens": [256, 256, 256],
"fcnet_activation": "relu",
"vf_share_layers": True,
},
train_batch_size=128,
)
.rl_module(_enable_rl_module_api=True)
.environment("CartPole-v1")
.resources(
num_gpus_per_learner_worker=1,
num_learner_workers=4,
)
.rollouts(num_rollout_workers=2)
.reporting(min_time_s_per_iteration=0, min_sample_timesteps_per_iteration=10)
)
    for fw in ["tf2", "torch"]:
        config = config.framework(fw, eager_tracing=True)

        run_with_tuner_0_rollout_worker_2_gpu(config)
        run_with_tuner_n_rollout_worker_2_gpu(config)
        run_tuner_n_rollout_workers_0_gpu(config)
        run_tuner_n_rollout_workers_1_gpu_local(config)


if __name__ == "__main__":
    import sys
    import pytest

    sys.exit(pytest.main(["-v", __file__]))
77 changes: 0 additions & 77 deletions repro.py

This file was deleted.
