"""Simple example of setting up a multi-agent policy mapping.
Control the number of agents and policies via --num-agents and --num-policies.
This works with hundreds of agents and policies, but note that initializing
many TF policies will take some time.
Also, TF evals might slow down with large numbers of policies. To debug TF
execution, set the TF_TIMELINE_DIR environment variable.
"""
import argparse
import os
import random
import ray
from ray import air, tune
from ray.rllib.algorithms.ppo import PPOConfig
from ray.rllib.examples.env.multi_agent import MultiAgentCartPole
from ray.rllib.examples.models.shared_weights_model import (
    SharedWeightsModel1,
    SharedWeightsModel2,
    TF2SharedWeightsModel,
    TorchSharedWeightsModel,
)
from ray.rllib.models import ModelCatalog
from ray.rllib.policy.policy import PolicySpec
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.test_utils import check_learning_achieved
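
# try_import_tf() returns the tf1 (compat) module, the tf module itself, and
# the detected TF version (1 or 2); it degrades gracefully if TF is missing.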
tf1, tf, tfv = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--num-agents", type=int, default=4)
parser.add_argument("--num-policies", type=int, default=2)
parser.add_argument("--num-cpus", type=int, default=0)
parser.add_argument(
    "--framework",
    choices=["tf", "tf2", "torch"],
    default="tf",
    help="The DL framework specifier.",
)
parser.add_argument(
    "--as-test",
    action="store_true",
    help="Whether this script should be run as a test: --stop-reward must "
    "be achieved within --stop-timesteps AND --stop-iters.",
)
parser.add_argument(
    "--stop-iters", type=int, default=200, help="Number of iterations to train."
)
parser.add_argument(
    "--stop-timesteps", type=int, default=100000, help="Number of timesteps to train."
)
parser.add_argument(
    "--stop-reward", type=float, default=150.0, help="Reward at which we stop training."
)
if __name__ == "__main__":
    args = parser.parse_args()

    ray.init(num_cpus=args.num_cpus or None)

    # Register the models to use.
    if args.framework == "torch":
        mod1 = mod2 = TorchSharedWeightsModel
    elif args.framework == "tf2":
        mod1 = mod2 = TF2SharedWeightsModel
    else:
        mod1 = SharedWeightsModel1
        mod2 = SharedWeightsModel2
    ModelCatalog.register_custom_model("model1", mod1)
    ModelCatalog.register_custom_model("model2", mod2)

    # Each policy can have a different configuration (including custom model).
    def gen_policy(i):
        # NOTE: any non-empty value of RLLIB_ENABLE_RL_MODULE (even "0")
        # makes this check truthy and enables this branch.
        if bool(os.environ.get("RLLIB_ENABLE_RL_MODULE", False)):
            # Just change the gammas between the two policies.
            # Changing the module is not a critical part of this example;
            # the important part is that the policies are different.
            config = {
                "gamma": random.choice([0.95, 0.99]),
            }
        else:
            config = PPOConfig.overrides(
                model={
                    "custom_model": ["model1", "model2"][i % 2],
                },
                gamma=random.choice([0.95, 0.99]),
            )
        return PolicySpec(config=config)

    # Set up PPO with an ensemble of `num_policies` different policies.
    policies = {"policy_{}".format(i): gen_policy(i) for i in range(args.num_policies)}
    policy_ids = list(policies.keys())
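
    # RLlib invokes `policy_mapping_fn` for each agent as it shows up in an
    # episode to bind the AgentID to a PolicyID; here, the choice is random.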
    def policy_mapping_fn(agent_id, episode, worker, **kwargs):
        pol_id = random.choice(policy_ids)
        return pol_id

    config = (
        PPOConfig()
        .environment(MultiAgentCartPole, env_config={"num_agents": args.num_agents})
        .framework(args.framework)
        .training(num_sgd_iter=1000)
        .multi_agent(policies=policies, policy_mapping_fn=policy_mapping_fn)
        # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0. Hard-coding a GPU
        # request (e.g. num_gpus=2) fails on machines without GPUs with
        # "No available node types can fulfill resource request".
        .resources(
            num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0")),
            num_cpus_per_worker=1,
        )
    )
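
    # Stopping criteria: Tune stops the trial as soon as ANY of the thresholds
    # below is reached.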
    stop = {
        "episode_reward_mean": args.stop_reward,
        "timesteps_total": args.stop_timesteps,
        "training_iteration": args.stop_iters,
    }

    results = tune.Tuner(
        "PPO",
        param_space=config.to_dict(),
        run_config=air.RunConfig(stop=stop, verbose=1),
    ).fit()

    if args.as_test:
        check_learning_achieved(results, args.stop_reward)

    ray.shutdown()
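
# An example invocation (script name illustrative; the flags are defined above):
#   RLLIB_NUM_GPUS=0 python multi_agent_cartpole.py --framework=torch \
#       --num-agents=4 --num-policies=2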