1. Severity of the issue: (select one)
None: I’m just curious or want clarification.
Low: Annoying but doesn’t hinder my work.
Medium: Significantly affects my productivity but can find a workaround.
High: Completely blocks me.
2. Environment:
- Ray version: 2.44.1
- Python version: 3.12.7
- OS: Windows
Please help me — when I run the following code I get the error "TypeError: 'module' object is not callable".
import os
import sys

# SUMO's bundled python tools must be on sys.path before `import traci`
# and `import sumo_rl` below can resolve.
if "SUMO_HOME" in os.environ:
    tools = os.path.join(os.environ["SUMO_HOME"], "tools")
    sys.path.append(tools)
else:
    sys.exit("Please declare the environment variable 'SUMO_HOME'")

import numpy as np
import pandas as pd
import ray
import traci
from ray import tune
from ray.rllib.algorithms.ppo import PPOConfig
from ray.rllib.env.wrappers.pettingzoo_env import ParallelPettingZooEnv
from ray.tune.registry import register_env
import sumo_rl

if __name__ == "__main__":
    # Tested with:
    #   ray[rllib]==2.7.0
    #   numpy==1.23.4
    #   Pillow>=9.4.0
    #   SuperSuit>=3.9.0
    #   torch>=1.13.1
    #   tensorflow-probability>=0.19.0
    #
    # NOTE(review): the reported `TypeError: 'module' object is not callable`
    # is raised inside sumo_rl (env.py: `agent_selector(self.agents)`), not in
    # this script. PettingZoo >= 1.25 replaced the lowercase `agent_selector`
    # callable with the `AgentSelector` class, which older sumo-rl releases do
    # not know about. Pin `pettingzoo<1.25` or upgrade sumo-rl to fix it.
    #
    # Moved inside the main guard so importing this file has no side effects
    # (it previously ran unconditionally at module level).
    ray.init()

    env_name = "4x4grid"

    # Register a factory that builds the multi-agent SUMO traffic-signal env
    # wrapped for RLlib. The lambda's argument (env config) is unused.
    register_env(
        env_name,
        lambda _: ParallelPettingZooEnv(
            sumo_rl.parallel_env(
                net_file="./4x4-Lucas/4x4.net.xml",
                route_file="./4x4-Lucas/4x4c1c2c1c2.rou.xml",
                out_csv_name="outputs/4x4grid/ppo",
                use_gui=False,
                num_seconds=80000,
            )
        ),
    )

    config = PPOConfig().environment(env=env_name, disable_env_checking=True)
    config.env_runners(num_env_runners=4, rollout_fragment_length=128)
    config.training(
        train_batch_size=512,
        lr=2e-5,
        gamma=0.95,
        lambda_=0.9,
        use_gae=True,
        clip_param=0.4,
        grad_clip=None,
        entropy_coeff=0.1,
        vf_loss_coeff=0.25,
        # sgd_minibatch_size=64,  # superseded by minibatch_size on the new API stack
        num_epochs=10,
    )
    config.debugging(log_level="ERROR")
    config.framework(framework="torch")
    config.resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0")))

    tune.run(
        "PPO",
        name="PPO",
        stop={"timesteps_total": 100000},
        checkpoint_freq=10,
        storage_path="~/ray_results/" + env_name,
        config=config.to_dict(),
    )
(PPO pid=22888) 2025-04-23 13:19:41,943 WARNING algorithm_config.py:4704 -- You are running PPO on the new API stack! This is the new default behavior for this algorithm. If you don't want to use the new API stack, set `config.api_stack(enable_rl_module_and_learner=False,enable_env_runner_and_connector_v2=False)`. For a detailed migration guide, see here: https://docs.ray.io/en/master/rllib/new-api-stack-migration-guide.html
(SingleAgentEnvRunner pid=11388) Step #0.00 (0ms ?*RT. ?UPS, TraCI: 17ms, vehicles TOT 0 ACT 0 BUF 0)
(SingleAgentEnvRunner pid=11388) Exception raised in creation task: The actor died because of an error raised in its creation task, ray::SingleAgentEnvRunner.__init__() (pid=11388, ip=127.0.0.1, actor_id=a44169f772470169f85ced2001000000, repr=<ray.rllib.env.single_agent_env_runner.SingleAgentEnvRunner object at 0x000002620CC666F0>)
(SingleAgentEnvRunner pid=11388) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(SingleAgentEnvRunner pid=11388) File "C:\Users\thaigiang\AppData\Local\Temp\ipykernel_12400\3020126705.py", line 17, in <lambda>
(SingleAgentEnvRunner pid=11388) File "C:\Users\thaigiang\anaconda3\Lib\site-packages\pettingzoo\utils\conversions.py", line 14, in par_fn
(SingleAgentEnvRunner pid=11388) env = env_fn(**kwargs)
(SingleAgentEnvRunner pid=11388) ^^^^^^^^^^^^^^^^
(SingleAgentEnvRunner pid=11388) File "C:\Users\thaigiang\anaconda3\Lib\site-packages\sumo_rl\environment\env.py", line 32, in env
(SingleAgentEnvRunner pid=11388) env = SumoEnvironmentPZ(**kwargs)
(SingleAgentEnvRunner pid=11388) ^^^^^^^^^^^^^^^^^^^^^^^^^^^
(SingleAgentEnvRunner pid=11388) File "C:\Users\thaigiang\anaconda3\Lib\site-packages\sumo_rl\environment\env.py", line 523, in __init__
(SingleAgentEnvRunner pid=11388) self._agent_selector = agent_selector(self.agents)
(SingleAgentEnvRunner pid=11388) ^^^^^^^^^^^^^^^^^^^^^^^^^^^
(SingleAgentEnvRunner pid=11388) TypeError: 'module' object is not callable