import gym, ray
from ray.rllib.algorithms import ppo
from ray.tune.registry import register_env
from ray import tune
from ray.tune.logger import pretty_print
ray.shutdown()
ray.init(ignore_reinit_error=True)
# ray.rllib.utils.check_env(BertrandCompetitionContinuousEnv())
algo = ppo.PPO(env=BertrandCompetitionContinuousEnv, config={"num_workers": 0})
while True:
    print(algo.train())
runfile('C:/Users/price_simulator_main/untitled0.py', wdir='C:/Users/price_simulator_main')
2023-05-19 15:09:41,852 INFO worker.py:1544 -- Started a local Ray instance. View the dashboard at 127.0.0.1:8265
2023-05-19 15:09:46,331 WARNING deprecation.py:50 -- DeprecationWarning: `algo = Algorithm(env='<class '__main__.BertrandCompetitionContinuousEnv'>', ...)` has been deprecated. Use `algo = AlgorithmConfig().environment('<class '__main__.BertrandCompetitionContinuousEnv'>').build()` instead. This will raise an error in the future!
<class 'ray.rllib.env.env_context.EnvContext'>
Traceback (most recent call last):
File "C:\Users\.conda\envs\tensorflow\lib\site-packages\spyder_kernels\py3compat.py", line 356, in compat_exec
exec(code, globals, locals)
File "c:\users\price_simulator_main\untitled0.py", line 259, in <module>
algo = ppo.PPO(env=BertrandCompetitionContinuousEnv, config={"num_workers": 0})
File "C:\Users\.conda\envs\tensorflow\lib\site-packages\ray\rllib\algorithms\algorithm.py", line 445, in __init__
super().__init__(
File "C:\Users\.conda\envs\tensorflow\lib\site-packages\ray\tune\trainable\trainable.py", line 169, in __init__
self.setup(copy.deepcopy(self.config))
File "C:\Users\.conda\envs\tensorflow\lib\site-packages\ray\rllib\algorithms\algorithm.py", line 571, in setup
self.workers = WorkerSet(
File "C:\Users\.conda\envs\tensorflow\lib\site-packages\ray\rllib\evaluation\worker_set.py", line 170, in __init__
self._setup(
File "C:\Users\.conda\envs\tensorflow\lib\site-packages\ray\rllib\evaluation\worker_set.py", line 260, in _setup
self._local_worker = self._make_worker(
File "C:\Users\.conda\envs\tensorflow\lib\site-packages\ray\rllib\evaluation\worker_set.py", line 946, in _make_worker
worker = cls(
File "C:\Users\.conda\envs\tensorflow\lib\site-packages\ray\rllib\evaluation\rollout_worker.py", line 607, in __init__
self.env = env_creator(copy.deepcopy(self.env_context))
File "C:\Users\.conda\envs\tensorflow\lib\site-packages\ray\rllib\env\utils.py", line 133, in _gym_env_creator
env = env_descriptor(env_context)
File "c:\users\price_simulator_main\untitled0.py", line 55, in __init__
a = np.array([c_i + a_minus_c_i] * self.num_agents)
TypeError: can't multiply sequence by non-int of type 'EnvContext'
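The TypeError is consistent with the `<class 'ray.rllib.env.env_context.EnvContext'>` line printed just before the traceback: RLlib instantiates the environment as `env_creator(env_context)`, passing a single dict-like `EnvContext` argument. If the environment's `__init__` declares something like `num_agents` as its first positional parameter, that parameter receives the whole `EnvContext`, so `[...] * self.num_agents` multiplies a list by a non-int. Below is a minimal sketch of a constructor that reads its parameters from the context instead; the key names and defaults are assumptions based on the traceback, not the original code:

import gym
import numpy as np
from ray.rllib.env.env_context import EnvContext

class BertrandCompetitionContinuousEnv(gym.Env):
    # RLlib calls env_creator(env_context), i.e. the constructor receives a
    # single dict-like EnvContext rather than positional parameters.
    def __init__(self, config: EnvContext):
        # Key names and defaults here are assumptions, not the original code.
        self.num_agents = int(config.get("num_agents", 2))
        c_i = config.get("c_i", 1.0)
        a_minus_c_i = config.get("a_minus_c_i", 1.0)
        # num_agents is now an int, so list repetition behaves as intended.
        a = np.array([c_i + a_minus_c_i] * self.num_agents)

The parameter values can then be supplied through `env_config` when building the algorithm.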
Could you create a minimal repro script and raise this as an issue on GitHub? Please include your environment, since the error seems to come from there.
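For reference, the deprecation warning in the log points at the builder-style API (`AlgorithmConfig().environment(...).build()`). A minimal sketch of that pattern with PPO; the `env_config` contents are placeholder assumptions:

from ray.rllib.algorithms.ppo import PPOConfig

algo = (
    PPOConfig()
    .environment(BertrandCompetitionContinuousEnv, env_config={"num_agents": 2})
    .rollouts(num_rollout_workers=0)  # replaces config={"num_workers": 0}
    .build()
)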
Hi Rohan,
Huge thanks for your reply. I just created an issue on GitHub and put my entire code in a Google Colab that everyone can edit: https://github.com/dengshidiet123/Pricing-Collusion/issues/1
Looking forward to hearing from you, and thanks again.
Best regards,
Shidi Deng