I’m not sure exactly why, but this setup seems to work with the tf framework and fails with torch. Here is a reproduction script:
import ray
import gym
from ray import tune
import numpy as np
from gym.spaces import Discrete, Tuple, Box


class RandomEnv(gym.Env):
    """A randomly acting environment.

    Can be instantiated with arbitrary action-, observation-, and reward
    spaces. Observations and rewards are generated by simply sampling from the
    observation/reward spaces. The probability of a `done=True` can be
    configured as well.
    """

    def __init__(self, config=None):
        config = config or {}
        # Action space.
        self.action_space = Tuple((Discrete(2), Discrete(4)))
        # Observation space from which to sample.
        self.observation_space = Tuple(
            (Box(0, 1, (5, 5)), Discrete(2), Discrete(4)))
        # Reward space from which to sample.
        self.reward_space = config.get(
            "reward_space",
            gym.spaces.Box(low=-1.0, high=1.0, shape=(), dtype=np.float32))
        # Chance that an episode ends at any step.
        self.p_done = config.get("p_done", 0.1)
        # A max episode length.
        self.max_episode_len = config.get("max_episode_len", None)
        # Whether to check action bounds.
        self.check_action_bounds = config.get("check_action_bounds", False)
        # Steps taken so far (after last reset).
        self.steps = 0

    def reset(self):
        self.steps = 0
        return self.observation_space.sample()

    def step(self, action):
        if self.check_action_bounds and not self.action_space.contains(action):
            raise ValueError("Illegal action for {}: {}".format(
                self.action_space, action))
        if (isinstance(self.action_space, Tuple)
                and len(action) != len(self.action_space.spaces)):
            raise ValueError("Illegal action for {}: {}".format(
                self.action_space, action))
        self.steps += 1
        done = False
        # We are done as per our max-episode-len.
        if self.max_episode_len is not None and \
                self.steps >= self.max_episode_len:
            done = True
        # Max not reached yet -> Sample done via p_done.
        elif self.p_done > 0.0:
            done = bool(
                np.random.choice(
                    [True, False], p=[self.p_done, 1.0 - self.p_done]))
        return self.observation_space.sample(), \
            float(self.reward_space.sample()), done, {}


if __name__ == '__main__':
    run_config = {
        "env": RandomEnv,
        "model": {
            "max_seq_len": 20,
            "use_lstm": True,
        },
        "framework": "torch",
        "num_gpus": 1,
        "num_workers": 0,
    }
    ray.init()
    results = tune.run("PPO", config=run_config, verbose=1)
    ray.shutdown()
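(Not part of the repro, just a sanity check: the fully flattened, one-hot encoded size of the observation space above is 31, which is the value the error below expects. A minimal check with gym's flatdim helper:)

import gym
from gym.spaces import Discrete, Tuple, Box

obs_space = Tuple((Box(0, 1, (5, 5)), Discrete(2), Discrete(4)))
# 5*5 = 25 for the Box, plus one-hot sizes 2 and 4 for the Discrete spaces -> 31.
print(gym.spaces.flatdim(obs_space))  # expected: 31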
The error looks like this:
(pid=113923) 2021-10-11 21:47:54,766 WARNING deprecation.py:38 -- DeprecationWarning: `SampleBatch['is_training']` has been deprecated. Use `SampleBatch.is_training` instead. This will raise an error in the future!
(pid=113923) 2021-10-11 21:47:54,768 ERROR worker.py:425 -- Exception raised in creation task: The actor died because of an error raised in its creation task, ray::PPO.__init__() (pid=113923, ip=10.161.24.55)
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in __init__
(pid=113923) Trainer.__init__(self, config, env, logger_creator)
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 622, in __init__
(pid=113923) super().__init__(config, logger_creator)
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/tune/trainable.py", line 106, in __init__
(pid=113923) self.setup(copy.deepcopy(self.config))
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 147, in setup
(pid=113923) super().setup(config)
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 775, in setup
(pid=113923) self._init(self.config, self.env_creator)
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 171, in _init
(pid=113923) self.workers = self._make_workers(
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 857, in _make_workers
(pid=113923) return WorkerSet(
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/evaluation/worker_set.py", line 110, in __init__
(pid=113923) self._local_worker = self._make_worker(
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/evaluation/worker_set.py", line 406, in _make_worker
(pid=113923) worker = cls(
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 584, in __init__
(pid=113923) self._build_policy_map(
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 1376, in _build_policy_map
(pid=113923) self.policy_map.create_policy(name, orig_cls, obs_space, act_space,
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/policy/policy_map.py", line 143, in create_policy
(pid=113923) self[policy_id] = class_(observation_space, action_space,
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/policy/policy_template.py", line 280, in __init__
(pid=113923) self._initialize_loss_from_dummy_batch(
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/policy/policy.py", line 731, in _initialize_loss_from_dummy_batch
(pid=113923) self.compute_actions_from_input_dict(
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/policy/torch_policy.py", line 304, in compute_actions_from_input_dict
(pid=113923) return self._compute_action_helper(input_dict, state_batches,
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/utils/threading.py", line 21, in wrapper
(pid=113923) return func(self, *a, **k)
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/policy/torch_policy.py", line 368, in _compute_action_helper
(pid=113923) dist_inputs, state_out = self.model(input_dict, state_batches,
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 231, in __call__
(pid=113923) restored["obs"] = restore_original_dimensions(
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 389, in restore_original_dimensions
(pid=113923) return _unpack_obs(obs, original_space, tensorlib=tensorlib)
(pid=113923) File "/home/xl3942/anaconda3/envs/CommAgent/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 423, in _unpack_obs
(pid=113923) raise ValueError(
(pid=113923) ValueError: Expected flattened obs shape of [..., 31], got torch.Size([32, 27])
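The expected 31 is just the fully one-hot flattened observation size (25 + 2 + 4), while the 27 the torch path produces is 4 short, so the Discrete components seem to be encoded differently somewhere on the torch/LSTM path; I'm not sure exactly where. For what it's worth, RLlib's own preprocessor also reports 31 for this space (a quick check, assuming get_preprocessor from ray.rllib.models.preprocessors can be used as shown in the RLlib docs):

from gym.spaces import Discrete, Tuple, Box
from ray.rllib.models.preprocessors import get_preprocessor

obs_space = Tuple((Box(0, 1, (5, 5)), Discrete(2), Discrete(4)))
# The preprocessor one-hot encodes the Discrete components: 25 + 2 + 4 = 31.
prep = get_preprocessor(obs_space)(obs_space)
print(prep.shape)  # expected: (31,)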