I am attempting to create a custom environment to use with RLlib. I define my observation space and action space as follows:
self.action_space = Box(np.array([0, -1]), np.array([1, 1]))
observation_space_dict = {
    'velocity_mag': Box(low=-1, high=30, shape=(1,), dtype=np.float64),   # assumes max velocity is 30 m/s
    'd2target': Box(low=0, high=100000, shape=(1,), dtype=np.float64),    # assumes max d2target is 100000 m
    'pitch': Box(low=-360, high=360, shape=(1,), dtype=np.float64),
    'yaw': Box(low=-360, high=360, shape=(1,), dtype=np.float64),
    'roll': Box(low=-360, high=360, shape=(1,), dtype=np.float64),
    'command': MultiBinary(n=6)
}
self.observation_space = Dict(observation_space_dict)
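For reference, here is my understanding of what an observation matching this Dict space should look like (my own sketch, nothing RLlib-specific): a dict keyed by the same names, where each Box entry is a shape-(1,) NumPy array and command is a length-6 binary vector. Calling sample() on the space produces exactly that structure:

import numpy as np
from gym.spaces import Box, MultiBinary, Dict

space = Dict({
    'velocity_mag': Box(low=-1, high=30, shape=(1,), dtype=np.float64),
    'd2target': Box(low=0, high=100000, shape=(1,), dtype=np.float64),
    'pitch': Box(low=-360, high=360, shape=(1,), dtype=np.float64),
    'yaw': Box(low=-360, high=360, shape=(1,), dtype=np.float64),
    'roll': Box(low=-360, high=360, shape=(1,), dtype=np.float64),
    'command': MultiBinary(n=6),
})

# sample() returns a dict with one array per key, e.g.
# OrderedDict([('command', array([1, 0, 1, 0, 0, 1])),
#              ('d2target', array([52311.7])), ...])
print(space.sample())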
I have included my entire environment class below for reference. When training (using RLlib's default FCNet model, not a custom model), I get the following error:
Traceback (most recent call last):
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 697, in _process_trial
results = self.trial_executor.fetch_result(trial)
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 678, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/_private/client_mode_hook.py", line 47, in wrapper
return func(*args, **kwargs)
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/worker.py", line 1428, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::Custom_PPO_trainer.train_buffered() (pid=27114, ip=128.83.141.65)
File "python/ray/_raylet.pyx", line 497, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 444, in ray._raylet.execute_task.function_executor
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/tune/trainable.py", line 167, in train_buffered
result = self.train()
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 535, in train
raise e
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 524, in train
result = Trainable.train(self)
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/tune/trainable.py", line 226, in train
result = self.step()
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 148, in step
res = next(self.train_exec_impl)
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 756, in __next__
return next(self.built_iterator)
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 843, in apply_filter
for item in it:
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 843, in apply_filter
for item in it:
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
[Previous line repeated 1 more time]
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 876, in apply_flatten
for item in it:
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 828, in add_wait_hooks
item = next(it)
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
[Previous line repeated 1 more time]
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 471, in base_iterator
yield ray.get(futures, timeout=timeout)
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/_private/client_mode_hook.py", line 47, in wrapper
return func(*args, **kwargs)
ray.exceptions.RayTaskError(ValueError): ray::RolloutWorker.par_iter_next() (pid=27100, ip=128.83.141.65)
File "python/ray/_raylet.pyx", line 497, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 444, in ray._raylet.execute_task.function_executor
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/util/iter.py", line 1152, in par_iter_next
return next(self.local_it)
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 327, in gen_rollouts
yield self.sample()
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 678, in sample
batches = [self.input_reader.next()]
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/rllib/evaluation/sampler.py", line 98, in next
batches = [self.get_data()]
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/rllib/evaluation/sampler.py", line 232, in get_data
item = next(self.rollout_provider)
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/rllib/evaluation/sampler.py", line 618, in _env_runner
_process_observations_w_trajectory_view_api(
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/rllib/evaluation/sampler.py", line 1086, in _process_observations_w_trajectory_view_api
prep_obs: EnvObsType = _get_or_raise(preprocessors,
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 257, in transform
self.check_shape(observation)
File "/home/stephane/miniconda3/envs/carla-challenge/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 66, in check_shape
raise ValueError(
ValueError: ('Observation ({}) outside given space ({})!', [0.0, 653.6267399294039, 0.0, 90.00004577636719, 0.0, 0, 0, 1, 0, 0, 0], Dict(command:MultiBinary(6), d2target:Box(0.0, 100000.0, (1,), float64), pitch:Box(-360.0, 360.0, (1,), float64), roll:Box(-360.0, 360.0, (1,), float64), velocity_mag:Box(-1.0, 30.0, (1,), float64), yaw:Box(-360.0, 360.0, (1,), float64)))
Result for Custom_PPO_trainer_CarlaEnv_f9785_00000:
{}
I am not sure what I am doing wrong. Am I defining the observation space incorrectly? All of the values in the observation are within the ranges I designated.
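To make the ranges claim concrete, here is a minimal standalone check against the exact observation printed in the error, assuming that flat list is what reaches RLlib's preprocessor (the helper box1 is just shorthand I added for this snippet):

import numpy as np
from gym.spaces import Box, MultiBinary, Dict

def box1(lo, hi):
    # shorthand for the shape-(1,) float Boxes used in the space above
    return Box(low=lo, high=hi, shape=(1,), dtype=np.float64)

space = Dict({
    'velocity_mag': box1(-1, 30),
    'd2target': box1(0, 100000),
    'pitch': box1(-360, 360),
    'yaw': box1(-360, 360),
    'roll': box1(-360, 360),
    'command': MultiBinary(n=6),
})

# The observation exactly as printed in the ValueError above: a flat list.
raw_obs = [0.0, 653.6267399294039, 0.0, 90.00004577636719, 0.0, 0, 0, 1, 0, 0, 0]

# Each individual number is inside the bounds I defined, but contains()
# still reports False, since a flat list is not a dict keyed like the space.
print(space.contains(raw_obs))  # False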
Environment class:
from __future__ import print_function
import argparse
import gym
from gym.spaces import Discrete, Box, MultiBinary, Dict
import numpy as np
from .carla_core import CarlaCore
import carla
class CarlaEnv(gym.Env):
    """
    This is a CARLA environment, responsible for handling all the CARLA-related steps of the training.
    """
    # TODO: MUST HANDLE ADDING ARGS "config": {"args": args,},

    def __init__(self, config):
        """Initializes the environment"""
        self.config = config
        self.action_space = Box(np.array([0, -1]), np.array([1, 1]))
        observation_space_dict = {
            # 'img': Box(low=0, high=255, shape=(1, 80, 80, 3), dtype=np.float64),  # TODO: 1 is number of samples when it should be batch size!
            'velocity_mag': Box(low=-1, high=30, shape=(1,), dtype=np.float64),   # assumes max velocity is 30 m/s
            'd2target': Box(low=0, high=100000, shape=(1,), dtype=np.float64),    # assumes max d2target is 100000 m
            'pitch': Box(low=-360, high=360, shape=(1,), dtype=np.float64),
            'yaw': Box(low=-360, high=360, shape=(1,), dtype=np.float64),
            'roll': Box(low=-360, high=360, shape=(1,), dtype=np.float64),
            'command': MultiBinary(n=6)
        }
        # observation_space_dict = {'img': Box(low=0, high=255, shape=(1, 80, 80, 3), dtype=np.uint8)}
        self.observation_space = Dict(observation_space_dict)

        args = self.config["args"]
        args.client = self.launch_client(args)
        self.core = CarlaCore(args, save_video=False, i=1)
        self.reset()

    def launch_client(self, args):
        client = carla.Client(args.host, args.world_port)
        client.set_timeout(args.client_timeout)
        return client

    def reset(self):
        s, _, _, _ = self.core.reset(False, 0)
        return s[1:]

    def step(self, action):
        """Computes one tick of the environment and returns the new observation,
        as well as the reward."""
        s_prime, reward, done, info = self.core.step(action=action, timeout=2)
        return s_prime[1:], reward, done, info
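For context, training is launched roughly as in the sketch below. This is a simplified stand-in, not my exact script: the real run uses a custom PPO trainer (the Custom_PPO_trainer in the traceback), the module name carla_env is a placeholder, and the args namespace here just fakes the fields that launch_client() reads.

import argparse
import ray
from ray import tune
from ray.tune.registry import register_env

from carla_env import CarlaEnv  # placeholder module name for the class above

# Placeholder CLI args; the real values come from argparse in my script.
args = argparse.Namespace(host="localhost", world_port=2000, client_timeout=10.0)

ray.init()
register_env("CarlaEnv", lambda env_config: CarlaEnv(env_config))

# Stock "PPO" shown instead of the custom trainer; the model is left at the
# default fully connected network (no custom model).
tune.run(
    "PPO",
    config={
        "env": "CarlaEnv",
        "env_config": {"args": args},
        "num_workers": 1,
    },
)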