Hi all,
I’m trying to set up an action masking environment by following the examples on GitHub.
from gym.spaces import Dict
from gym import spaces
from ray.rllib.models.tf.fcnet import FullyConnectedNetwork
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFC
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.torch_utils import FLOAT_MIN
tf1, tf, tfv = try_import_tf()
import tensorflow as tf_real
def find_shape(obs):
    """Return the flattened length of a Tuple space's contents."""
    n = 0
    for i in obs:
        if isinstance(i, spaces.Box):
            n += i.shape[0]
        elif isinstance(i, spaces.Discrete):
            n += i.n
        elif isinstance(i, spaces.MultiDiscrete):
            n += i.nvec.sum()
        else:
            raise TypeError(f"Unsupported sub-space type: {type(i)}")
    return (n,)
class ActionMaskModel(TFModelV2):
    """Model that handles simple discrete action masking.

    This assumes the outputs are logits for a single Categorical action dist.
    Getting this to work with a more complex output (e.g., if the action space
    is a tuple of several distributions) is also possible but left as an
    exercise to the reader.
    """

    def __init__(
        self, obs_space, action_space, num_outputs, model_config, name, **kwargs
    ):
        orig_space = getattr(obs_space, "original_space", obs_space)
        assert (
            isinstance(orig_space, Dict)
            and "action_mask" in orig_space.spaces
            and "observations" in orig_space.spaces
        )
        super().__init__(obs_space, action_space, num_outputs, model_config, name)
        # A Tuple space's shape is None by default, so calculate the flattened
        # shape of its contents and update it accordingly.
        tmp_shape = orig_space["observations"]
        tmp_shape.shape = find_shape(tmp_shape)
        self.internal_model = FullyConnectedNetwork(
            tmp_shape,
            action_space,
            num_outputs,
            model_config,
            name + "_internal",
        )
        # Disable action masking --> will likely lead to invalid actions.
        self.no_masking = model_config["custom_model_config"].get("no_masking", False)

    def forward(self, input_dict, state, seq_lens):
        # Extract the available-actions tensor from the observation.
        action_mask = input_dict["obs"]["action_mask"]
        # Compute the unmasked logits.
        # tmp_stack = tf_real.concat(input_dict["obs"]["observations"], axis=1)
        logits, _ = self.internal_model({"obs": input_dict["obs"]["observations"]})
        # If action masking is disabled, directly return unmasked logits.
        if self.no_masking:
            return logits, state
        # Convert action_mask into a [0.0 || -inf]-type mask.
        # inf_mask = tf.maximum(tf.math.log(action_mask), tf.float32.min)
        # masked_logits = logits + inf_mask
        masked_logits = logits
        # Return masked logits.
        return masked_logits, state

    def value_function(self):
        return self.internal_model.value_function()
This is my modified version of the example file from GitHub: https://github.com/ray-project/ray/blob/master/rllib/examples/models/action_mask_model.py
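For context, find_shape is my helper to work out the flattened width of the Tuple observation space, since a Tuple space reports shape=None by default. A quick sanity check of what it returns (the sample space below is made up purely for illustration, it is not my real space):

from gym import spaces

# Made-up sample space, only to show what find_shape (defined above) returns.
sample = spaces.Tuple([
    spaces.Discrete(9),                         # one-hot -> contributes 9
    spaces.Box(low=0.0, high=1.0, shape=(2,)),  # contributes 2
    spaces.MultiDiscrete([3, 4]),               # contributes 3 + 4 = 7
])
print(find_shape(sample))  # -> (18,)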
It crashes on the following line:

logits, _ = self.internal_model({"obs": input_dict["obs"]["observations"]})

and prints the following error:
Traceback (most recent call last):
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\agents\trainer.py", line 892, in setup
self._init(self.config, self.env_creator)
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\agents\trainer.py", line 1021, in _init
raise NotImplementedError
NotImplementedError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/destr/PycharmProjects/Underlords/code/policy_server.py", line 310, in <module>
trainer = PPOTrainer(config=DEFAULT_CONFIG, env= ActionMaskEnv)
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\agents\trainer.py", line 826, in __init__
super().__init__(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\tune\trainable.py", line 142, in __init__
self.setup(copy.deepcopy(self.config))
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\agents\trainer.py", line 907, in setup
self.workers = self._make_workers(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\agents\trainer.py", line 2161, in _make_workers
return WorkerSet(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\evaluation\worker_set.py", line 161, in __init__
self._local_worker = self._make_worker(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\evaluation\worker_set.py", line 553, in _make_worker
worker = cls(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\evaluation\rollout_worker.py", line 613, in __init__
self._build_policy_map(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\evaluation\rollout_worker.py", line 1698, in _build_policy_map
self.policy_map.create_policy(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\policy\policy_map.py", line 140, in create_policy
self[policy_id] = class_(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\policy\tf_policy_template.py", line 256, in __init__
DynamicTFPolicy.__init__(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\policy\dynamic_tf_policy.py", line 360, in __init__
dist_inputs, self._state_out = self.model(self._input_dict)
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\models\modelv2.py", line 251, in __call__
res = self.forward(restored, state or [], seq_lens)
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\models\tf\recurrent_net.py", line 221, in forward
wrapped_out, _ = self._wrapped_forward(input_dict, [], None)
File "C:\Users\destr\PycharmProjects\Underlords\code\mask_model.py", line 71, in forward
logits, _ = self.internal_model({"obs": input_dict["obs"]["observations"]})
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\models\modelv2.py", line 251, in __call__
res = self.forward(restored, state or [], seq_lens)
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\models\tf\fcnet.py", line 146, in forward
model_out, self._value_out = self.base_model(input_dict["obs_flat"])
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\tensorflow\python\keras\engine\base_layer_v1.py", line 760, in __call__
input_spec.assert_input_compatibility(self.input_spec, inputs,
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\tensorflow\python\keras\engine\input_spec.py", line 204, in assert_input_compatibility
raise ValueError('Layer ' + layer_name + ' expects ' +
ValueError: Layer model expects 1 input(s), but it received 56 input tensors. Inputs received: [<tf.Tensor 'default_policy/Reshape_2:0' shape=(?, 9) dtype=float32>, <tf.Tensor 'default_policy/Reshape_3:0' shape=(?, 1) dtype=float32>, <tf.Tensor 'default_policy/Reshape_4:0' shape=(?, 1) dtype=float32>, <tf.Tensor 'default_policy/Reshape_5:0' shape=(?, 1) dtype=float32>, <tf.Tensor 'default_policy/Reshape_6:0' shape=(?, 1) dtype=float32>, <tf.Tensor 'default_policy/Reshape_7:0' shape=(?, 1) dtype=float32>, <tf.Tensor 'default_policy/Reshape_8:0' shape=(?, 2) dtype=float32>, <tf.Tensor 'default_policy/Reshape_9:0' shape=(?, 2) dtype=float32>, <tf.Tensor 'default_policy/Reshape_10:0' shape=(?, 6) dtype=float32>, <tf.Tensor 'default_policy/Reshape_11:0' shape=(?, 13) dtype=float32>, <tf.Tensor 'default_policy/Reshape_12:0' shape=(?, 13) dtype=float32>, <tf.Tensor 'default_policy/Reshape_13:0' shape=(?, 3) dtype=float32>, <tf.Tensor 'default_policy/Reshape_14:0' shape=(?, 2) dtype=float32>, <tf.Tensor 'default_policy/Reshape_15:0' shape=(?, 1) dtype=float32>, <tf.Tensor 'default_policy/Reshape_16:0' shape=(?, 435) dtype=float32>, <tf.Tensor 'default_policy/Reshape_17:0' shape=(?, 19) dtype=float32>, <tf.Tensor 'default_policy/Reshape_18:0' shape=(?, 19) dtype=float32>, <tf.Tensor 'default_policy/Reshape_19:0' shape=(?, 27) dtype=float32>, <tf.Tensor 'default_policy/Reshape_20:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_21:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_22:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_23:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_24:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_25:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_26:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_27:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_28:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_29:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_30:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_31:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_32:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_33:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_34:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_35:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_36:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_37:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_38:0' shape=(?, 115) dtype=float32>, <tf.Tensor 'default_policy/Reshape_39:0' shape=(?, 32) dtype=float32>, <tf.Tensor 'default_policy/Reshape_40:0' shape=(?, 92) dtype=float32>, <tf.Tensor 'default_policy/Reshape_41:0' shape=(?, 92) dtype=float32>, <tf.Tensor 'default_policy/Reshape_42:0' shape=(?, 92) dtype=float32>, <tf.Tensor 'default_policy/Reshape_43:0' shape=(?, 92) dtype=float32>, <tf.Tensor 'default_policy/Reshape_44:0' shape=(?, 92) dtype=float32>, <tf.Tensor 'default_policy/Reshape_45:0' shape=(?, 92) dtype=float32>, <tf.Tensor 'default_policy/Reshape_46:0' shape=(?, 92) dtype=float32>, <tf.Tensor 'default_policy/Reshape_47:0' shape=(?, 92) dtype=float32>, <tf.Tensor 'default_policy/Reshape_48:0' shape=(?, 92) dtype=float32>, <tf.Tensor 'default_policy/Reshape_49:0' shape=(?, 92) dtype=float32>, <tf.Tensor 'default_policy/Reshape_50:0' shape=(?, 210) dtype=float32>, <tf.Tensor 'default_policy/Reshape_51:0' shape=(?, 9) dtype=float32>, <tf.Tensor 'default_policy/Reshape_52:0' shape=(?, 1) dtype=float32>, <tf.Tensor 'default_policy/Reshape_53:0' shape=(?, 1) dtype=float32>, <tf.Tensor 'default_policy/Reshape_54:0' shape=(?, 1) dtype=float32>, <tf.Tensor 'default_policy/Reshape_55:0' shape=(?, 10) dtype=float32>, <tf.Tensor 'default_policy/Reshape_56:0' shape=(?, 810) dtype=float32>, <tf.Tensor 'default_policy/Reshape_57:0' shape=(?, 120) dtype=float32>]
Process finished with exit code 1
Reading the error, the wrapped FullyConnectedNetwork seems to build a single Keras input of the flattened width, while the restored Tuple observation arrives as a list of 56 separate tensors. I’ve tried to fix that line by concatenating the components first:

tmp_stack = tf_real.concat(input_dict["obs"]["observations"], axis=1)
logits, _ = self.internal_model(tmp_stack)

but I get the following error:
Traceback (most recent call last):
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\agents\trainer.py", line 892, in setup
self._init(self.config, self.env_creator)
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\agents\trainer.py", line 1021, in _init
raise NotImplementedError
NotImplementedError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/destr/PycharmProjects/Underlords/code/policy_server.py", line 310, in <module>
trainer = PPOTrainer(config=DEFAULT_CONFIG, env= ActionMaskEnv)
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\agents\trainer.py", line 826, in __init__
super().__init__(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\tune\trainable.py", line 142, in __init__
self.setup(copy.deepcopy(self.config))
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\agents\trainer.py", line 907, in setup
self.workers = self._make_workers(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\agents\trainer.py", line 2161, in _make_workers
return WorkerSet(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\evaluation\worker_set.py", line 161, in __init__
self._local_worker = self._make_worker(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\evaluation\worker_set.py", line 553, in _make_worker
worker = cls(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\evaluation\rollout_worker.py", line 613, in __init__
self._build_policy_map(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\evaluation\rollout_worker.py", line 1698, in _build_policy_map
self.policy_map.create_policy(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\policy\policy_map.py", line 140, in create_policy
self[policy_id] = class_(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\policy\tf_policy_template.py", line 256, in __init__
DynamicTFPolicy.__init__(
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\policy\dynamic_tf_policy.py", line 360, in __init__
dist_inputs, self._state_out = self.model(self._input_dict)
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\models\modelv2.py", line 251, in __call__
res = self.forward(restored, state or [], seq_lens)
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\models\tf\recurrent_net.py", line 221, in forward
wrapped_out, _ = self._wrapped_forward(input_dict, [], None)
File "C:\Users\destr\PycharmProjects\Underlords\code\mask_model.py", line 70, in forward
logits, _ = self.internal_model(tmp_stack)
File "C:\Users\destr\AppData\Local\Programs\Python\Python38\lib\site-packages\ray\rllib\models\modelv2.py", line 216, in __call__
restored = input_dict.copy()
AttributeError: 'Tensor' object has no attribute 'copy'
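From that trace, ModelV2.__call__ (modelv2.py line 216) starts with input_dict.copy(), so I assume the internal model has to be called with a dict rather than a raw tensor. Is something like this the right direction? It's just a sketch of what I'm considering next, assuming concatenating the Tuple components is the right idea at all:

    def forward(self, input_dict, state, seq_lens):
        action_mask = input_dict["obs"]["action_mask"]
        # Concatenate the restored Tuple components into one flat 2-D tensor...
        flat_obs = tf.concat(input_dict["obs"]["observations"], axis=1)
        # ...then wrap it back into the dict structure ModelV2.__call__ expects.
        logits, _ = self.internal_model({"obs": flat_obs})
        if self.no_masking:
            return logits, state
        # log(1) = 0 leaves valid logits unchanged; log(0) = -inf (clamped to
        # float32.min) drives invalid actions' probabilities towards zero.
        inf_mask = tf.maximum(tf.math.log(action_mask), tf.float32.min)
        return logits + inf_mask, state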
Any advice would be much appreciated. Thank you.