Cannot concat data under key

How severely does this issue affect your experience of using Ray?

  • High: It blocks me from completing my task.

TL;DR: Training on a cluster fails with "ValueError: Cannot concat data under key 'obs', b/c sub-structures under that key don't match.", but the same setup works fine on a standalone node.

I’m trying to train a PPO + LSTM agent whose observations come from a medical simulation. Training runs fine on a single node, but when I run it on a small cluster I get the error below. Any ideas what I might be doing wrong?
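Roughly, my setup looks like the sketch below (the env id, worker count, and model sizes here are placeholders, not my real values):

```python
import ray
from ray.rllib.algorithms.ppo import PPOConfig

ray.init(address="auto")  # attach to the existing cluster

config = (
    PPOConfig()
    .environment(env="MedicalSimEnv-v0")   # placeholder env id
    .framework("torch")
    .rollouts(num_rollout_workers=4)       # rollout workers spread across the cluster nodes
    .training(
        train_batch_size=4000,             # placeholder sizes
        model={
            "use_lstm": True,
            "max_seq_len": 20,
            "lstm_cell_size": 256,
        },
    )
)

algo = config.build()
for _ in range(10):
    result = algo.train()  # this is the call that eventually fails on the cluster
```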


ValueError Traceback (most recent call last)
Cell In[8], line 4
2 try:
3 while True:
----> 4 result = algo.train()
5 # print(pretty_print(result))
6 i += 1

File ~/miniconda3/envs/torch/lib/python3.10/site-packages/ray/tune/trainable/trainable.py:400, in Trainable.train(self)
398 except Exception as e:
399 skipped = skip_exceptions(e)
--> 400 raise skipped from exception_cause(skipped)
402 assert isinstance(result, dict), "step() needs to return a dict."
404 # We do not modify internal state nor update this result if duplicate.

File ~/miniconda3/envs/torch/lib/python3.10/site-packages/ray/tune/trainable/trainable.py:397, in Trainable.train(self)
395 start = time.time()
396 try:
--> 397 result = self.step()
398 except Exception as e:
399 skipped = skip_exceptions(e)

File ~/miniconda3/envs/torch/lib/python3.10/site-packages/ray/rllib/algorithms/algorithm.py:853, in Algorithm.step(self)
845 (
846 results,
847 train_iter_ctx,
848 ) = self._run_one_training_iteration_and_evaluation_in_parallel()
849 # - No evaluation necessary, just run the next training iteration.
850 # - We have to evaluate in this training iteration, but no parallelism →
851 # evaluate after the training iteration is entirely done.
852 else:
--> 853 results, train_iter_ctx = self._run_one_training_iteration()
855 # Sequential: Train (already done above), then evaluate.
856 if evaluate_this_iter and not self.config.evaluation_parallel_to_training:

File ~/miniconda3/envs/torch/lib/python3.10/site-packages/ray/rllib/algorithms/algorithm.py:2838, in Algorithm._run_one_training_iteration(self)
2836 with self._timers[TRAINING_ITERATION_TIMER]:
2837 if self.config._disable_execution_plan_api:
--> 2838 results = self.training_step()
2839 else:
2840 results = next(self.train_exec_impl)

File ~/miniconda3/envs/torch/lib/python3.10/site-packages/ray/rllib/algorithms/ppo/ppo.py:429, in PPO.training_step(self)
424 train_batch = synchronous_parallel_sample(
425 worker_set=self.workers,
426 max_agent_steps=self.config.train_batch_size,
427 )
428 else:
--> 429 train_batch = synchronous_parallel_sample(
430 worker_set=self.workers, max_env_steps=self.config.train_batch_size
431 )
433 train_batch = train_batch.as_multi_agent()
434 self._counters[NUM_AGENT_STEPS_SAMPLED] += train_batch.agent_steps()

File ~/miniconda3/envs/torch/lib/python3.10/site-packages/ray/rllib/execution/rollout_ops.py:101, in synchronous_parallel_sample(worker_set, max_agent_steps, max_env_steps, concat)
98 all_sample_batches.extend(sample_batches)
100 if concat is True:
--> 101 full_batch = concat_samples(all_sample_batches)
102 # Discard collected incomplete episodes in episode mode.
103 # if max_episodes is not None and episodes >= max_episodes:
104 # last_complete_ep_idx = len(full_batch) - full_batch[
105 # SampleBatch.DONES
106 # ].reverse().index(1)
107 # full_batch = full_batch.slice(0, last_complete_ep_idx)
108 return full_batch

File ~/miniconda3/envs/torch/lib/python3.10/site-packages/ray/rllib/policy/sample_batch.py:1582, in concat_samples(samples)
1544 """Concatenates a list of SampleBatches or MultiAgentBatches.
1545
1546 If all items in the list are of SampleBatch type, the output will be
(…)
1578 "b": np.array([10, 11, 12])}}
1579 """
1581 if any(isinstance(s, MultiAgentBatch) for s in samples):
--> 1582 return concat_samples_into_ma_batch(samples)
1584 # the output is a SampleBatch type
1585 concatd_seq_lens = []

File ~/miniconda3/envs/torch/lib/python3.10/site-packages/ray/rllib/policy/sample_batch.py:1731, in concat_samples_into_ma_batch(samples)
1729 out = {}
1730 for key, batches in policy_batches.items():
--> 1731 out[key] = concat_samples(batches)
1733 return MultiAgentBatch(out, env_steps)

File ~/miniconda3/envs/torch/lib/python3.10/site-packages/ray/rllib/policy/sample_batch.py:1652, in concat_samples(samples)
1649 raise e
1650 except Exception as e:
1651 # Other errors are likely due to mismatching sub-structures.
--> 1652 raise ValueError(
1653 f"Cannot concat data under key '{k}', b/c "
1654 "sub-structures under that key don't match. "
1655 f"samples={samples}\n Original error: \n {e}"
1656 )
1658 if concatd_seq_lens != [] and torch and torch.is_tensor(concatd_seq_lens[0]):
1659 concatd_seq_lens = torch.Tensor(concatd_seq_lens)

ValueError: Cannot concat data under key ‘obs’, b/c sub-structures under that key don’t match. samples=[SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, 
‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, 
‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, 
‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(67 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, 
‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, 
‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’]), SampleBatch(66 (seqs=1): [‘obs’, ‘new_obs’, ‘actions’, ‘prev_actions’, ‘rewards’, ‘prev_rewards’, ‘terminateds’, ‘truncateds’, ‘infos’, ‘eps_id’, ‘unroll_id’, ‘agent_index’, ‘t’, ‘state_in’, ‘state_out’, ‘vf_preds’, ‘action_dist_inputs’, ‘action_prob’, ‘action_logp’, ‘values_bootstrapped’, ‘advantages’, ‘value_targets’])]
Original error:
all the input array dimensions except for the concatenation axis must match exactly, but along dimension 1, the array at index 0 has size 67 and the array at index 40 has size 66
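If I’m reading the underlying numpy error right, it is the standard np.concatenate shape-mismatch failure: among the arrays collected under ‘obs’, one has size 67 along dimension 1 and another has size 66, matching the 67- and 66-step SampleBatches in the list above. For illustration only (the shapes below are made up, not taken from my env), this is the kind of call that produces exactly that message:

```python
import numpy as np

# Two workers' "obs" arrays: one with 67 along dimension 1, one with 66
# (the other dimensions are illustrative placeholders).
obs_worker_a = np.zeros((1, 67, 8))
obs_worker_b = np.zeros((1, 66, 8))

# Concatenating along axis 0 requires every other dimension to match,
# so the differing size along dimension 1 (67 vs. 66) raises the same
# ValueError that concat_samples surfaces above.
np.concatenate([obs_worker_a, obs_worker_b], axis=0)
```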

I’m getting a similar error. Did you have any luck solving this?

In my case, the training goes on for a few hours, but then it breaks with this error.