From 13ac83156ca104d226de1d372a8fcc6a9c5602be Mon Sep 17 00:00:00 2001 From: Rebecca Zhang Date: Thu, 25 Sep 2025 10:09:41 +0800 Subject: [PATCH] Improve recorder manager: keep the original export behavior, but extend it to store a demo under a caller-specified demo index --- .../isaaclab/managers/recorder_manager.py | 4 ++-- .../datasets/hdf5_dataset_file_handler.py | 19 ++++++++++++++----- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/source/isaaclab/isaaclab/managers/recorder_manager.py b/source/isaaclab/isaaclab/managers/recorder_manager.py index 855c975f2a9..4eea817e424 100644 --- a/source/isaaclab/isaaclab/managers/recorder_manager.py +++ b/source/isaaclab/isaaclab/managers/recorder_manager.py @@ -442,7 +442,7 @@ def get_ep_meta(self) -> dict: ep_meta = self._env.cfg.get_ep_meta() return ep_meta - def export_episodes(self, env_ids: Sequence[int] | None = None) -> None: + def export_episodes(self, env_ids: Sequence[int] | None = None, demo_id: int | None = None) -> None: """Concludes and exports the episodes for the given environment ids. 
Args: @@ -484,7 +484,7 @@ def export_episodes(self, env_ids: Sequence[int] | None = None) -> None: else: target_dataset_file_handler = self._failed_episode_dataset_file_handler if target_dataset_file_handler is not None: - target_dataset_file_handler.write_episode(self._episodes[env_id]) + target_dataset_file_handler.write_episode(self._episodes[env_id], demo_id) need_to_flush = True # Update episode count if episode_succeeded: diff --git a/source/isaaclab/isaaclab/utils/datasets/hdf5_dataset_file_handler.py b/source/isaaclab/isaaclab/utils/datasets/hdf5_dataset_file_handler.py index 2fa35ca1533..d031ea0a810 100644 --- a/source/isaaclab/isaaclab/utils/datasets/hdf5_dataset_file_handler.py +++ b/source/isaaclab/isaaclab/utils/datasets/hdf5_dataset_file_handler.py @@ -136,18 +136,25 @@ def load_dataset_helper(group): return episode - def write_episode(self, episode: EpisodeData): + def write_episode(self, episode: EpisodeData, demo_id: int | None = None): """Add an episode to the dataset. Args: episode: The episode data to add. + demo_id: Custom index for the episode. If None, uses default index. 
""" self._raise_if_not_initialized() if episode.is_empty(): return - # create episode group based on demo count - h5_episode_group = self._hdf5_data_group.create_group(f"demo_{self._demo_count}") + # Use custom demo id if provided, otherwise use default naming + if demo_id is not None: + episode_group_name = f"demo_{demo_id}" + else: + episode_group_name = f"demo_{self._demo_count}" + + # create episode group with the specified name + h5_episode_group = self._hdf5_data_group.create_group(episode_group_name) # store number of steps taken if "actions" in episode.data: @@ -176,8 +183,10 @@ def create_dataset_helper(group, key, value): # increment total step counts self._hdf5_data_group.attrs["total"] += h5_episode_group.attrs["num_samples"] - # increment total demo counts - self._demo_count += 1 + # Only increment demo count if using default indexing + if demo_id is None: + # increment total demo counts + self._demo_count += 1 def flush(self): """Flush the episode data to disk."""