@@ -66,19 +66,6 @@ def __init__(self, cfg: AssemblyEnvCfg, render_mode: str | None = None, **kwargs
if self.cfg_task.if_logging_eval:
self._init_eval_logging()

-if self.cfg_task.sample_from != "rand":
-self._init_eval_loading()
-
-def _init_eval_loading(self):
-eval_held_asset_pose, eval_fixed_asset_pose, eval_success = automate_log.load_log_from_hdf5(
-self.cfg_task.eval_filename
-)
-
-if self.cfg_task.sample_from == "gp":
-self.gp = automate_algo.model_succ_w_gp(eval_held_asset_pose, eval_fixed_asset_pose, eval_success)
-elif self.cfg_task.sample_from == "gmm":
-self.gmm = automate_algo.model_succ_w_gmm(eval_held_asset_pose, eval_fixed_asset_pose, eval_success)
-
def _init_eval_logging(self):

self.held_asset_pose_log = torch.empty(
@@ -246,7 +233,7 @@ def _load_disassembly_data(self):
# offset each trajectory to be relative to the goal
eef_pos_traj.append(curr_ee_traj - curr_ee_goal)

-self.eef_pos_traj = torch.tensor(eef_pos_traj, dtype=torch.float32, device=self.device).squeeze()
+self.eef_pos_traj = torch.tensor(np.array(eef_pos_traj), dtype=torch.float32, device=self.device).squeeze()

def _get_keypoint_offsets(self, num_keypoints):
"""Get uniformly-spaced keypoints along a line of unit length, centered at 0."""
@@ -804,28 +791,12 @@ def randomize_held_initial_state(self, env_ids, pre_grasp):
torch.rand((self.num_envs,), dtype=torch.float32, device=self.device)
)

if self.cfg_task.sample_from == "rand":

rand_sample = torch.rand((len(env_ids), 3), dtype=torch.float32, device=self.device)
held_pos_init_rand = 2 * (rand_sample - 0.5) # [-1, 1]
held_asset_init_pos_rand = torch.tensor(
self.cfg_task.held_asset_init_pos_noise, dtype=torch.float32, device=self.device
)
self.held_pos_init_rand = held_pos_init_rand @ torch.diag(held_asset_init_pos_rand)

if self.cfg_task.sample_from == "gp":
rand_sample = torch.rand((self.cfg_task.num_gp_candidates, 3), dtype=torch.float32, device=self.device)
held_pos_init_rand = 2 * (rand_sample - 0.5) # [-1, 1]
held_asset_init_pos_rand = torch.tensor(
self.cfg_task.held_asset_init_pos_noise, dtype=torch.float32, device=self.device
)
held_asset_init_candidates = held_pos_init_rand @ torch.diag(held_asset_init_pos_rand)
self.held_pos_init_rand, _ = automate_algo.propose_failure_samples_batch_from_gp(
self.gp, held_asset_init_candidates.cpu().detach().numpy(), len(env_ids), self.device
)

if self.cfg_task.sample_from == "gmm":
self.held_pos_init_rand = automate_algo.sample_rel_pos_from_gmm(self.gmm, len(env_ids), self.device)
rand_sample = torch.rand((len(env_ids), 3), dtype=torch.float32, device=self.device)
held_pos_init_rand = 2 * (rand_sample - 0.5) # [-1, 1]
held_asset_init_pos_rand = torch.tensor(
self.cfg_task.held_asset_init_pos_noise, dtype=torch.float32, device=self.device
)
self.held_pos_init_rand = held_pos_init_rand @ torch.diag(held_asset_init_pos_rand)

# Set plug pos to assembled state, but offset plug Z-coordinate by height of socket,
# minus curriculum displacement
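
For reference, the uniform-sampling block that remains above scales [-1, 1] samples per axis by the configured noise bounds via a diagonal matrix. A self-contained sketch (hypothetical noise values, outside the env class):

import torch

num_resets = 4
held_asset_init_pos_noise = [0.02, 0.02, 0.01]  # hypothetical XYZ noise bounds in meters

rand_sample = torch.rand((num_resets, 3), dtype=torch.float32)
held_pos_init_rand = 2 * (rand_sample - 0.5)  # uniform in [-1, 1] per component
noise = torch.tensor(held_asset_init_pos_noise, dtype=torch.float32)
# right-multiplying by diag(noise) scales each column independently, i.e. the
# same as element-wise multiplication by the noise vector broadcast over rows
offsets = held_pos_init_rand @ torch.diag(noise)
print(offsets)  # per-reset XYZ offsets within +/- the configured noise bounds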
@@ -139,10 +139,6 @@ class AssemblyTask:
num_eval_trials: int = 100
eval_filename: str = "evaluation_00015.h5"

-# Fine-tuning
-sample_from: str = "rand" # gp, gmm, idv, rand
-num_gp_candidates: int = 1000


@configclass
class Peg8mm(HeldAssetCfg):
@@ -118,7 +118,7 @@ class Extraction(DisassemblyTask):
assembly_id = "00015"
assembly_dir = f"{ASSET_DIR}/{assembly_id}/"
disassembly_dir = "disassembly_dir"
-num_log_traj = 1000
+num_log_traj = 100

fixed_asset_cfg = Hole8mm()
held_asset_cfg = Peg8mm()
@@ -61,9 +61,9 @@ def main():

bash_command = None
if sys.platform.startswith("win"):
bash_command = "isaaclab.bat -p"
bash_command = "NUMBA_CUDA_LOW_OCCUPANCY_WARNINGS=0 isaaclab.bat -p"
elif sys.platform.startswith("linux"):
bash_command = "./isaaclab.sh -p"
bash_command = "NUMBA_CUDA_LOW_OCCUPANCY_WARNINGS=0 ./isaaclab.sh -p"
Comment on lines +64 to +66

Contributor:
What does NUMBA_CUDA_LOW_OCCUPANCY_WARNINGS do here specifically? Could you add a comment in the code if it is needed for something specific?

Collaborator (Author):
This line is an easy fix for the reported warning:
/colossus/isaaclab/venv_sim_pip_lab_src/lib/python3.11/site-packages/numba/cuda/dispatcher.py:536: NumbaPerformanceWarning: Grid size 1 will likely result in GPU under-utilization due to low occupancy.
warn(NumbaPerformanceWarning(msg))

if args.train:
bash_command += " scripts/reinforcement_learning/rl_games/train.py --task=Isaac-AutoMate-Assembly-Direct-v0"
bash_command += f" --seed={str(args.seed)} --max_iterations={str(args.max_iterations)}"
@@ -123,8 +123,8 @@ class _SoftDTWCUDA(Function):
def forward(ctx, D, gamma, bandwidth):
dev = D.device
dtype = D.dtype
-gamma = torch.cuda.FloatTensor([gamma])
-bandwidth = torch.cuda.FloatTensor([bandwidth])
+gamma = torch.tensor([gamma], dtype=torch.float, device="cuda")
+bandwidth = torch.tensor([bandwidth], dtype=torch.float, device="cuda")
Comment on lines +126 to +127

Contributor:
Why is this using "cuda" and not the dev itself? Not sure if this will work nicely on a multi-GPU setup.

Collaborator (Author):
We did not run the AutoMate env with a multi-GPU setting. Could you please suggest how to change device='cuda' for a multi-GPU setup? Any example of this?


B = D.shape[0]
N = D.shape[1]
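
On the device question above: one way the scalars could be made device-agnostic (a sketch with a hypothetical helper, assuming dev is taken from D as in the existing forward()):

import torch

def _prepare_scalars(D: torch.Tensor, gamma: float, bandwidth: float):
    # allocate the scalar tensors on the same device (and dtype) as the cost
    # matrix D instead of hard-coding "cuda", so a non-default GPU also works
    dev = D.device
    dtype = D.dtype
    gamma_t = torch.tensor([gamma], dtype=dtype, device=dev)
    bandwidth_t = torch.tensor([bandwidth], dtype=dtype, device=dev)
    return gamma_t, bandwidth_t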