diff --git a/.gitignore b/.gitignore index 812c5d7..71792c4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ .env __pycache__ .vscode/ +.DS_Store watod-config.local.sh utils/voxel/spconv diff --git a/autonomy/simulation/Teleop/Teleop.md b/autonomy/simulation/Teleop/Teleop.md new file mode 100644 index 0000000..b199ba5 --- /dev/null +++ b/autonomy/simulation/Teleop/Teleop.md @@ -0,0 +1,9 @@ +## Upper body control +Control for Arm-hand (6 DOF Arm + 15 DOF Hand) + +## Lower body control (Not used currently, for future reference) +- 4D Tensor [x, y, yaw, lower_body_height] + +### Reference: +IsaacLab repo: +IsaacLab-main/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree \ No newline at end of file diff --git a/autonomy/simulation/Teleop/wato_hand/data/configs/dex-retargeting/wato_hand_right_dexpilot.yml b/autonomy/simulation/Teleop/wato_hand/data/configs/dex-retargeting/wato_hand_right_dexpilot.yml new file mode 100644 index 0000000..3aad4da --- /dev/null +++ b/autonomy/simulation/Teleop/wato_hand/data/configs/dex-retargeting/wato_hand_right_dexpilot.yml @@ -0,0 +1,34 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +retargeting: + finger_tip_link_names: + - IP_THUMB_v1_1 + - DIP_INDEX_v1_1 + - DIP_MIDDLE_v1_1 + - DIP_RING_v1_1 + - DIP_PINKY_v1_1 + low_pass_alpha: 0.2 + scaling_factor: 1.0 + target_joint_names: + - mcp_index + - pip_index + - dip_index + - mcp_middle + - pip_middle + - dip_middle + - mcp_ring + - pip_ring + - dip_ring + - mcp_pinky + - pip_pinky + - dip_pinky + - cmc_thumb + - mcp_thumb + - ip_thumb + type: DexPilot + # Overwritten at runtime by wato_dex_retargeting_utils to Humanoid_Wato/arm_assembly/arm_assembly.urdf + urdf_path: /tmp/wato_arm_assembly.urdf + wrist_link_name: PALM_GAVIN_1DoF_Hinge_v2_1 diff --git a/autonomy/simulation/Teleop/wato_hand/wato_dex_retargeting_utils.py b/autonomy/simulation/Teleop/wato_hand/wato_dex_retargeting_utils.py new file mode 100644 index 0000000..6cb2351 --- /dev/null +++ b/autonomy/simulation/Teleop/wato_hand/wato_dex_retargeting_utils.py @@ -0,0 +1,248 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import logging +import os + +import numpy as np +import torch +import yaml +from dex_retargeting.retargeting_config import RetargetingConfig +from scipy.spatial.transform import Rotation as R + + +# import logger +logger = logging.getLogger(__name__) + +# yourdfpy loads visual/collision meshes with the hand URDFs; these aren't needed for +# retargeting and clutter the logs, so we suppress them. +logging.getLogger("dex_retargeting.yourdfpy").setLevel(logging.ERROR) + +# OpenXR hand joint indices (XrHandJointEXT 0-25) to select 21 joints for dex-retargeting. +# Selects: wrist(1), thumb(2-5), index/middle/ring/pinky proximal→tip (skips metacarpals 6,11,16,21). 
+_OPENXR_HAND_JOINT_INDICES = [1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 20, 22, 23, 24, 25] + +# The transformation matrices to convert hand pose to canonical view. +_OPERATOR2MANO_RIGHT = np.array( + [ + [0, 0, 1], + [1, 0, 0], + [0, 1, 0], + ] +) + +_OPERATOR2MANO_LEFT = np.array( + [ + [0, 0, 1], + [1, 0, 0], + [0, 1, 0], + ] +) + +# Wato arm_assembly hand joint names - 5 fingers (index/middle/ring/pinky MCP,PIP,DIP + thumb CMC,MCP,IP) +_WATO_HAND_JOINT_NAMES = [ + "mcp_index", "pip_index", "dip_index", + "mcp_middle", "pip_middle", "dip_middle", + "mcp_ring", "pip_ring", "dip_ring", + "mcp_pinky", "pip_pinky", "dip_pinky", + "cmc_thumb", "mcp_thumb", "ip_thumb", +] +_LEFT_HAND_JOINT_NAMES = _WATO_HAND_JOINT_NAMES +_RIGHT_HAND_JOINT_NAMES = _WATO_HAND_JOINT_NAMES + + +class WatoHandDexRetargeting: + """Hand retargeting for Wato arm_assembly hand. + + Retargets OpenXR hand tracking to Wato arm_assembly hand joint angles (15 DOF per hand). + """ + + def __init__( + self, + hand_joint_names: list[str], + right_hand_config_filename: str = "wato_hand_right_dexpilot.yml", + left_hand_config_filename: str = "wato_hand_right_dexpilot.yml", + left_hand_urdf_path: str | None = None, + right_hand_urdf_path: str | None = None, + ): + """Initialize the hand retargeting. 
+ + Args: + hand_joint_names: Names of hand joints in the robot model + right_hand_config_filename: Config file for right hand retargeting + left_hand_config_filename: Config file for left hand retargeting + """ + data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/")) + config_dir = os.path.join(data_dir, "configs/dex-retargeting") + # Local Wato arm_assembly.urdf path (relative to this package) + default_urdf = os.path.abspath( + os.path.join(os.path.dirname(__file__), "..", "..", "Humanoid_Wato", "arm_assembly", "arm_assembly.urdf") + ) + local_left_urdf_path = left_hand_urdf_path if left_hand_urdf_path else default_urdf + local_right_urdf_path = right_hand_urdf_path if right_hand_urdf_path else default_urdf + + left_config_path = os.path.join(config_dir, left_hand_config_filename) + right_config_path = os.path.join(config_dir, right_hand_config_filename) + + self._update_yaml_with_urdf_path(left_config_path, local_left_urdf_path) + self._update_yaml_with_urdf_path(right_config_path, local_right_urdf_path) + + self._dex_left_hand = RetargetingConfig.load_from_file(left_config_path).build() + self._dex_right_hand = RetargetingConfig.load_from_file(right_config_path).build() + + self.left_dof_names = self._dex_left_hand.optimizer.robot.dof_joint_names + self.right_dof_names = self._dex_right_hand.optimizer.robot.dof_joint_names + self.dof_names = self.left_dof_names + self.right_dof_names + self.isaac_lab_hand_joint_names = hand_joint_names + + logger.info("[WatoDexRetargeter] init done.") + + def _update_yaml_with_urdf_path(self, yaml_path: str, urdf_path: str): + """Update YAML file with the correct URDF path. 
+ + Args: + yaml_path: Path to the YAML configuration file + urdf_path: Path to the URDF file to use + """ + try: + # Read the YAML file + with open(yaml_path) as file: + config = yaml.safe_load(file) + + # Update the URDF path in the configuration + if "retargeting" in config: + config["retargeting"]["urdf_path"] = urdf_path + logger.info(f"Updated URDF path in {yaml_path} to {urdf_path}") + else: + logger.warning(f"Unable to find 'retargeting' section in {yaml_path}") + + # Write the updated configuration back to the file + with open(yaml_path, "w") as file: + yaml.dump(config, file) + + except Exception as e: + logger.error(f"Error updating YAML file {yaml_path}: {e}") + + def convert_hand_joints(self, hand_poses: dict[str, np.ndarray], operator2mano: np.ndarray) -> np.ndarray: + """Prepares the hand joints data for retargeting. + + Args: + hand_poses: Dictionary containing hand pose data with joint positions and rotations + operator2mano: Transformation matrix to convert from operator to MANO frame + + Returns: + Joint positions with shape (21, 3) + """ + joint_position = np.zeros((21, 3)) + hand_joints = list(hand_poses.values()) + for i, joint_index in enumerate(_OPENXR_HAND_JOINT_INDICES): + joint = hand_joints[joint_index] + joint_position[i] = joint[:3] + + # Convert hand pose to the canonical frame. + joint_position = joint_position - joint_position[0:1, :] + xr_wrist_quat = hand_poses.get("wrist")[3:] + # OpenXR hand uses w,x,y,z order for quaternions but scipy uses x,y,z,w order + wrist_rot = R.from_quat([xr_wrist_quat[1], xr_wrist_quat[2], xr_wrist_quat[3], xr_wrist_quat[0]]).as_matrix() + + return joint_position @ wrist_rot @ operator2mano + + def compute_ref_value(self, joint_position: np.ndarray, indices: np.ndarray, retargeting_type: str) -> np.ndarray: + """Computes reference value for retargeting. 
+ + Args: + joint_position: Joint positions array + indices: Target link indices + retargeting_type: Type of retargeting ("POSITION" or other) + + Returns: + Reference value in cartesian space + """ + if retargeting_type == "POSITION": + return joint_position[indices, :] + else: + origin_indices = indices[0, :] + task_indices = indices[1, :] + ref_value = joint_position[task_indices, :] - joint_position[origin_indices, :] + return ref_value + + def compute_one_hand( + self, hand_joints: dict[str, np.ndarray], retargeting: RetargetingConfig, operator2mano: np.ndarray + ) -> np.ndarray: + """Computes retargeted joint angles for one hand. + + Args: + hand_joints: Dictionary containing hand joint data + retargeting: Retargeting configuration object + operator2mano: Transformation matrix from operator to MANO frame + + Returns: + Retargeted joint angles + """ + joint_pos = self.convert_hand_joints(hand_joints, operator2mano) + ref_value = self.compute_ref_value( + joint_pos, + indices=retargeting.optimizer.target_link_human_indices, + retargeting_type=retargeting.optimizer.retargeting_type, + ) + # Enable gradient calculation and inference mode in case some other script has disabled it + # This is necessary for the retargeting to work since it uses gradient features that + # are not available in inference mode + with torch.enable_grad(): + with torch.inference_mode(False): + return retargeting.retarget(ref_value) + + def get_joint_names(self) -> list[str]: + """Returns list of all joint names.""" + return self.dof_names + + def get_left_joint_names(self) -> list[str]: + """Returns list of left hand joint names.""" + return self.left_dof_names + + def get_right_joint_names(self) -> list[str]: + """Returns list of right hand joint names.""" + return self.right_dof_names + + def get_hand_indices(self, robot) -> np.ndarray: + """Gets indices of hand joints in robot's DOF array. 
+ + Args: + robot: Robot object containing DOF information + + Returns: + Array of joint indices + """ + return np.array([robot.dof_names.index(name) for name in self.dof_names], dtype=np.int64) + + def compute_left(self, left_hand_poses: dict[str, np.ndarray]) -> np.ndarray: + """Computes retargeted joints for left hand. + + Args: + left_hand_poses: Dictionary of left hand joint poses + + Returns: + Retargeted joint angles for left hand + """ + if left_hand_poses is not None: + left_hand_q = self.compute_one_hand(left_hand_poses, self._dex_left_hand, _OPERATOR2MANO_LEFT) + else: + left_hand_q = np.zeros(len(_LEFT_HAND_JOINT_NAMES)) + return left_hand_q + + def compute_right(self, right_hand_poses: dict[str, np.ndarray]) -> np.ndarray: + """Computes retargeted joints for right hand. + + Args: + right_hand_poses: Dictionary of right hand joint poses + + Returns: + Retargeted joint angles for right hand + """ + if right_hand_poses is not None: + right_hand_q = self.compute_one_hand(right_hand_poses, self._dex_right_hand, _OPERATOR2MANO_RIGHT) + else: + right_hand_q = np.zeros(len(_RIGHT_HAND_JOINT_NAMES)) + return right_hand_q diff --git a/autonomy/simulation/Teleop/wato_hand/wato_upper_body_motion_ctrl_retargeter.py b/autonomy/simulation/Teleop/wato_hand/wato_upper_body_motion_ctrl_retargeter.py new file mode 100644 index 0000000..e4b5201 --- /dev/null +++ b/autonomy/simulation/Teleop/wato_hand/wato_upper_body_motion_ctrl_retargeter.py @@ -0,0 +1,185 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from dataclasses import dataclass + +import numpy as np +import torch + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as PoseUtils +from isaaclab.devices.device_base import DeviceBase +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg +from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg + + +class WatoUpperBodyMotionControllerRetargeter(RetargeterBase): + """Simple retargeter that maps motion controller inputs to Wato arm_assembly hand joints (15 DOF per hand). + + Mapping: + - Trigger (analog 0-1) → Index finger (mcp_index, pip_index, dip_index) + - Squeeze (analog 0-1) → Middle (and ring/pinky) finger joints + - Thumb (max(trigger,squeeze)) → Thumb (cmc_thumb, mcp_thumb, ip_thumb) + """ + + def __init__(self, cfg: WatoUpperBodyMotionControllerRetargeterCfg): + """Initialize the retargeter.""" + super().__init__(cfg) + self._sim_device = cfg.sim_device + self._hand_joint_names = cfg.hand_joint_names + self._enable_visualization = cfg.enable_visualization + + if cfg.hand_joint_names is None: + raise ValueError("hand_joint_names must be provided") + + # Initialize visualization if enabled + if self._enable_visualization: + marker_cfg = VisualizationMarkersCfg( + prim_path="/Visuals/g1_controller_markers", + markers={ + "joint": sim_utils.SphereCfg( + radius=0.01, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)), + ), + }, + ) + self._markers = VisualizationMarkers(marker_cfg) + + def retarget(self, data: dict) -> torch.Tensor: + """Convert controller inputs to robot commands. + + Args: + data: Dictionary with MotionControllerTrackingTarget.LEFT/RIGHT keys + Each value is a 2D array: [pose(7), inputs(7)] + + Returns: + Tensor: [left_wrist(7), right_wrist(7), hand_joints(30)] + hand_joints: left 15 (index,middle,ring,pinky,thumb) then right 15, matching arm_assembly order. 
+ """ + + # Get controller data + left_controller_data = data.get(DeviceBase.TrackingTarget.CONTROLLER_LEFT, np.array([])) + right_controller_data = data.get(DeviceBase.TrackingTarget.CONTROLLER_RIGHT, np.array([])) + + default_wrist = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) + left_wrist = self._extract_wrist_pose(left_controller_data, default_wrist) + right_wrist = self._extract_wrist_pose(right_controller_data, default_wrist) + + left_hand_joints = self._map_to_hand_joints(left_controller_data, is_left=True) + right_hand_joints = self._map_to_hand_joints(right_controller_data, is_left=False) + left_hand_joints = -left_hand_joints + + # Order per hand (arm_assembly): mcp_index..dip_index, mcp_middle..dip_middle, mcp_ring..dip_ring, mcp_pinky..dip_pinky, cmc_thumb,mcp_thumb,ip_thumb + all_hand_joints = np.concatenate([left_hand_joints, right_hand_joints]) + + # Convert to tensors + left_wrist_tensor = torch.tensor( + self._retarget_abs(left_wrist, is_left=True), dtype=torch.float32, device=self._sim_device + ) + right_wrist_tensor = torch.tensor( + self._retarget_abs(right_wrist, is_left=False), dtype=torch.float32, device=self._sim_device + ) + hand_joints_tensor = torch.tensor(all_hand_joints, dtype=torch.float32, device=self._sim_device) + + return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor]) + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + return [RetargeterBase.Requirement.MOTION_CONTROLLER] + + def _extract_wrist_pose(self, controller_data: np.ndarray, default_pose: np.ndarray) -> np.ndarray: + """Extract wrist pose from controller data. 
+ + Args: + controller_data: 2D array [pose(7), inputs(7)] + default_pose: Default pose to use if no data + + Returns: + Wrist pose array [x, y, z, w, x, y, z] + """ + if len(controller_data) > DeviceBase.MotionControllerDataRowIndex.POSE.value: + return controller_data[DeviceBase.MotionControllerDataRowIndex.POSE.value] + return default_pose + + def _map_to_hand_joints(self, controller_data: np.ndarray, is_left: bool) -> np.ndarray: + """Map controller inputs to hand joint angles. + + Args: + controller_data: 2D array [pose(7), inputs(7)] + is_left: True for left hand, False for right hand + + Returns: + Hand joint angles (15 joints per hand) in radians: index(3), middle(3), ring(3), pinky(3), thumb(3). + """ + hand_joints = np.zeros(15) + + if len(controller_data) <= DeviceBase.MotionControllerDataRowIndex.INPUTS.value: + return hand_joints + + # Extract inputs from second row + inputs = controller_data[DeviceBase.MotionControllerDataRowIndex.INPUTS.value] + + if len(inputs) < len(DeviceBase.MotionControllerInputIndex): + return hand_joints + + # Extract specific inputs using enum + trigger = inputs[DeviceBase.MotionControllerInputIndex.TRIGGER.value] # 0.0 to 1.0 (analog) + squeeze = inputs[DeviceBase.MotionControllerInputIndex.SQUEEZE.value] # 0.0 to 1.0 (analog) + + # Grasping logic: + # If trigger is pressed, we grasp with index and thumb. + # If squeeze is pressed, we grasp with middle and thumb. + # If both are pressed, we grasp with index, middle, and thumb. + # The thumb rotates towards the direction of the pressing finger. + # If both are pressed, the thumb stays in the middle. 
+ + thumb_button = max(trigger, squeeze) + thumb_angle = -thumb_button + thumb_rotation = 0.5 * trigger - 0.5 * squeeze + if not is_left: + thumb_rotation = -thumb_rotation + + index_angle = trigger * 1.0 + middle_angle = squeeze * 1.0 + ring_angle = squeeze * 1.0 + pinky_angle = squeeze * 1.0 + + # arm_assembly order: index(3), middle(3), ring(3), pinky(3), thumb(3) + hand_joints[0:3] = index_angle # mcp_index, pip_index, dip_index + hand_joints[3:6] = middle_angle # mcp_middle, pip_middle, dip_middle + hand_joints[6:9] = ring_angle # mcp_ring, pip_ring, dip_ring + hand_joints[9:12] = pinky_angle # mcp_pinky, pip_pinky, dip_pinky + hand_joints[12] = thumb_rotation # cmc_thumb + hand_joints[13] = thumb_angle * 0.4 # mcp_thumb + hand_joints[14] = thumb_angle * 0.7 # ip_thumb + return hand_joints + + def _retarget_abs(self, wrist: np.ndarray, is_left: bool) -> np.ndarray: + """Handle absolute pose retargeting for controller wrists.""" + wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32) + wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32) + + # Combined -75° (rather than -90° for wrist comfort) Y rotation + 90° Z rotation + # This is equivalent to (0, -75, 90) in euler angles + combined_quat = torch.tensor([0.5358, -0.4619, 0.5358, 0.4619], dtype=torch.float32) + + openxr_pose = PoseUtils.make_pose(wrist_pos, PoseUtils.matrix_from_quat(wrist_quat)) + transform_pose = PoseUtils.make_pose(torch.zeros(3), PoseUtils.matrix_from_quat(combined_quat)) + + result_pose = PoseUtils.pose_in_A_to_pose_in_B(transform_pose, openxr_pose) + pos, rot_mat = PoseUtils.unmake_pose(result_pose) + quat = PoseUtils.quat_from_matrix(rot_mat) + + return np.concatenate([pos.numpy(), quat.numpy()]) + + +@dataclass +class WatoUpperBodyMotionControllerRetargeterCfg(RetargeterCfg): + """Configuration for the Wato arm_assembly motion controller retargeter.""" + + enable_visualization: bool = False + hand_joint_names: list[str] | None = None # List of robot hand joint names + 
retargeter_type: type[RetargeterBase] = WatoUpperBodyMotionControllerRetargeter diff --git a/autonomy/simulation/Teleop/wato_hand/wato_upper_body_retargeter.py b/autonomy/simulation/Teleop/wato_hand/wato_upper_body_retargeter.py new file mode 100644 index 0000000..9d36acb --- /dev/null +++ b/autonomy/simulation/Teleop/wato_hand/wato_upper_body_retargeter.py @@ -0,0 +1,171 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import contextlib +from dataclasses import dataclass + +import numpy as np +import torch + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as PoseUtils +from isaaclab.devices.device_base import DeviceBase +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg +from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg + +# This import exception is suppressed because wato_dex_retargeting_utils depends +# on pinocchio which is not available on Windows. +with contextlib.suppress(Exception): + from .wato_dex_retargeting_utils import WatoHandDexRetargeting + + +class WatoUpperBodyRetargeter(RetargeterBase): + """Retargets OpenXR data to Wato arm_assembly upper body commands. + + This retargeter maps hand tracking data from OpenXR to wrist and hand joint commands for the Wato arm_assembly hand. + It handles both left and right hands, converting poses of the hands in OpenXR format to appropriate wrist poses + and joint angles for the G1 robot's upper body. + """ + + def __init__( + self, + cfg: WatoUpperBodyRetargeterCfg, + ): + """Initialize the Wato upper body retargeter. + + Args: + cfg: Configuration for the retargeter. 
+ """ + super().__init__(cfg) + + # Store device name for runtime retrieval + self._sim_device = cfg.sim_device + self._hand_joint_names = cfg.hand_joint_names + + # Initialize the hands controller + if cfg.hand_joint_names is not None: + self._hands_controller = WatoHandDexRetargeting(cfg.hand_joint_names) + else: + raise ValueError("hand_joint_names must be provided in configuration") + + # Initialize visualization if enabled + self._enable_visualization = cfg.enable_visualization + self._num_open_xr_hand_joints = cfg.num_open_xr_hand_joints + if self._enable_visualization: + marker_cfg = VisualizationMarkersCfg( + prim_path="/Visuals/g1_hand_markers", + markers={ + "joint": sim_utils.SphereCfg( + radius=0.005, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)), + ), + }, + ) + self._markers = VisualizationMarkers(marker_cfg) + + def retarget(self, data: dict) -> torch.Tensor: + """Convert hand joint poses to robot end-effector commands. + + Args: + data: Dictionary mapping tracking targets to joint data dictionaries. + + Returns: + A tensor containing the retargeted commands: + - Left wrist pose (7) + - Right wrist pose (7) + - Hand joint angles (len(hand_joint_names)) + """ + + # Access the left and right hand data using the enum key + left_hand_poses = data[DeviceBase.TrackingTarget.HAND_LEFT] + right_hand_poses = data[DeviceBase.TrackingTarget.HAND_RIGHT] + + left_wrist = left_hand_poses.get("wrist") + right_wrist = right_hand_poses.get("wrist") + + # Handle case where wrist data is not available + if left_wrist is None or right_wrist is None: + # Set to default pose if no data available. 
+ # pos=(0,0,0), quat=(1,0,0,0) (w,x,y,z) + default_pose = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) + if left_wrist is None: + left_wrist = default_pose + if right_wrist is None: + right_wrist = default_pose + + # Visualization if enabled + if self._enable_visualization: + joints_position = np.zeros((self._num_open_xr_hand_joints, 3)) + joints_position[::2] = np.array([pose[:3] for pose in left_hand_poses.values()]) + joints_position[1::2] = np.array([pose[:3] for pose in right_hand_poses.values()]) + self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device)) + + # Compute retargeted hand joints + left_hands_pos = self._hands_controller.compute_left(left_hand_poses) + indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_left_joint_names()] + left_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names())) + left_retargeted_hand_joints[indexes] = left_hands_pos + left_hand_joints = left_retargeted_hand_joints + + right_hands_pos = self._hands_controller.compute_right(right_hand_poses) + indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_right_joint_names()] + right_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names())) + right_retargeted_hand_joints[indexes] = right_hands_pos + right_hand_joints = right_retargeted_hand_joints + retargeted_hand_joints = left_hand_joints + right_hand_joints + + # Convert numpy arrays to tensors and store in command buffer + left_wrist_tensor = torch.tensor( + self._retarget_abs(left_wrist, is_left=True), dtype=torch.float32, device=self._sim_device + ) + right_wrist_tensor = torch.tensor( + self._retarget_abs(right_wrist, is_left=False), dtype=torch.float32, device=self._sim_device + ) + hand_joints_tensor = torch.tensor(retargeted_hand_joints, dtype=torch.float32, device=self._sim_device) + + # Combine all tensors into a single tensor + return torch.cat([left_wrist_tensor, 
right_wrist_tensor, hand_joints_tensor]) + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + return [RetargeterBase.Requirement.HAND_TRACKING] + + def _retarget_abs(self, wrist: np.ndarray, is_left: bool) -> np.ndarray: + """Handle absolute pose retargeting. + + Args: + wrist: Wrist pose data from OpenXR. + is_left: True for the left hand, False for the right hand. + + Returns: + Retargeted wrist pose in USD control frame. + """ + wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32) + wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32) + + if is_left: + # Corresponds to a rotation of (0, 90, 90) in euler angles (x,y,z) + combined_quat = torch.tensor([0.7071, 0, 0.7071, 0], dtype=torch.float32) + else: + # Corresponds to a rotation of (0, -90, -90) in euler angles (x,y,z) + combined_quat = torch.tensor([0, -0.7071, 0, 0.7071], dtype=torch.float32) + + openxr_pose = PoseUtils.make_pose(wrist_pos, PoseUtils.matrix_from_quat(wrist_quat)) + transform_pose = PoseUtils.make_pose(torch.zeros(3), PoseUtils.matrix_from_quat(combined_quat)) + + result_pose = PoseUtils.pose_in_A_to_pose_in_B(transform_pose, openxr_pose) + pos, rot_mat = PoseUtils.unmake_pose(result_pose) + quat = PoseUtils.quat_from_matrix(rot_mat) + + return np.concatenate([pos.numpy(), quat.numpy()]) + + +@dataclass +class WatoUpperBodyRetargeterCfg(RetargeterCfg): + enable_visualization: bool = False + num_open_xr_hand_joints: int = 100 + hand_joint_names: list[str] | None = None # List of robot hand joint names + retargeter_type: type[RetargeterBase] = WatoUpperBodyRetargeter diff --git a/autonomy/simulation/Teleop/wato_lower_body_standing.py b/autonomy/simulation/Teleop/wato_lower_body_standing.py new file mode 100644 index 0000000..1b900bc --- /dev/null +++ b/autonomy/simulation/Teleop/wato_lower_body_standing.py @@ -0,0 +1,26 @@ +from __future__ import annotations +from dataclasses import dataclass +import torch +from isaaclab.devices.retargeter_base import RetargeterBase, 
RetargeterCfg + +# Current lower body retargeter just keeps the lower body at constant height and not moving + +class WatoLowerBodyStandingRetargeter(RetargeterBase): + """Provides lower body standing commands""" + + def __init__(self, cfg: WatoLowerBodyStandingRetargeterCfg): + super().__init__(cfg) + self.cfg = cfg + + def retarget(self, data: dict) -> torch.Tensor: + return torch.tensor([0.0, 0.0, 0.0, self.cfg.hip_height], device=self.cfg.sim_device) + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + # This retargeter does not consume any device data + return [] + + +@dataclass +class WatoLowerBodyStandingRetargeterCfg(RetargeterCfg): + hip_height: float = 0.72 + retargeter_type: type[RetargeterBase] = WatoLowerBodyStandingRetargeter diff --git a/autonomy/simulation/Teleop/wato_motion_controller_locomotion.py b/autonomy/simulation/Teleop/wato_motion_controller_locomotion.py new file mode 100644 index 0000000..8acfe0a --- /dev/null +++ b/autonomy/simulation/Teleop/wato_motion_controller_locomotion.py @@ -0,0 +1,87 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from dataclasses import dataclass + +import torch + +from isaaclab.devices.device_base import DeviceBase +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg +from isaaclab.sim import SimulationContext + + +class G1LowerBodyStandingMotionControllerRetargeter(RetargeterBase): + """Provides lower body standing commands for the G1 robot.""" + + def __init__(self, cfg: G1LowerBodyStandingMotionControllerRetargeterCfg): + """Initialize the retargeter.""" + super().__init__(cfg) + self.cfg = cfg + self._hip_height = cfg.hip_height + + def retarget(self, data: dict) -> torch.Tensor: + left_thumbstick_x = 0.0 + left_thumbstick_y = 0.0 + right_thumbstick_x = 0.0 + right_thumbstick_y = 0.0 + + # Get controller data using enums + if ( + DeviceBase.TrackingTarget.CONTROLLER_LEFT in data + and data[DeviceBase.TrackingTarget.CONTROLLER_LEFT] is not None + ): + left_controller_data = data[DeviceBase.TrackingTarget.CONTROLLER_LEFT] + if len(left_controller_data) > DeviceBase.MotionControllerDataRowIndex.INPUTS.value: + left_inputs = left_controller_data[DeviceBase.MotionControllerDataRowIndex.INPUTS.value] + if len(left_inputs) > DeviceBase.MotionControllerInputIndex.THUMBSTICK_Y.value: + left_thumbstick_x = left_inputs[DeviceBase.MotionControllerInputIndex.THUMBSTICK_X.value] + left_thumbstick_y = left_inputs[DeviceBase.MotionControllerInputIndex.THUMBSTICK_Y.value] + + if ( + DeviceBase.TrackingTarget.CONTROLLER_RIGHT in data + and data[DeviceBase.TrackingTarget.CONTROLLER_RIGHT] is not None + ): + right_controller_data = data[DeviceBase.TrackingTarget.CONTROLLER_RIGHT] + if len(right_controller_data) > DeviceBase.MotionControllerDataRowIndex.INPUTS.value: + right_inputs = right_controller_data[DeviceBase.MotionControllerDataRowIndex.INPUTS.value] + if len(right_inputs) > DeviceBase.MotionControllerInputIndex.THUMBSTICK_Y.value: + right_thumbstick_x = 
right_inputs[DeviceBase.MotionControllerInputIndex.THUMBSTICK_X.value] + right_thumbstick_y = right_inputs[DeviceBase.MotionControllerInputIndex.THUMBSTICK_Y.value] + + # Thumbstick values are in the range of [-1, 1], so we need to scale them to the range of + # [-movement_scale, movement_scale] + left_thumbstick_x = left_thumbstick_x * self.cfg.movement_scale + left_thumbstick_y = left_thumbstick_y * self.cfg.movement_scale + + # Use rendering time step for deterministic hip height adjustment regardless of wall clock time. + dt = SimulationContext.instance().get_rendering_dt() + self._hip_height -= right_thumbstick_y * dt * self.cfg.rotation_scale + self._hip_height = max(0.4, min(1.0, self._hip_height)) + + return torch.tensor( + [-left_thumbstick_y, -left_thumbstick_x, -right_thumbstick_x, self._hip_height], + device=self.cfg.sim_device, + dtype=torch.float32, + ) + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + return [RetargeterBase.Requirement.MOTION_CONTROLLER] + + +@dataclass +class G1LowerBodyStandingMotionControllerRetargeterCfg(RetargeterCfg): + """Configuration for the G1 lower body standing retargeter.""" + + hip_height: float = 0.72 + """Height of the G1 robot hip in meters. The value is a fixed height suitable for G1 to do tabletop manipulation.""" + + movement_scale: float = 0.5 + """Scale the movement of the robot to the range of [-movement_scale, movement_scale].""" + + rotation_scale: float = 0.35 + """Scale the rotation of the robot to the range of [-rotation_scale, rotation_scale].""" + retargeter_type: type[RetargeterBase] = G1LowerBodyStandingMotionControllerRetargeter