Copied the OpenXR portion of the IsaacLab source code; the bak files implement GR1T2 arm control without the dexterous hands.

This commit is contained in:
2026-04-21 11:23:35 +08:00
parent ca763363e9
commit f68e6bca46
22 changed files with 3203 additions and 2 deletions

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,364 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to run teleoperation with Isaac Lab manipulation environments.
Supports multiple input devices (e.g., keyboard, spacemouse, gamepad) and devices
configured within the environment (including OpenXR-based hand tracking or motion
controllers)."""
"""Launch Isaac Sim Simulator first."""

import argparse
from collections.abc import Callable

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(
    description="Teleoperation for Isaac Lab environments."
)
parser.add_argument(
    "--num_envs", type=int, default=1, help="Number of environments to simulate."
)
parser.add_argument(
    "--teleop_device",
    type=str,
    default="keyboard",
    help=(
        "Teleop device. Set here (legacy) or via the environment config. If using the environment config, pass the"
        " device key/name defined under 'teleop_devices' (it can be a custom name, not necessarily 'handtracking')."
        " Built-ins: keyboard, spacemouse, gamepad. Not all tasks support all built-ins."
    ),
)
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument(
    "--sensitivity", type=float, default=1.0, help="Sensitivity factor."
)
parser.add_argument(
    "--enable_pinocchio",
    action="store_true",
    default=False,
    help="Enable Pinocchio.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
app_launcher_args = vars(args_cli)
if args_cli.enable_pinocchio:
    # Import pinocchio before AppLauncher to force the use of the version installed by IsaacLab and
    # not the one installed by Isaac Sim. pinocchio is required by the Pink IK controllers and the
    # GR1T2 retargeter.
    import pinocchio  # noqa: F401
if "handtracking" in args_cli.teleop_device.lower():
    # OpenXR-based hand tracking requires the app to be launched with the XR extension enabled.
    app_launcher_args["xr"] = True
# launch omniverse app
app_launcher = AppLauncher(app_launcher_args)
simulation_app = app_launcher.app
"""Rest everything follows."""
import logging
import gymnasium as gym
import torch
from isaaclab.devices import (
Se3Gamepad,
Se3GamepadCfg,
Se3Keyboard,
Se3KeyboardCfg,
Se3SpaceMouse,
Se3SpaceMouseCfg,
)
from isaaclab.devices.openxr import remove_camera_configs
from isaaclab.devices.teleop_device_factory import create_teleop_device
from isaaclab.envs import ManagerBasedRLEnvCfg
from isaaclab.managers import TerminationTermCfg as DoneTerm
# import isaaclab_tasks # noqa: F401
import mindbot.tasks # noqa: F401
from isaaclab_tasks.manager_based.manipulation.lift import mdp
from isaaclab_tasks.utils import parse_env_cfg
if args_cli.enable_pinocchio:
# import isaaclab_tasks.manager_based.locomanipulation.pick_place # noqa: F401
# import isaaclab_tasks.manager_based.manipulation.pick_place # noqa: F401
import mindbot.tasks.manager_based.il.pick_place
# import logger
logger = logging.getLogger(__name__)
def main() -> None:
    """Run teleoperation with an Isaac Lab manipulation environment.

    Creates the environment, sets up teleoperation interfaces and callbacks,
    and runs the main simulation loop until the application is closed.

    Returns:
        None
    """
    # Local import: only needed for error reporting in the simulation loop.
    import traceback

    # parse configuration
    env_cfg = parse_env_cfg(
        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs
    )
    env_cfg.env_name = args_cli.task
    if not isinstance(env_cfg, ManagerBasedRLEnvCfg):
        raise ValueError(
            "Teleoperation is only supported for ManagerBasedRLEnv environments. "
            f"Received environment config type: {type(env_cfg).__name__}"
        )
    # modify configuration: teleop sessions must not terminate on time-out
    env_cfg.terminations.time_out = None
    if "Lift" in args_cli.task:
        # set the resampling time range to large number to avoid resampling
        env_cfg.commands.object_pose.resampling_time_range = (1.0e9, 1.0e9)
        # add termination condition for reaching the goal otherwise the environment won't reset
        env_cfg.terminations.object_reached_goal = DoneTerm(func=mdp.object_reached_goal)
    if args_cli.xr:
        # cameras are not supported with XR rendering; DLSS keeps the headset frame rate up
        env_cfg = remove_camera_configs(env_cfg)
        env_cfg.sim.render.antialiasing_mode = "DLSS"
    try:
        # create environment
        env = gym.make(args_cli.task, cfg=env_cfg).unwrapped
        # check environment name (for reach, we don't allow the gripper)
        if "Reach" in args_cli.task:
            logger.warning(
                f"The environment '{args_cli.task}' does not support gripper control. The device command will be"
                " ignored."
            )
    except Exception as e:
        logger.error(f"Failed to create environment: {e}")
        simulation_app.close()
        return

    # Flags for controlling teleoperation flow
    should_reset_recording_instance = False
    teleoperation_active = True

    # Callback handlers
    def reset_recording_instance() -> None:
        """Request an environment reset on the next simulation step."""
        nonlocal should_reset_recording_instance
        should_reset_recording_instance = True
        print("Reset triggered - Environment will reset on next step")

    def start_teleoperation() -> None:
        """Enable application of teleoperation commands to the environment."""
        nonlocal teleoperation_active
        teleoperation_active = True
        print("Teleoperation activated")

    def stop_teleoperation() -> None:
        """Disable application of teleoperation commands to the environment."""
        nonlocal teleoperation_active
        teleoperation_active = False
        print("Teleoperation deactivated")

    # Map device event names to the handlers above.
    teleoperation_callbacks: dict[str, Callable[[], None]] = {
        "R": reset_recording_instance,
        "START": start_teleoperation,
        "STOP": stop_teleoperation,
        "RESET": reset_recording_instance,
    }
    if args_cli.xr:
        # Default to inactive for hand tracking; the operator opts in via START.
        teleoperation_active = False
    else:
        # Always active for other devices
        teleoperation_active = True

    # Create teleop device from config if present, otherwise create manually
    teleop_interface = None
    try:
        if (
            hasattr(env_cfg, "teleop_devices")
            and args_cli.teleop_device in env_cfg.teleop_devices.devices
        ):
            teleop_interface = create_teleop_device(
                args_cli.teleop_device,
                env_cfg.teleop_devices.devices,
                teleoperation_callbacks,
            )
        else:
            logger.warning(
                f"No teleop device '{args_cli.teleop_device}' found in environment config. Creating default."
            )
            # Create fallback teleop device
            sensitivity = args_cli.sensitivity
            device_name = args_cli.teleop_device.lower()
            if device_name == "keyboard":
                teleop_interface = Se3Keyboard(
                    Se3KeyboardCfg(
                        pos_sensitivity=0.05 * sensitivity,
                        rot_sensitivity=0.05 * sensitivity,
                    )
                )
            elif device_name == "spacemouse":
                teleop_interface = Se3SpaceMouse(
                    Se3SpaceMouseCfg(
                        pos_sensitivity=0.05 * sensitivity,
                        rot_sensitivity=0.05 * sensitivity,
                    )
                )
            elif device_name == "gamepad":
                teleop_interface = Se3Gamepad(
                    Se3GamepadCfg(
                        pos_sensitivity=0.1 * sensitivity,
                        rot_sensitivity=0.1 * sensitivity,
                    )
                )
            else:
                logger.error(f"Unsupported teleop device: {args_cli.teleop_device}")
                logger.error("Configure the teleop device in the environment config.")
                env.close()
                simulation_app.close()
                return
            # Add callbacks to fallback device (factory-created devices get them above)
            for key, callback in teleoperation_callbacks.items():
                try:
                    teleop_interface.add_callback(key, callback)
                except (ValueError, TypeError) as e:
                    logger.warning(f"Failed to add callback for key {key}: {e}")
    except Exception as e:
        logger.error(f"Failed to create teleop device: {e}")
        env.close()
        simulation_app.close()
        return
    if teleop_interface is None:
        logger.error("Failed to create teleop interface")
        env.close()
        simulation_app.close()
        return
    print(f"Using teleop device: {teleop_interface}")

    # reset environment and device before entering the loop
    env.reset()
    teleop_interface.reset()
    print("Teleoperation started. Press 'R' to reset the environment.")

    # simulate environment
    while simulation_app.is_running():
        try:
            # run everything in inference mode
            with torch.inference_mode():
                # get device command
                action = teleop_interface.advance()
                # Only apply teleop commands when active
                if teleoperation_active:
                    # broadcast the single device command to all environments
                    actions = action.repeat(env.num_envs, 1)
                    # zero-pad the action when the device emits fewer dims than the
                    # action space expects (e.g. held joints with scale=0)
                    expected_dim = env.single_action_space.shape[0]
                    if actions.shape[1] < expected_dim:
                        pad = torch.zeros(
                            env.num_envs, expected_dim - actions.shape[1], device=actions.device
                        )
                        actions = torch.cat([actions, pad], dim=1)
                    env.step(actions)
                else:
                    # keep rendering so the viewport stays responsive while paused
                    env.sim.render()
                if should_reset_recording_instance:
                    env.reset()
                    teleop_interface.reset()
                    should_reset_recording_instance = False
                    print("Environment reset complete")
        except Exception as e:
            traceback.print_exc()
            logger.error(f"Error during simulation step: {e}")
            break

    # close the simulator
    env.close()
    print("Environment closed")
if __name__ == "__main__":
    # run the main function
    main()
    # close sim app after the teleop session ends
    simulation_app.close()

View File

@@ -0,0 +1,25 @@
retargeting:
  # Fingertip links of the GR1T2 left hand used as DexPilot task-space targets.
  finger_tip_link_names:
    - GR1T2_fourier_hand_6dof_L_thumb_distal_link
    - GR1T2_fourier_hand_6dof_L_index_intermediate_link
    - GR1T2_fourier_hand_6dof_L_middle_intermediate_link
    - GR1T2_fourier_hand_6dof_L_ring_intermediate_link
    - GR1T2_fourier_hand_6dof_L_pinky_intermediate_link
  # Low-pass filter coefficient for smoothing retargeted joint angles.
  low_pass_alpha: 0.2
  # Scale from human hand size to robot hand size.
  scaling_factor: 1.2
  # Robot joints driven by the optimizer. Fixed here: 'L_thumb_distal_joint'
  # was listed twice, while the right-hand config lists it once (11 joints).
  target_joint_names:
    - L_index_proximal_joint
    - L_middle_proximal_joint
    - L_pinky_proximal_joint
    - L_ring_proximal_joint
    - L_index_intermediate_joint
    - L_middle_intermediate_joint
    - L_pinky_intermediate_joint
    - L_ring_intermediate_joint
    - L_thumb_proximal_yaw_joint
    - L_thumb_proximal_pitch_joint
    - L_thumb_distal_joint
  type: DexPilot
  # Placeholder; overwritten at runtime by GR1TR2DexRetargeting._update_yaml_with_urdf_path.
  urdf_path: /tmp/GR1_T2_left_hand.urdf
  wrist_link_name: l_hand_base_link

View File

@@ -0,0 +1,24 @@
retargeting:
  # Fingertip links of the GR1T2 right hand used as DexPilot task-space targets.
  finger_tip_link_names:
    - GR1T2_fourier_hand_6dof_R_thumb_distal_link
    - GR1T2_fourier_hand_6dof_R_index_intermediate_link
    - GR1T2_fourier_hand_6dof_R_middle_intermediate_link
    - GR1T2_fourier_hand_6dof_R_ring_intermediate_link
    - GR1T2_fourier_hand_6dof_R_pinky_intermediate_link
  # Low-pass filter coefficient for smoothing retargeted joint angles.
  low_pass_alpha: 0.2
  # Scale from human hand size to robot hand size.
  scaling_factor: 1.2
  # Robot joints driven by the optimizer (11 DOF).
  target_joint_names:
    - R_index_proximal_joint
    - R_middle_proximal_joint
    - R_pinky_proximal_joint
    - R_ring_proximal_joint
    - R_index_intermediate_joint
    - R_middle_intermediate_joint
    - R_pinky_intermediate_joint
    - R_ring_intermediate_joint
    - R_thumb_proximal_yaw_joint
    - R_thumb_proximal_pitch_joint
    - R_thumb_distal_joint
  type: DexPilot
  # Placeholder; overwritten at runtime by GR1TR2DexRetargeting._update_yaml_with_urdf_path.
  urdf_path: /tmp/GR1_T2_right_hand.urdf
  wrist_link_name: r_hand_base_link

View File

@@ -0,0 +1,262 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import os
import numpy as np
import torch
import yaml
from dex_retargeting.retargeting_config import RetargetingConfig
from scipy.spatial.transform import Rotation as R
from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path
# import logger
logger = logging.getLogger(__name__)
# The index to map the OpenXR hand joints to the hand joints used
# in Dex-retargeting. Keeps 21 of the 26 OpenXR joints; indices 0, 6, 11, 16
# and 21 are skipped.
# NOTE(review): assumes the incoming pose dict preserves the OpenXR joint
# enumeration order — confirm against the OpenXR hand-tracking spec.
_HAND_JOINTS_INDEX = [1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 20, 22, 23, 24, 25]
# The transformation matrices to convert hand pose to canonical view.
# Applied after the wrist rotation to express joint positions in the MANO
# canonical frame expected by dex-retargeting (same matrix for both hands here).
_OPERATOR2MANO_RIGHT = np.array(
    [
        [0, -1, 0],
        [-1, 0, 0],
        [0, 0, -1],
    ]
)
_OPERATOR2MANO_LEFT = np.array(
    [
        [0, -1, 0],
        [-1, 0, 0],
        [0, 0, -1],
    ]
)
# Actuated joint names of the GR1T2 left hand (11 DOF); used to size the
# zero fallback when no left-hand pose is available.
_LEFT_HAND_JOINT_NAMES = [
    "L_index_proximal_joint",
    "L_index_intermediate_joint",
    "L_middle_proximal_joint",
    "L_middle_intermediate_joint",
    "L_pinky_proximal_joint",
    "L_pinky_intermediate_joint",
    "L_ring_proximal_joint",
    "L_ring_intermediate_joint",
    "L_thumb_proximal_yaw_joint",
    "L_thumb_proximal_pitch_joint",
    "L_thumb_distal_joint",
]
# Actuated joint names of the GR1T2 right hand (11 DOF); mirrors the left list.
_RIGHT_HAND_JOINT_NAMES = [
    "R_index_proximal_joint",
    "R_index_intermediate_joint",
    "R_middle_proximal_joint",
    "R_middle_intermediate_joint",
    "R_pinky_proximal_joint",
    "R_pinky_intermediate_joint",
    "R_ring_proximal_joint",
    "R_ring_intermediate_joint",
    "R_thumb_proximal_yaw_joint",
    "R_thumb_proximal_pitch_joint",
    "R_thumb_distal_joint",
]
class GR1TR2DexRetargeting:
    """A class for hand retargeting with GR1Fourier.

    Handles retargeting of OpenXR hand tracking data to GR1T2 robot hand joint
    angles using one dex-retargeting optimizer per hand.
    """

    def __init__(
        self,
        hand_joint_names: list[str],
        right_hand_config_filename: str = "fourier_hand_right_dexpilot.yml",
        left_hand_config_filename: str = "fourier_hand_left_dexpilot.yml",
        left_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Mimic/GR1T2_assets/GR1_T2_left_hand.urdf",
        right_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Mimic/GR1T2_assets/GR1_T2_right_hand.urdf",
    ):
        """Initialize the hand retargeting.

        Args:
            hand_joint_names: Names of hand joints in the robot model
            right_hand_config_filename: Config file for right hand retargeting
            left_hand_config_filename: Config file for left hand retargeting
            left_hand_urdf_path: URDF for the left hand; remote paths are downloaded
            right_hand_urdf_path: URDF for the right hand; remote paths are downloaded
        """
        data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/"))
        config_dir = os.path.join(data_dir, "configs/dex-retargeting")
        # Download urdf files from aws
        local_left_urdf_path = retrieve_file_path(left_hand_urdf_path, force_download=True)
        local_right_urdf_path = retrieve_file_path(right_hand_urdf_path, force_download=True)
        left_config_path = os.path.join(config_dir, left_hand_config_filename)
        right_config_path = os.path.join(config_dir, right_hand_config_filename)
        # Update the YAML files with the correct URDF paths.
        # NOTE(review): this rewrites the packaged config files in place on every init.
        self._update_yaml_with_urdf_path(left_config_path, local_left_urdf_path)
        self._update_yaml_with_urdf_path(right_config_path, local_right_urdf_path)
        self._dex_left_hand = RetargetingConfig.load_from_file(left_config_path).build()
        self._dex_right_hand = RetargetingConfig.load_from_file(right_config_path).build()
        # Joint name orderings as used by the per-hand optimizers.
        self.left_dof_names = self._dex_left_hand.optimizer.robot.dof_joint_names
        self.right_dof_names = self._dex_right_hand.optimizer.robot.dof_joint_names
        self.dof_names = self.left_dof_names + self.right_dof_names
        self.isaac_lab_hand_joint_names = hand_joint_names
        logger.info("[GR1T2DexRetargeter] init done.")

    def _update_yaml_with_urdf_path(self, yaml_path: str, urdf_path: str):
        """Update YAML file with the correct URDF path.

        Args:
            yaml_path: Path to the YAML configuration file
            urdf_path: Path to the URDF file to use
        """
        try:
            # Read the YAML file
            with open(yaml_path) as file:
                config = yaml.safe_load(file)
            # Update the URDF path in the configuration
            if "retargeting" in config:
                config["retargeting"]["urdf_path"] = urdf_path
                logger.info(f"Updated URDF path in {yaml_path} to {urdf_path}")
            else:
                logger.warning(f"Unable to find 'retargeting' section in {yaml_path}")
            # Write the updated configuration back to the file
            with open(yaml_path, "w") as file:
                yaml.dump(config, file)
        except Exception as e:
            # Best-effort: a failed update is logged and retargeting continues
            # with whatever urdf_path the config already contains.
            logger.error(f"Error updating YAML file {yaml_path}: {e}")

    def convert_hand_joints(self, hand_poses: dict[str, np.ndarray], operator2mano: np.ndarray) -> np.ndarray:
        """Prepares the hand joints data for retargeting.

        Args:
            hand_poses: Dictionary containing hand pose data with joint positions and rotations
            operator2mano: Transformation matrix to convert from operator to MANO frame

        Returns:
            Joint positions with shape (21, 3)
        """
        joint_position = np.zeros((21, 3))
        # Select the 21 joints used by dex-retargeting from the OpenXR set.
        hand_joints = list(hand_poses.values())
        for i in range(len(_HAND_JOINTS_INDEX)):
            joint = hand_joints[_HAND_JOINTS_INDEX[i]]
            joint_position[i] = joint[:3]
        # Convert hand pose to the canonical frame: translate so the wrist
        # (row 0) is the origin, then rotate into the MANO convention.
        joint_position = joint_position - joint_position[0:1, :]
        xr_wrist_quat = hand_poses.get("wrist")[3:]
        # OpenXR hand uses w,x,y,z order for quaternions but scipy uses x,y,z,w order
        wrist_rot = R.from_quat([xr_wrist_quat[1], xr_wrist_quat[2], xr_wrist_quat[3], xr_wrist_quat[0]]).as_matrix()
        return joint_position @ wrist_rot @ operator2mano

    def compute_ref_value(self, joint_position: np.ndarray, indices: np.ndarray, retargeting_type: str) -> np.ndarray:
        """Computes reference value for retargeting.

        Args:
            joint_position: Joint positions array
            indices: Target link indices
            retargeting_type: Type of retargeting ("POSITION" or other)

        Returns:
            Reference value in cartesian space
        """
        if retargeting_type == "POSITION":
            # Absolute joint positions of the target links.
            return joint_position[indices, :]
        else:
            # Vector (origin -> task) formulation used by e.g. DexPilot.
            origin_indices = indices[0, :]
            task_indices = indices[1, :]
            ref_value = joint_position[task_indices, :] - joint_position[origin_indices, :]
            return ref_value

    def compute_one_hand(
        self, hand_joints: dict[str, np.ndarray], retargeting: RetargetingConfig, operator2mano: np.ndarray
    ) -> np.ndarray:
        """Computes retargeted joint angles for one hand.

        Args:
            hand_joints: Dictionary containing hand joint data
            retargeting: Retargeting configuration object
            operator2mano: Transformation matrix from operator to MANO frame

        Returns:
            Retargeted joint angles
        """
        joint_pos = self.convert_hand_joints(hand_joints, operator2mano)
        ref_value = self.compute_ref_value(
            joint_pos,
            indices=retargeting.optimizer.target_link_human_indices,
            retargeting_type=retargeting.optimizer.retargeting_type,
        )
        # Enable gradient calculation and inference mode in case some other script has disabled it
        # This is necessary for the retargeting to work since it uses gradient features that
        # are not available in inference mode
        with torch.enable_grad():
            with torch.inference_mode(False):
                return retargeting.retarget(ref_value)

    def get_joint_names(self) -> list[str]:
        """Returns list of all joint names (left followed by right)."""
        return self.dof_names

    def get_left_joint_names(self) -> list[str]:
        """Returns list of left hand joint names."""
        return self.left_dof_names

    def get_right_joint_names(self) -> list[str]:
        """Returns list of right hand joint names."""
        return self.right_dof_names

    def get_hand_indices(self, robot) -> np.ndarray:
        """Gets indices of hand joints in robot's DOF array.

        Args:
            robot: Robot object containing DOF information

        Returns:
            Array of joint indices
        """
        return np.array([robot.dof_names.index(name) for name in self.dof_names], dtype=np.int64)

    def compute_left(self, left_hand_poses: dict[str, np.ndarray]) -> np.ndarray:
        """Computes retargeted joints for left hand.

        Args:
            left_hand_poses: Dictionary of left hand joint poses

        Returns:
            Retargeted joint angles for left hand; zeros when no pose is available
        """
        if left_hand_poses is not None:
            left_hand_q = self.compute_one_hand(left_hand_poses, self._dex_left_hand, _OPERATOR2MANO_LEFT)
        else:
            left_hand_q = np.zeros(len(_LEFT_HAND_JOINT_NAMES))
        return left_hand_q

    def compute_right(self, right_hand_poses: dict[str, np.ndarray]) -> np.ndarray:
        """Computes retargeted joints for right hand.

        Args:
            right_hand_poses: Dictionary of right hand joint poses

        Returns:
            Retargeted joint angles for right hand; zeros when no pose is available
        """
        if right_hand_poses is not None:
            right_hand_q = self.compute_one_hand(right_hand_poses, self._dex_right_hand, _OPERATOR2MANO_RIGHT)
        else:
            right_hand_q = np.zeros(len(_RIGHT_HAND_JOINT_NAMES))
        return right_hand_q

View File

@@ -0,0 +1,340 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import contextlib
from dataclasses import dataclass, field
import numpy as np
import torch
import isaaclab.sim as sim_utils
import isaaclab.utils.math as PoseUtils
from isaaclab.devices.device_base import DeviceBase
from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg
from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg
with contextlib.suppress(Exception):
from .gr1_t2_dex_retargeting_utils import GR1TR2DexRetargeting
class GR1T2Retargeter(RetargeterBase):
    """Retargets OpenXR hand tracking data to GR1T2 commands.

    Output layout is ``[left_wrist(7), right_wrist(7), hand_joints(N)]``; the
    hand-joint segment is present only when ``hand_joint_names`` is configured,
    which allows arm-only (no dexterous hand) control.
    """

    def __init__(self, cfg: GR1T2RetargeterCfg):
        """Initialize the retargeter from its configuration.

        Args:
            cfg: Retargeter configuration (see :class:`GR1T2RetargeterCfg`).
        """
        super().__init__(cfg)
        self._hand_joint_names = cfg.hand_joint_names
        self._enable_visualization = cfg.enable_visualization
        self._num_open_xr_hand_joints = cfg.num_open_xr_hand_joints
        self._sim_device = cfg.sim_device
        # Core change: the hand controller is only created when hand joint
        # names are provided; otherwise hand control is disabled entirely.
        if self._hand_joint_names:
            self._enable_hand_control = True
            self._hands_controller = GR1TR2DexRetargeting(self._hand_joint_names)
            self._num_hand_joints = len(self._hand_joint_names)
        else:
            self._enable_hand_control = False
            self._hands_controller = None
            self._num_hand_joints = 0
        if self._enable_visualization:
            # Small red spheres mark every tracked OpenXR joint.
            marker_cfg = VisualizationMarkersCfg(
                prim_path="/Visuals/markers",
                markers={
                    "joint": sim_utils.SphereCfg(
                        radius=0.005,
                        visual_material=sim_utils.PreviewSurfaceCfg(
                            diffuse_color=(1.0, 0.0, 0.0)
                        ),
                    ),
                },
            )
            self._markers = VisualizationMarkers(marker_cfg)

    def retarget(self, data: dict) -> torch.Tensor:
        """Convert hand joint poses to robot end-effector commands.

        Args:
            data: Mapping from tracking targets to per-joint pose dictionaries.

        Returns:
            Flat tensor: left wrist pose, right wrist pose (USD frame), and —
            when hand control is enabled — the retargeted hand joint angles.
        """
        left_hand_poses = data[DeviceBase.TrackingTarget.HAND_LEFT]
        right_hand_poses = data[DeviceBase.TrackingTarget.HAND_RIGHT]
        left_wrist = left_hand_poses.get("wrist")
        right_wrist = right_hand_poses.get("wrist")
        # Visualization is independent of the hand-control switch.
        if self._enable_visualization:
            joints_position = np.zeros((self._num_open_xr_hand_joints, 3))
            # Interleave left (even rows) and right (odd rows) joint positions.
            joints_position[::2] = np.array(
                [pose[:3] for pose in left_hand_poses.values()]
            )
            joints_position[1::2] = np.array(
                [pose[:3] for pose in right_hand_poses.values()]
            )
            self._markers.visualize(
                translations=torch.tensor(joints_position, device=self._sim_device)
            )
        # Hand-joint processing branch.
        if self._enable_hand_control:
            # Original hand retargeting logic (unchanged): compute per-hand
            # joint angles and scatter them into the combined joint vector.
            left_hands_pos = self._hands_controller.compute_left(left_hand_poses)
            indexes = [
                self._hand_joint_names.index(name)
                for name in self._hands_controller.get_left_joint_names()
            ]
            left_retargeted_hand_joints = np.zeros(
                len(self._hands_controller.get_joint_names())
            )
            left_retargeted_hand_joints[indexes] = left_hands_pos
            right_hands_pos = self._hands_controller.compute_right(right_hand_poses)
            indexes = [
                self._hand_joint_names.index(name)
                for name in self._hands_controller.get_right_joint_names()
            ]
            right_retargeted_hand_joints = np.zeros(
                len(self._hands_controller.get_joint_names())
            )
            right_retargeted_hand_joints[indexes] = right_hands_pos
            # Left and right fill disjoint indices, so summing merges them.
            retargeted_hand_joints = left_retargeted_hand_joints + right_retargeted_hand_joints
            hand_joints_tensor = torch.tensor(
                retargeted_hand_joints,
                dtype=torch.float32,
                device=self._sim_device
            )
        else:
            # Hand control disabled: emit an empty tensor with no hand-joint data.
            hand_joints_tensor = torch.zeros(0, dtype=torch.float32, device=self._sim_device)
        # Wrist tensors (unaffected by the hand-control switch).
        # NOTE(review): only the right wrist goes through _retarget_abs; the left
        # wrist passes through in the OpenXR frame — confirm this is intended.
        left_wrist_tensor = torch.tensor(
            left_wrist,
            dtype=torch.float32,
            device=self._sim_device
        )
        right_wrist_tensor = torch.tensor(
            self._retarget_abs(right_wrist),
            dtype=torch.float32,
            device=self._sim_device
        )
        # Output structure:
        #   hand_joint_names set   -> [left_wrist(7), right_wrist(7), hand_joints(22)] = 36 dims
        #   hand_joint_names empty -> [left_wrist(7), right_wrist(7)] = 14 dims
        return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor])

    def get_requirements(self) -> list[RetargeterBase.Requirement]:
        """Declare that this retargeter consumes hand-tracking data."""
        return [RetargeterBase.Requirement.HAND_TRACKING]

    def _retarget_abs(self, wrist: np.ndarray) -> np.ndarray:
        """Handle absolute pose retargeting.

        Converts the OpenXR wrist pose to the USD control frame by composing the
        wrist pose with a fixed 180-degree rotation about the z axis.

        Args:
            wrist: Wrist pose data from OpenXR (position followed by a
                w,x,y,z quaternion).

        Returns:
            Retargeted wrist pose (position + quaternion) in the USD control frame.
        """
        wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32)
        wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32)
        openxr_right_wrist_in_world = PoseUtils.make_pose(
            wrist_pos, PoseUtils.matrix_from_quat(wrist_quat)
        )
        zero_pos = torch.zeros(3, dtype=torch.float32)
        # 180-degree rotation around z (quaternion in w,x,y,z order: 0,0,0,1).
        z_axis_rot_quat = torch.tensor([0, 0, 0, 1], dtype=torch.float32)
        usd_right_roll_link_in_openxr_right_wrist = PoseUtils.make_pose(
            zero_pos, PoseUtils.matrix_from_quat(z_axis_rot_quat)
        )
        # Compose: fixed offset expressed in the wrist frame -> world frame.
        usd_right_roll_link_in_world = PoseUtils.pose_in_A_to_pose_in_B(
            usd_right_roll_link_in_openxr_right_wrist,
            openxr_right_wrist_in_world
        )
        usd_right_roll_link_in_world_pos, usd_right_roll_link_in_world_mat = \
            PoseUtils.unmake_pose(usd_right_roll_link_in_world)
        usd_right_roll_link_in_world_quat = PoseUtils.quat_from_matrix(
            usd_right_roll_link_in_world_mat
        )
        return np.concatenate([
            usd_right_roll_link_in_world_pos,
            usd_right_roll_link_in_world_quat
        ])
@dataclass
class GR1T2RetargeterCfg(RetargeterCfg):
    """Configuration for the GR1T2 retargeter."""

    # If True, tracked hand joints are visualized as small spheres.
    enable_visualization: bool = False
    # Total number of OpenXR joints across both hands used for visualization.
    num_open_xr_hand_joints: int = 100
    # Robot hand joint names. Defaults to None: when empty/None, hand control is
    # disabled and the retargeter outputs only the two wrist poses.
    hand_joint_names: list[str] | None = None
    # Device on which output tensors are created.
    sim_device: str = "cpu"
    # Retargeter class instantiated from this config.
    retargeter_type: type[RetargeterBase] = GR1T2Retargeter
# # Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# # All rights reserved.
# #
# # SPDX-License-Identifier: BSD-3-Clause
# from __future__ import annotations
# import contextlib
# from dataclasses import dataclass
# import numpy as np
# import torch
# import isaaclab.sim as sim_utils
# import isaaclab.utils.math as PoseUtils
# from isaaclab.devices.device_base import DeviceBase
# from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg
# from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg
# # This import exception is suppressed because gr1_t2_dex_retargeting_utils depends
# # on pinocchio which is not available on Windows.
# with contextlib.suppress(Exception):
# from .gr1_t2_dex_retargeting_utils import GR1TR2DexRetargeting
# class GR1T2Retargeter(RetargeterBase):
# """Retargets OpenXR hand tracking data to GR1T2 hand end-effector commands.
# This retargeter maps hand tracking data from OpenXR to joint commands for the GR1T2 robot's hands.
# It handles both left and right hands, converting poses of the hands in OpenXR format joint angles
# for the GR1T2 robot's hands.
# """
# def __init__(
# self,
# cfg: GR1T2RetargeterCfg,
# ):
# """Initialize the GR1T2 hand retargeter.
# Args:
# enable_visualization: If True, visualize tracked hand joints
# num_open_xr_hand_joints: Number of joints tracked by OpenXR
# device: PyTorch device for computations
# hand_joint_names: List of robot hand joint names
# """
# super().__init__(cfg)
# self._hand_joint_names = cfg.hand_joint_names
# self._hands_controller = GR1TR2DexRetargeting(self._hand_joint_names)
# # Initialize visualization if enabled
# self._enable_visualization = cfg.enable_visualization
# self._num_open_xr_hand_joints = cfg.num_open_xr_hand_joints
# self._sim_device = cfg.sim_device
# if self._enable_visualization:
# marker_cfg = VisualizationMarkersCfg(
# prim_path="/Visuals/markers",
# markers={
# "joint": sim_utils.SphereCfg(
# radius=0.005,
# visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
# ),
# },
# )
# self._markers = VisualizationMarkers(marker_cfg)
# def retarget(self, data: dict) -> torch.Tensor:
# """Convert hand joint poses to robot end-effector commands.
# Args:
# data: Dictionary mapping tracking targets to joint data dictionaries.
# Returns:
# tuple containing:
# Left wrist pose
# Right wrist pose in USD frame
# Retargeted hand joint angles
# """
# # Access the left and right hand data using the enum key
# left_hand_poses = data[DeviceBase.TrackingTarget.HAND_LEFT]
# right_hand_poses = data[DeviceBase.TrackingTarget.HAND_RIGHT]
# left_wrist = left_hand_poses.get("wrist")
# right_wrist = right_hand_poses.get("wrist")
# if self._enable_visualization:
# joints_position = np.zeros((self._num_open_xr_hand_joints, 3))
# joints_position[::2] = np.array([pose[:3] for pose in left_hand_poses.values()])
# joints_position[1::2] = np.array([pose[:3] for pose in right_hand_poses.values()])
# self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device))
# # Create array of zeros with length matching number of joint names
# left_hands_pos = self._hands_controller.compute_left(left_hand_poses)
# indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_left_joint_names()]
# left_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names()))
# left_retargeted_hand_joints[indexes] = left_hands_pos
# left_hand_joints = left_retargeted_hand_joints
# right_hands_pos = self._hands_controller.compute_right(right_hand_poses)
# indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_right_joint_names()]
# right_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names()))
# right_retargeted_hand_joints[indexes] = right_hands_pos
# right_hand_joints = right_retargeted_hand_joints
# retargeted_hand_joints = left_hand_joints + right_hand_joints
# # Convert numpy arrays to tensors and concatenate them
# left_wrist_tensor = torch.tensor(left_wrist, dtype=torch.float32, device=self._sim_device)
# right_wrist_tensor = torch.tensor(self._retarget_abs(right_wrist), dtype=torch.float32, device=self._sim_device)
# hand_joints_tensor = torch.tensor(retargeted_hand_joints, dtype=torch.float32, device=self._sim_device)
# # Combine all tensors into a single tensor
# return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor])
# def get_requirements(self) -> list[RetargeterBase.Requirement]:
# return [RetargeterBase.Requirement.HAND_TRACKING]
# def _retarget_abs(self, wrist: np.ndarray) -> np.ndarray:
# """Handle absolute pose retargeting.
# Args:
# wrist: Wrist pose data from OpenXR
# Returns:
# Retargeted wrist pose in USD control frame
# """
# # Convert wrist data in openxr frame to usd control frame
# # Create pose object for openxr_right_wrist_in_world
# # Note: The pose utils require torch tensors
# wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32)
# wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32)
# openxr_right_wrist_in_world = PoseUtils.make_pose(wrist_pos, PoseUtils.matrix_from_quat(wrist_quat))
# # The usd control frame is 180 degrees rotated around z axis wrt to the openxr frame
# # This was determined through trial and error
# zero_pos = torch.zeros(3, dtype=torch.float32)
# # 180 degree rotation around z axis
# z_axis_rot_quat = torch.tensor([0, 0, 0, 1], dtype=torch.float32)
# usd_right_roll_link_in_openxr_right_wrist = PoseUtils.make_pose(
# zero_pos, PoseUtils.matrix_from_quat(z_axis_rot_quat)
# )
# # Convert wrist pose in openxr frame to usd control frame
# usd_right_roll_link_in_world = PoseUtils.pose_in_A_to_pose_in_B(
# usd_right_roll_link_in_openxr_right_wrist, openxr_right_wrist_in_world
# )
# # extract position and rotation
# usd_right_roll_link_in_world_pos, usd_right_roll_link_in_world_mat = PoseUtils.unmake_pose(
# usd_right_roll_link_in_world
# )
# usd_right_roll_link_in_world_quat = PoseUtils.quat_from_matrix(usd_right_roll_link_in_world_mat)
# return np.concatenate([usd_right_roll_link_in_world_pos, usd_right_roll_link_in_world_quat])
# @dataclass
# class GR1T2RetargeterCfg(RetargeterCfg):
# """Configuration for the GR1T2 retargeter."""
# enable_visualization: bool = False
# num_open_xr_hand_joints: int = 100
# hand_joint_names: list[str] | None = None # List of robot hand joint names
# retargeter_type: type[RetargeterBase] = GR1T2Retargeter

View File

@@ -0,0 +1,175 @@
# Copyright (c) 2024, MindRobot Project
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for MindRobot dual-arm robot.
Arms are RealMan RM-65 (6-DOF, 5kg payload, 7.2kg self-weight, 610mm reach).
Reference: https://develop.realman-robotics.com/robot4th/robotParameter/RM65OntologyParameters/
The following configurations are available:
* Note: a fixed joint and an articulation root were added to the MindRobot asset.
* :obj:`MINDBOT_CFG`: Base configuration with gravity enabled (general purpose).
* :obj:`MINDBOT_HIGH_PD_CFG`: Gravity disabled, stiffer PD for IK task-space control.
"""
import isaaclab.sim as sim_utils
from isaaclab.actuators import ImplicitActuatorCfg
from isaaclab.assets import ArticulationCfg
from mindbot.utils.assets import MINDBOT_ASSETS_DIR
MINDBOT_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{MINDBOT_ASSETS_DIR}/robots/Collected_mindrobot_zed_v3/v1.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=5.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
fix_root_link=False,
enabled_self_collisions=True,
solver_position_iteration_count=8,
solver_velocity_iteration_count=0,
),
),
init_state=ArticulationCfg.InitialStateCfg(
joint_pos={
# ====== Left arm (RM-65) — away from singularities ======
# Elbow singularity: q3=0; Wrist singularity: q5=0.
# Small offsets on q2/q4/q6 to avoid exact 90° Jacobian symmetry.
"l_joint1": 1.5708, # 90°
"l_joint2": -1.2217, # -70° (offset from -90° for better elbow workspace)
"l_joint3": -1.5708, # -90° (away from elbow singularity q3=0)
"l_joint4": 1.3963, # 80° (offset from 90° to break wrist symmetry)
"l_joint5": 1.5708, # 90° (away from wrist singularity q5=0)
"l_joint6": -1.3963, # -80° (offset from -90° to break wrist symmetry)
# ====== Right arm (RM-65) — same joint values as left (verified visually) ======
"r_joint1": 1.5708, # 90°
"r_joint2": -1.2217, # -70°
"r_joint3": -1.5708, # -90° (away from elbow singularity q3=0)
"r_joint4": 1.3963, # 80°
"r_joint5": 1.5708, # 90° (away from wrist singularity q5=0)
"r_joint6": 1.3963, # -80°
# ====== Grippers (0=open) ======
"left_hand_joint_left": 0.0,
"left_hand_joint_right": 0.0,
"right_hand_joint_left": 0.0,
"right_hand_joint_right": 0.0,
# ====== Trunk & Head ======
# NOTE: pos z = 0.44 (base clearance) + PrismaticJoint value.
# If you change PrismaticJoint, update pos z accordingly to avoid
# geometry clipping that causes the robot to fall on spawn.
"PrismaticJoint": 0.26,
"head_revoluteJoint": 0.0,
},
pos=(0.0, 0.0, 0.7), # z = 0.44 (base clearance) + 0.26 (PrismaticJoint)
),
actuators={
# RM-65 J1-J3: shoulder/elbow — max 180°/s ≈ 3.14 rad/s
"left_arm_shoulder": ImplicitActuatorCfg(
joint_names_expr=["l_joint[1-3]"],
effort_limit_sim=50.0,
velocity_limit_sim=3.14,
stiffness=80.0,
damping=4.0,
),
# RM-65 J4-J6: wrist — max 225°/s ≈ 3.93 rad/s
"left_arm_wrist": ImplicitActuatorCfg(
joint_names_expr=["l_joint[4-6]"],
effort_limit_sim=20.0,
velocity_limit_sim=3.93,
stiffness=80.0,
damping=4.0,
),
"right_arm_shoulder": ImplicitActuatorCfg(
joint_names_expr=["r_joint[1-3]"],
effort_limit_sim=50.0,
velocity_limit_sim=3.14,
stiffness=80.0,
damping=4.0,
),
"right_arm_wrist": ImplicitActuatorCfg(
joint_names_expr=["r_joint[4-6]"],
effort_limit_sim=20.0,
velocity_limit_sim=3.93,
stiffness=80.0,
damping=4.0,
),
"head": ImplicitActuatorCfg(
joint_names_expr=["head_revoluteJoint"],
effort_limit_sim=12.0,
stiffness=80.0,
damping=4.0,
),
"trunk": ImplicitActuatorCfg(
joint_names_expr=["PrismaticJoint"],
effort_limit_sim=200.0,
velocity_limit_sim=0.2,
stiffness=1000.0,
damping=100.0,
),
"wheels": ImplicitActuatorCfg(
# joint_names_expr=[".*_revolute_Joint"],
joint_names_expr=[
"right_b_revolute_Joint",
"left_b_revolute_Joint",
"left_f_revolute_Joint",
"right_f_revolute_Joint",
],
effort_limit_sim=200.0,
stiffness=0.0,
damping=100.0,
),
"grippers": ImplicitActuatorCfg(
joint_names_expr=[".*_hand_joint.*"],
effort_limit_sim=200.0,
stiffness=2e3,
damping=1e2,
),
},
soft_joint_pos_limit_factor=1.0,
)
"""Base configuration for MindRobot. Gravity enabled, low PD gains."""
MINDBOT_HIGH_PD_CFG = MINDBOT_CFG.copy()
# The module docstring (and the docstring below) promise "gravity disabled" for
# the high-PD variant — the standard Isaac Lab convention for *_HIGH_PD_CFG
# used with task-space IK — but the original copy never turned it off.
MINDBOT_HIGH_PD_CFG.spawn.rigid_props.disable_gravity = True
# Stiffer PD on all four arm actuator groups for tighter task-space tracking.
for _arm_group in ("left_arm_shoulder", "left_arm_wrist", "right_arm_shoulder", "right_arm_wrist"):
    MINDBOT_HIGH_PD_CFG.actuators[_arm_group].stiffness = 400.0
    MINDBOT_HIGH_PD_CFG.actuators[_arm_group].damping = 80.0
del _arm_group
"""IK task-space control configuration. Gravity disabled, stiffer PD for tracking."""
# =====================================================================
# Robot Body / Joint Name Constants
# Keep all robot-specific strings here so env cfgs stay robot-agnostic.
# =====================================================================
# Joint name patterns (regex, for use in action/actuator configs)
MINDBOT_LEFT_ARM_JOINTS = "l_joint[1-6]"
MINDBOT_RIGHT_ARM_JOINTS = "r_joint[1-6]"
# Gripper joints as explicit lists (two finger joints per hand).
MINDBOT_LEFT_GRIPPER_JOINTS = ["left_hand_joint_left", "left_hand_joint_right"]
MINDBOT_RIGHT_GRIPPER_JOINTS = ["right_hand_joint_left", "right_hand_joint_right"]
# Wheel joints, listed explicitly to match the "wheels" actuator group above.
MINDBOT_WHEEL_JOINTS = [
    "right_b_revolute_Joint",
    "left_b_revolute_Joint",
    "left_f_revolute_Joint",
    "right_f_revolute_Joint",
]
# EEF body names (as defined in the USD asset)
MINDBOT_LEFT_EEF_BODY = "left_hand_body"
MINDBOT_RIGHT_EEF_BODY = "right_hand_body"
# Prim paths relative to the Robot prim (i.e. after "{ENV_REGEX_NS}/Robot/")
MINDBOT_LEFT_ARM_PRIM_ROOT = "rm_65_fb_left"
MINDBOT_RIGHT_ARM_PRIM_ROOT = "rm_65_b_right"
MINDBOT_LEFT_EEF_PRIM = "rm_65_fb_left/l_hand_01/left_hand_body"
MINDBOT_RIGHT_EEF_PRIM = "rm_65_b_right/r_hand/right_hand_body"

View File

@@ -0,0 +1,37 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from dataclasses import dataclass
import torch
from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg
class G1LowerBodyStandingRetargeter(RetargeterBase):
    """Emits a fixed lower-body "stand still" command for the G1 robot.

    The output is a 4-element tensor whose first three entries are always
    zero and whose last entry is the configured hip height (presumably
    (vx, vy, yaw_rate, hip_height) — confirm against the consumer).
    """

    def __init__(self, cfg: G1LowerBodyStandingRetargeterCfg):
        """Initialize the base class and keep a handle on the config."""
        super().__init__(cfg)
        self.cfg = cfg

    def retarget(self, data: dict) -> torch.Tensor:
        # The command never depends on `data`; only the hip height is configurable.
        command = [0.0, 0.0, 0.0, self.cfg.hip_height]
        return torch.tensor(command, device=self.cfg.sim_device)

    def get_requirements(self) -> list[RetargeterBase.Requirement]:
        # Static command: no hand-tracking or controller data is consumed.
        return []
@dataclass
class G1LowerBodyStandingRetargeterCfg(RetargeterCfg):
    """Configuration for the G1 lower body standing retargeter."""
    # Fixed hip height (meters) emitted as the last element of the command tensor.
    hip_height: float = 0.72
    """Height of the G1 robot hip in meters. The value is a fixed height suitable for G1 to do tabletop manipulation."""
    # Factory hook: the device layer instantiates this retargeter class from the cfg.
    retargeter_type: type[RetargeterBase] = G1LowerBodyStandingRetargeter

View File

@@ -0,0 +1,87 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from dataclasses import dataclass
import torch
from isaaclab.devices.device_base import DeviceBase
from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg
from isaaclab.sim import SimulationContext
class G1LowerBodyStandingMotionControllerRetargeter(RetargeterBase):
    """Provides lower body standing commands for the G1 robot.

    Thumbstick mapping (OpenXR motion controllers):
      * Left stick X/Y: planar movement command, scaled by ``movement_scale``.
      * Right stick X: turn command (returned negated, unscaled).
      * Right stick Y: integrated per render step to lower/raise the hip
        height, clamped to [0.4, 1.0] m.
    """
    def __init__(self, cfg: G1LowerBodyStandingMotionControllerRetargeterCfg):
        """Initialize the retargeter."""
        super().__init__(cfg)
        self.cfg = cfg
        # Stateful: hip height persists across calls and is adjusted incrementally.
        self._hip_height = cfg.hip_height
    def retarget(self, data: dict) -> torch.Tensor:
        """Build a 4-element command tensor from both controllers' thumbsticks.

        Returns:
            Tensor ``[-left_y, -left_x, -right_x, hip_height]`` on ``cfg.sim_device``.
            Axes default to 0.0 when controller data is missing or too short.
        """
        left_thumbstick_x = 0.0
        left_thumbstick_y = 0.0
        right_thumbstick_x = 0.0
        right_thumbstick_y = 0.0
        # Get controller data using enums
        if (
            DeviceBase.TrackingTarget.CONTROLLER_LEFT in data
            and data[DeviceBase.TrackingTarget.CONTROLLER_LEFT] is not None
        ):
            left_controller_data = data[DeviceBase.TrackingTarget.CONTROLLER_LEFT]
            # Length guards: the INPUTS row and the thumbstick columns may be absent.
            if len(left_controller_data) > DeviceBase.MotionControllerDataRowIndex.INPUTS.value:
                left_inputs = left_controller_data[DeviceBase.MotionControllerDataRowIndex.INPUTS.value]
                if len(left_inputs) > DeviceBase.MotionControllerInputIndex.THUMBSTICK_Y.value:
                    left_thumbstick_x = left_inputs[DeviceBase.MotionControllerInputIndex.THUMBSTICK_X.value]
                    left_thumbstick_y = left_inputs[DeviceBase.MotionControllerInputIndex.THUMBSTICK_Y.value]
        if (
            DeviceBase.TrackingTarget.CONTROLLER_RIGHT in data
            and data[DeviceBase.TrackingTarget.CONTROLLER_RIGHT] is not None
        ):
            right_controller_data = data[DeviceBase.TrackingTarget.CONTROLLER_RIGHT]
            if len(right_controller_data) > DeviceBase.MotionControllerDataRowIndex.INPUTS.value:
                right_inputs = right_controller_data[DeviceBase.MotionControllerDataRowIndex.INPUTS.value]
                if len(right_inputs) > DeviceBase.MotionControllerInputIndex.THUMBSTICK_Y.value:
                    right_thumbstick_x = right_inputs[DeviceBase.MotionControllerInputIndex.THUMBSTICK_X.value]
                    right_thumbstick_y = right_inputs[DeviceBase.MotionControllerInputIndex.THUMBSTICK_Y.value]
        # Thumbstick values are in the range of [-1, 1], so we need to scale them to the range of
        # [-movement_scale, movement_scale]
        # NOTE(review): only the left stick is scaled; right_thumbstick_x is
        # returned unscaled — confirm this asymmetry is intended.
        left_thumbstick_x = left_thumbstick_x * self.cfg.movement_scale
        left_thumbstick_y = left_thumbstick_y * self.cfg.movement_scale
        # Use rendering time step for deterministic hip height adjustment regardless of wall clock time.
        # NOTE(review): cfg.rotation_scale is used here as the hip-height
        # adjustment rate (m/s per unit stick deflection), not a rotation scale.
        dt = SimulationContext.instance().get_rendering_dt()
        self._hip_height -= right_thumbstick_y * dt * self.cfg.rotation_scale
        self._hip_height = max(0.4, min(1.0, self._hip_height))
        return torch.tensor(
            [-left_thumbstick_y, -left_thumbstick_x, -right_thumbstick_x, self._hip_height],
            device=self.cfg.sim_device,
            dtype=torch.float32,
        )
    def get_requirements(self) -> list[RetargeterBase.Requirement]:
        # Requires motion-controller (thumbstick) data from the device layer.
        return [RetargeterBase.Requirement.MOTION_CONTROLLER]
@dataclass
class G1LowerBodyStandingMotionControllerRetargeterCfg(RetargeterCfg):
    """Configuration for the G1 lower body standing retargeter."""
    hip_height: float = 0.72
    """Height of the G1 robot hip in meters. The value is a fixed height suitable for G1 to do tabletop manipulation."""
    movement_scale: float = 0.5
    """Scale the movement of the robot to the range of [-movement_scale, movement_scale]."""
    rotation_scale: float = 0.35
    """Scale the rotation of the robot to the range of [-rotation_scale, rotation_scale]."""
    # NOTE(review): in the retargeter, rotation_scale actually scales the
    # hip-height adjustment rate, not a rotation — consider renaming.
    # Factory hook: the device layer instantiates this retargeter class from the cfg.
    retargeter_type: type[RetargeterBase] = G1LowerBodyStandingMotionControllerRetargeter

View File

@@ -0,0 +1,24 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
retargeting:
finger_tip_link_names:
- L_thumb_tip
- L_index_tip
- L_middle_tip
- L_ring_tip
- L_pinky_tip
low_pass_alpha: 0.2
scaling_factor: 1.2
target_joint_names:
- L_thumb_proximal_yaw_joint
- L_thumb_proximal_pitch_joint
- L_index_proximal_joint
- L_middle_proximal_joint
- L_ring_proximal_joint
- L_pinky_proximal_joint
type: DexPilot
urdf_path: /tmp/retarget_inspire_white_left_hand.urdf
wrist_link_name: L_hand_base_link

View File

@@ -0,0 +1,24 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
retargeting:
finger_tip_link_names:
- R_thumb_tip
- R_index_tip
- R_middle_tip
- R_ring_tip
- R_pinky_tip
low_pass_alpha: 0.2
scaling_factor: 1.2
target_joint_names:
- R_thumb_proximal_yaw_joint
- R_thumb_proximal_pitch_joint
- R_index_proximal_joint
- R_middle_proximal_joint
- R_ring_proximal_joint
- R_pinky_proximal_joint
type: DexPilot
urdf_path: /tmp/retarget_inspire_white_right_hand.urdf
wrist_link_name: R_hand_base_link

View File

@@ -0,0 +1,266 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import os
import numpy as np
import torch
import yaml
from dex_retargeting.retargeting_config import RetargetingConfig
from scipy.spatial.transform import Rotation as R
from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path
# import logger
logger = logging.getLogger(__name__)
# The index to map the OpenXR hand joints to the hand joints used
# in Dex-retargeting.
# (21 entries; skips OpenXR indices 0, 6, 11, 16 and 21 to match the
# 21-joint layout used by dex-retargeting — confirm against OpenXR joint order.)
_HAND_JOINTS_INDEX = [1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 20, 22, 23, 24, 25]
# The transformation matrices to convert hand pose to canonical view.
# NOTE(review): left and right share the same matrix here, while the G1
# tri-finger utilities use a different shared matrix — presumably determined
# empirically per hand model; confirm if retargeting looks mirrored.
_OPERATOR2MANO_RIGHT = np.array(
    [
        [0, -1, 0],
        [-1, 0, 0],
        [0, 0, -1],
    ]
)
_OPERATOR2MANO_LEFT = np.array(
    [
        [0, -1, 0],
        [-1, 0, 0],
        [0, 0, -1],
    ]
)
# Inspire-hand joint names: 12 actuated joints per hand (4 thumb + 2 per finger).
_LEFT_HAND_JOINT_NAMES = [
    "L_thumb_proximal_yaw_joint",
    "L_thumb_proximal_pitch_joint",
    "L_thumb_intermediate_joint",
    "L_thumb_distal_joint",
    "L_index_proximal_joint",
    "L_index_intermediate_joint",
    "L_middle_proximal_joint",
    "L_middle_intermediate_joint",
    "L_ring_proximal_joint",
    "L_ring_intermediate_joint",
    "L_pinky_proximal_joint",
    "L_pinky_intermediate_joint",
]
_RIGHT_HAND_JOINT_NAMES = [
    "R_thumb_proximal_yaw_joint",
    "R_thumb_proximal_pitch_joint",
    "R_thumb_intermediate_joint",
    "R_thumb_distal_joint",
    "R_index_proximal_joint",
    "R_index_intermediate_joint",
    "R_middle_proximal_joint",
    "R_middle_intermediate_joint",
    "R_ring_proximal_joint",
    "R_ring_intermediate_joint",
    "R_pinky_proximal_joint",
    "R_pinky_intermediate_joint",
]
class UnitreeG1DexRetargeting:
    """Hand retargeting for the Unitree G1 with Inspire dexterous hands.

    Maps OpenXR hand tracking data to robot hand joint angles using the
    ``dex-retargeting`` DexPilot optimizer, one optimizer per hand.
    (The original docstring referenced "GR1Fourier"/"GR1T2" — a copy-paste
    remnant; this class configures the Unitree/Inspire hand assets.)
    """

    def __init__(
        self,
        hand_joint_names: list[str],
        right_hand_config_filename: str = "unitree_hand_right_dexpilot.yml",
        left_hand_config_filename: str = "unitree_hand_left_dexpilot.yml",
        left_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Mimic/G1_inspire_assets/retarget_inspire_white_left_hand.urdf",  # noqa: E501
        right_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Mimic/G1_inspire_assets/retarget_inspire_white_right_hand.urdf",  # noqa: E501
    ):
        """Initialize the hand retargeting.

        Args:
            hand_joint_names: Names of hand joints in the robot model.
            right_hand_config_filename: Config file for right hand retargeting.
            left_hand_config_filename: Config file for left hand retargeting.
            left_hand_urdf_path: Remote (Nucleus) path to the left-hand URDF.
            right_hand_urdf_path: Remote (Nucleus) path to the right-hand URDF.
        """
        data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/"))
        config_dir = os.path.join(data_dir, "configs/dex-retargeting")
        # Download urdf files from aws
        local_left_urdf_path = retrieve_file_path(left_hand_urdf_path, force_download=True)
        local_right_urdf_path = retrieve_file_path(right_hand_urdf_path, force_download=True)
        left_config_path = os.path.join(config_dir, left_hand_config_filename)
        right_config_path = os.path.join(config_dir, right_hand_config_filename)
        # Update the YAML files with the correct URDF paths
        self._update_yaml_with_urdf_path(left_config_path, local_left_urdf_path)
        self._update_yaml_with_urdf_path(right_config_path, local_right_urdf_path)
        self._dex_left_hand = RetargetingConfig.load_from_file(left_config_path).build()
        self._dex_right_hand = RetargetingConfig.load_from_file(right_config_path).build()
        # Optimizer DOF order (left first, then right) — this is the ordering of
        # the values produced by compute_left()/compute_right().
        self.left_dof_names = self._dex_left_hand.optimizer.robot.dof_joint_names
        self.right_dof_names = self._dex_right_hand.optimizer.robot.dof_joint_names
        self.dof_names = self.left_dof_names + self.right_dof_names
        self.isaac_lab_hand_joint_names = hand_joint_names
        logger.info("[UnitreeG1DexRetargeter] init done.")

    def _update_yaml_with_urdf_path(self, yaml_path: str, urdf_path: str):
        """Update YAML file with the correct URDF path.

        Args:
            yaml_path: Path to the YAML configuration file.
            urdf_path: Path to the URDF file to use.
        """
        try:
            # Read the YAML file
            with open(yaml_path) as file:
                config = yaml.safe_load(file)
            # Update the URDF path in the configuration
            if "retargeting" in config:
                config["retargeting"]["urdf_path"] = urdf_path
                logger.info(f"Updated URDF path in {yaml_path} to {urdf_path}")
            else:
                logger.warning(f"Unable to find 'retargeting' section in {yaml_path}")
            # Write the updated configuration back to the file
            with open(yaml_path, "w") as file:
                yaml.dump(config, file)
        except Exception as e:
            # Best-effort: a failed rewrite leaves the existing config in place.
            logger.error(f"Error updating YAML file {yaml_path}: {e}")

    def convert_hand_joints(self, hand_poses: dict[str, np.ndarray], operator2mano: np.ndarray) -> np.ndarray:
        """Prepares the hand joints data for retargeting.

        Args:
            hand_poses: Dictionary containing hand pose data with joint positions and rotations.
            operator2mano: Transformation matrix to convert from operator to MANO frame.

        Returns:
            Joint positions with shape (21, 3).
        """
        joint_position = np.zeros((21, 3))
        hand_joints = list(hand_poses.values())
        # enumerate() over the index map (matches the G1 tri-hand implementation)
        # instead of the original range(len(...)) loop.
        for i, joint_index in enumerate(_HAND_JOINTS_INDEX):
            joint_position[i] = hand_joints[joint_index][:3]
        # Convert hand pose to the canonical frame: positions relative to the
        # first selected joint, rotated into the MANO view.
        joint_position = joint_position - joint_position[0:1, :]
        xr_wrist_quat = hand_poses.get("wrist")[3:]
        # OpenXR hand uses w,x,y,z order for quaternions but scipy uses x,y,z,w order
        wrist_rot = R.from_quat([xr_wrist_quat[1], xr_wrist_quat[2], xr_wrist_quat[3], xr_wrist_quat[0]]).as_matrix()
        return joint_position @ wrist_rot @ operator2mano

    def compute_ref_value(self, joint_position: np.ndarray, indices: np.ndarray, retargeting_type: str) -> np.ndarray:
        """Computes reference value for retargeting.

        Args:
            joint_position: Joint positions array.
            indices: Target link indices.
            retargeting_type: Type of retargeting ("POSITION" or other).

        Returns:
            Reference value in cartesian space.
        """
        if retargeting_type == "POSITION":
            return joint_position[indices, :]
        else:
            # Vector-style retargeting: relative vectors from origin to task links.
            origin_indices = indices[0, :]
            task_indices = indices[1, :]
            ref_value = joint_position[task_indices, :] - joint_position[origin_indices, :]
            return ref_value

    def compute_one_hand(
        self, hand_joints: dict[str, np.ndarray], retargeting: RetargetingConfig, operator2mano: np.ndarray
    ) -> np.ndarray:
        """Computes retargeted joint angles for one hand.

        Args:
            hand_joints: Dictionary containing hand joint data.
            retargeting: Retargeting configuration object.
            operator2mano: Transformation matrix from operator to MANO frame.

        Returns:
            Retargeted joint angles.
        """
        joint_pos = self.convert_hand_joints(hand_joints, operator2mano)
        ref_value = self.compute_ref_value(
            joint_pos,
            indices=retargeting.optimizer.target_link_human_indices,
            retargeting_type=retargeting.optimizer.retargeting_type,
        )
        # Enable gradient calculation and inference mode in case some other script has disabled it
        # This is necessary for the retargeting to work since it uses gradient features that
        # are not available in inference mode
        with torch.enable_grad():
            with torch.inference_mode(False):
                return retargeting.retarget(ref_value)

    def get_joint_names(self) -> list[str]:
        """Returns list of all joint names."""
        return self.dof_names

    def get_left_joint_names(self) -> list[str]:
        """Returns list of left hand joint names."""
        return self.left_dof_names

    def get_right_joint_names(self) -> list[str]:
        """Returns list of right hand joint names."""
        return self.right_dof_names

    def get_hand_indices(self, robot) -> np.ndarray:
        """Gets indices of hand joints in robot's DOF array.

        Args:
            robot: Robot object containing DOF information.

        Returns:
            Array of joint indices.
        """
        return np.array([robot.dof_names.index(name) for name in self.dof_names], dtype=np.int64)

    def compute_left(self, left_hand_poses: dict[str, np.ndarray]) -> np.ndarray:
        """Computes retargeted joints for left hand.

        Args:
            left_hand_poses: Dictionary of left hand joint poses.

        Returns:
            Retargeted joint angles for left hand (zeros when no poses are given).
        """
        if left_hand_poses is not None:
            left_hand_q = self.compute_one_hand(left_hand_poses, self._dex_left_hand, _OPERATOR2MANO_LEFT)
        else:
            left_hand_q = np.zeros(len(_LEFT_HAND_JOINT_NAMES))
        return left_hand_q

    def compute_right(self, right_hand_poses: dict[str, np.ndarray]) -> np.ndarray:
        """Computes retargeted joints for right hand.

        Args:
            right_hand_poses: Dictionary of right hand joint poses.

        Returns:
            Retargeted joint angles for right hand (zeros when no poses are given).
        """
        if right_hand_poses is not None:
            right_hand_q = self.compute_one_hand(right_hand_poses, self._dex_right_hand, _OPERATOR2MANO_RIGHT)
        else:
            right_hand_q = np.zeros(len(_RIGHT_HAND_JOINT_NAMES))
        return right_hand_q

View File

@@ -0,0 +1,164 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import contextlib
from dataclasses import dataclass
import numpy as np
import torch
import isaaclab.sim as sim_utils
import isaaclab.utils.math as PoseUtils
from isaaclab.devices.device_base import DeviceBase
from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg
from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg
# This import exception is suppressed because g1_dex_retargeting_utils
# depends on pinocchio which is not available on Windows.
with contextlib.suppress(Exception):
from .g1_dex_retargeting_utils import UnitreeG1DexRetargeting
class UnitreeG1Retargeter(RetargeterBase):
    """Retargets OpenXR hand tracking data to Unitree G1 hand end-effector commands.

    Maps OpenXR hand tracking data to wrist poses and hand joint commands for
    the robot's dexterous hands, handling both left and right hands.
    (NOTE(review): the original docstring said "GR1T2"; the implementation
    uses the Unitree G1 dex-retargeting pipeline.)
    """
    def __init__(
        self,
        cfg: UnitreeG1RetargeterCfg,
    ):
        """Initialize the UnitreeG1 hand retargeter.

        Args:
            cfg: Configuration carrying the visualization flag, number of
                tracked OpenXR joints, compute device, and robot hand joint names.
        """
        super().__init__(cfg)
        self._hand_joint_names = cfg.hand_joint_names
        self._hands_controller = UnitreeG1DexRetargeting(self._hand_joint_names)
        # Initialize visualization if enabled
        self._enable_visualization = cfg.enable_visualization
        self._num_open_xr_hand_joints = cfg.num_open_xr_hand_joints
        self._sim_device = cfg.sim_device
        if self._enable_visualization:
            # Red spheres, one per tracked OpenXR joint.
            marker_cfg = VisualizationMarkersCfg(
                prim_path="/Visuals/markers",
                markers={
                    "joint": sim_utils.SphereCfg(
                        radius=0.005,
                        visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
                    ),
                },
            )
            self._markers = VisualizationMarkers(marker_cfg)
    def retarget(self, data: dict) -> torch.Tensor:
        """Convert hand joint poses to robot end-effector commands.

        Args:
            data: Dictionary mapping tracking targets to joint data dictionaries.

        Returns:
            Concatenated tensor: left wrist pose (7), right wrist pose (7) in
            the USD control frame, then the retargeted hand joint angles.
        """
        # Access the left and right hand data using the enum key
        left_hand_poses = data[DeviceBase.TrackingTarget.HAND_LEFT]
        right_hand_poses = data[DeviceBase.TrackingTarget.HAND_RIGHT]
        left_wrist = left_hand_poses.get("wrist")
        right_wrist = right_hand_poses.get("wrist")
        if self._enable_visualization:
            # Interleave left/right joint positions into one marker batch.
            joints_position = np.zeros((self._num_open_xr_hand_joints, 3))
            joints_position[::2] = np.array([pose[:3] for pose in left_hand_poses.values()])
            joints_position[1::2] = np.array([pose[:3] for pose in right_hand_poses.values()])
            self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device))
        # Create array of zeros with length matching number of joint names
        # NOTE(review): indexes are computed against self._hand_joint_names but
        # the zeros array is sized by len(get_joint_names()) (the optimizer DOF
        # list); this is only safe if both lists have the same length — confirm.
        left_hands_pos = self._hands_controller.compute_left(left_hand_poses)
        indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_left_joint_names()]
        left_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names()))
        left_retargeted_hand_joints[indexes] = left_hands_pos
        left_hand_joints = left_retargeted_hand_joints
        right_hands_pos = self._hands_controller.compute_right(right_hand_poses)
        indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_right_joint_names()]
        right_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names()))
        right_retargeted_hand_joints[indexes] = right_hands_pos
        right_hand_joints = right_retargeted_hand_joints
        # Element-wise sum merges the two half-filled arrays (left and right fill
        # disjoint index sets; every other entry is zero).
        retargeted_hand_joints = left_hand_joints + right_hand_joints
        # Convert numpy arrays to tensors and concatenate them
        left_wrist_tensor = torch.tensor(
            self._retarget_abs(left_wrist, True), dtype=torch.float32, device=self._sim_device
        )
        right_wrist_tensor = torch.tensor(
            self._retarget_abs(right_wrist, False), dtype=torch.float32, device=self._sim_device
        )
        hand_joints_tensor = torch.tensor(retargeted_hand_joints, dtype=torch.float32, device=self._sim_device)
        # Combine all tensors into a single tensor
        return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor])
    def get_requirements(self) -> list[RetargeterBase.Requirement]:
        # Consumes only OpenXR hand-tracking data (no motion controllers).
        return [RetargeterBase.Requirement.HAND_TRACKING]
    def _retarget_abs(self, wrist: np.ndarray, is_left: bool) -> np.ndarray:
        """Handle absolute pose retargeting.

        Args:
            wrist: Wrist pose data from OpenXR (position xyz followed by quat wxyz).
            is_left: True for the left hand, False for the right hand.

        Returns:
            Retargeted wrist pose in USD control frame.
        """
        # Note: This was determined through trial, use the target quat and cloudXR quat,
        # to estimate a most reasonable transformation matrix
        wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32)
        wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32)
        if is_left:
            # (w,x,y,z) = (0.7071, 0, 0.7071, 0): a 90° rotation about +Y.
            # NOTE(review): the previous comment claimed (0, 180, 0) euler, but
            # cos(theta/2)=0.7071 implies theta=90° — confirm the intended frame.
            combined_quat = torch.tensor([0.7071, 0, 0.7071, 0], dtype=torch.float32)
        else:
            # (w,x,y,z) = (0, 0.7071, 0, -0.7071): w=0 means a 180° rotation,
            # about the axis (0, 1, -1)/sqrt(2). NOTE(review): previous comment
            # said (180, 0, 0) euler — confirm the intended frame.
            combined_quat = torch.tensor([0, 0.7071, 0, -0.7071], dtype=torch.float32)
        openxr_pose = PoseUtils.make_pose(wrist_pos, PoseUtils.matrix_from_quat(wrist_quat))
        transform_pose = PoseUtils.make_pose(torch.zeros(3), PoseUtils.matrix_from_quat(combined_quat))
        # Apply the fixed correction in the wrist frame, expressed in world.
        result_pose = PoseUtils.pose_in_A_to_pose_in_B(transform_pose, openxr_pose)
        pos, rot_mat = PoseUtils.unmake_pose(result_pose)
        quat = PoseUtils.quat_from_matrix(rot_mat)
        return np.concatenate([pos.numpy(), quat.numpy()])
@dataclass
class UnitreeG1RetargeterCfg(RetargeterCfg):
    """Configuration for the UnitreeG1 retargeter."""
    # Draw sphere markers at every tracked OpenXR hand joint when True.
    enable_visualization: bool = False
    # Total tracked OpenXR joints across both hands (sizes the marker batch).
    num_open_xr_hand_joints: int = 100
    hand_joint_names: list[str] | None = None  # List of robot hand joint names
    # Factory hook: the device layer instantiates this retargeter class from the cfg.
    retargeter_type: type[RetargeterBase] = UnitreeG1Retargeter

View File

@@ -0,0 +1,18 @@
retargeting:
finger_tip_link_names:
- thumb_tip
- index_tip
- middle_tip
low_pass_alpha: 0.2
scaling_factor: 1.0
target_joint_names:
- left_hand_thumb_0_joint
- left_hand_thumb_1_joint
- left_hand_thumb_2_joint
- left_hand_middle_0_joint
- left_hand_middle_1_joint
- left_hand_index_0_joint
- left_hand_index_1_joint
type: DexPilot
urdf_path: /tmp/G1_left_hand.urdf
wrist_link_name: base_link

View File

@@ -0,0 +1,18 @@
retargeting:
finger_tip_link_names:
- thumb_tip
- index_tip
- middle_tip
low_pass_alpha: 0.2
scaling_factor: 1.0
target_joint_names:
- right_hand_thumb_0_joint
- right_hand_thumb_1_joint
- right_hand_thumb_2_joint
- right_hand_middle_0_joint
- right_hand_middle_1_joint
- right_hand_index_0_joint
- right_hand_index_1_joint
type: DexPilot
urdf_path: /tmp/G1_right_hand.urdf
wrist_link_name: base_link

View File

@@ -0,0 +1,258 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import os
import numpy as np
import torch
import yaml
from dex_retargeting.retargeting_config import RetargetingConfig
from scipy.spatial.transform import Rotation as R
from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path
# import logger
logger = logging.getLogger(__name__)
# yourdfpy loads visual/collision meshes with the hand URDFs; these aren't needed for
# retargeting and clutter the logs, so we suppress them.
logging.getLogger("dex_retargeting.yourdfpy").setLevel(logging.ERROR)
# The index to map the OpenXR hand joints to the hand joints used
# in Dex-retargeting.
# (21 entries; skips OpenXR indices 0, 6, 11, 16 and 21 to match the
# 21-joint layout used by dex-retargeting — confirm against OpenXR joint order.)
_HAND_JOINTS_INDEX = [1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 20, 22, 23, 24, 25]
# The transformation matrices to convert hand pose to canonical view.
# Left and right hands share the same matrix for this hand model.
_OPERATOR2MANO_RIGHT = np.array(
    [
        [0, 0, 1],
        [1, 0, 0],
        [0, 1, 0],
    ]
)
_OPERATOR2MANO_LEFT = np.array(
    [
        [0, 0, 1],
        [1, 0, 0],
        [0, 1, 0],
    ]
)
# G1 robot hand joint names - 2 fingers and 1 thumb configuration
_LEFT_HAND_JOINT_NAMES = [
    "left_hand_thumb_0_joint",  # Thumb base (yaw axis)
    "left_hand_thumb_1_joint",  # Thumb middle (pitch axis)
    "left_hand_thumb_2_joint",  # Thumb tip
    "left_hand_index_0_joint",  # Index finger proximal
    "left_hand_index_1_joint",  # Index finger distal
    "left_hand_middle_0_joint",  # Middle finger proximal
    "left_hand_middle_1_joint",  # Middle finger distal
]
_RIGHT_HAND_JOINT_NAMES = [
    "right_hand_thumb_0_joint",  # Thumb base (yaw axis)
    "right_hand_thumb_1_joint",  # Thumb middle (pitch axis)
    "right_hand_thumb_2_joint",  # Thumb tip
    "right_hand_index_0_joint",  # Index finger proximal
    "right_hand_index_1_joint",  # Index finger distal
    "right_hand_middle_0_joint",  # Middle finger proximal
    "right_hand_middle_1_joint",  # Middle finger distal
]
class G1TriHandDexRetargeting:
"""A class for hand retargeting with G1.
Handles retargeting of OpenXRhand tracking data to G1 robot hand joint angles.
"""
def __init__(
self,
hand_joint_names: list[str],
right_hand_config_filename: str = "g1_hand_right_dexpilot.yml",
left_hand_config_filename: str = "g1_hand_left_dexpilot.yml",
left_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_dexpilot_asset/G1_left_hand.urdf", # noqa: E501
right_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_dexpilot_asset/G1_right_hand.urdf", # noqa: E501
):
"""Initialize the hand retargeting.
Args:
hand_joint_names: Names of hand joints in the robot model
right_hand_config_filename: Config file for right hand retargeting
left_hand_config_filename: Config file for left hand retargeting
"""
data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/"))
config_dir = os.path.join(data_dir, "configs/dex-retargeting")
# Download urdf files from aws
local_left_urdf_path = retrieve_file_path(left_hand_urdf_path, force_download=True)
local_right_urdf_path = retrieve_file_path(right_hand_urdf_path, force_download=True)
left_config_path = os.path.join(config_dir, left_hand_config_filename)
right_config_path = os.path.join(config_dir, right_hand_config_filename)
# Update the YAML files with the correct URDF paths
self._update_yaml_with_urdf_path(left_config_path, local_left_urdf_path)
self._update_yaml_with_urdf_path(right_config_path, local_right_urdf_path)
self._dex_left_hand = RetargetingConfig.load_from_file(left_config_path).build()
self._dex_right_hand = RetargetingConfig.load_from_file(right_config_path).build()
self.left_dof_names = self._dex_left_hand.optimizer.robot.dof_joint_names
self.right_dof_names = self._dex_right_hand.optimizer.robot.dof_joint_names
self.dof_names = self.left_dof_names + self.right_dof_names
self.isaac_lab_hand_joint_names = hand_joint_names
logger.info("[G1DexRetargeter] init done.")
def _update_yaml_with_urdf_path(self, yaml_path: str, urdf_path: str):
"""Update YAML file with the correct URDF path.
Args:
yaml_path: Path to the YAML configuration file
urdf_path: Path to the URDF file to use
"""
try:
# Read the YAML file
with open(yaml_path) as file:
config = yaml.safe_load(file)
# Update the URDF path in the configuration
if "retargeting" in config:
config["retargeting"]["urdf_path"] = urdf_path
logger.info(f"Updated URDF path in {yaml_path} to {urdf_path}")
else:
logger.warning(f"Unable to find 'retargeting' section in {yaml_path}")
# Write the updated configuration back to the file
with open(yaml_path, "w") as file:
yaml.dump(config, file)
except Exception as e:
logger.error(f"Error updating YAML file {yaml_path}: {e}")
def convert_hand_joints(self, hand_poses: dict[str, np.ndarray], operator2mano: np.ndarray) -> np.ndarray:
    """Convert OpenXR hand-pose data into wrist-relative MANO-frame joint positions.

    Args:
        hand_poses: Mapping of joint name to pose array ``[x, y, z, qw, qx, qy, qz]``.
        operator2mano: Rotation matrix from the operator frame to the MANO frame.

    Returns:
        Joint positions with shape (21, 3).
    """
    poses = list(hand_poses.values())
    # Pick the 21 MANO joints out of the full OpenXR joint set.
    joint_position = np.zeros((21, 3))
    for out_idx, src_idx in enumerate(_HAND_JOINTS_INDEX):
        joint_position[out_idx] = poses[src_idx][:3]
    # Express all joints relative to the wrist (row 0) to get a canonical frame.
    joint_position = joint_position - joint_position[0:1, :]
    # OpenXR quaternions are (w, x, y, z); scipy expects (x, y, z, w).
    w, x, y, z = hand_poses.get("wrist")[3:]
    wrist_rot = R.from_quat([x, y, z, w]).as_matrix()
    return joint_position @ wrist_rot @ operator2mano
def compute_ref_value(self, joint_position: np.ndarray, indices: np.ndarray, retargeting_type: str) -> np.ndarray:
    """Compute the cartesian reference value consumed by the retargeting optimizer.

    Args:
        joint_position: Joint positions array of shape (N, 3).
        indices: Target link indices; 1-D for "POSITION", 2xM (origin/task rows) otherwise.
        retargeting_type: Type of retargeting ("POSITION" or other).

    Returns:
        Reference value in cartesian space.
    """
    if retargeting_type == "POSITION":
        # Position-type retargeting consumes the selected joint positions directly.
        return joint_position[indices, :]
    # Vector-type retargeting: displacement from each origin link to its task link.
    origins, tasks = indices[0, :], indices[1, :]
    return joint_position[tasks, :] - joint_position[origins, :]
def compute_one_hand(
    self, hand_joints: dict[str, np.ndarray], retargeting: RetargetingConfig, operator2mano: np.ndarray
) -> np.ndarray:
    """Run the dex-retargeting optimizer for a single hand.

    Args:
        hand_joints: Mapping of joint name to pose data for one hand.
        retargeting: Dex-retargeting optimizer wrapper for that hand.
        operator2mano: Rotation matrix from the operator frame to the MANO frame.

    Returns:
        Retargeted joint angles for the hand.
    """
    canonical_joints = self.convert_hand_joints(hand_joints, operator2mano)
    optimizer = retargeting.optimizer
    ref_value = self.compute_ref_value(
        canonical_joints,
        indices=optimizer.target_link_human_indices,
        retargeting_type=optimizer.retargeting_type,
    )
    # The optimizer relies on autograd, so re-enable gradients and make sure
    # inference mode is off, even if another script disabled them globally.
    with torch.enable_grad(), torch.inference_mode(False):
        return retargeting.retarget(ref_value)
def get_joint_names(self) -> list[str]:
    """Returns list of all joint names (left-hand names followed by right-hand names)."""
    return self.dof_names
def get_left_joint_names(self) -> list[str]:
    """Returns list of left hand joint names (as read from the left-hand URDF)."""
    return self.left_dof_names
def get_right_joint_names(self) -> list[str]:
    """Returns list of right hand joint names (as read from the right-hand URDF)."""
    return self.right_dof_names
def get_hand_indices(self, robot) -> np.ndarray:
    """Map this retargeter's joint names to indices in the robot's DOF array.

    Args:
        robot: Object exposing ``dof_names``, the robot's DOF name ordering.

    Returns:
        int64 array of indices, one per joint, in ``self.dof_names`` order.

    Raises:
        ValueError: If a joint name is missing from ``robot.dof_names``.
    """
    robot_order = robot.dof_names
    positions = [robot_order.index(joint) for joint in self.dof_names]
    return np.asarray(positions, dtype=np.int64)
def compute_left(self, left_hand_poses: dict[str, np.ndarray]) -> np.ndarray:
    """Retarget the left hand, or return zeros when no tracking data is available.

    Args:
        left_hand_poses: Mapping of left-hand joint name to pose data, or None.

    Returns:
        Retargeted joint angles for the left hand.
    """
    if left_hand_poses is None:
        # No tracking data: command a neutral (all-zero) hand configuration.
        return np.zeros(len(_LEFT_HAND_JOINT_NAMES))
    return self.compute_one_hand(left_hand_poses, self._dex_left_hand, _OPERATOR2MANO_LEFT)
def compute_right(self, right_hand_poses: dict[str, np.ndarray]) -> np.ndarray:
    """Retarget the right hand, or return zeros when no tracking data is available.

    Args:
        right_hand_poses: Mapping of right-hand joint name to pose data, or None.

    Returns:
        Retargeted joint angles for the right hand.
    """
    if right_hand_poses is None:
        # No tracking data: command a neutral (all-zero) hand configuration.
        return np.zeros(len(_RIGHT_HAND_JOINT_NAMES))
    return self.compute_one_hand(right_hand_poses, self._dex_right_hand, _OPERATOR2MANO_RIGHT)

View File

@@ -0,0 +1,154 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from dataclasses import dataclass
import numpy as np
import torch
import isaaclab.utils.math as PoseUtils
from isaaclab.devices.device_base import DeviceBase
from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg
class G1TriHandUpperBodyMotionControllerGripperRetargeter(RetargeterBase):
    """Retargeter mapping motion-controller triggers to a boolean G1 gripper state,
    concatenated with the retargeted wrist poses.

    Gripper:
        - Hysteresis (high/low trigger thresholds) prevents flicker near the threshold.
        - Output is 0.0 for open, 1.0 for close.
    Wrist:
        - Absolute controller pose is mapped into the robot control frame.
        - A fixed offset rotation is applied for comfort/alignment.
    """

    def __init__(self, cfg: G1TriHandUpperBodyMotionControllerGripperRetargeterCfg):
        """Initialize the retargeter.

        Args:
            cfg: Configuration for the retargeter.
        """
        super().__init__(cfg)
        self._cfg = cfg
        # Last commanded state per hand, required by the hysteresis (0.0 open, 1.0 closed).
        self._prev_left_state: float = 0.0
        self._prev_right_state: float = 0.0

    def retarget(self, data: dict) -> torch.Tensor:
        """Retarget controller inputs to gripper boolean states and wrist poses.

        Args:
            data: Dictionary with CONTROLLER_LEFT/CONTROLLER_RIGHT keys; each value
                is a 2D array: [pose(7), inputs(7)].

        Returns:
            Tensor laid out as [left_gripper(1), right_gripper(1), left_wrist(7), right_wrist(7)],
            with each wrist as [x, y, z, qw, qx, qy, qz].
        """
        left_data = data.get(DeviceBase.TrackingTarget.CONTROLLER_LEFT, np.array([]))
        right_data = data.get(DeviceBase.TrackingTarget.CONTROLLER_RIGHT, np.array([]))
        # Gripper: analog trigger -> boolean state with hysteresis.
        left_state: float = self._extract_hand_state(left_data, self._prev_left_state)
        right_state: float = self._extract_hand_state(right_data, self._prev_right_state)
        self._prev_left_state = left_state
        self._prev_right_state = right_state
        # NOTE(review): self._sim_device is not assigned in __init__ here, unlike the
        # sibling retargeters — presumably RetargeterBase sets it from cfg; confirm.
        gripper_tensor = torch.tensor([left_state, right_state], dtype=torch.float32, device=self._sim_device)
        # Wrist: fall back to the identity pose [x, y, z, qw, qx, qy, qz] when absent.
        identity_pose = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        left_wrist = self._extract_wrist_pose(left_data, identity_pose)
        right_wrist = self._extract_wrist_pose(right_data, identity_pose)
        left_wrist_tensor = torch.tensor(self._retarget_abs(left_wrist), dtype=torch.float32, device=self._sim_device)
        right_wrist_tensor = torch.tensor(
            self._retarget_abs(right_wrist), dtype=torch.float32, device=self._sim_device
        )
        return torch.cat([gripper_tensor, left_wrist_tensor, right_wrist_tensor])

    def _extract_hand_state(self, controller_data: np.ndarray, prev_state: float) -> float:
        """Extract the gripper state from controller data, applying hysteresis.

        Args:
            controller_data: 2D array [pose(7), inputs(7)].
            prev_state: Previous hand state (0.0 or 1.0).

        Returns:
            Hand state as float: 0.0 for open, 1.0 for close.
        """
        inputs_row = DeviceBase.MotionControllerDataRowIndex.INPUTS.value
        if len(controller_data) <= inputs_row:
            return 0.0
        inputs = controller_data[inputs_row]
        if len(inputs) < len(DeviceBase.MotionControllerInputIndex):
            return 0.0
        # Analog trigger value in [0.0, 1.0].
        trigger = inputs[DeviceBase.MotionControllerInputIndex.TRIGGER.value]
        currently_closed = prev_state >= 0.5
        if currently_closed:
            # Re-open only once the trigger drops below the low threshold.
            return 0.0 if trigger < self._cfg.threshold_low else 1.0
        # Close only once the trigger rises above the high threshold.
        return 1.0 if trigger > self._cfg.threshold_high else 0.0

    def _extract_wrist_pose(self, controller_data: np.ndarray, default_pose: np.ndarray) -> np.ndarray:
        """Extract the wrist pose row from controller data, or a fallback pose.

        Args:
            controller_data: 2D array [pose(7), inputs(7)].
            default_pose: Pose returned when no data is available.

        Returns:
            Wrist pose array [x, y, z, qw, qx, qy, qz].
        """
        pose_row = DeviceBase.MotionControllerDataRowIndex.POSE.value
        return controller_data[pose_row] if len(controller_data) > pose_row else default_pose

    def _retarget_abs(self, wrist: np.ndarray) -> np.ndarray:
        """Transform an absolute controller wrist pose into the robot control frame."""
        position = torch.tensor(wrist[:3], dtype=torch.float32)
        orientation = torch.tensor(wrist[3:], dtype=torch.float32)
        # Fixed offset: -75° about Y (rather than -90°, for wrist comfort) combined
        # with +90° about Z — i.e. euler (0, -75, 90) — expressed as a quaternion.
        offset_quat = torch.tensor([0.5358, -0.4619, 0.5358, 0.4619], dtype=torch.float32)
        controller_pose = PoseUtils.make_pose(position, PoseUtils.matrix_from_quat(orientation))
        offset_pose = PoseUtils.make_pose(torch.zeros(3), PoseUtils.matrix_from_quat(offset_quat))
        adjusted = PoseUtils.pose_in_A_to_pose_in_B(offset_pose, controller_pose)
        out_pos, out_rot = PoseUtils.unmake_pose(adjusted)
        out_quat = PoseUtils.quat_from_matrix(out_rot)
        return np.concatenate([out_pos.numpy(), out_quat.numpy()])

    def get_requirements(self) -> list[RetargeterBase.Requirement]:
        """Declare that this retargeter consumes motion-controller data."""
        return [RetargeterBase.Requirement.MOTION_CONTROLLER]
@dataclass
class G1TriHandUpperBodyMotionControllerGripperRetargeterCfg(RetargeterCfg):
    """Configuration for the G1 boolean gripper and wrist retargeter."""

    # Hysteresis bounds for the analog trigger: the gripper closes once
    # trigger > threshold_high and re-opens only once trigger < threshold_low,
    # preventing flicker when the trigger hovers near a single threshold.
    threshold_high: float = 0.6  # Trigger value above which an open hand closes
    threshold_low: float = 0.4  # Trigger value below which a closed hand opens
    # Concrete retargeter class instantiated from this config.
    retargeter_type: type[RetargeterBase] = G1TriHandUpperBodyMotionControllerGripperRetargeter

View File

@@ -0,0 +1,230 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from dataclasses import dataclass
import numpy as np
import torch
import isaaclab.sim as sim_utils
import isaaclab.utils.math as PoseUtils
from isaaclab.devices.device_base import DeviceBase
from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg
from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg
class G1TriHandUpperBodyMotionControllerRetargeter(RetargeterBase):
    """Simple retargeter that maps motion controller inputs to G1 hand joints.

    Mapping (see ``_map_to_hand_joints``):
        - Trigger (analog 0-1) → index-finger curl; also rotates the thumb toward the index finger
        - Squeeze (analog 0-1) → middle-finger curl; also rotates the thumb toward the middle finger
        - max(trigger, squeeze) → thumb curl (the thumb closes with whichever finger is grasping)
    """

    def __init__(self, cfg: G1TriHandUpperBodyMotionControllerRetargeterCfg):
        """Initialize the retargeter.

        Args:
            cfg: Configuration for the retargeter.

        Raises:
            ValueError: If ``cfg.hand_joint_names`` is None.
        """
        super().__init__(cfg)
        self._sim_device = cfg.sim_device
        self._hand_joint_names = cfg.hand_joint_names
        self._enable_visualization = cfg.enable_visualization
        if cfg.hand_joint_names is None:
            raise ValueError("hand_joint_names must be provided")
        # Initialize visualization if enabled.
        # NOTE(review): the markers are created here but never updated in retarget();
        # confirm whether visualization was intended to be wired up.
        if self._enable_visualization:
            marker_cfg = VisualizationMarkersCfg(
                prim_path="/Visuals/g1_controller_markers",
                markers={
                    "joint": sim_utils.SphereCfg(
                        radius=0.01,
                        visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
                    ),
                },
            )
            self._markers = VisualizationMarkers(marker_cfg)

    def retarget(self, data: dict) -> torch.Tensor:
        """Convert controller inputs to robot commands.

        Args:
            data: Dictionary with CONTROLLER_LEFT/CONTROLLER_RIGHT keys.
                Each value is a 2D array: [pose(7), inputs(7)].

        Returns:
            Tensor: [left_wrist(7), right_wrist(7), hand_joints(14)]
            hand_joints order:
            [
                left_proximal(3), right_proximal(3), left_distal(2), left_thumb_middle(1),
                right_distal(2), right_thumb_middle(1), left_thumb_tip(1), right_thumb_tip(1)
            ]
        """
        # Get controller data; missing entries fall back to an empty array.
        left_controller_data = data.get(DeviceBase.TrackingTarget.CONTROLLER_LEFT, np.array([]))
        right_controller_data = data.get(DeviceBase.TrackingTarget.CONTROLLER_RIGHT, np.array([]))
        # Default wrist poses (position + quaternion w,x,y,z = identity)
        default_wrist = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        # Extract poses from controller data
        left_wrist = self._extract_wrist_pose(left_controller_data, default_wrist)
        right_wrist = self._extract_wrist_pose(right_controller_data, default_wrist)
        # Map controller inputs to hand joints (7 per hand, see _map_to_hand_joints)
        left_hand_joints = self._map_to_hand_joints(left_controller_data, is_left=True)
        right_hand_joints = self._map_to_hand_joints(right_controller_data, is_left=False)
        # Negate left hand joints for proper mirroring
        left_hand_joints = -left_hand_joints
        # Combine joints in the expected order:
        # [left_proximal(3), right_proximal(3), left_distal(2), left_thumb_middle(1),
        #  right_distal(2), right_thumb_middle(1), left_thumb_tip(1), right_thumb_tip(1)]
        # The index order below is load-bearing; it must match the action-space layout.
        all_hand_joints = np.array(
            [
                left_hand_joints[3],  # left_index_proximal
                left_hand_joints[5],  # left_middle_proximal
                left_hand_joints[0],  # left_thumb_base
                right_hand_joints[3],  # right_index_proximal
                right_hand_joints[5],  # right_middle_proximal
                right_hand_joints[0],  # right_thumb_base
                left_hand_joints[4],  # left_index_distal
                left_hand_joints[6],  # left_middle_distal
                left_hand_joints[1],  # left_thumb_middle
                right_hand_joints[4],  # right_index_distal
                right_hand_joints[6],  # right_middle_distal
                right_hand_joints[1],  # right_thumb_middle
                left_hand_joints[2],  # left_thumb_tip
                right_hand_joints[2],  # right_thumb_tip
            ]
        )
        # Convert to tensors on the configured simulation device
        left_wrist_tensor = torch.tensor(
            self._retarget_abs(left_wrist, is_left=True), dtype=torch.float32, device=self._sim_device
        )
        right_wrist_tensor = torch.tensor(
            self._retarget_abs(right_wrist, is_left=False), dtype=torch.float32, device=self._sim_device
        )
        hand_joints_tensor = torch.tensor(all_hand_joints, dtype=torch.float32, device=self._sim_device)
        return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor])

    def get_requirements(self) -> list[RetargeterBase.Requirement]:
        """Declare that this retargeter consumes motion-controller data."""
        return [RetargeterBase.Requirement.MOTION_CONTROLLER]

    def _extract_wrist_pose(self, controller_data: np.ndarray, default_pose: np.ndarray) -> np.ndarray:
        """Extract wrist pose from controller data.

        Args:
            controller_data: 2D array [pose(7), inputs(7)]
            default_pose: Default pose to use if no data

        Returns:
            Wrist pose array [x, y, z, w, x, y, z]
        """
        if len(controller_data) > DeviceBase.MotionControllerDataRowIndex.POSE.value:
            return controller_data[DeviceBase.MotionControllerDataRowIndex.POSE.value]
        return default_pose

    def _map_to_hand_joints(self, controller_data: np.ndarray, is_left: bool) -> np.ndarray:
        """Map controller inputs to hand joint angles.

        Args:
            controller_data: 2D array [pose(7), inputs(7)]
            is_left: True for left hand, False for right hand

        Returns:
            Hand joint angles (7 joints per hand) in radians:
            [thumb_base, thumb_middle, thumb_tip, index_proximal, index_distal,
             middle_proximal, middle_distal]
        """
        # Initialize all joints to zero; returned unchanged if data is missing/short.
        hand_joints = np.zeros(7)
        if len(controller_data) <= DeviceBase.MotionControllerDataRowIndex.INPUTS.value:
            return hand_joints
        # Extract inputs from second row
        inputs = controller_data[DeviceBase.MotionControllerDataRowIndex.INPUTS.value]
        if len(inputs) < len(DeviceBase.MotionControllerInputIndex):
            return hand_joints
        # Extract specific inputs using enum
        trigger = inputs[DeviceBase.MotionControllerInputIndex.TRIGGER.value]  # 0.0 to 1.0 (analog)
        squeeze = inputs[DeviceBase.MotionControllerInputIndex.SQUEEZE.value]  # 0.0 to 1.0 (analog)
        # Grasping logic:
        # If trigger is pressed, we grasp with index and thumb.
        # If squeeze is pressed, we grasp with middle and thumb.
        # If both are pressed, we grasp with index, middle, and thumb.
        # The thumb rotates towards the direction of the pressing finger.
        # If both are pressed, the thumb stays in the middle.
        thumb_button = max(trigger, squeeze)
        # Thumb curl is driven by the stronger of trigger/squeeze (analog, not a button).
        thumb_angle = -thumb_button  # negative curl, magnitude up to 1 rad ≈ 57°
        # Thumb rotation:
        # If trigger is pressed, we rotate the thumb toward the index finger.
        # If squeeze is pressed, we rotate the thumb toward the middle finger.
        # If both are pressed, the thumb stays between the index and middle fingers.
        # Trigger pushes toward +0.5, squeeze pushes toward -0.5
        # trigger=1,squeeze=0 → 0.5; trigger=0,squeeze=1 → -0.5; both=1 → 0
        thumb_rotation = 0.5 * trigger - 0.5 * squeeze
        if not is_left:
            thumb_rotation = -thumb_rotation
        # These values were found empirically to get a good gripper pose.
        hand_joints[0] = thumb_rotation  # thumb_0_joint (base)
        hand_joints[1] = thumb_angle * 0.4  # thumb_1_joint (middle)
        hand_joints[2] = thumb_angle * 0.7  # thumb_2_joint (tip)
        # Index finger joints (2 joints) - controlled by trigger (analog)
        index_angle = trigger * 1.0  # Max 1.0 radians ≈ 57°
        hand_joints[3] = index_angle  # index_0_joint (proximal)
        hand_joints[4] = index_angle  # index_1_joint (distal)
        # Middle finger joints (2 joints) - controlled by squeeze (analog)
        middle_angle = squeeze * 1.0  # Max 1.0 radians ≈ 57°
        hand_joints[5] = middle_angle  # middle_0_joint (proximal)
        hand_joints[6] = middle_angle  # middle_1_joint (distal)
        return hand_joints

    def _retarget_abs(self, wrist: np.ndarray, is_left: bool) -> np.ndarray:
        """Handle absolute pose retargeting for controller wrists.

        Note: ``is_left`` is currently unused — both wrists receive the same
        fixed offset rotation.
        """
        wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32)
        wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32)
        # Combined -75° (rather than -90° for wrist comfort) Y rotation + 90° Z rotation
        # This is equivalent to (0, -75, 90) in euler angles
        combined_quat = torch.tensor([0.5358, -0.4619, 0.5358, 0.4619], dtype=torch.float32)
        openxr_pose = PoseUtils.make_pose(wrist_pos, PoseUtils.matrix_from_quat(wrist_quat))
        transform_pose = PoseUtils.make_pose(torch.zeros(3), PoseUtils.matrix_from_quat(combined_quat))
        result_pose = PoseUtils.pose_in_A_to_pose_in_B(transform_pose, openxr_pose)
        pos, rot_mat = PoseUtils.unmake_pose(result_pose)
        quat = PoseUtils.quat_from_matrix(rot_mat)
        return np.concatenate([pos.numpy(), quat.numpy()])
@dataclass
class G1TriHandUpperBodyMotionControllerRetargeterCfg(RetargeterCfg):
    """Configuration for the G1 Controller Upper Body retargeter."""

    # When True, debug sphere markers are created at construction time.
    enable_visualization: bool = False
    hand_joint_names: list[str] | None = None  # List of robot hand joint names; required (None raises at init)
    # Concrete retargeter class instantiated from this config.
    retargeter_type: type[RetargeterBase] = G1TriHandUpperBodyMotionControllerRetargeter

View File

@@ -0,0 +1,173 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import contextlib
from dataclasses import dataclass
import numpy as np
import torch
import isaaclab.sim as sim_utils
import isaaclab.utils.math as PoseUtils
from isaaclab.devices.device_base import DeviceBase
from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg
from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg
# This import exception is suppressed because g1_dex_retargeting_utils depends
# on pinocchio which is not available on Windows.
with contextlib.suppress(Exception):
from .g1_dex_retargeting_utils import G1TriHandDexRetargeting
class G1TriHandUpperBodyRetargeter(RetargeterBase):
    """Retargets OpenXR data to G1 upper body commands.

    This retargeter maps hand tracking data from OpenXR to wrist and hand joint commands for the G1 robot.
    It handles both left and right hands, converting poses of the hands in OpenXR format to appropriate wrist poses
    and joint angles for the G1 robot's upper body.
    """

    def __init__(
        self,
        cfg: G1TriHandUpperBodyRetargeterCfg,
    ):
        """Initialize the G1 upper body retargeter.

        Args:
            cfg: Configuration for the retargeter.

        Raises:
            ValueError: If ``cfg.hand_joint_names`` is None.
        """
        super().__init__(cfg)
        # Store device name for runtime retrieval
        self._sim_device = cfg.sim_device
        self._hand_joint_names = cfg.hand_joint_names
        # Initialize the hands controller (dex-retargeting based).
        # NOTE(review): G1TriHandDexRetargeting is imported under contextlib.suppress
        # (pinocchio is unavailable on Windows), so this raises NameError there — confirm intended.
        if cfg.hand_joint_names is not None:
            self._hands_controller = G1TriHandDexRetargeting(cfg.hand_joint_names)
        else:
            raise ValueError("hand_joint_names must be provided in configuration")
        # Initialize visualization if enabled
        self._enable_visualization = cfg.enable_visualization
        self._num_open_xr_hand_joints = cfg.num_open_xr_hand_joints
        if self._enable_visualization:
            marker_cfg = VisualizationMarkersCfg(
                prim_path="/Visuals/g1_hand_markers",
                markers={
                    "joint": sim_utils.SphereCfg(
                        radius=0.005,
                        visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
                    ),
                },
            )
            self._markers = VisualizationMarkers(marker_cfg)

    def retarget(self, data: dict) -> torch.Tensor:
        """Convert hand joint poses to robot end-effector commands.

        Args:
            data: Dictionary mapping tracking targets to joint data dictionaries.

        Returns:
            A tensor containing the retargeted commands:
            - Left wrist pose (7)
            - Right wrist pose (7)
            - Hand joint angles (len(hand_joint_names))
        """
        # Access the left and right hand data using the enum key
        left_hand_poses = data[DeviceBase.TrackingTarget.HAND_LEFT]
        right_hand_poses = data[DeviceBase.TrackingTarget.HAND_RIGHT]
        left_wrist = left_hand_poses.get("wrist")
        right_wrist = right_hand_poses.get("wrist")
        # Handle case where wrist data is not available
        if left_wrist is None or right_wrist is None:
            # Set to default pose if no data available.
            # pos=(0,0,0), quat=(1,0,0,0) (w,x,y,z)
            default_pose = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
            if left_wrist is None:
                left_wrist = default_pose
            if right_wrist is None:
                right_wrist = default_pose
        # Visualization if enabled: interleave joint positions into one marker
        # buffer (even rows = left hand, odd rows = right hand).
        # NOTE(review): this assumes each hand supplies exactly
        # num_open_xr_hand_joints / 2 poses — confirm against the OpenXR device.
        if self._enable_visualization:
            joints_position = np.zeros((self._num_open_xr_hand_joints, 3))
            joints_position[::2] = np.array([pose[:3] for pose in left_hand_poses.values()])
            joints_position[1::2] = np.array([pose[:3] for pose in right_hand_poses.values()])
            self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device))
        # Compute retargeted hand joints: each hand's angles are scattered into a
        # zero-filled array over the full joint set, at that hand's joint indices.
        left_hands_pos = self._hands_controller.compute_left(left_hand_poses)
        indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_left_joint_names()]
        left_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names()))
        left_retargeted_hand_joints[indexes] = left_hands_pos
        left_hand_joints = left_retargeted_hand_joints
        right_hands_pos = self._hands_controller.compute_right(right_hand_poses)
        indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_right_joint_names()]
        right_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names()))
        right_retargeted_hand_joints[indexes] = right_hands_pos
        right_hand_joints = right_retargeted_hand_joints
        # Element-wise sum merges the two hands: each array is zero outside its
        # own hand's indices, so no values overlap.
        retargeted_hand_joints = left_hand_joints + right_hand_joints
        # Convert numpy arrays to tensors and store in command buffer
        left_wrist_tensor = torch.tensor(
            self._retarget_abs(left_wrist, is_left=True), dtype=torch.float32, device=self._sim_device
        )
        right_wrist_tensor = torch.tensor(
            self._retarget_abs(right_wrist, is_left=False), dtype=torch.float32, device=self._sim_device
        )
        hand_joints_tensor = torch.tensor(retargeted_hand_joints, dtype=torch.float32, device=self._sim_device)
        # Combine all tensors into a single tensor
        return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor])

    def get_requirements(self) -> list[RetargeterBase.Requirement]:
        """Declare that this retargeter consumes hand-tracking data."""
        return [RetargeterBase.Requirement.HAND_TRACKING]

    def _retarget_abs(self, wrist: np.ndarray, is_left: bool) -> np.ndarray:
        """Handle absolute pose retargeting.

        Args:
            wrist: Wrist pose data from OpenXR.
            is_left: True for the left hand, False for the right hand.

        Returns:
            Retargeted wrist pose in USD control frame.
        """
        wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32)
        wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32)
        # Mirrored fixed offsets: each hand gets the opposite-signed rotation.
        if is_left:
            # Corresponds to a rotation of (0, 90, 90) in euler angles (x,y,z)
            combined_quat = torch.tensor([0.7071, 0, 0.7071, 0], dtype=torch.float32)
        else:
            # Corresponds to a rotation of (0, -90, -90) in euler angles (x,y,z)
            combined_quat = torch.tensor([0, -0.7071, 0, 0.7071], dtype=torch.float32)
        openxr_pose = PoseUtils.make_pose(wrist_pos, PoseUtils.matrix_from_quat(wrist_quat))
        transform_pose = PoseUtils.make_pose(torch.zeros(3), PoseUtils.matrix_from_quat(combined_quat))
        result_pose = PoseUtils.pose_in_A_to_pose_in_B(transform_pose, openxr_pose)
        pos, rot_mat = PoseUtils.unmake_pose(result_pose)
        quat = PoseUtils.quat_from_matrix(rot_mat)
        return np.concatenate([pos.numpy(), quat.numpy()])
@dataclass
class G1TriHandUpperBodyRetargeterCfg(RetargeterCfg):
    """Configuration for the G1 tri-hand upper-body hand-tracking retargeter."""

    # When True, debug sphere markers visualize the tracked OpenXR joints.
    enable_visualization: bool = False
    # Total number of visualized OpenXR joints (both hands interleaved).
    num_open_xr_hand_joints: int = 100
    hand_joint_names: list[str] | None = None  # List of robot hand joint names; required (None raises at init)
    # Concrete retargeter class instantiated from this config.
    retargeter_type: type[RetargeterBase] = G1TriHandUpperBodyRetargeter

View File

@@ -2,6 +2,15 @@
# All rights reserved. # All rights reserved.
# #
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
"""
python scripts/tools/record_demos.py \
--device cpu \
--task Isaac-PickPlace-Locomanipulation-G1-BrainCo-Abs-v0 \
--teleop_device handtracking \
--dataset_file ./datasets/dataset_g1_locomanip.hdf5 \
--num_demos 5 --enable_pinocchio
"""
"""This sub-module contains the functions that are specific to the locomanipulation environments.""" """This sub-module contains the functions that are specific to the locomanipulation environments."""

View File

@@ -0,0 +1,548 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import tempfile
import torch
from pink.tasks import DampingTask, FrameTask
import carb
import isaaclab.controllers.utils as ControllerUtils
import isaaclab.envs.mdp as base_mdp
import isaaclab.sim as sim_utils
from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg
from isaaclab.controllers.pink_ik import NullSpacePostureTask, PinkIKControllerCfg
from isaaclab.devices.device_base import DevicesCfg
from isaaclab.devices.openxr import ManusViveCfg, OpenXRDeviceCfg, XrCfg
# from isaaclab.devices.openxr.retargeters.humanoid.fourier.gr1t2_retargeter import GR1T2RetargeterCfg
from mindbot.devices.openxr.retargeters.humanoid.fourier.gr1t2_retargeter import GR1T2RetargeterCfg
from isaaclab.envs import ManagerBasedRLEnvCfg
from isaaclab.envs.mdp.actions.pink_actions_cfg import PinkInverseKinematicsActionCfg
from isaaclab.managers import EventTermCfg as EventTerm
from isaaclab.managers import ObservationGroupCfg as ObsGroup
from isaaclab.managers import ObservationTermCfg as ObsTerm
from isaaclab.managers import SceneEntityCfg
from isaaclab.managers import TerminationTermCfg as DoneTerm
from isaaclab.scene import InteractiveSceneCfg
from isaaclab.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg
from isaaclab.utils import configclass
from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR
from . import mdp
from mindbot.robot.gr1t2 import GR1T2_HIGH_PD_CFG # isort: skip
from mindbot.assets.table import TABLE_CFG
from mindbot.assets.centrifuge import CENTRIFUGE_CFG
from mindbot.utils.assets import MINDBOT_ASSETS_DIR
##
# Scene definition
##
@configclass
class ObjectTableSceneCfg(InteractiveSceneCfg):
    """Configuration for the GR1T2 Pick Place Base Scene.

    Declares the static assets (table, ground, light), the manipulable objects
    (centrifuge, tube rack, test tube) and the GR1T2 robot with its initial
    joint configuration.
    """

    # Packing table the robot works at (kinematic: it never moves).
    # NOTE(review): uses the literal "/World/envs/env_.*" prim path while other
    # assets use "{ENV_REGEX_NS}" — presumably equivalent; confirm.
    packing_table = AssetBaseCfg(
        prim_path="/World/envs/env_.*/PackingTable",
        init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, -0.15], rot=[1.0, 0.0, 0.0, 0.0]),
        spawn=UsdFileCfg(
            usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/PackingTable/packing_table.usd",
            rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True),
        ),
    )
    # table = AssetBaseCfg(
    #     prim_path="/World/envs/env_.*/Table",
    #     init_state=AssetBaseCfg.InitialStateCfg(pos=[0.6,-0.5,0.0],rot=[0.7071,0,0,0.7071]),
    #     spawn=UsdFileCfg(
    #         usd_path=f"{MINDBOT_ASSETS_DIR}/sences/Table_C_coll/Table_C.usd",
    #         rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True),
    #     ),
    # )
    # Articulated centrifuge on the table, spawned with its lid joint opened.
    centrifuge01 = CENTRIFUGE_CFG.replace(
        prim_path="{ENV_REGEX_NS}/Centrifuge01",
        init_state=ArticulationCfg.InitialStateCfg(
            pos=[0.0, 0.55, 0.85],
            rot=[1.0, 0.0, 0.0, 0.0],
            joint_pos={"RevoluteJoint": -1.7453},  # -100° (lid opened)
        ),
    )
    # centrifuge02 = CENTRIFUGE_CFG.replace(
    #     prim_path="{ENV_REGEX_NS}/Centrifuge02",
    #     init_state=ArticulationCfg.InitialStateCfg(
    #         pos=[0.5, -0.3, 0.85],
    #         rot=[-0.7071, 0.0, 0.0, 0.7071],
    #         joint_pos={"RevoluteJoint": -1.74},
    #     ),
    # )
    # Test-tube rack; kinematic so it stays put while tubes are manipulated.
    rack= RigidObjectCfg(
        prim_path="{ENV_REGEX_NS}/Rack",
        init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.3,0.4,0.8496],rot=[1,0,0,0]),
        spawn=UsdFileCfg(
            # usd_path=f"{MINDBOT_ASSETS_DIR}/laboratory_equipment/Test_Tube_Rack (2)/tube.usd",
            usd_path=f"{MINDBOT_ASSETS_DIR}/laboratory_equipment/Test_Tube_Rack (2)/Test_Tube_Rack_AA_01.usdc",
            scale=(1.0,1.0,1.0),
            rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True),
        ),
    )
    # Free rigid test tube (the object actually picked/placed); starts inside the rack.
    tube= RigidObjectCfg(
        prim_path="{ENV_REGEX_NS}/Tube",
        init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.32374,0.32415,0.862],rot=[1,0,0,0]),
        spawn=UsdFileCfg(
            # usd_path=f"{MINDBOT_ASSETS_DIR}/laboratory_equipment/Test_Tube_Rack (2)/tube.usd",
            usd_path=f"{MINDBOT_ASSETS_DIR}/laboratory_equipment/Collected_Test_Tube_AA_01/Test_Tube_AA_01.usdc",
            scale=(0.8,0.8,1.0),  # slightly thinner than the rack holes for easy insertion
            rigid_props=sim_utils.RigidBodyPropertiesCfg(),
            # rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True),
        ),
    )
    # object = RigidObjectCfg(
    #     prim_path="{ENV_REGEX_NS}/Object",
    #     # init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.45, 0.45, 0.9996], rot=[1, 0, 0, 0]),
    #     init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.65, 0.45, 0.9996], rot=[1, 0, 0, 0]),
    #     spawn=UsdFileCfg(
    #         usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/pick_place_task/pick_place_assets/steering_wheel.usd",
    #         scale=(0.75, 0.75, 0.75),
    #         rigid_props=sim_utils.RigidBodyPropertiesCfg(),
    #     ),
    # )
    # Humanoid robot configured for pick-place manipulation tasks.
    # Spawned raised (z=0.93) and yawed 90° about Z; arms start with elbows bent
    # (-90°) and all other joints (head, waist, legs, fingers) zeroed.
    robot: ArticulationCfg = GR1T2_HIGH_PD_CFG.replace(
        prim_path="/World/envs/env_.*/Robot",
        init_state=ArticulationCfg.InitialStateCfg(
            pos=(0, 0, 0.93),
            rot=(0.7071, 0, 0, 0.7071),
            joint_pos={
                # right-arm
                "right_shoulder_pitch_joint": 0.0,
                "right_shoulder_roll_joint": 0.0,
                "right_shoulder_yaw_joint": 0.0,
                "right_elbow_pitch_joint": -1.5708,
                "right_wrist_yaw_joint": 0.0,
                "right_wrist_roll_joint": 0.0,
                "right_wrist_pitch_joint": 0.0,
                # left-arm
                "left_shoulder_pitch_joint": 0.0,
                "left_shoulder_roll_joint": 0.0,
                "left_shoulder_yaw_joint": 0.0,
                "left_elbow_pitch_joint": -1.5708,
                "left_wrist_yaw_joint": 0.0,
                "left_wrist_roll_joint": 0.0,
                "left_wrist_pitch_joint": 0.0,
                # -- head, torso, legs and both hands (R_/L_ finger joints) at zero
                "head_.*": 0.0,
                "waist_.*": 0.0,
                ".*_hip_.*": 0.0,
                ".*_knee_.*": 0.0,
                ".*_ankle_.*": 0.0,
                "R_.*": 0.0,
                "L_.*": 0.0,
            },
            joint_vel={".*": 0.0},
        ),
    )
    # Ground plane
    ground = AssetBaseCfg(
        prim_path="/World/GroundPlane",
        spawn=GroundPlaneCfg(),
    )
    # Dome light for uniform scene illumination
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
    )
##
# MDP settings
##
@configclass
class ActionsCfg:
    """Action specifications for the MDP.

    Arm-only teleoperation: a Pink differential-IK action tracks left/right
    wrist pose targets with both 7-DoF arms, and a fixed joint-position action
    holds the centrifuge lid open. Dexterous-hand joints are not controlled.
    """

    # Pink IK action driving the 14 arm joints from two wrist pose targets.
    upper_body_ik = PinkInverseKinematicsActionCfg(
        pink_controlled_joint_names=[
            "left_shoulder_pitch_joint",
            "left_shoulder_roll_joint",
            "left_shoulder_yaw_joint",
            "left_elbow_pitch_joint",
            "left_wrist_yaw_joint",
            "left_wrist_roll_joint",
            "left_wrist_pitch_joint",
            "right_shoulder_pitch_joint",
            "right_shoulder_roll_joint",
            "right_shoulder_yaw_joint",
            "right_elbow_pitch_joint",
            "right_wrist_yaw_joint",
            "right_wrist_roll_joint",
            "right_wrist_pitch_joint",
        ],
        # Full dexterous-hand joint list, kept commented for re-enabling hand
        # control (must be paired with num_hand_joints=22 below):
        # hand_joint_names=[
        #     "L_index_proximal_joint",
        #     "L_middle_proximal_joint",
        #     "L_pinky_proximal_joint",
        #     "L_ring_proximal_joint",
        #     "L_thumb_proximal_yaw_joint",
        #     "R_index_proximal_joint",
        #     "R_middle_proximal_joint",
        #     "R_pinky_proximal_joint",
        #     "R_ring_proximal_joint",
        #     "R_thumb_proximal_yaw_joint",
        #     "L_index_intermediate_joint",
        #     "L_middle_intermediate_joint",
        #     "L_pinky_intermediate_joint",
        #     "L_ring_intermediate_joint",
        #     "L_thumb_proximal_pitch_joint",
        #     "R_index_intermediate_joint",
        #     "R_middle_intermediate_joint",
        #     "R_pinky_intermediate_joint",
        #     "R_ring_intermediate_joint",
        #     "R_thumb_proximal_pitch_joint",
        #     "L_thumb_distal_joint",
        #     "R_thumb_distal_joint",
        # ],
        hand_joint_names=[],  # arm-only control: no hand joints actuated
        target_eef_link_names={
            "left_wrist": "left_hand_pitch_link",
            "right_wrist": "right_hand_pitch_link",
        },
        # the robot in the sim scene we are controlling
        asset_name="robot",
        # Configuration for the IK controller.
        # The frame names are the ones present in the URDF file.
        # The URDF has to be generated from the USD that is being used in the scene
        # (see PickPlaceGR1T2EnvCfg.__post_init__, which fills in urdf_path/mesh_path).
        controller=PinkIKControllerCfg(
            articulation_name="robot",
            base_link_name="base_link",
            # num_hand_joints=22,
            num_hand_joints=0,  # must match the (empty) hand_joint_names above
            show_ik_warnings=False,
            # Determines whether Pink IK solver will fail due to a joint limit violation
            fail_on_joint_limit_violation=False,
            variable_input_tasks=[
                # Wrist pose tracking: position is weighted 8x over orientation.
                FrameTask(
                    "GR1T2_fourier_hand_6dof_left_hand_pitch_link",
                    position_cost=8.0,  # [cost] / [m]
                    orientation_cost=1.0,  # [cost] / [rad]
                    lm_damping=12,  # dampening for solver for step jumps
                    gain=0.5,
                ),
                FrameTask(
                    "GR1T2_fourier_hand_6dof_right_hand_pitch_link",
                    position_cost=8.0,  # [cost] / [m]
                    orientation_cost=1.0,  # [cost] / [rad]
                    lm_damping=12,  # dampening for solver for step jumps
                    gain=0.5,
                ),
                # Velocity damping to stabilize the IK solution.
                DampingTask(
                    cost=0.5,  # [cost] * [s] / [rad]
                ),
                # Keep arms and waist near a preferred posture in the null space
                # of the two frame tasks.
                NullSpacePostureTask(
                    cost=0.5,
                    lm_damping=1,
                    controlled_frames=[
                        "GR1T2_fourier_hand_6dof_left_hand_pitch_link",
                        "GR1T2_fourier_hand_6dof_right_hand_pitch_link",
                    ],
                    controlled_joints=[
                        "left_shoulder_pitch_joint",
                        "left_shoulder_roll_joint",
                        "left_shoulder_yaw_joint",
                        "left_elbow_pitch_joint",
                        "right_shoulder_pitch_joint",
                        "right_shoulder_roll_joint",
                        "right_shoulder_yaw_joint",
                        "right_elbow_pitch_joint",
                        "waist_yaw_joint",
                        "waist_pitch_joint",
                        "waist_roll_joint",
                    ],
                ),
            ],
            fixed_input_tasks=[],
            xr_enabled=bool(carb.settings.get_settings().get("/app/xr/enabled")),
        ),
    )

    # Every step, force the centrifuge lid's PD target to -100 deg so that
    # gravity or initialization issues cannot make the lid fall shut.
    centrifuge_lid_hold = base_mdp.JointPositionActionCfg(
        asset_name="centrifuge01",
        joint_names=["RevoluteJoint"],
        scale=0.0,  # RL/teleop action input does not affect this joint
        offset=-1.7453,  # -100 deg, written as the PD target every step
    )
@configclass
class ObservationsCfg:
    """Observation specifications for the MDP."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for policy group with state values."""

        # Action applied on the previous step.
        actions = ObsTerm(func=mdp.last_action)
        # Full robot joint positions.
        robot_joint_pos = ObsTerm(
            func=base_mdp.joint_pos,
            params={"asset_cfg": SceneEntityCfg("robot")},
        )
        # Robot root pose in the world frame (position + quaternion).
        robot_root_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("robot")})
        robot_root_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("robot")})
        # object_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("object")})
        # object_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("object")})
        # States of every robot link (custom task-level observation).
        robot_links_state = ObsTerm(func=mdp.get_all_robot_link_state)
        # End-effector poses, read from the wrist roll links.
        left_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "left_hand_roll_link"})
        left_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "left_hand_roll_link"})
        right_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "right_hand_roll_link"})
        right_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "right_hand_roll_link"})
        # Hand joints are observed (for logging/recording) even though the
        # actions do not drive them.
        hand_joint_state = ObsTerm(func=mdp.get_robot_joint_state, params={"joint_names": ["R_.*", "L_.*"]})
        head_joint_state = ObsTerm(
            func=mdp.get_robot_joint_state,
            params={"joint_names": ["head_pitch_joint", "head_roll_joint", "head_yaw_joint"]},
        )
        # object = ObsTerm(
        #     func=mdp.object_obs,
        #     params={"left_eef_link_name": "left_hand_roll_link", "right_eef_link_name": "right_hand_roll_link"},
        # )

        def __post_init__(self):
            # Raw observations: no noise corruption, and keep terms as a dict
            # instead of concatenating into a single flat tensor.
            self.enable_corruption = False
            self.concatenate_terms = False

    # observation groups
    policy: PolicyCfg = PolicyCfg()
@configclass
class TerminationsCfg:
    """Termination terms for the MDP."""

    # Episodes end only on timeout; object-drop and success terminations are
    # intentionally disabled (kept below for reference).
    time_out = DoneTerm(func=mdp.time_out, time_out=True)
    # object_dropping = DoneTerm(
    #     func=mdp.root_height_below_minimum, params={"minimum_height": 0.5, "asset_cfg": SceneEntityCfg("object")}
    # )
    # success = DoneTerm(func=mdp.task_done_pick_place, params={"task_link_name": "right_hand_roll_link"})
@configclass
class EventCfg:
    """Configuration for events.

    All terms run in ``reset`` mode with zero-width ranges, i.e. every reset
    deterministically restores the default state (no randomization).
    """

    # Reset the centrifuge articulation joints (lid back to its default angle).
    reset_centrifuge_joints = EventTerm(
        func=base_mdp.reset_joints_by_offset,
        mode="reset",
        params={
            "asset_cfg": SceneEntityCfg("centrifuge01"),
            "position_range": (0.0, 0.0),  # zero range -> exact default pose
            "velocity_range": (0.0, 0.0),
        },
    )
    # Reset all robot joints to their configured defaults.
    reset_robot_joints = EventTerm(
        func=base_mdp.reset_joints_by_offset,
        mode="reset",
        params={
            "asset_cfg": SceneEntityCfg("robot"),
            "position_range": (0.0, 0.0),
            "velocity_range": (0.0, 0.0),
        },
    )
    # Reset the robot root pose/velocity (empty ranges -> default root state).
    reset_robot_root = EventTerm(
        func=base_mdp.reset_root_state_uniform,
        mode="reset",
        params={
            "pose_range": {},
            "velocity_range": {},
            "asset_cfg": SceneEntityCfg("robot"),
        },
    )
    # Reset the test tube to its initial pose.
    reset_tube = EventTerm(
        func=base_mdp.reset_root_state_uniform,
        mode="reset",
        params={
            "pose_range": {},
            "velocity_range": {},
            "asset_cfg": SceneEntityCfg("tube"),
        },
    )
    # Reset the tube rack to its initial pose.
    reset_rack = EventTerm(
        func=base_mdp.reset_root_state_uniform,
        mode="reset",
        params={
            "pose_range": {},
            "velocity_range": {},
            "asset_cfg": SceneEntityCfg("rack"),
        },
    )
# @configclass
# class EventCfg:
# """Configuration for events."""
# reset_centrifuge_joints = EventTerm(
# func=base_mdp.reset_joints_by_offset,
# mode="reset",
# params={
# "asset_cfg": SceneEntityCfg("centrifuge01"),
# "position_range": (0.0, 0.0),
# "velocity_range": (0.0, 0.0),
# },
# )
# # reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset")
# # reset_object = EventTerm(
# # func=mdp.reset_root_state_uniform,
# # mode="reset",
# # params={
# # "pose_range": {
# # "x": [-0.01, 0.01],
# # "y": [-0.01, 0.01],
# # },
# # "velocity_range": {},
# # "asset_cfg": SceneEntityCfg("object"),
# # },
# # )
@configclass
class PickPlaceGR1T2EnvCfg(ManagerBasedRLEnvCfg):
    """Configuration for the GR1T2 environment.

    Arm-only teleoperation variant: two wrist pose targets are tracked by the
    Pink IK action, the dexterous hands are not actuated, and the centrifuge
    lid is held open by a fixed joint-position action.
    """

    # Scene settings
    scene: ObjectTableSceneCfg = ObjectTableSceneCfg(num_envs=1, env_spacing=2.5, replicate_physics=True)
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    # MDP settings
    terminations: TerminationsCfg = TerminationsCfg()
    events: EventCfg = EventCfg()
    # Unused managers
    commands = None
    rewards = None
    curriculum = None

    # Position of the XR anchor in the world frame (identity pose).
    xr: XrCfg = XrCfg(
        anchor_pos=(0.0, 0.0, 0.0),
        anchor_rot=(1.0, 0.0, 0.0, 0.0),
    )

    # OpenXR hand tracking has 26 joints per hand
    NUM_OPENXR_HAND_JOINTS = 26

    # Temporary directory for URDF files generated in __post_init__
    temp_urdf_dir = tempfile.gettempdir()

    # Idle action to hold the robot in its default pose. Hand joints are
    # disabled in ActionsCfg, so the action is 15-dimensional:
    #   [left wrist pos (3), left wrist quat (4),
    #    right wrist pos (3), right wrist quat (4),
    #    centrifuge lid (1)]
    idle_action = torch.tensor(
        [
            # left wrist pose target
            -0.22878,
            0.2536,
            1.0953,
            0.5,
            0.5,
            -0.5,
            0.5,
            # right wrist pose target
            0.22878,
            0.2536,
            1.0953,
            0.5,
            0.5,
            -0.5,
            0.5,
            # centrifuge lid: scale=0 in ActionsCfg, so the value is irrelevant
            0.0,
        ]
    )

    def __post_init__(self):
        """Post initialization."""
        # general settings
        self.decimation = 6
        self.episode_length_s = 20.0
        # simulation settings
        self.sim.dt = 1 / 120  # 120Hz
        self.sim.render_interval = 2

        # Convert the scene robot's USD to URDF (with revolute joints changed
        # to fixed) so that the Pink IK controller can load it.
        temp_urdf_output_path, temp_urdf_meshes_output_path = ControllerUtils.convert_usd_to_urdf(
            self.scene.robot.spawn.usd_path, self.temp_urdf_dir, force_conversion=True
        )
        # Set the URDF and mesh paths for the IK controller
        self.actions.upper_body_ik.controller.urdf_path = temp_urdf_output_path
        self.actions.upper_body_ik.controller.mesh_path = temp_urdf_meshes_output_path

        # Teleop devices. Both retargeters read the hand-joint list from the IK
        # action config (empty here -> arm-only retargeting) so the two stay in
        # sync if hand control is ever re-enabled.
        self.teleop_devices = DevicesCfg(
            devices={
                "handtracking": OpenXRDeviceCfg(
                    retargeters=[
                        GR1T2RetargeterCfg(
                            enable_visualization=True,
                            # number of joints in both hands
                            num_open_xr_hand_joints=2 * self.NUM_OPENXR_HAND_JOINTS,
                            sim_device=self.sim.device,
                            hand_joint_names=self.actions.upper_body_ik.hand_joint_names,
                        ),
                    ],
                    sim_device=self.sim.device,
                    xr_cfg=self.xr,
                ),
                "manusvive": ManusViveCfg(
                    retargeters=[
                        GR1T2RetargeterCfg(
                            enable_visualization=True,
                            # use the shared constant instead of a magic 2 * 26
                            num_open_xr_hand_joints=2 * self.NUM_OPENXR_HAND_JOINTS,
                            sim_device=self.sim.device,
                            hand_joint_names=self.actions.upper_body_ik.hand_joint_names,
                        ),
                    ],
                    sim_device=self.sim.device,
                    xr_cfg=self.xr,
                ),
            }
        )

View File

@@ -21,7 +21,7 @@ import os
MINDBOT_ASSETS_DIR: str = os.environ.get( MINDBOT_ASSETS_DIR: str = os.environ.get(
"MINDBOT_ASSETS_DIR", "MINDBOT_ASSETS_DIR",
# "/home/tangger/LYT/maic_usd_assets_moudle", # "/home/tangger/LYT/maic_usd_assets_moudle",
# "/home/maic/xh/maic_usd_assets_moudle" "/home/maic/xh/maic_usd_assets_moudle"
"/workspace/maic_usd_assets_moudle" # "/workspace/maic_usd_assets_moudle"
# "/home/maic/LYT/maic_usd_assets_moudle", # "/home/maic/LYT/maic_usd_assets_moudle",
) )