Skip to content

Instantly share code, notes, and snippets.

@miguelalonsojr
Last active January 2, 2026 15:14
Show Gist options
  • Select an option

  • Save miguelalonsojr/d0e6d25e91eed9575bd7a05543ed9125 to your computer and use it in GitHub Desktop.

Select an option

Save miguelalonsojr/d0e6d25e91eed9575bd7a05543ed9125 to your computer and use it in GitHub Desktop.
diff --git a/scripts/environments/teleoperation/teleop_se3_agent.py b/scripts/environments/teleoperation/teleop_se3_agent.py
index 9b28ad2410..05b2ba9567 100644
--- a/scripts/environments/teleoperation/teleop_se3_agent.py
+++ b/scripts/environments/teleoperation/teleop_se3_agent.py
@@ -63,7 +63,7 @@ import gymnasium as gym
import logging
import torch
-from isaaclab.devices import Se3Gamepad, Se3GamepadCfg, Se3Keyboard, Se3KeyboardCfg, Se3SpaceMouse, Se3SpaceMouseCfg
+from isaaclab.devices import Se3Gamepad, Se3GamepadCfg, Se3Keyboard, Se3KeyboardCfg, Se3SpaceMouse, Se3SpaceMouseCfg, Se2Keyboard, Se2KeyboardCfg
from isaaclab.devices.openxr import remove_camera_configs
from isaaclab.devices.teleop_device_factory import create_teleop_device
from isaaclab.managers import TerminationTermCfg as DoneTerm
@@ -163,6 +163,11 @@ def main() -> None:
teleoperation_active = False
print("Teleoperation deactivated")
    def toggle_teleoperation() -> None:
        """Flip the teleoperation on/off flag shared with the enclosing main()."""
        # `teleoperation_active` is a closure variable owned by main(); this
        # callback inverts it each time the bound key is pressed.
        nonlocal teleoperation_active
        teleoperation_active = not teleoperation_active
        print(f"Teleoperation toggled: {teleoperation_active}")
+
# Create device config if not already in env_cfg
teleoperation_callbacks: dict[str, Callable[[], None]] = {
"R": reset_recording_instance,
@@ -237,6 +242,11 @@ def main() -> None:
print("Teleoperation started. Press 'R' to reset the environment.")
+ keyboard = Se2Keyboard(Se2KeyboardCfg())
+ keyboard.add_callback("R", reset_recording_instance)
+ keyboard.add_callback("S", toggle_teleoperation)
+
+
# simulate environment
while simulation_app.is_running():
try:
diff --git a/source/isaaclab/isaaclab/devices/device_base.py b/source/isaaclab/isaaclab/devices/device_base.py
index 70e0e391a9..5629e87bad 100644
--- a/source/isaaclab/isaaclab/devices/device_base.py
+++ b/source/isaaclab/isaaclab/devices/device_base.py
@@ -140,6 +140,7 @@ class DeviceBase(ABC):
HEAD = 2
CONTROLLER_LEFT = 3
CONTROLLER_RIGHT = 4
+ BODY = 5
class MotionControllerDataRowIndex(Enum):
"""Rows in the motion-controller 2x7 array."""
diff --git a/source/isaaclab/isaaclab/devices/openxr/body_osc_receiver.py b/source/isaaclab/isaaclab/devices/openxr/body_osc_receiver.py
new file mode 100644
index 0000000000..47cb78b48f
--- /dev/null
+++ b/source/isaaclab/isaaclab/devices/openxr/body_osc_receiver.py
@@ -0,0 +1,241 @@
+# body_osc_receiver.py
+from pythonosc.dispatcher import Dispatcher
+from pythonosc.osc_server import ThreadingOSCUDPServer
+import threading
+import numpy as np
+
+# Define the joints you care about (example layout similar to VRChat OSCBody)
# Joints tracked by the OSC receiver (example layout similar to VRChat OSCBody).
# NOTE: the list order is significant — numeric OSC tracker tokens are used
# directly as row indices into this list (see BodyOscReceiver._on_pos), so
# tracker "1" maps to "hip", "2" to "chest", and so on.
JOINT_NAMES = [
    "head",
    "hip",
    "chest",
    "left_foot",
    "right_foot",
    "left_knee",
    "right_knee",
    "left_elbow",
    "right_elbow",
]

# Number of tracked joints (rows in the pose table).
NUM_JOINTS = len(JOINT_NAMES)
# Values stored per joint: 3 position + 4 rotation components.
DOF_PER_JOINT = 7  # 3 pos + 4 rot (quat)
+
+def _normalize(v, eps=1e-6):
+ """Normalize a vector."""
+ norm = np.linalg.norm(v)
+ if norm < eps:
+ return v
+ return v / norm
+
+
+def _quat_from_forward_up_zup_xfwd(
+ forward: np.ndarray,
+ up_ref: np.ndarray = np.array([0.0, 0.0, 1.0], dtype=np.float32),
+) -> np.ndarray:
+ """
+ Build a quaternion (x, y, z, w) for a coordinate system where:
+ - +X is "forward"
+ - +Z is "up"
+ - +Y is chosen so X x Y = Z (right-handed)
+
+ `forward` is a 3D vector in *world* coordinates pointing in the desired +X direction.
+ `up_ref` is a reference up vector (world), usually [0, 0, 1].
+
+ We construct an orthonormal basis (X, Y, Z) in world coords:
+ X = normalized(forward)
+ Z = up direction, projected to be orthogonal to X, close to up_ref
+ Y = normalized(cross(Z, X)) so that X x Y = Z
+
+ The resulting rotation matrix has columns [X, Y, Z], and we convert that to a quaternion.
+ """
+ f = _normalize(forward.astype(np.float32))
+ if np.allclose(f, 0.0):
+ # Default identity (no rotation)
+ return np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32)
+
+ # Project up_ref onto plane orthogonal to forward
+ u_ref = up_ref.astype(np.float32)
+ up_proj = u_ref - np.dot(u_ref, f) * f
+ if np.linalg.norm(up_proj) < 1e-6:
+ # forward is almost parallel to up_ref; pick an alternate
+ u_ref_alt = np.array([0.0, 1.0, 0.0], dtype=np.float32)
+ up_proj = u_ref_alt - np.dot(u_ref_alt, f) * f
+
+ z_axis = _normalize(up_proj) # world "up" axis
+ x_axis = f # world "forward" axis
+ y_axis = _normalize(np.cross(z_axis, x_axis)) # ensure right-handed: X x Y = Z
+
+ # Rotation matrix with columns [X, Y, Z]
+ R = np.stack([x_axis, y_axis, z_axis], axis=1) # 3x3
+
+ # Convert rotation matrix to quaternion (x, y, z, w)
+ trace = np.trace(R)
+ if trace > 0.0:
+ s = 0.5 / np.sqrt(trace + 1.0)
+ qw = 0.25 / s
+ qx = (R[2, 1] - R[1, 2]) * s
+ qy = (R[0, 2] - R[2, 0]) * s
+ qz = (R[1, 0] - R[0, 1]) * s
+ else:
+ if R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:
+ s = 2.0 * np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2])
+ qw = (R[2, 1] - R[1, 2]) / s
+ qx = 0.25 * s
+ qy = (R[0, 1] + R[1, 0]) / s
+ qz = (R[0, 2] + R[2, 0]) / s
+ elif R[1, 1] > R[2, 2]:
+ s = 2.0 * np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2])
+ qw = (R[0, 2] - R[2, 0]) / s
+ qx = (R[0, 1] + R[1, 0]) / s
+ qy = 0.25 * s
+ qz = (R[1, 2] + R[2, 1]) / s
+ else:
+ s = 2.0 * np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1])
+ qw = (R[1, 0] - R[0, 1]) / s
+ qx = (R[0, 2] + R[2, 0]) / s
+ qy = (R[1, 2] + R[2, 1]) / s
+ qz = 0.25 * s
+
+ q = np.array([qx, qy, qz, qw], dtype=np.float32)
+ q = _normalize(q) # normalize quaternion
+ return q
+
+
class BodyOscReceiver:
    """Background OSC listener that accumulates body-tracker poses.

    Incoming ``/tracking/trackers/<token>/position`` messages update a
    per-joint pose table of shape (NUM_JOINTS, 7): columns 0-2 hold the
    position, columns 3-6 hold an (x, y, z, w) quaternion. Orientations are
    not received over OSC; they are derived heuristically from the joint
    positions via :meth:`recompute_rotations`.
    """

    # (target, segment start, segment end): the heuristic orientation of
    # ``target`` points its +X axis from ``segment start`` toward
    # ``segment end``, with +Z kept close to world up.
    _ROTATION_SEGMENTS = (
        ("hip", "hip", "chest"),
        ("chest", "chest", "head"),
        ("head", "chest", "head"),
        ("left_foot", "hip", "left_foot"),
        ("right_foot", "hip", "right_foot"),
        ("left_knee", "hip", "left_knee"),
        ("right_knee", "hip", "right_knee"),
        ("left_elbow", "chest", "left_elbow"),
        ("right_elbow", "chest", "right_elbow"),
    )

    def __init__(self, ip="127.0.0.1", port=9000):
        self.joint_index = {}
        for row, joint_name in enumerate(JOINT_NAMES):
            self.joint_index[joint_name] = row

        # Pose table: row j = [px, py, pz, qx, qy, qz, qw] for joint j,
        # initialized to the identity quaternion at the origin.
        self._data = np.zeros((NUM_JOINTS, DOF_PER_JOINT), dtype=np.float32)
        self._data[:, 3:] = np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32)
        self._lock = threading.Lock()

        dispatcher = Dispatcher()
        # Wildcard segment matches any joint name / numeric tracker id.
        dispatcher.map("/tracking/trackers/*/position", self._on_pos)
        # dispatcher.map("/tracking/trackers/*/rot", self._on_rot)

        self._server = ThreadingOSCUDPServer((ip, port), dispatcher)
        self._thread = threading.Thread(target=self._server.serve_forever, daemon=True)
        self._thread.start()
        print(f"[BodyOscReceiver] Listening on {ip}:{port}")

    def _on_pos(self, addr, *args):
        """OSC handler for position messages; expects args = (x, y, z).

        Address examples:
            /tracking/trackers/head/position
            /tracking/trackers/3/position
        """
        if len(args) < 3:
            return

        segments = addr.split("/")  # ["", "tracking", "trackers", "<joint>", "position"]
        if len(segments) < 5:
            return
        token = segments[3]

        # "head" arrives by name; every other tracker arrives as a numeric id
        # that indexes JOINT_NAMES directly.
        if token == "head":
            row = self.joint_index.get(token)
        else:
            try:
                row = int(token)
            except ValueError:
                return

        if row is None or not (0 <= row < NUM_JOINTS):
            return

        # Swap the 2nd/3rd incoming components (presumably a Y-up sender
        # mapped to the Z-up convention used here — confirm with the sender).
        x, z, y = args[:3]
        with self._lock:
            self._data[row, 0:3] = [x, y, z]

    def recompute_rotations(self):
        """Re-derive every joint orientation from the current positions."""
        with self._lock:
            self._recompute_rotations_locked()

    def _recompute_rotations_locked(self):
        """Recompute heuristic rotations for all joints in _ROTATION_SEGMENTS.

        The caller must already hold ``self._lock``.
        """
        positions = self._data[:, 0:3]
        lookup = self.joint_index.get
        for target, start, end in self._ROTATION_SEGMENTS:
            target_row = lookup(target)
            start_row = lookup(start)
            end_row = lookup(end)
            if target_row is None or start_row is None or end_row is None:
                continue
            forward = positions[end_row] - positions[start_row]
            self._data[target_row, 3:7] = _quat_from_forward_up_zup_xfwd(forward)

    def get_flat(self):
        """Return a copy as a flat (NUM_JOINTS * 7,) float32 array."""
        with self._lock:
            return self._data.flatten()

    def get_matrix(self):
        """Return a copy of the full (NUM_JOINTS, 7) pose table."""
        with self._lock:
            return self._data.copy()

    def get_position(self, joint_name):
        """Return a copy of the (3,) position for *joint_name*."""
        with self._lock:
            row = JOINT_NAMES.index(joint_name)
            return self._data[row, 0:3].copy()

    def get_pose(self, joint_name):
        """Return a copy of the 7-element [pos, quat(x, y, z, w)] pose for *joint_name*."""
        with self._lock:
            row = JOINT_NAMES.index(joint_name)
            return self._data[row].copy()
+
+
if __name__ == "__main__":
    # Standalone smoke test: start the OSC listener on port 9000 and print the
    # first two joint poses (14 values) at ~10 Hz. Runs until interrupted.
    receiver = BodyOscReceiver(port=9000)
    import time
    while True:
        # Refresh the heuristic orientations before reading the observation.
        receiver.recompute_rotations()
        obs = receiver.get_flat()
        print(obs[:14])  # first two joints
        time.sleep(0.1)
diff --git a/source/isaaclab/isaaclab/devices/openxr/openxr_device.py b/source/isaaclab/isaaclab/devices/openxr/openxr_device.py
index e929d102a5..6c5a20ceda 100644
--- a/source/isaaclab/isaaclab/devices/openxr/openxr_device.py
+++ b/source/isaaclab/isaaclab/devices/openxr/openxr_device.py
@@ -24,6 +24,7 @@ from isaaclab.devices.retargeter_base import RetargeterBase
from ..device_base import DeviceBase, DeviceCfg
from .xr_anchor_utils import XrAnchorSynchronizer
from .xr_cfg import XrCfg
+from .body_osc_receiver import BodyOscReceiver
# For testing purposes, we need to mock the XRCore, XRPoseValidityFlags classes
XRCore = None
@@ -35,22 +36,35 @@ with contextlib.suppress(ModuleNotFoundError):
from isaacsim.core.prims import SingleXFormPrim
+# Body tracking joint/tracker names for full body tracking
# Body tracking joint/tracker names for full body tracking.
# NOTE(review): this mirrors JOINT_NAMES in body_osc_receiver.py minus "head"
# (the head pose is handled separately by the headset) — TODO confirm the two
# lists are kept in sync if either is extended.
BODY_TRACKER_NAMES = [
    "hip",
    "chest",
    "left_foot",
    "right_foot",
    "left_knee",
    "right_knee",
    "left_elbow",
    "right_elbow",
]
+
class OpenXRDevice(DeviceBase):
"""An OpenXR-powered device for teleoperation and interaction.
- This device tracks hand joints using OpenXR and makes them available as:
+ This device tracks hand joints, head pose, and body tracking data using OpenXR and makes them available as:
1. A dictionary of tracking data (when used without retargeters)
2. Retargeted commands for robot control (when retargeters are provided)
Raw data format (_get_raw_data output):
- * A dictionary with keys matching TrackingTarget enum values (HAND_LEFT, HAND_RIGHT, HEAD)
+ * A dictionary with keys matching TrackingTarget enum values (HAND_LEFT, HAND_RIGHT, HEAD, BODY)
* Each hand tracking entry contains a dictionary of joint poses
* Each joint pose is a 7D vector (x, y, z, qw, qx, qy, qz) in meters and quaternion units
* Joint names are defined in HAND_JOINT_NAMES from isaaclab.devices.openxr.common
* Supported joints include palm, wrist, and joints for thumb, index, middle, ring and little fingers
+ * Body tracking data contains a dictionary of tracker poses for full body tracking
Teleop commands:
The device responds to several teleop commands that can be subscribed to via add_callback():
@@ -59,7 +73,7 @@ class OpenXRDevice(DeviceBase):
* "STOP": Pause hand tracking data flow
* "RESET": Reset the tracking and signal simulation reset
- The device tracks the left hand, right hand, head position, or any combination of these
+ The device tracks the left hand, right hand, head position, body trackers, or any combination of these
based on the TrackingTarget enum values. When retargeters are provided, the raw tracking
data is transformed into robot control commands suitable for teleoperation.
"""
@@ -67,9 +81,9 @@ class OpenXRDevice(DeviceBase):
TELEOP_COMMAND_EVENT_TYPE = "teleop_command"
def __init__(
- self,
- cfg: OpenXRDeviceCfg,
- retargeters: list[RetargeterBase] | None = None,
+ self,
+ cfg: OpenXRDeviceCfg,
+ retargeters: list[RetargeterBase] | None = None,
):
"""Initialize the OpenXR device.
@@ -81,6 +95,7 @@ class OpenXRDevice(DeviceBase):
self._xr_cfg = cfg.xr_cfg or XrCfg()
self._additional_callbacks = dict()
self._xr_core = XRCore.get_singleton() if XRCore is not None else None
+ self._body_osc_receiver = BodyOscReceiver(port=9000)
self._vc_subscription = (
self._xr_core.get_message_bus().create_subscription_to_pop_by_type(
carb.events.type_from_string(self.TELEOP_COMMAND_EVENT_TYPE), self._on_teleop_command
@@ -95,6 +110,9 @@ class OpenXRDevice(DeviceBase):
self._previous_joint_poses_right = {name: default_pose.copy() for name in HAND_JOINT_NAMES}
self._previous_headpose = default_pose.copy()
+ # Initialize body tracking dictionaries
+ self._previous_body_trackers = {name: default_pose.copy() for name in BODY_TRACKER_NAMES}
+
if self._xr_cfg.anchor_prim_path is not None:
anchor_path = self._xr_cfg.anchor_prim_path
if anchor_path.endswith("/"):
@@ -209,6 +227,11 @@ class OpenXRDevice(DeviceBase):
msg += "\t\t- Thumb (tip, intermediate joints)\n"
msg += "\t\t- Fingers (tip, distal, intermediate, proximal)\n"
+ # Add body tracking information
+ msg += "\t----------------------------------------------\n"
+ msg += "\tBody Tracking: OSC Tracker support\n"
+ msg += f"\t\tSupported tracker roles: {', '.join(BODY_TRACKER_NAMES)}\n"
+
return msg
"""
@@ -220,6 +243,7 @@ class OpenXRDevice(DeviceBase):
self._previous_joint_poses_left = {name: default_pose.copy() for name in HAND_JOINT_NAMES}
self._previous_joint_poses_right = {name: default_pose.copy() for name in HAND_JOINT_NAMES}
self._previous_headpose = default_pose.copy()
+ self._previous_body_trackers = {name: default_pose.copy() for name in BODY_TRACKER_NAMES}
if hasattr(self, "_anchor_sync") and self._anchor_sync is not None:
self._anchor_sync.reset()
@@ -237,10 +261,11 @@ class OpenXRDevice(DeviceBase):
"""Get the latest tracking data from the OpenXR runtime.
Returns:
- Dictionary with TrackingTarget enum keys (HAND_LEFT, HAND_RIGHT, HEAD) containing:
+ Dictionary with TrackingTarget enum keys (HAND_LEFT, HAND_RIGHT, HEAD, BODY) containing:
- Left hand joint poses: Dictionary of 26 joints with position and orientation
- Right hand joint poses: Dictionary of 26 joints with position and orientation
- Head pose: Single 7-element array with position and orientation
+ - Body trackers: Dictionary of tracker poses with position and orientation
Each pose is represented as a 7-element array: [x, y, z, qw, qx, qy, qz]
where the first 3 elements are position and the last 4 are quaternion orientation.
@@ -260,6 +285,9 @@ class OpenXRDevice(DeviceBase):
if RetargeterBase.Requirement.HEAD_TRACKING in self._required_features:
data[DeviceBase.TrackingTarget.HEAD] = self._calculate_headpose()
+ if RetargeterBase.Requirement.BODY_TRACKING in self._required_features:
+ data[DeviceBase.TrackingTarget.BODY] = self._calculate_body_trackers()
+
if RetargeterBase.Requirement.MOTION_CONTROLLER in self._required_features:
# Optionally include motion controller pose+inputs if devices are available
try:
@@ -282,7 +310,7 @@ class OpenXRDevice(DeviceBase):
"""
def _calculate_joint_poses(
- self, hand_device: Any, previous_joint_poses: dict[str, np.ndarray]
+ self, hand_device: Any, previous_joint_poses: dict[str, np.ndarray]
) -> dict[str, np.ndarray]:
"""Calculate and update joint poses for a hand device.
@@ -357,15 +385,60 @@ class OpenXRDevice(DeviceBase):
return self._previous_headpose
    def _calculate_body_trackers(self) -> dict[str, np.ndarray]:
        """Calculate and update body tracker poses.

        Retrieves the current pose of each supported tracker role from the
        OSC body receiver (``self._body_osc_receiver``) and updates the cached
        tracker poses. If a tracker lookup fails or raises, the previously
        cached pose for that tracker is kept.

        Returns:
            Dictionary mapping tracker names to their poses. Each pose is a
            7-element numpy array: [x, y, z, qw, qx, qy, qz] — position first,
            then a w-first quaternion orientation.
        """
        # NOTE(review): poses come from the OSC receiver, not from XR core,
        # yet we bail out when XR core is unavailable — confirm this gating is
        # intentional (e.g. an active headset session is required anyway).
        if self._xr_core is None:
            return self._previous_body_trackers

        # Iterate through all supported body tracker roles
        for tracker_name in BODY_TRACKER_NAMES:
            try:
                # Receiver pose layout is [px, py, pz, qx, qy, qz, qw].
                tracker_pose = self._body_osc_receiver.get_pose(tracker_name)

                if tracker_pose is not None:
                    position = tracker_pose[:3]
                    quat = tracker_pose[3:]

                    # Reorder the quaternion from the receiver's (x, y, z, w)
                    # storage to the (w, x, y, z) convention of this device.
                    self._previous_body_trackers[tracker_name] = np.array(
                        [
                            position[0],
                            position[1],
                            position[2],
                            quat[3],
                            quat[0],
                            quat[1],
                            quat[2],
                        ],
                        dtype=np.float32,
                    )
            except Exception as e:
                # If a specific tracker is not available or throws an error, keep previous value
                logger.debug(f"Body tracker '{tracker_name}' not available or error occurred: {e}")
                continue

        return self._previous_body_trackers
+
# -----------------------------
# Controller button binding utilities
# -----------------------------
def _bind_button_press(
- self,
- device_path: str,
- button_name: str,
- event_name: str,
- on_button_press: Callable[[carb.events.IEvent], None],
+ self,
+ device_path: str,
+ button_name: str,
+ event_name: str,
+ on_button_press: Callable[[carb.events.IEvent], None],
) -> None:
if self._xr_core is None:
logger.warning("XR core not available; skipping button binding")
@@ -504,4 +577,4 @@ class OpenXRDeviceCfg(DeviceCfg):
"""Configuration for OpenXR devices."""
xr_cfg: XrCfg | None = None
- class_type: type[DeviceBase] = OpenXRDevice
+ class_type: type[DeviceBase] = OpenXRDevice
\ No newline at end of file
diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_left_dexpilot.yml b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_left_dexpilot.yml
index 6a98e47219..c417cff8d5 100644
--- a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_left_dexpilot.yml
+++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_left_dexpilot.yml
@@ -1,8 +1,3 @@
-# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-
retargeting:
finger_tip_link_names:
- GR1T2_fourier_hand_6dof_L_thumb_distal_link
diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_right_dexpilot.yml b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_right_dexpilot.yml
index 183df868e8..9f6915b18b 100644
--- a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_right_dexpilot.yml
+++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_right_dexpilot.yml
@@ -1,8 +1,3 @@
-# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-
retargeting:
finger_tip_link_names:
- GR1T2_fourier_hand_6dof_R_thumb_distal_link
diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py
index a352580c2e..2e87ecc63a 100644
--- a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py
+++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py
@@ -16,6 +16,8 @@ from isaaclab.devices.device_base import DeviceBase
from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg
from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg
+from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR
+
# This import exception is suppressed because gr1_t2_dex_retargeting_utils depends on pinocchio which is not available on windows
with contextlib.suppress(Exception):
from .gr1_t2_dex_retargeting_utils import GR1TR2DexRetargeting
@@ -50,16 +52,26 @@ class GR1T2Retargeter(RetargeterBase):
self._num_open_xr_hand_joints = cfg.num_open_xr_hand_joints
self._sim_device = cfg.sim_device
if self._enable_visualization:
- marker_cfg = VisualizationMarkersCfg(
+ sphere_marker_cfg = VisualizationMarkersCfg(
prim_path="/Visuals/markers",
markers={
"joint": sim_utils.SphereCfg(
- radius=0.005,
+ radius=0.05,
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
),
},
)
+ marker_cfg = VisualizationMarkersCfg(
+ prim_path="/Visuals/markers",
+ markers={
+ "frame": sim_utils.UsdFileCfg(
+ usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/frame_prim.usd",
+ scale=(0.01, 0.01, 0.01),
+ ),
+ }
+ )
self._markers = VisualizationMarkers(marker_cfg)
+ self._sphere_markers = VisualizationMarkers(sphere_marker_cfg)
def retarget(self, data: dict) -> torch.Tensor:
"""Convert hand joint poses to robot end-effector commands.
@@ -77,17 +89,37 @@ class GR1T2Retargeter(RetargeterBase):
# Access the left and right hand data using the enum key
left_hand_poses = data[DeviceBase.TrackingTarget.HAND_LEFT]
right_hand_poses = data[DeviceBase.TrackingTarget.HAND_RIGHT]
+ body_poses = data[DeviceBase.TrackingTarget.BODY]
left_wrist = left_hand_poses.get("wrist")
right_wrist = right_hand_poses.get("wrist")
+ left_palm = left_hand_poses.get("palm")
+ right_palm = right_hand_poses.get("palm")
+
+ left_wrist[3:] = left_palm[3:]
+ right_wrist[3:] = right_palm[3:]
+
if self._enable_visualization:
joints_position = np.zeros((self._num_open_xr_hand_joints, 3))
+ joints_orientation = np.zeros((self._num_open_xr_hand_joints, 4))
+
+ body_joints_position = np.zeros((8, 3))
+ body_joints_orientation = np.zeros((8, 4))
joints_position[::2] = np.array([pose[:3] for pose in left_hand_poses.values()])
joints_position[1::2] = np.array([pose[:3] for pose in right_hand_poses.values()])
+ joints_orientation[::2] = np.array([pose[3:] for pose in left_hand_poses.values()])
+ joints_orientation[1::2] = np.array([pose[3:] for pose in right_hand_poses.values()])
+
+ body_joints_position[:] = np.array([pose[:3] for pose in body_poses.values()])
+ body_joints_orientation[:] = np.array([pose[3:] for pose in body_poses.values()])
+
+ self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device),
+ orientations=torch.tensor(joints_orientation, device=self._sim_device))
- self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device))
+ self._sphere_markers.visualize(translations=torch.tensor(body_joints_position, device=self._sim_device),
+ orientations=torch.tensor(body_joints_orientation, device=self._sim_device))
# Create array of zeros with length matching number of joint names
left_hands_pos = self._hands_controller.compute_left(left_hand_poses)
@@ -112,7 +144,7 @@ class GR1T2Retargeter(RetargeterBase):
return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor])
def get_requirements(self) -> list[RetargeterBase.Requirement]:
- return [RetargeterBase.Requirement.HAND_TRACKING]
+ return [RetargeterBase.Requirement.HAND_TRACKING, RetargeterBase.Requirement.BODY_TRACKING]
def _retarget_abs(self, wrist: np.ndarray) -> np.ndarray:
"""Handle absolute pose retargeting.
diff --git a/source/isaaclab/isaaclab/devices/retargeter_base.py b/source/isaaclab/isaaclab/devices/retargeter_base.py
index c3d2e51f6b..3a9e4c385f 100644
--- a/source/isaaclab/isaaclab/devices/retargeter_base.py
+++ b/source/isaaclab/isaaclab/devices/retargeter_base.py
@@ -43,6 +43,7 @@ class RetargeterBase(ABC):
HAND_TRACKING = "hand_tracking"
HEAD_TRACKING = "head_tracking"
MOTION_CONTROLLER = "motion_controller"
+ BODY_TRACKING = "body_tracking"
@abstractmethod
def retarget(self, data: Any) -> Any:
@leochien1110
Copy link
Copy Markdown

Hi, thank you for the instructions and the patch. I have followed the blog post to complete the setup (I added a new line at the end of the patch to make it apply cleanly).
However, the hand/fingers' coordinates are slightly off from the robot's fingers in the y rotation. I have tried the left-hand tracking rotation offset, but it's not working. The Left controller rotation offset does work. Do you have any clue how to adjust it?
eabc9b726892414da318ecf0faeddcf0

@miguelalonsojr
Copy link
Copy Markdown
Author

miguelalonsojr commented Jan 2, 2026

Yes. You need to adjust the rotation of the hand tracking offsets, not the controller offsets, in ALVR. I set the rotation offset to 5 degrees, but you may need to adjust it for your setup. I found that 5 degrees fixes the incorrect rotation in the default retargeting in IsaacLab. It seems like you may not need this adjustment. Try a rotation of 0 degrees or -5 degrees. It's kind of a hacky fix, but it works. Did you modify these settings? See below.

Screenshot From 2026-01-02 09-59-26

Also, can you post a picture of the full hand? I don't see the wrist in your screenshot.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment