Description
System Info
Robosuite version 1.5.1
Ubuntu 20.04
Information
Dear authors,
Thank you for your amazing work! I have encountered a problem that really confuses me.
I am using the Dexmimicgen environments built on top of robosuite, and I will use the Tray Lift task from Dexmimicgen as the example in this issue.
The original Tray Lift task uses two Panda arms with two InspireHands. I am retargeting directly from the InspireHand to a parallel gripper (in my case, the Robotiq85 gripper). After the retargeting, however, the Robotiq85 gripper always appears broken in the simulator. At first I thought the retargeted commands might exceed the Robotiq85's joint limits, but even when I keep the gripper at its initial position and rotation and only send simple open/close actions, it still appears broken, as you can see in the videos below. I ran the same test with other grippers (e.g., Robotiq140 and the Panda gripper) and they all behave correctly.
robotiq85_gripper_alternating.mp4
Robotiq140_gripper_alternating.mp4
panda_gripper_alternating.mp4
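For reference, the kind of joint-limit check I have in mind is sketched below. check_gripper_joint_limits is just a hypothetical helper for this issue, and I am assuming robosuite's mujoco-py-style accessors (sim.model.joint_name2id, sim.model.jnt_range, sim.data.get_joint_qpos):

def check_gripper_joint_limits(env, robot_idx=0):
    """Print each gripper joint position against its limit range.
    Hypothetical helper; assumes robosuite's mujoco-py-style sim wrappers."""
    gripper = env.robots[robot_idx].gripper
    for joint_name in gripper.joints:
        jnt_id = env.sim.model.joint_name2id(joint_name)
        lo, hi = env.sim.model.jnt_range[jnt_id]
        qpos = env.sim.data.get_joint_qpos(joint_name)
        limited = hi > lo  # unlimited joints report a [0, 0] range in MuJoCo
        flag = " <-- OUT OF RANGE" if limited and not (lo - 1e-6 <= qpos <= hi + 1e-6) else ""
        print(f"{joint_name}: qpos={qpos:+.4f}, range=[{lo:+.4f}, {hi:+.4f}]{flag}")

This can be called right after env.reset() and again after a few open/close cycles; as far as I understand, if no gripper joint ever leaves its range, the broken appearance should not be a limit problem.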
So I am wondering whether there is something wrong with the Robotiq85's XML file, or whether this is some other bug.
Thank you so much for your help! I am looking forward to your response.
Reproduction
To reproduce the experiment, I attach the script alternating_gripper_visualization.py below:
- You need to clone and install Dexmimicgen first: https://github.com/NVlabs/dexmimicgen/
- Then you can run the command directly:
  python alternating_gripper_visualization.py --gripper Robotiq85Gripper --output_video robotiq85_gripper_alternating.mp4
import sys
import os
import argparse
import numpy as np
import imageio
from scipy.spatial.transform import Rotation
import robosuite as suite
from robosuite.controllers import load_composite_controller_config
import dexmimicgen
def create_static_drawer_cleanup_env(camera_name="frontview", camera_width=512, camera_height=512, gripper_type="Robotiq140Gripper"):
    """
    Create a static TwoArmDrawerCleanup environment with Panda robots and specified grippers.
    Args:
        camera_name (str): Camera name for rendering (default: "frontview")
        camera_width (int): Camera width in pixels (default: 512)
        camera_height (int): Camera height in pixels (default: 512)
        gripper_type (str): Type of gripper to use (default: "Robotiq140Gripper")
    Returns:
        robosuite.environments.base.RobotEnv: The created environment
    """
    print("Creating TwoArmDrawerCleanup environment...")
    print(" Robots: Panda, Panda")
    print(f" Grippers: {gripper_type}, {gripper_type}")
    print(" Mode: Arms static, grippers alternating")
    # Create a simple controller config
    controller_config = {
        "type": "BASIC",
        "body_parts": {
            "right": {
                "type": "OSC_POSE",
                "control_delta": False,
                "input_type": "absolute",
                "input_ref_frame": "world",
                "interpolation": None,
                "ramp_ratio": 0.2,
                "input_max": 1,
                "input_min": -1,
                "output_max": [0.05, 0.05, 0.05, 0.5, 0.5, 0.5],
                "output_min": [-0.05, -0.05, -0.05, -0.5, -0.5, -0.5],
                "kp": 150,
                "damping_ratio": 1,
                "impedance_mode": "fixed",
                "kp_limits": [0, 300],
                "damping_ratio_limits": [0, 10],
                "position_limits": None,
                "orientation_limits": None,
                "uncouple_pos_ori": True,
                "control_ori": True,
                "control_pos": True,
                "gripper": {
                    "type": "GRIP",
                    "use_action_scaling": False
                }
            },
            "left": {
                "type": "OSC_POSE",
                "control_delta": False,
                "input_type": "absolute",
                "input_ref_frame": "world",
                "interpolation": None,
                "ramp_ratio": 0.2,
                "input_max": 1,
                "input_min": -1,
                "output_max": [0.05, 0.05, 0.05, 0.5, 0.5, 0.5],
                "output_min": [-0.05, -0.05, -0.05, -0.5, -0.5, -0.5],
                "kp": 150,
                "damping_ratio": 1,
                "impedance_mode": "fixed",
                "kp_limits": [0, 300],
                "damping_ratio_limits": [0, 10],
                "position_limits": None,
                "orientation_limits": None,
                "uncouple_pos_ori": True,
                "control_ori": True,
                "control_pos": True,
                "gripper": {
                    "type": "GRIP",
                    "use_action_scaling": False
                }
            }
        }
    }
    # Save to temporary file and load
    import json
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
        json.dump(controller_config, f, indent=2)
        temp_path = f.name
    loaded_config = load_composite_controller_config(controller=temp_path)
    os.unlink(temp_path)
    # Create environment
    env = suite.make(
        "TwoArmDrawerCleanup",
        robots=["Panda", "Panda"],
        gripper_types=[gripper_type, gripper_type],
        controller_configs=loaded_config,
        has_renderer=True,  # Enable real-time rendering
        has_offscreen_renderer=True,  # Enable offscreen rendering for video
        render_camera=camera_name,
        camera_names=camera_name,
        camera_heights=camera_height,
        camera_widths=camera_width,
        horizon=10000,  # Long horizon
        ignore_done=True,
        use_camera_obs=True,  # Need camera observations for video
        control_freq=20,  # 20 Hz control frequency
        reward_shaping=False,
    )
    # Disable randomization for consistent object placement and robot initialization
    env.deterministic_reset = True
    print("✓ Environment created successfully")
    return env
def get_arm_initial_pose(env, robot_idx):
    """
    Get the initial end-effector pose for a specific robot (in world frame).
    Args:
        env: Robosuite bimanual environment
        robot_idx: Index of the robot (0 for right, 1 for left)
    Returns:
        tuple: (position (3,), orientation (3, 3) as rotation matrix) in world frame
    """
    robot = env.robots[robot_idx]
    # Get end-effector site ID
    if isinstance(robot.eef_site_id, dict):
        site_id = robot.eef_site_id['right']  # Framework convention: stored under 'right' key
    else:
        site_id = robot.eef_site_id
    # Get end-effector position and orientation in world frame
    ee_pos = env.sim.data.site_xpos[site_id].copy()
    ee_ori = env.sim.data.site_xmat[site_id].reshape(3, 3).copy()
    return ee_pos, ee_ori
def get_gripper_width(env, robot_idx):
    """
    Get the gripper width/aperture for a specific robot.
    Args:
        env: Robosuite bimanual environment
        robot_idx: Index of the robot (0 for right, 1 for left)
    Returns:
        float: Gripper width in meters, or None if not available
    """
    try:
        robot = env.robots[robot_idx]
        gripper = robot.gripper
        # Try to get gripper width using the width() method if available
        if hasattr(gripper, 'width') and callable(gripper.width):
            width = gripper.width()
            return width
        # Fallback: try to get from gripper_qpos
        if hasattr(gripper, 'gripper_qpos'):
            qpos = gripper.gripper_qpos
            if isinstance(qpos, np.ndarray) and len(qpos) > 0:
                width = np.sum(qpos) if len(qpos) > 1 else qpos[0]
                return float(width)
        return None
    except Exception:
        return None
def create_action_with_alternating_grippers(env, right_pos, right_ori, right_gripper_cmd,
                                            left_pos, left_ori, left_gripper_cmd):
    """
    Create an action that maintains arm poses but uses specified gripper commands.
    Args:
        env: Robosuite environment
        right_pos: Right arm position (3,)
        right_ori: Right arm orientation (3, 3) rotation matrix
        right_gripper_cmd: Right gripper command in [-1, 1] range
        left_pos: Left arm position (3,)
        left_ori: Left arm orientation (3, 3) rotation matrix
        left_gripper_cmd: Left gripper command in [-1, 1] range
    Returns:
        np.ndarray: Action array (14,)
    """
    # Convert rotation matrices to axis-angle representation (3D vector)
    right_ori_axis_angle = Rotation.from_matrix(right_ori).as_rotvec()
    left_ori_axis_angle = Rotation.from_matrix(left_ori).as_rotvec()
    # Construct action: [right_pos, right_ori, right_gripper, left_pos, left_ori, left_gripper]
    action = np.concatenate([
        right_pos,              # (3,) - absolute position in world frame
        right_ori_axis_angle,   # (3,) - absolute orientation as axis-angle in world frame
        [right_gripper_cmd],    # (1,) - gripper command
        left_pos,               # (3,) - absolute position in world frame
        left_ori_axis_angle,    # (3,) - absolute orientation as axis-angle in world frame
        [left_gripper_cmd]      # (1,) - gripper command
    ])
    assert action.shape == (14,), f"Action shape mismatch: {action.shape}"
    return action
def render_alternating_gripper_environment(env, duration=10, fps=20, output_video=None,
                                           camera_name="frontview", alternation_period=2.0):
    """
    Render environment with arms static and grippers alternating between open and closed.
    Args:
        env: Robosuite environment
        duration (float): Duration to render in seconds (default: 10)
        fps (int): Frames per second for video (default: 20)
        output_video (str, optional): Path to save output video. If None, no video is saved.
        camera_name (str): Camera name for rendering (default: "frontview")
        alternation_period (float): Period for gripper alternation in seconds (default: 2.0)
    """
    print(f"\n{'='*80}")
    print(f"RENDERING WITH ALTERNATING GRIPPERS")
    print(f"{'='*80}")
    print(f"Duration: {duration} seconds")
    print(f"FPS: {fps}")
    print(f"Alternation period: {alternation_period} seconds")
    if output_video:
        print(f"Output video: {output_video}")
    else:
        print(f"Output video: None (no video will be saved)")
    print(f"{'='*80}\n")
    # Reset environment
    obs = env.reset()
    # Get initial arm poses (will be maintained throughout)
    right_pos, right_ori = get_arm_initial_pose(env, 0)  # Right robot
    left_pos, left_ori = get_arm_initial_pose(env, 1)  # Left robot
    print(f"Initial Right Arm Position: [{right_pos[0]:.4f}, {right_pos[1]:.4f}, {right_pos[2]:.4f}]")
    print(f"Initial Left Arm Position: [{left_pos[0]:.4f}, {left_pos[1]:.4f}, {left_pos[2]:.4f}]")
    print()
    # Calculate number of frames
    num_frames = int(duration * fps)
    frames_per_cycle = int(alternation_period * fps)
    print(f"Starting rendering... ({num_frames} frames)")
    print(f"Grippers will alternate every {alternation_period} seconds ({frames_per_cycle} frames)")
    print(f" -1.0 = Closed")
    print(f" +1.0 = Open")
    print()
    # Initialize video writer
    frames = []
    # Gripper commands: -1.0 = closed, +1.0 = open
    max_gripper_cmd = 1.0   # Open
    min_gripper_cmd = -1.0  # Closed
    # Render with alternating grippers
    for i in range(num_frames):
        # Calculate which phase of alternation we're in
        cycle_progress = (i % frames_per_cycle) / frames_per_cycle
        # Determine gripper state for this frame
        # Use a sinusoidal transition for smooth alternation
        # Or use step function for sharp alternation
        use_smooth = True  # Set to False for sharp on/off
        if use_smooth:
            # Smooth sinusoidal transition
            sin_val = np.sin(2 * np.pi * cycle_progress)
            right_gripper_cmd = sin_val  # Ranges from -1 to +1
            left_gripper_cmd = -sin_val  # Opposite phase (alternating)
        else:
            # Sharp step alternation
            if cycle_progress < 0.5:
                # First half: right closed, left open
                right_gripper_cmd = min_gripper_cmd  # Closed
                left_gripper_cmd = max_gripper_cmd   # Open
            else:
                # Second half: right open, left closed
                right_gripper_cmd = max_gripper_cmd  # Open
                left_gripper_cmd = min_gripper_cmd   # Closed
        # Create action with alternating grippers
        action = create_action_with_alternating_grippers(
            env, right_pos, right_ori, right_gripper_cmd,
            left_pos, left_ori, left_gripper_cmd
        )
        # Step environment
        obs, reward, done, info = env.step(action)
        # Render on screen (real-time)
        env.render()
        # Capture frame for video
        if output_video:
            frame = obs[f"{camera_name}_image"]
            frame = np.flipud(frame)  # Flip vertically (OpenGL renders upside down)
            frames.append(frame)
        # Print progress every second
        if (i + 1) % fps == 0:
            elapsed = (i + 1) / fps
            cycle_num = int(i / frames_per_cycle) + 1
            print(f" Rendered {elapsed:.1f}/{duration:.1f} seconds ({i+1}/{num_frames} frames) "
                  f"- Cycle {cycle_num}, Right: {right_gripper_cmd:.2f}, Left: {left_gripper_cmd:.2f}")
    # Save video if requested
    if output_video:
        print(f"\nSaving video with {len(frames)} frames...")
        imageio.mimsave(output_video, frames, fps=fps)
        print(f"✓ Video saved to: {output_video}")
    print("\n✓ Rendering complete!")
def main():
    """Main function."""
    parser = argparse.ArgumentParser(
        description="Visualize TwoArmDrawerCleanup environment with alternating grippers (configurable gripper type)"
    )
    parser.add_argument(
        "--duration",
        type=float,
        default=10.0,
        help="Duration to render in seconds (default: 10.0)"
    )
    parser.add_argument(
        "--fps",
        type=int,
        default=20,
        help="Frames per second for video (default: 20)"
    )
    parser.add_argument(
        "--output_video",
        type=str,
        default=None,
        help="Path to save output video. If not specified, no video is saved."
    )
    parser.add_argument(
        "--camera",
        type=str,
        default="frontview",
        help="Camera name for rendering (default: frontview)"
    )
    parser.add_argument(
        "--camera_width",
        type=int,
        default=512,
        help="Camera width in pixels (default: 512)"
    )
    parser.add_argument(
        "--camera_height",
        type=int,
        default=512,
        help="Camera height in pixels (default: 512)"
    )
    parser.add_argument(
        "--alternation_period",
        type=float,
        default=2.0,
        help="Period for gripper alternation in seconds (default: 2.0). "
             "Grippers will switch between open/closed every this many seconds."
    )
    parser.add_argument(
        "--gripper",
        type=str,
        default="Robotiq140Gripper",
        help="Type of gripper to use (default: Robotiq140Gripper). "
             "Examples: Robotiq140Gripper, Robotiq85Gripper, PandaGripper, "
             "RethinkGripper, RobotiqThreeFingerGripper, etc."
    )
    args = parser.parse_args()
    print("="*80)
    print("ALTERNATING GRIPPER VISUALIZATION")
    print("="*80)
    print(f"Environment: TwoArmDrawerCleanup")
    print(f"Robots: Panda, Panda")
    print(f"Grippers: {args.gripper}, {args.gripper}")
    print(f"Rendering: Real-time (on-screen)")
    print(f"Duration: {args.duration} seconds")
    print(f"FPS: {args.fps}")
    print(f"Alternation period: {args.alternation_period} seconds")
    print(f"Camera: {args.camera} ({args.camera_width}x{args.camera_height})")
    if args.output_video:
        print(f"Video output: {args.output_video}")
    else:
        print(f"Video output: None")
    print("="*80)
    # Create environment
    env = create_static_drawer_cleanup_env(
        camera_name=args.camera,
        camera_width=args.camera_width,
        camera_height=args.camera_height,
        gripper_type=args.gripper
    )
    try:
        # Render with alternating grippers
        render_alternating_gripper_environment(
            env=env,
            duration=args.duration,
            fps=args.fps,
            output_video=args.output_video,
            camera_name=args.camera,
            alternation_period=args.alternation_period
        )
    finally:
        env.close()
        print("\n✓ Environment closed")

if __name__ == "__main__":
    main()
Expected behavior
The Robotiq85 gripper appears broken during the simulation, which does not happen with any other kind of gripper I tested.
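In case it is useful for debugging, this is how I would inspect the Robotiq85 model definition on its own, independent of the environment. It is only a sketch built on robosuite's gripper_factory; the attribute names (file, dof, init_qpos, joints, actuators) are what I expect from the robosuite 1.5 gripper model classes and may need adjusting:

from robosuite.models.grippers import gripper_factory

# Standalone Robotiq85 gripper model, no arm attached (assumed attribute names).
gripper = gripper_factory("Robotiq85Gripper")
print("XML file:  ", gripper.file)       # path to the gripper MJCF on disk
print("DoF:       ", gripper.dof)        # degrees of freedom reported by the model
print("Init qpos: ", gripper.init_qpos)  # default joint configuration
print("Joints:    ", gripper.joints)     # joint names (with naming prefix)
print("Actuators: ", gripper.actuators)  # actuator names defined in the XML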