11 Commits

Author SHA1 Message Date
af6bca7270 多view 2026-03-15 20:23:21 +08:00
1f36a59fe8 场景 2026-03-13 17:41:44 +08:00
08c4cdacb8 记录动作以及回放record_demo,replay_demo 2026-03-11 16:33:29 +08:00
0c557938a7 wheel contorl keyboard 2026-03-09 21:34:52 +08:00
2b290d805a 轮子 2026-03-06 17:31:00 +08:00
36b4273582 VR 2026-03-06 15:05:45 +08:00
7e0f4a3e75 1 2026-03-06 11:11:06 +08:00
cc64e654ff spacemouse ok 2026-03-05 22:41:56 +08:00
2c9db33517 添加全局变量路径;
添加IL的demo
2026-03-04 18:39:56 +08:00
9ba904e7d2 pro6000_加入lab资产 2026-03-04 16:46:15 +08:00
c7c88e5d8d pro6000_xh配置 2026-03-04 16:45:34 +08:00
135 changed files with 10218 additions and 1324 deletions

View File

@@ -1,12 +0,0 @@
{
// See http://go.microsoft.com/fwlink/?LinkId=827846
// for the documentation about the extensions.json format
"recommendations": [
"ms-python.python",
"ms-python.vscode-pylance",
"ban.spellright",
"ms-iot.vscode-ros",
"ms-python.black-formatter",
"ms-python.flake8",
]
}

23
.vscode/tasks.json vendored
View File

@@ -1,23 +0,0 @@
{
"version": "2.0.0",
"tasks": [
{
"label": "setup_python_env",
"type": "shell",
"linux": {
"command": "${input:isaac_path}/python.sh ${workspaceFolder}/.vscode/tools/setup_vscode.py --isaac_path ${input:isaac_path}"
},
"windows": {
"command": "${input:isaac_path}/python.bat ${workspaceFolder}/.vscode/tools/setup_vscode.py --isaac_path ${input:isaac_path}"
}
}
],
"inputs": [
{
"id": "isaac_path",
"description": "Absolute path to the current Isaac Sim installation. If you installed IsaacSim from pip, the import of it failed. Please make sure you run the task with the correct python environment. As fallback, you can directly execute the python script by running: ``python.sh <path-to-your-project>/.vscode/tools/setup_vscode.py``",
"default": "C:/isaacsim",
"type": "promptString"
},
]
}

View File

@@ -4,822 +4,54 @@
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
// For standalone script execution
{
"name": "Python: Current File",
"type": "debugpy",
"type": "python",
"request": "launch",
"program": "${file}",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Direct-v0 with rl_games (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/rl_games/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Direct-v0 with rl_games (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/rl_games/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Direct-v0 with rsl_rl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/rsl_rl/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Direct-v0 with rsl_rl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/rsl_rl/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Direct-v0 with skrl (AMP)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "AMP"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Direct-v0 with skrl (AMP)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32", "--algorithm", "AMP"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Direct-v0 with skrl (IPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "IPPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Direct-v0 with skrl (IPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32", "--algorithm", "IPPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Direct-v0 with skrl (MAPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "MAPPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Direct-v0 with skrl (MAPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32", "--algorithm", "MAPPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Direct-v0 with skrl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "PPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Direct-v0 with skrl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32", "--algorithm", "PPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Direct-v0 with sb3 (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/sb3/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Direct-v0 with sb3 (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/sb3/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Marl-Direct-v0 with rl_games (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/rl_games/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Marl-Direct-v0 with rl_games (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/rl_games/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Marl-Direct-v0 with rsl_rl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/rsl_rl/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Marl-Direct-v0 with rsl_rl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/rsl_rl/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Marl-Direct-v0 with skrl (AMP)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "AMP"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Marl-Direct-v0 with skrl (AMP)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32", "--algorithm", "AMP"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Marl-Direct-v0 with skrl (IPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "IPPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Marl-Direct-v0 with skrl (IPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32", "--algorithm", "IPPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Marl-Direct-v0 with skrl (MAPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "MAPPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Marl-Direct-v0 with skrl (MAPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32", "--algorithm", "MAPPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Marl-Direct-v0 with skrl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "PPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Marl-Direct-v0 with skrl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32", "--algorithm", "PPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-Marl-Direct-v0 with sb3 (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/sb3/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-Marl-Direct-v0 with sb3 (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/sb3/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-v0 with rl_games (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/rl_games/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-v0 with rl_games (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/rl_games/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-v0 with rsl_rl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/rsl_rl/train.py",
"console": "integratedTerminal",
"console": "integratedTerminal"
},
{
"name": "Python: Play Template-Mindbot-v0 with rsl_rl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/rsl_rl/play.py",
"console": "integratedTerminal",
"name": "Python: Attach (windows-x86_64/linux-x86_64)",
"type": "python",
"request": "attach",
"port": 3000,
"host": "localhost"
},
{
"name": "Python: Train Template-Mindbot-v0 with skrl (AMP)",
"type": "debugpy",
"name": "Python: Train Environment",
"type": "python",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless", "--algorithm", "AMP"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
"args" : ["--task", "Isaac-Reach-Franka-v0", "--headless"],
"program": "${workspaceFolder}/scripts/reinforcement_learning/rsl_rl/train.py",
"console": "integratedTerminal"
},
{
"name": "Python: Play Template-Mindbot-v0 with skrl (AMP)",
"type": "debugpy",
"name": "Python: Play Environment",
"type": "python",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32", "--algorithm", "AMP"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"args" : ["--task", "Isaac-Reach-Franka-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/reinforcement_learning/rsl_rl/play.py",
"console": "integratedTerminal"
},
{
"name": "Python: Train Template-Mindbot-v0 with skrl (IPPO)",
"type": "debugpy",
"name": "Python: SinglePytest",
"type": "python",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless", "--algorithm", "IPPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-v0 with skrl (IPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32", "--algorithm", "IPPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"module": "pytest",
"args": [
"${file}"
],
"console": "integratedTerminal"
},
{
"name": "Python: Train Template-Mindbot-v0 with skrl (MAPPO)",
"type": "debugpy",
"name": "Python: ALL Pytest",
"type": "python",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless", "--algorithm", "MAPPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"module": "pytest",
"args": ["source/isaaclab/test"],
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-v0 with skrl (MAPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32", "--algorithm", "MAPPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-v0 with skrl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless", "--algorithm", "PPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-v0 with skrl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32", "--algorithm", "PPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
},
{
"name": "Python: Train Template-Mindbot-v0 with sb3 (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/sb3/train.py",
"console": "integratedTerminal",
},
{
"name": "Python: Play Template-Mindbot-v0 with sb3 (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/sb3/play.py",
"console": "integratedTerminal",
},
// For script execution inside a Docker
{
"name": "Docker: Current File",
"type": "debugpy",
"request": "launch",
"program": "${file}",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
}
},
{
"name": "Docker: Train Template-Mindbot-Direct-v0 with rl_games (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/rl_games/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Direct-v0 with rl_games (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/rl_games/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-Direct-v0 with rsl_rl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/rsl_rl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Direct-v0 with rsl_rl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/rsl_rl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-Direct-v0 with skrl (AMP)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "AMP"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Direct-v0 with skrl (AMP)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32", "--algorithm", "AMP"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-Direct-v0 with skrl (IPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "IPPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Direct-v0 with skrl (IPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32", "--algorithm", "IPPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-Direct-v0 with skrl (MAPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "MAPPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Direct-v0 with skrl (MAPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32", "--algorithm", "MAPPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-Direct-v0 with skrl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "PPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Direct-v0 with skrl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32", "--algorithm", "PPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-Direct-v0 with sb3 (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/sb3/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Direct-v0 with sb3 (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Direct-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/sb3/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-Marl-Direct-v0 with rl_games (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/rl_games/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Marl-Direct-v0 with rl_games (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/rl_games/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-Marl-Direct-v0 with rsl_rl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/rsl_rl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Marl-Direct-v0 with rsl_rl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/rsl_rl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-Marl-Direct-v0 with skrl (AMP)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "AMP"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Marl-Direct-v0 with skrl (AMP)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32", "--algorithm", "AMP"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-Marl-Direct-v0 with skrl (IPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "IPPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Marl-Direct-v0 with skrl (IPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32", "--algorithm", "IPPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-Marl-Direct-v0 with skrl (MAPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "MAPPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Marl-Direct-v0 with skrl (MAPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32", "--algorithm", "MAPPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-Marl-Direct-v0 with skrl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless", "--algorithm", "PPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Marl-Direct-v0 with skrl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32", "--algorithm", "PPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-Marl-Direct-v0 with sb3 (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/sb3/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-Marl-Direct-v0 with sb3 (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-Marl-Direct-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/sb3/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-v0 with rl_games (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/rl_games/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-v0 with rl_games (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/rl_games/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-v0 with rsl_rl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/rsl_rl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-v0 with rsl_rl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/rsl_rl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-v0 with skrl (AMP)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless", "--algorithm", "AMP"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-v0 with skrl (AMP)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32", "--algorithm", "AMP"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-v0 with skrl (IPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless", "--algorithm", "IPPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-v0 with skrl (IPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32", "--algorithm", "IPPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-v0 with skrl (MAPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless", "--algorithm", "MAPPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-v0 with skrl (MAPPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32", "--algorithm", "MAPPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-v0 with skrl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless", "--algorithm", "PPO"],
"program": "${workspaceFolder}/scripts/skrl/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-v0 with skrl (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32", "--algorithm", "PPO"],
"program": "${workspaceFolder}/scripts/skrl/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Train Template-Mindbot-v0 with sb3 (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "4096", "--headless"],
"program": "${workspaceFolder}/scripts/sb3/train.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
{
"name": "Docker: Play Template-Mindbot-v0 with sb3 (PPO)",
"type": "debugpy",
"request": "launch",
"args" : ["--task", "Template-Mindbot-v0", "--num_envs", "32"],
"program": "${workspaceFolder}/scripts/sb3/play.py",
"console": "integratedTerminal",
"env": {
"PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
},
},
"justMyCode": false
}
]
}
}

View File

@@ -1,4 +1,9 @@
{
"files.exclude": {
"**/.mypy_cache": true,
"**/__pycache__": true,
"**/*.egg-info": true
},
"files.associations": {
"*.tpp": "cpp",
"*.kit": "toml",
@@ -40,7 +45,6 @@
"teleoperation",
"xform",
"numpy",
"tensordict",
"flatcache",
"physx",
"dpad",
@@ -51,26 +55,21 @@
"arange",
"discretization",
"trimesh",
"uninstanceable"
"uninstanceable",
"coeff",
"prestartup"
],
// This enables python language server. Seems to work slightly better than jedi:
"python.languageServer": "Pylance",
// We use "black" as a formatter:
"python.formatting.provider": "black",
"python.formatting.blackArgs": ["--line-length", "120"],
// Use flake8 for linting
"python.linting.pylintEnabled": false,
"python.linting.flake8Enabled": true,
"python.linting.flake8Args": [
"--max-line-length=120"
],
// Use ruff as a formatter and linter
"ruff.configuration": "${workspaceFolder}/pyproject.toml",
// Use docstring generator
"autoDocstring.docstringFormat": "google",
"autoDocstring.guessTypes": true,
// Python environment path
// note: the default interpreter is overridden when user selects a workspace interpreter
// in the status bar. For example, the virtual environment python interpreter
"python.defaultInterpreterPath": "",
"python.defaultInterpreterPath": "${workspaceFolder}/_isaac_sim/python.sh",
// ROS distribution
"ros.distro": "noetic",
// Language specific settings
@@ -81,6 +80,6 @@
"editor.tabSize": 2
},
// Python extra paths
// Note: this is filled up when vscode is set up for the first time
// Note: this is filled up when "./isaaclab.sh -i" is run
"python.analysis.extraPaths": []
}

View File

@@ -12,34 +12,21 @@ This is necessary because Isaac Sim 2022.2.1 onwards does not add the necessary
when the "setup_python_env.sh" is run as part of the vs-code launch configuration.
"""
import argparse
import os
import pathlib
import platform
import re
import sys
import os
import pathlib
PROJECT_DIR = pathlib.Path(__file__).parents[2]
"""Path to the the project directory."""
ISAACLAB_DIR = pathlib.Path(__file__).parents[2]
"""Path to the Isaac Lab directory."""
try:
import isaacsim # noqa: F401
isaacsim_dir = os.environ.get("ISAAC_PATH", "")
except ModuleNotFoundError or ImportError:
# Create a parser to get the isaac-sim path
parser = argparse.ArgumentParser(description="Setup the VSCode settings for the project.")
parser.add_argument("--isaac_path", type=str, help="The absolute path to the Isaac Sim installation.")
args = parser.parse_args()
# parse the isaac-sim directory
isaacsim_dir = args.isaac_path
# check if the isaac-sim directory is provided
if not os.path.exists(isaacsim_dir):
raise FileNotFoundError(
f"Could not find the isaac-sim directory: {isaacsim_dir}. Please provide the correct path to the Isaac Sim"
" installation."
)
isaacsim_dir = os.path.join(ISAACLAB_DIR, "_isaac_sim")
except EOFError:
print("Unable to trigger EULA acceptance. This is likely due to the script being run in a non-interactive shell.")
print("Please run the script in an interactive shell to accept the EULA.")
@@ -49,11 +36,11 @@ except EOFError:
# check if the isaac-sim directory exists
if not os.path.exists(isaacsim_dir):
raise FileNotFoundError(
f"Could not find the isaac-sim directory: {isaacsim_dir}. There are two possible reasons for this:\n\t1. The"
" Isaac Sim directory does not exist as provided CLI path.\n\t2. The script couldn't import the 'isaacsim'"
" package. This could be due to the 'isaacsim' package not being installed in the Python"
" environment.\n\nPlease make sure that the Isaac Sim directory exists or that the 'isaacsim' package is"
" installed."
f"Could not find the isaac-sim directory: {isaacsim_dir}. There are two possible reasons for this:"
f"\n\t1. The Isaac Sim directory does not exist as a symlink at: {os.path.join(ISAACLAB_DIR, '_isaac_sim')}"
"\n\t2. The script could not import the 'isaacsim' package. This could be due to the 'isaacsim' package not "
"being installed in the Python environment.\n"
"\nPlease make sure that the Isaac Sim directory exists or that the 'isaacsim' package is installed."
)
ISAACSIM_DIR = isaacsim_dir
@@ -98,7 +85,7 @@ def overwrite_python_analysis_extra_paths(isaaclab_settings: str) -> str:
path_names = [path_name for path_name in path_names if len(path_name) > 0]
# change the path names to be relative to the Isaac Lab directory
rel_path = os.path.relpath(ISAACSIM_DIR, PROJECT_DIR)
rel_path = os.path.relpath(ISAACSIM_DIR, ISAACLAB_DIR)
path_names = ['"${workspaceFolder}/' + rel_path + "/" + path_name + '"' for path_name in path_names]
else:
path_names = []
@@ -111,7 +98,7 @@ def overwrite_python_analysis_extra_paths(isaaclab_settings: str) -> str:
)
# add the path names that are in the Isaac Lab extensions directory
isaaclab_extensions = os.listdir(os.path.join(PROJECT_DIR, "source"))
isaaclab_extensions = os.listdir(os.path.join(ISAACLAB_DIR, "source"))
path_names.extend(['"${workspaceFolder}/source/' + ext + '"' for ext in isaaclab_extensions])
# combine them into a single string
@@ -144,17 +131,15 @@ def overwrite_default_python_interpreter(isaaclab_settings: str) -> str:
The settings string with overwritten default python interpreter.
"""
# read executable name
python_exe = os.path.normpath(sys.executable)
# replace with Isaac Sim's python.sh or python.bat scripts to make sure python with correct
# source paths is set as default
if f"kit{os.sep}python{os.sep}bin{os.sep}python" in python_exe:
# Check if the OS is Windows or Linux to use appropriate shell file
if platform.system() == "Windows":
python_exe = python_exe.replace(f"kit{os.sep}python{os.sep}bin{os.sep}python3", "python.bat")
else:
python_exe = python_exe.replace(f"kit{os.sep}python{os.sep}bin{os.sep}python3", "python.sh")
python_exe = sys.executable.replace("\\", "/")
# We make an exception for replacing the default interpreter if the
# path (/kit/python/bin/python3) indicates that we are using a local/container
# installation of IsaacSim. We will preserve the calling script as the default, python.sh.
# We want to use python.sh because it modifies LD_LIBRARY_PATH and PYTHONPATH
# (among other envars) that we need for all of our dependencies to be accessible.
if "kit/python/bin/python3" in python_exe:
return isaaclab_settings
# replace the default python interpreter in the Isaac Lab settings file with the path to the
# python interpreter in the Isaac Lab directory
isaaclab_settings = re.sub(
@@ -169,7 +154,7 @@ def overwrite_default_python_interpreter(isaaclab_settings: str) -> str:
def main():
# Isaac Lab template settings
isaaclab_vscode_template_filename = os.path.join(PROJECT_DIR, ".vscode", "tools", "settings.template.json")
isaaclab_vscode_template_filename = os.path.join(ISAACLAB_DIR, ".vscode", "tools", "settings.template.json")
# make sure the Isaac Lab template settings file exists
if not os.path.exists(isaaclab_vscode_template_filename):
raise FileNotFoundError(
@@ -195,13 +180,13 @@ def main():
isaaclab_settings = header_message + isaaclab_settings
# write the Isaac Lab settings file
isaaclab_vscode_filename = os.path.join(PROJECT_DIR, ".vscode", "settings.json")
isaaclab_vscode_filename = os.path.join(ISAACLAB_DIR, ".vscode", "settings.json")
with open(isaaclab_vscode_filename, "w") as f:
f.write(isaaclab_settings)
# copy the launch.json file if it doesn't exist
isaaclab_vscode_launch_filename = os.path.join(PROJECT_DIR, ".vscode", "launch.json")
isaaclab_vscode_template_launch_filename = os.path.join(PROJECT_DIR, ".vscode", "tools", "launch.template.json")
isaaclab_vscode_launch_filename = os.path.join(ISAACLAB_DIR, ".vscode", "launch.json")
isaaclab_vscode_template_launch_filename = os.path.join(ISAACLAB_DIR, ".vscode", "tools", "launch.template.json")
if not os.path.exists(isaaclab_vscode_launch_filename):
# read template launch settings
with open(isaaclab_vscode_template_launch_filename) as f:

View File

@@ -132,4 +132,8 @@ Some examples of packages that can likely be excluded are:
"<path-to-isaac-sim>/extscache/omni.graph.*" // Graph UI tools
"<path-to-isaac-sim>/extscache/omni.services.*" // Services tools
...
```
```bash
python scripts/environments/teleoperation/teleop_se3_agent.py --task Isaac-MindRobot-LeftArm-IK-Rel-v0 --teleop_device spacemouse
```

24
check_hdf5.py Normal file
View File

@@ -0,0 +1,24 @@
import h5py

# Path to the HDF5 file holding the recorded demonstrations.
file_path = "/home/maic/LYT/mindbot/datasets/mindrobot_demos.hdf5"

with h5py.File(file_path, "r") as f:
    print("文件结构:")

    def print_structure(name, obj):
        # Print the path of every group/dataset in the file.
        print(name)

    f.visititems(print_structure)

    print("\n开始打印前 50 条数据:\n")

    def print_first_50(name, obj):
        # Groups carry no data; only datasets are printed.
        if isinstance(obj, h5py.Dataset):
            print(f"\nDataset: {name}")
            print("Shape:", obj.shape)
            # only read/print the first 50 entries
            data = obj[:50]
            print(data)

    # BUG FIX: the original called `f.visititems(print_first_)`, an undefined
    # name, so the script crashed with a NameError before printing any data.
    f.visititems(print_first_50)

View File

@@ -0,0 +1,101 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to an environment with random action agent."""
"""Launch Isaac Sim Simulator first."""
import argparse
import os
from isaaclab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Random agent for Isaac Lab environments.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--output_dir", type=str, default=None, help="Path to the output directory.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
args_cli.headless = True
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import torch
import isaaclab_tasks # noqa: F401
from isaaclab_tasks.utils import parse_env_cfg
# PLACEHOLDER: Extension template (do not remove this comment)
def main():
    """Export and print the IO descriptors of an Isaac Lab task.

    Creates a single instance of ``args_cli.task``, reads its IO descriptors
    (action terms, observation terms, articulations, scene entities), writes
    them to ``<output_dir>/<task_name>_IO_descriptors.yaml`` and echoes them
    to stdout.
    """
    # create environment configuration (one environment is enough to read descriptors)
    env_cfg = parse_env_cfg(args_cli.task, device=args_cli.device, num_envs=1, use_fabric=True)
    # create environment
    env = gym.make(args_cli.task, cfg=env_cfg)
    # print info (this is vectorized environment)
    print(f"[INFO]: Gym observation space: {env.observation_space}")
    print(f"[INFO]: Gym action space: {env.action_space}")
    # reset environment
    env.reset()
    # NOTE(review): accessed without parentheses — presumably a property on the env; confirm against the env API.
    outs = env.unwrapped.get_IO_descriptors
    out_observations = outs["observations"]
    out_actions = outs["actions"]
    out_articulations = outs["articulations"]
    out_scene = outs["scene"]
    # Make a yaml file with the output
    import yaml

    # fail early with a clear message instead of a TypeError from os.makedirs(None)
    if args_cli.output_dir is None:
        raise ValueError("Please provide --output_dir to export the IO descriptors.")
    # derive a file-system friendly name from the task id
    name = args_cli.task.lower().replace("-", "_")
    name = name.replace(" ", "_")
    # exist_ok avoids the check-then-create race of an exists()/makedirs() pair
    os.makedirs(args_cli.output_dir, exist_ok=True)
    with open(os.path.join(args_cli.output_dir, f"{name}_IO_descriptors.yaml"), "w") as f:
        print(f"[INFO]: Exporting IO descriptors to {os.path.join(args_cli.output_dir, f'{name}_IO_descriptors.yaml')}")
        yaml.safe_dump(outs, f)
    # echo the action terms ('name' is popped so it is not repeated in the key/value dump;
    # this mutates the descriptor dicts, but the YAML file was already written above)
    for k in out_actions:
        print(f"--- Action term: {k['name']} ---")
        k.pop("name")
        for k1, v1 in k.items():
            print(f"{k1}: {v1}")
    # echo the observation terms, grouped as reported by the env
    for obs_group_name, obs_group in out_observations.items():
        print(f"--- Obs group: {obs_group_name} ---")
        for k in obs_group:
            print(f"--- Obs term: {k['name']} ---")
            k.pop("name")
            for k1, v1 in k.items():
                print(f"{k1}: {v1}")
    # echo the articulation descriptors
    for articulation_name, articulation_data in out_articulations.items():
        print(f"--- Articulation: {articulation_name} ---")
        for k1, v1 in articulation_data.items():
            print(f"{k1}: {v1}")
    # echo the scene descriptors
    for k1, v1 in out_scene.items():
        print(f"{k1}: {v1}")
    # step once with zero actions before closing
    env.step(torch.zeros(env.action_space.shape, device=env.unwrapped.device))
    env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()

View File

@@ -0,0 +1,72 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to print all the available environments in Isaac Lab.
The script iterates over all registered environments and stores the details in a table.
It prints the name of the environment, the entry point and the config file.
All the environments are registered in the `isaaclab_tasks` extension. They start
with `Isaac` in their name.
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from isaaclab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="List Isaac Lab environments.")
parser.add_argument("--keyword", type=str, default=None, help="Keyword to filter environments.")
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
from prettytable import PrettyTable
import isaaclab_tasks # noqa: F401
def main():
    """Print all environments registered in `isaaclab_tasks` extension.

    Iterates over the gymnasium registry and tabulates every task whose id
    contains "Isaac", optionally filtered by ``--keyword``.
    """
    # print all the available environments
    table = PrettyTable(["S. No.", "Task Name", "Entry Point", "Config"])
    table.title = "Available Environments in Isaac Lab"
    # set alignment of table columns
    table.align["Task Name"] = "l"
    table.align["Entry Point"] = "l"
    table.align["Config"] = "l"
    # count of environments
    index = 0
    # acquire all Isaac environments names
    for task_spec in gym.registry.values():
        if "Isaac" in task_spec.id and (args_cli.keyword is None or args_cli.keyword in task_spec.id):
            # use .get(): a spec registered without an "env_cfg_entry_point" kwarg
            # would otherwise raise KeyError and abort the whole listing
            table.add_row([index + 1, task_spec.id, task_spec.entry_point, task_spec.kwargs.get("env_cfg_entry_point")])
            # increment count
            index += 1
    print(table)
if __name__ == "__main__":
try:
# run the main function
main()
except Exception as e:
raise e
finally:
# close the app
simulation_app.close()

View File

@@ -0,0 +1,73 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to an environment with random action agent."""
"""Launch Isaac Sim Simulator first."""
import argparse
from isaaclab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Random agent for Isaac Lab environments.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import torch
import isaaclab_tasks # noqa: F401
import mindbot.tasks # noqa: F401
from isaaclab_tasks.utils import parse_env_cfg
# PLACEHOLDER: Extension template (do not remove this comment)
def main():
    """Random actions agent with Isaac Lab environment."""
    # build the task configuration from the CLI arguments
    cfg = parse_env_cfg(
        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    # instantiate the (vectorized) environment and report its spaces
    env = gym.make(args_cli.task, cfg=cfg)
    print(f"[INFO]: Gym observation space: {env.observation_space}")
    print(f"[INFO]: Gym action space: {env.action_space}")
    # bring the environment to its initial state
    env.reset()
    # keep stepping with uniform random actions until the app window is closed
    while simulation_app.is_running():
        with torch.inference_mode():
            # draw a sample in [0, 1) and rescale it to [-1, 1)
            sample = torch.rand(env.action_space.shape, device=env.unwrapped.device)
            env.step(sample * 2 - 1)
    # tear down the simulator resources
    env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()

View File

@@ -0,0 +1,319 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to run an environment with a pick and lift state machine.
The state machine is implemented in the kernel function `infer_state_machine`.
It uses the `warp` library to run the state machine in parallel on the GPU.
.. code-block:: bash
./isaaclab.sh -p scripts/environments/state_machine/lift_cube_sm.py --num_envs 32
"""
"""Launch Omniverse Toolkit first."""
import argparse
from isaaclab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Pick and lift state machine for lift environments.")
parser.add_argument(
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app
"""Rest everything else."""
from collections.abc import Sequence
import gymnasium as gym
import torch
import warp as wp
from isaaclab.assets.rigid_object.rigid_object_data import RigidObjectData
import isaaclab_tasks # noqa: F401
from isaaclab_tasks.manager_based.manipulation.lift.lift_env_cfg import LiftEnvCfg
from isaaclab_tasks.utils.parse_cfg import parse_env_cfg
# initialize warp
wp.init()
class GripperState:
    """States for the gripper."""

    # gripper command values (wrapped in wp.constant so they are usable inside warp kernels)
    OPEN = wp.constant(1.0)
    CLOSE = wp.constant(-1.0)
class PickSmState:
    """States for the pick state machine."""

    # ordinal state ids; the kernel advances REST -> ... -> LIFT_OBJECT
    REST = wp.constant(0)
    APPROACH_ABOVE_OBJECT = wp.constant(1)
    APPROACH_OBJECT = wp.constant(2)
    GRASP_OBJECT = wp.constant(3)
    LIFT_OBJECT = wp.constant(4)
class PickSmWaitTime:
    """Additional wait times (in s) for states for before switching."""

    # dwell time (seconds) spent in the corresponding state before the transition fires
    REST = wp.constant(0.2)
    APPROACH_ABOVE_OBJECT = wp.constant(0.5)
    APPROACH_OBJECT = wp.constant(0.6)
    GRASP_OBJECT = wp.constant(0.3)
    LIFT_OBJECT = wp.constant(1.0)
@wp.func
def distance_below_threshold(current_pos: wp.vec3, desired_pos: wp.vec3, threshold: float) -> bool:
    # true when the Euclidean distance between the two points is under `threshold`
    return wp.length(current_pos - desired_pos) < threshold
@wp.kernel
def infer_state_machine(
    dt: wp.array(dtype=float),
    sm_state: wp.array(dtype=int),
    sm_wait_time: wp.array(dtype=float),
    ee_pose: wp.array(dtype=wp.transform),
    object_pose: wp.array(dtype=wp.transform),
    des_object_pose: wp.array(dtype=wp.transform),
    des_ee_pose: wp.array(dtype=wp.transform),
    gripper_state: wp.array(dtype=float),
    offset: wp.array(dtype=wp.transform),
    position_threshold: float,
):
    # One thread per environment: reads that environment's current state and poses,
    # writes its desired end-effector pose (des_ee_pose) and gripper command
    # (gripper_state), and advances sm_state/sm_wait_time in place.
    # retrieve thread id
    tid = wp.tid()
    # retrieve state machine state
    state = sm_state[tid]
    # decide next state
    if state == PickSmState.REST:
        # hold the current pose with the gripper open
        des_ee_pose[tid] = ee_pose[tid]
        gripper_state[tid] = GripperState.OPEN
        # wait for a while
        if sm_wait_time[tid] >= PickSmWaitTime.REST:
            # move to next state and reset wait time
            sm_state[tid] = PickSmState.APPROACH_ABOVE_OBJECT
            sm_wait_time[tid] = 0.0
    elif state == PickSmState.APPROACH_ABOVE_OBJECT:
        # target a pose offset above the object (offset is applied on the left)
        des_ee_pose[tid] = wp.transform_multiply(offset[tid], object_pose[tid])
        gripper_state[tid] = GripperState.OPEN
        if distance_below_threshold(
            wp.transform_get_translation(ee_pose[tid]),
            wp.transform_get_translation(des_ee_pose[tid]),
            position_threshold,
        ):
            # wait for a while
            # NOTE(review): dwells for APPROACH_OBJECT's wait time, not
            # APPROACH_ABOVE_OBJECT's — confirm this is intentional.
            if sm_wait_time[tid] >= PickSmWaitTime.APPROACH_OBJECT:
                # move to next state and reset wait time
                sm_state[tid] = PickSmState.APPROACH_OBJECT
                sm_wait_time[tid] = 0.0
    elif state == PickSmState.APPROACH_OBJECT:
        # descend onto the object itself, still with the gripper open
        des_ee_pose[tid] = object_pose[tid]
        gripper_state[tid] = GripperState.OPEN
        if distance_below_threshold(
            wp.transform_get_translation(ee_pose[tid]),
            wp.transform_get_translation(des_ee_pose[tid]),
            position_threshold,
        ):
            if sm_wait_time[tid] >= PickSmWaitTime.APPROACH_OBJECT:
                # move to next state and reset wait time
                sm_state[tid] = PickSmState.GRASP_OBJECT
                sm_wait_time[tid] = 0.0
    elif state == PickSmState.GRASP_OBJECT:
        # keep commanding the object pose while closing the gripper
        des_ee_pose[tid] = object_pose[tid]
        gripper_state[tid] = GripperState.CLOSE
        # wait for a while
        if sm_wait_time[tid] >= PickSmWaitTime.GRASP_OBJECT:
            # move to next state and reset wait time
            sm_state[tid] = PickSmState.LIFT_OBJECT
            sm_wait_time[tid] = 0.0
    elif state == PickSmState.LIFT_OBJECT:
        # carry the object to the desired pose; this state self-loops (terminal)
        des_ee_pose[tid] = des_object_pose[tid]
        gripper_state[tid] = GripperState.CLOSE
        if distance_below_threshold(
            wp.transform_get_translation(ee_pose[tid]),
            wp.transform_get_translation(des_ee_pose[tid]),
            position_threshold,
        ):
            # wait for a while
            if sm_wait_time[tid] >= PickSmWaitTime.LIFT_OBJECT:
                # move to next state and reset wait time (stays in LIFT_OBJECT)
                sm_state[tid] = PickSmState.LIFT_OBJECT
                sm_wait_time[tid] = 0.0
    # increment wait time
    sm_wait_time[tid] = sm_wait_time[tid] + dt[tid]
class PickAndLiftSm:
    """A simple state machine in a robot's task space to pick and lift an object.

    The state machine is implemented as a warp kernel. It takes in the current state of
    the robot's end-effector and the object, and outputs the desired state of the robot's
    end-effector and the gripper. The state machine is implemented as a finite state
    machine with the following states:

    1. REST: The robot is at rest.
    2. APPROACH_ABOVE_OBJECT: The robot moves above the object.
    3. APPROACH_OBJECT: The robot moves to the object.
    4. GRASP_OBJECT: The robot grasps the object.
    5. LIFT_OBJECT: The robot lifts the object to the desired pose. This is the final state.
    """

    def __init__(self, dt: float, num_envs: int, device: torch.device | str = "cpu", position_threshold: float = 0.01):
        """Initialize the state machine.

        Args:
            dt: The environment time step.
            num_envs: The number of environments to simulate.
            device: The device to run the state machine on.
            position_threshold: Distance (in m) under which a commanded pose counts as reached.
        """
        # save parameters
        self.dt = float(dt)
        self.num_envs = num_envs
        self.device = device
        self.position_threshold = position_threshold
        # initialize state machine
        self.sm_dt = torch.full((self.num_envs,), self.dt, device=self.device)
        self.sm_state = torch.full((self.num_envs,), 0, dtype=torch.int32, device=self.device)
        self.sm_wait_time = torch.zeros((self.num_envs,), device=self.device)
        # desired state
        self.des_ee_pose = torch.zeros((self.num_envs, 7), device=self.device)
        self.des_gripper_state = torch.full((self.num_envs,), 0.0, device=self.device)
        # approach above object offset (0.1 m along z)
        self.offset = torch.zeros((self.num_envs, 7), device=self.device)
        self.offset[:, 2] = 0.1
        self.offset[:, -1] = 1.0  # warp expects quaternion as (x, y, z, w)
        # convert to warp: these views share storage with the torch tensors above,
        # so compute() can read the torch side after the kernel writes the warp side
        self.sm_dt_wp = wp.from_torch(self.sm_dt, wp.float32)
        self.sm_state_wp = wp.from_torch(self.sm_state, wp.int32)
        self.sm_wait_time_wp = wp.from_torch(self.sm_wait_time, wp.float32)
        self.des_ee_pose_wp = wp.from_torch(self.des_ee_pose, wp.transform)
        self.des_gripper_state_wp = wp.from_torch(self.des_gripper_state, wp.float32)
        self.offset_wp = wp.from_torch(self.offset, wp.transform)

    def reset_idx(self, env_ids: Sequence[int] | None = None):
        """Reset the state machine for the given environment indices (all when None)."""
        if env_ids is None:
            env_ids = slice(None)
        self.sm_state[env_ids] = 0
        self.sm_wait_time[env_ids] = 0.0

    def compute(self, ee_pose: torch.Tensor, object_pose: torch.Tensor, des_object_pose: torch.Tensor) -> torch.Tensor:
        """Compute the desired state of the robot's end-effector and the gripper.

        Args:
            ee_pose: Current end-effector pose, (num_envs, 7) as position + quaternion (w, x, y, z).
            object_pose: Current object pose, same layout.
            des_object_pose: Desired object pose, same layout.

        Returns:
            (num_envs, 8) tensor: desired end-effector pose (pos + quat wxyz) and gripper command.
        """
        # convert all transformations from (w, x, y, z) to (x, y, z, w)
        ee_pose = ee_pose[:, [0, 1, 2, 4, 5, 6, 3]]
        object_pose = object_pose[:, [0, 1, 2, 4, 5, 6, 3]]
        des_object_pose = des_object_pose[:, [0, 1, 2, 4, 5, 6, 3]]
        # convert to warp (contiguous copies are required for wp.from_torch)
        ee_pose_wp = wp.from_torch(ee_pose.contiguous(), wp.transform)
        object_pose_wp = wp.from_torch(object_pose.contiguous(), wp.transform)
        des_object_pose_wp = wp.from_torch(des_object_pose.contiguous(), wp.transform)
        # run state machine
        wp.launch(
            kernel=infer_state_machine,
            dim=self.num_envs,
            inputs=[
                self.sm_dt_wp,
                self.sm_state_wp,
                self.sm_wait_time_wp,
                ee_pose_wp,
                object_pose_wp,
                des_object_pose_wp,
                self.des_ee_pose_wp,
                self.des_gripper_state_wp,
                self.offset_wp,
                self.position_threshold,
            ],
            device=self.device,
        )
        # convert transformations back to (w, x, y, z)
        des_ee_pose = self.des_ee_pose[:, [0, 1, 2, 6, 3, 4, 5]]
        # convert to torch
        return torch.cat([des_ee_pose, self.des_gripper_state.unsqueeze(-1)], dim=-1)
def main():
    """Run the pick-and-lift state machine on the Franka lift-cube IK task."""
    # parse configuration
    env_cfg: LiftEnvCfg = parse_env_cfg(
        "Isaac-Lift-Cube-Franka-IK-Abs-v0",
        device=args_cli.device,
        num_envs=args_cli.num_envs,
        use_fabric=not args_cli.disable_fabric,
    )
    # create environment
    env = gym.make("Isaac-Lift-Cube-Franka-IK-Abs-v0", cfg=env_cfg)
    # reset environment at start
    env.reset()
    # create action buffers (position + quaternion)
    actions = torch.zeros(env.unwrapped.action_space.shape, device=env.unwrapped.device)
    # sets the first quaternion component — identity rotation if the layout is (w, x, y, z)
    actions[:, 3] = 1.0
    # desired object orientation (we only do position control of object)
    # quaternion with x = 1 — presumably the gripper-down orientation; confirm against the task
    desired_orientation = torch.zeros((env.unwrapped.num_envs, 4), device=env.unwrapped.device)
    desired_orientation[:, 1] = 1.0
    # create state machine
    pick_sm = PickAndLiftSm(
        env_cfg.sim.dt * env_cfg.decimation, env.unwrapped.num_envs, env.unwrapped.device, position_threshold=0.01
    )

    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # step environment
            # NOTE(review): index -2 of gym's 5-tuple is the `truncated` flag —
            # confirm terminations do not also need to reset the state machine.
            dones = env.step(actions)[-2]

            # observations
            # -- end-effector frame (poses are shifted into each env's local frame)
            ee_frame_sensor = env.unwrapped.scene["ee_frame"]
            tcp_rest_position = ee_frame_sensor.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins
            tcp_rest_orientation = ee_frame_sensor.data.target_quat_w[..., 0, :].clone()
            # -- object frame
            object_data: RigidObjectData = env.unwrapped.scene["object"].data
            object_position = object_data.root_pos_w - env.unwrapped.scene.env_origins
            # -- target object frame
            desired_position = env.unwrapped.command_manager.get_command("object_pose")[..., :3]

            # advance state machine
            actions = pick_sm.compute(
                torch.cat([tcp_rest_position, tcp_rest_orientation], dim=-1),
                torch.cat([object_position, desired_orientation], dim=-1),
                torch.cat([desired_position, desired_orientation], dim=-1),
            )

            # reset state machine for the environments that just finished
            if dones.any():
                pick_sm.reset_idx(dones.nonzero(as_tuple=False).squeeze(-1))

    # close the environment
    env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()

View File

@@ -0,0 +1,341 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to demonstrate lifting a deformable object with a robotic arm.
The state machine is implemented in the kernel function `infer_state_machine`.
It uses the `warp` library to run the state machine in parallel on the GPU.
.. code-block:: bash
./isaaclab.sh -p scripts/environments/state_machine/lift_teddy_bear.py
"""
"""Launch Omniverse Toolkit first."""
import argparse
from isaaclab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Pick and lift a teddy bear with a robotic arm.")
parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app
# disable metrics assembler due to scene graph instancing
from isaacsim.core.utils.extensions import disable_extension
disable_extension("omni.usd.metrics.assembler.ui")
"""Rest everything else."""
from collections.abc import Sequence
import gymnasium as gym
import torch
import warp as wp
from isaaclab.assets.rigid_object.rigid_object_data import RigidObjectData
import isaaclab_tasks # noqa: F401
from isaaclab_tasks.manager_based.manipulation.lift.lift_env_cfg import LiftEnvCfg
from isaaclab_tasks.utils.parse_cfg import parse_env_cfg
# initialize warp
wp.init()
class GripperState:
    """States for the gripper."""

    # gripper command values (wrapped in wp.constant so they are usable inside warp kernels)
    OPEN = wp.constant(1.0)
    CLOSE = wp.constant(-1.0)
class PickSmState:
    """States for the pick state machine."""

    # ordinal state ids; the kernel advances REST -> ... -> OPEN_GRIPPER
    REST = wp.constant(0)
    APPROACH_ABOVE_OBJECT = wp.constant(1)
    APPROACH_OBJECT = wp.constant(2)
    GRASP_OBJECT = wp.constant(3)
    LIFT_OBJECT = wp.constant(4)
    OPEN_GRIPPER = wp.constant(5)
class PickSmWaitTime:
    """Additional wait times (in s) for states for before switching."""

    # dwell time (seconds) spent in the corresponding state before the transition fires
    REST = wp.constant(0.2)
    APPROACH_ABOVE_OBJECT = wp.constant(0.5)
    APPROACH_OBJECT = wp.constant(0.6)
    GRASP_OBJECT = wp.constant(0.6)
    LIFT_OBJECT = wp.constant(1.0)
    OPEN_GRIPPER = wp.constant(0.0)
@wp.func
def distance_below_threshold(current_pos: wp.vec3, desired_pos: wp.vec3, threshold: float) -> bool:
    # true when the Euclidean distance between the two points is under `threshold`
    return wp.length(current_pos - desired_pos) < threshold
@wp.kernel
def infer_state_machine(
    dt: wp.array(dtype=float),
    sm_state: wp.array(dtype=int),
    sm_wait_time: wp.array(dtype=float),
    ee_pose: wp.array(dtype=wp.transform),
    object_pose: wp.array(dtype=wp.transform),
    des_object_pose: wp.array(dtype=wp.transform),
    des_ee_pose: wp.array(dtype=wp.transform),
    gripper_state: wp.array(dtype=float),
    offset: wp.array(dtype=wp.transform),
    position_threshold: float,
):
    # One thread per environment: reads that environment's current state and poses,
    # writes its desired end-effector pose and gripper command, and advances
    # sm_state/sm_wait_time in place. Ends in the self-looping OPEN_GRIPPER state.
    # retrieve thread id
    tid = wp.tid()
    # retrieve state machine state
    state = sm_state[tid]
    # decide next state
    if state == PickSmState.REST:
        # hold the current pose with the gripper open
        des_ee_pose[tid] = ee_pose[tid]
        gripper_state[tid] = GripperState.OPEN
        # wait for a while
        if sm_wait_time[tid] >= PickSmWaitTime.REST:
            # move to next state and reset wait time
            sm_state[tid] = PickSmState.APPROACH_ABOVE_OBJECT
            sm_wait_time[tid] = 0.0
    elif state == PickSmState.APPROACH_ABOVE_OBJECT:
        # target a pose offset above the object (offset is applied on the left)
        des_ee_pose[tid] = wp.transform_multiply(offset[tid], object_pose[tid])
        gripper_state[tid] = GripperState.OPEN
        if distance_below_threshold(
            wp.transform_get_translation(ee_pose[tid]),
            wp.transform_get_translation(des_ee_pose[tid]),
            position_threshold,
        ):
            # wait for a while
            # NOTE(review): dwells for APPROACH_OBJECT's wait time, not
            # APPROACH_ABOVE_OBJECT's — confirm this is intentional.
            if sm_wait_time[tid] >= PickSmWaitTime.APPROACH_OBJECT:
                # move to next state and reset wait time
                sm_state[tid] = PickSmState.APPROACH_OBJECT
                sm_wait_time[tid] = 0.0
    elif state == PickSmState.APPROACH_OBJECT:
        # descend onto the object itself, still with the gripper open
        des_ee_pose[tid] = object_pose[tid]
        gripper_state[tid] = GripperState.OPEN
        if distance_below_threshold(
            wp.transform_get_translation(ee_pose[tid]),
            wp.transform_get_translation(des_ee_pose[tid]),
            position_threshold,
        ):
            # wait for a while
            if sm_wait_time[tid] >= PickSmWaitTime.APPROACH_OBJECT:
                # move to next state and reset wait time
                sm_state[tid] = PickSmState.GRASP_OBJECT
                sm_wait_time[tid] = 0.0
    elif state == PickSmState.GRASP_OBJECT:
        # keep commanding the object pose while closing the gripper
        des_ee_pose[tid] = object_pose[tid]
        gripper_state[tid] = GripperState.CLOSE
        # wait for a while
        if sm_wait_time[tid] >= PickSmWaitTime.GRASP_OBJECT:
            # move to next state and reset wait time
            sm_state[tid] = PickSmState.LIFT_OBJECT
            sm_wait_time[tid] = 0.0
    elif state == PickSmState.LIFT_OBJECT:
        # carry the object to the desired pose
        des_ee_pose[tid] = des_object_pose[tid]
        gripper_state[tid] = GripperState.CLOSE
        if distance_below_threshold(
            wp.transform_get_translation(ee_pose[tid]),
            wp.transform_get_translation(des_ee_pose[tid]),
            position_threshold,
        ):
            # wait for a while
            if sm_wait_time[tid] >= PickSmWaitTime.LIFT_OBJECT:
                # move to next state and reset wait time
                sm_state[tid] = PickSmState.OPEN_GRIPPER
                sm_wait_time[tid] = 0.0
    elif state == PickSmState.OPEN_GRIPPER:
        # release the object; des_ee_pose is deliberately left unchanged here
        # des_ee_pose[tid] = object_pose[tid]
        gripper_state[tid] = GripperState.OPEN
        # wait for a while
        if sm_wait_time[tid] >= PickSmWaitTime.OPEN_GRIPPER:
            # move to next state and reset wait time (stays in OPEN_GRIPPER)
            sm_state[tid] = PickSmState.OPEN_GRIPPER
            sm_wait_time[tid] = 0.0
    # increment wait time
    sm_wait_time[tid] = sm_wait_time[tid] + dt[tid]
class PickAndLiftSm:
"""A simple state machine in a robot's task space to pick and lift an object.
The state machine is implemented as a warp kernel. It takes in the current state of
the robot's end-effector and the object, and outputs the desired state of the robot's
end-effector and the gripper. The state machine is implemented as a finite state
machine with the following states:
1. REST: The robot is at rest.
2. APPROACH_ABOVE_OBJECT: The robot moves above the object.
3. APPROACH_OBJECT: The robot moves to the object.
4. GRASP_OBJECT: The robot grasps the object.
5. LIFT_OBJECT: The robot lifts the object to the desired pose. This is the final state.
"""
def __init__(self, dt: float, num_envs: int, device: torch.device | str = "cpu", position_threshold=0.01):
"""Initialize the state machine.
Args:
dt: The environment time step.
num_envs: The number of environments to simulate.
device: The device to run the state machine on.
"""
# save parameters
self.dt = float(dt)
self.num_envs = num_envs
self.device = device
self.position_threshold = position_threshold
# initialize state machine
self.sm_dt = torch.full((self.num_envs,), self.dt, device=self.device)
self.sm_state = torch.full((self.num_envs,), 0, dtype=torch.int32, device=self.device)
self.sm_wait_time = torch.zeros((self.num_envs,), device=self.device)
# desired state
self.des_ee_pose = torch.zeros((self.num_envs, 7), device=self.device)
self.des_gripper_state = torch.full((self.num_envs,), 0.0, device=self.device)
# approach above object offset
self.offset = torch.zeros((self.num_envs, 7), device=self.device)
self.offset[:, 2] = 0.2
self.offset[:, -1] = 1.0 # warp expects quaternion as (x, y, z, w)
# convert to warp
self.sm_dt_wp = wp.from_torch(self.sm_dt, wp.float32)
self.sm_state_wp = wp.from_torch(self.sm_state, wp.int32)
self.sm_wait_time_wp = wp.from_torch(self.sm_wait_time, wp.float32)
self.des_ee_pose_wp = wp.from_torch(self.des_ee_pose, wp.transform)
self.des_gripper_state_wp = wp.from_torch(self.des_gripper_state, wp.float32)
self.offset_wp = wp.from_torch(self.offset, wp.transform)
def reset_idx(self, env_ids: Sequence[int] | None = None):
    """Reset the state machine for the given environment indices.

    Args:
        env_ids: Environment indices to reset. Defaults to None, in which
            case all environments are reset.
    """
    if env_ids is None:
        env_ids = slice(None)
    self.sm_state[env_ids] = 0
    self.sm_wait_time[env_ids] = 0.0
def compute(self, ee_pose: torch.Tensor, object_pose: torch.Tensor, des_object_pose: torch.Tensor):
    """Compute the desired state of the robot's end-effector and the gripper.

    Args:
        ee_pose: Current end-effector pose, shape (num_envs, 7), quaternion in (w, x, y, z).
        object_pose: Object grasp pose in the same convention.
        des_object_pose: Desired object pose in the same convention.

    Returns:
        Tensor of shape (num_envs, 8): desired end-effector pose (pos, quat in
        (w, x, y, z)) concatenated with the gripper command.
    """
    # convert all transformations from (w, x, y, z) to (x, y, z, w)
    ee_pose = ee_pose[:, [0, 1, 2, 4, 5, 6, 3]]
    object_pose = object_pose[:, [0, 1, 2, 4, 5, 6, 3]]
    des_object_pose = des_object_pose[:, [0, 1, 2, 4, 5, 6, 3]]
    # convert to warp (contiguous copies are required for wp.from_torch)
    ee_pose_wp = wp.from_torch(ee_pose.contiguous(), wp.transform)
    object_pose_wp = wp.from_torch(object_pose.contiguous(), wp.transform)
    des_object_pose_wp = wp.from_torch(des_object_pose.contiguous(), wp.transform)
    # run state machine: updates the des_ee_pose / des_gripper_state buffers in place
    wp.launch(
        kernel=infer_state_machine,
        dim=self.num_envs,
        inputs=[
            self.sm_dt_wp,
            self.sm_state_wp,
            self.sm_wait_time_wp,
            ee_pose_wp,
            object_pose_wp,
            des_object_pose_wp,
            self.des_ee_pose_wp,
            self.des_gripper_state_wp,
            self.offset_wp,
            self.position_threshold,
        ],
        device=self.device,
    )
    # convert transformations back to (w, x, y, z)
    des_ee_pose = self.des_ee_pose[:, [0, 1, 2, 6, 3, 4, 5]]
    # convert to torch
    return torch.cat([des_ee_pose, self.des_gripper_state.unsqueeze(-1)], dim=-1)
def main():
    """Run the pick-and-lift state machine on the teddy-bear lift environment."""
    # parse configuration
    env_cfg: LiftEnvCfg = parse_env_cfg(
        "Isaac-Lift-Teddy-Bear-Franka-IK-Abs-v0",
        device=args_cli.device,
        num_envs=args_cli.num_envs,
    )
    env_cfg.viewer.eye = (2.1, 1.0, 1.3)
    # create environment
    env = gym.make("Isaac-Lift-Teddy-Bear-Franka-IK-Abs-v0", cfg=env_cfg)
    # reset environment at start
    env.reset()
    # create action buffers (position + quaternion); identity quaternion (w = 1)
    actions = torch.zeros(env.unwrapped.action_space.shape, device=env.unwrapped.device)
    actions[:, 3] = 1.0
    # desired rotation after grasping
    desired_orientation = torch.zeros((env.unwrapped.num_envs, 4), device=env.unwrapped.device)
    desired_orientation[:, 1] = 1.0
    object_grasp_orientation = torch.zeros((env.unwrapped.num_envs, 4), device=env.unwrapped.device)
    # z-axis pointing down and 45 degrees rotation
    object_grasp_orientation[:, 1] = 0.9238795
    object_grasp_orientation[:, 2] = -0.3826834
    # grasp-point offset in the object's local frame (broadcast over envs below)
    object_local_grasp_position = torch.tensor([0.02, -0.08, 0.0], device=env.unwrapped.device)
    # create state machine (dt = physics step * decimation = one env step)
    pick_sm = PickAndLiftSm(env_cfg.sim.dt * env_cfg.decimation, env.unwrapped.num_envs, env.unwrapped.device)
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # step environment
            # NOTE(review): index -2 of gymnasium's (obs, reward, terminated, truncated, info)
            # step tuple is the truncation flag — confirm this is the intended done signal.
            dones = env.step(actions)[-2]
            # observations
            # -- end-effector frame
            ee_frame_sensor = env.unwrapped.scene["ee_frame"]
            tcp_rest_position = ee_frame_sensor.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins
            tcp_rest_orientation = ee_frame_sensor.data.target_quat_w[..., 0, :].clone()
            # -- object frame
            object_data: RigidObjectData = env.unwrapped.scene["object"].data
            object_position = object_data.root_pos_w - env.unwrapped.scene.env_origins
            object_position += object_local_grasp_position
            # -- target object frame
            desired_position = env.unwrapped.command_manager.get_command("object_pose")[..., :3]
            # advance state machine
            actions = pick_sm.compute(
                torch.cat([tcp_rest_position, tcp_rest_orientation], dim=-1),
                torch.cat([object_position, object_grasp_orientation], dim=-1),
                torch.cat([desired_position, desired_orientation], dim=-1),
            )
            # reset state machine for environments that finished
            if dones.any():
                pick_sm.reset_idx(dones.nonzero(as_tuple=False).squeeze(-1))
    # close the environment
    env.close()
if __name__ == "__main__":
    # run the main function
    main()
    # close sim app after the main loop exits
    simulation_app.close()

View File

@@ -0,0 +1,333 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to run an environment with a cabinet opening state machine.
The state machine is implemented in the kernel function `infer_state_machine`.
It uses the `warp` library to run the state machine in parallel on the GPU.
.. code-block:: bash
./isaaclab.sh -p scripts/environments/state_machine/open_cabinet_sm.py --num_envs 32
"""
"""Launch Omniverse Toolkit first."""
import argparse

from isaaclab.app import AppLauncher

# add argparse arguments
# fixed: description was copy-pasted from the pick-and-lift script; this script opens a drawer
parser = argparse.ArgumentParser(description="Drawer opening state machine for cabinet environments.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app

"""Rest everything else."""

from collections.abc import Sequence

import gymnasium as gym
import torch
import warp as wp

from isaaclab.sensors import FrameTransformer

import isaaclab_tasks  # noqa: F401
from isaaclab_tasks.manager_based.manipulation.cabinet.cabinet_env_cfg import CabinetEnvCfg
from isaaclab_tasks.utils.parse_cfg import parse_env_cfg

# initialize warp
wp.init()
class GripperState:
    """States for the gripper."""

    # gripper command values consumed by the state machine kernel
    OPEN = wp.constant(1.0)
    CLOSE = wp.constant(-1.0)
class OpenDrawerSmState:
    """States for the cabinet drawer opening state machine."""

    # states are visited in this order; RELEASE_HANDLE is terminal
    REST = wp.constant(0)
    APPROACH_INFRONT_HANDLE = wp.constant(1)
    APPROACH_HANDLE = wp.constant(2)
    GRASP_HANDLE = wp.constant(3)
    OPEN_DRAWER = wp.constant(4)
    RELEASE_HANDLE = wp.constant(5)
class OpenDrawerSmWaitTime:
    """Additional wait times (in s) for states for before switching."""

    REST = wp.constant(0.5)
    APPROACH_INFRONT_HANDLE = wp.constant(1.25)
    APPROACH_HANDLE = wp.constant(1.0)
    GRASP_HANDLE = wp.constant(1.0)
    OPEN_DRAWER = wp.constant(3.0)
    RELEASE_HANDLE = wp.constant(0.2)
# returns True when the Euclidean distance between the two points is below the threshold
@wp.func
def distance_below_threshold(current_pos: wp.vec3, desired_pos: wp.vec3, threshold: float) -> bool:
    return wp.length(current_pos - desired_pos) < threshold
# Per-environment FSM step: reads the current ee/handle poses and writes the desired
# ee pose and gripper command for this control step. One warp thread per environment.
@wp.kernel
def infer_state_machine(
    dt: wp.array(dtype=float),
    sm_state: wp.array(dtype=int),
    sm_wait_time: wp.array(dtype=float),
    ee_pose: wp.array(dtype=wp.transform),
    handle_pose: wp.array(dtype=wp.transform),
    des_ee_pose: wp.array(dtype=wp.transform),
    gripper_state: wp.array(dtype=float),
    handle_approach_offset: wp.array(dtype=wp.transform),
    handle_grasp_offset: wp.array(dtype=wp.transform),
    drawer_opening_rate: wp.array(dtype=wp.transform),
    position_threshold: float,
):
    # retrieve thread id
    tid = wp.tid()
    # retrieve state machine state
    state = sm_state[tid]
    # decide next state
    if state == OpenDrawerSmState.REST:
        # hold the current pose with the gripper open
        des_ee_pose[tid] = ee_pose[tid]
        gripper_state[tid] = GripperState.OPEN
        # wait for a while
        if sm_wait_time[tid] >= OpenDrawerSmWaitTime.REST:
            # move to next state and reset wait time
            sm_state[tid] = OpenDrawerSmState.APPROACH_INFRONT_HANDLE
            sm_wait_time[tid] = 0.0
    elif state == OpenDrawerSmState.APPROACH_INFRONT_HANDLE:
        # target = approach offset composed with the handle pose
        des_ee_pose[tid] = wp.transform_multiply(handle_approach_offset[tid], handle_pose[tid])
        gripper_state[tid] = GripperState.OPEN
        if distance_below_threshold(
            wp.transform_get_translation(ee_pose[tid]),
            wp.transform_get_translation(des_ee_pose[tid]),
            position_threshold,
        ):
            # wait for a while
            if sm_wait_time[tid] >= OpenDrawerSmWaitTime.APPROACH_INFRONT_HANDLE:
                # move to next state and reset wait time
                sm_state[tid] = OpenDrawerSmState.APPROACH_HANDLE
                sm_wait_time[tid] = 0.0
    elif state == OpenDrawerSmState.APPROACH_HANDLE:
        des_ee_pose[tid] = handle_pose[tid]
        gripper_state[tid] = GripperState.OPEN
        if distance_below_threshold(
            wp.transform_get_translation(ee_pose[tid]),
            wp.transform_get_translation(des_ee_pose[tid]),
            position_threshold,
        ):
            # wait for a while
            if sm_wait_time[tid] >= OpenDrawerSmWaitTime.APPROACH_HANDLE:
                # move to next state and reset wait time
                sm_state[tid] = OpenDrawerSmState.GRASP_HANDLE
                sm_wait_time[tid] = 0.0
    elif state == OpenDrawerSmState.GRASP_HANDLE:
        des_ee_pose[tid] = wp.transform_multiply(handle_grasp_offset[tid], handle_pose[tid])
        gripper_state[tid] = GripperState.CLOSE
        # wait for a while
        if sm_wait_time[tid] >= OpenDrawerSmWaitTime.GRASP_HANDLE:
            # move to next state and reset wait time
            sm_state[tid] = OpenDrawerSmState.OPEN_DRAWER
            sm_wait_time[tid] = 0.0
    elif state == OpenDrawerSmState.OPEN_DRAWER:
        # pull along the drawer axis by composing the opening offset each step
        des_ee_pose[tid] = wp.transform_multiply(drawer_opening_rate[tid], handle_pose[tid])
        gripper_state[tid] = GripperState.CLOSE
        # wait for a while
        if sm_wait_time[tid] >= OpenDrawerSmWaitTime.OPEN_DRAWER:
            # move to next state and reset wait time
            sm_state[tid] = OpenDrawerSmState.RELEASE_HANDLE
            sm_wait_time[tid] = 0.0
    elif state == OpenDrawerSmState.RELEASE_HANDLE:
        des_ee_pose[tid] = ee_pose[tid]
        # NOTE(review): gripper stays CLOSE here although the state is named
        # "release handle" — confirm whether GripperState.OPEN was intended.
        gripper_state[tid] = GripperState.CLOSE
        # wait for a while
        if sm_wait_time[tid] >= OpenDrawerSmWaitTime.RELEASE_HANDLE:
            # terminal state: the machine re-enters RELEASE_HANDLE and only
            # resets its wait timer (the self-assignment is intentional)
            sm_state[tid] = OpenDrawerSmState.RELEASE_HANDLE
            sm_wait_time[tid] = 0.0
    # increment wait time
    sm_wait_time[tid] = sm_wait_time[tid] + dt[tid]
class OpenDrawerSm:
    """A simple state machine in a robot's task space to open a drawer in the cabinet.

    The state machine is implemented as a warp kernel. It takes in the current state of
    the robot's end-effector and the object, and outputs the desired state of the robot's
    end-effector and the gripper. The state machine is implemented as a finite state
    machine with the following states:

    1. REST: The robot is at rest.
    2. APPROACH_INFRONT_HANDLE: The robot moves in front of the handle of the drawer.
    3. APPROACH_HANDLE: The robot moves towards the handle of the drawer.
    4. GRASP_HANDLE: The robot grasps the handle of the drawer.
    5. OPEN_DRAWER: The robot opens the drawer.
    6. RELEASE_HANDLE: The robot releases the handle of the drawer. This is the final state.
    """

    def __init__(self, dt: float, num_envs: int, device: torch.device | str = "cpu", position_threshold=0.01):
        """Initialize the state machine.

        Args:
            dt: The environment time step.
            num_envs: The number of environments to simulate.
            device: The device to run the state machine on.
            position_threshold: Distance (in m) below which the end-effector is
                considered to have reached a desired position.
        """
        # save parameters
        self.dt = float(dt)
        self.num_envs = num_envs
        self.device = device
        self.position_threshold = position_threshold
        # initialize state machine buffers (one entry per environment)
        self.sm_dt = torch.full((self.num_envs,), self.dt, device=self.device)
        self.sm_state = torch.full((self.num_envs,), 0, dtype=torch.int32, device=self.device)
        self.sm_wait_time = torch.zeros((self.num_envs,), device=self.device)
        # desired state
        self.des_ee_pose = torch.zeros((self.num_envs, 7), device=self.device)
        self.des_gripper_state = torch.full((self.num_envs,), 0.0, device=self.device)
        # approach in front of the handle: 0.1 m back along x, identity rotation
        self.handle_approach_offset = torch.zeros((self.num_envs, 7), device=self.device)
        self.handle_approach_offset[:, 0] = -0.1
        self.handle_approach_offset[:, -1] = 1.0  # warp expects quaternion as (x, y, z, w)
        # handle grasp offset
        self.handle_grasp_offset = torch.zeros((self.num_envs, 7), device=self.device)
        self.handle_grasp_offset[:, 0] = 0.025
        self.handle_grasp_offset[:, -1] = 1.0  # warp expects quaternion as (x, y, z, w)
        # drawer opening rate (per-step pull offset along -x)
        self.drawer_opening_rate = torch.zeros((self.num_envs, 7), device=self.device)
        self.drawer_opening_rate[:, 0] = -0.015
        self.drawer_opening_rate[:, -1] = 1.0  # warp expects quaternion as (x, y, z, w)
        # convert to warp (wp.from_torch creates views sharing memory with the torch tensors)
        self.sm_dt_wp = wp.from_torch(self.sm_dt, wp.float32)
        self.sm_state_wp = wp.from_torch(self.sm_state, wp.int32)
        self.sm_wait_time_wp = wp.from_torch(self.sm_wait_time, wp.float32)
        self.des_ee_pose_wp = wp.from_torch(self.des_ee_pose, wp.transform)
        self.des_gripper_state_wp = wp.from_torch(self.des_gripper_state, wp.float32)
        self.handle_approach_offset_wp = wp.from_torch(self.handle_approach_offset, wp.transform)
        self.handle_grasp_offset_wp = wp.from_torch(self.handle_grasp_offset, wp.transform)
        self.drawer_opening_rate_wp = wp.from_torch(self.drawer_opening_rate, wp.transform)

    def reset_idx(self, env_ids: Sequence[int] | None = None):
        """Reset the state machine for the given environment indices.

        Args:
            env_ids: Environment indices to reset. Defaults to None, in which
                case all environments are reset.
        """
        if env_ids is None:
            env_ids = slice(None)
        # reset state machine
        self.sm_state[env_ids] = 0
        self.sm_wait_time[env_ids] = 0.0

    def compute(self, ee_pose: torch.Tensor, handle_pose: torch.Tensor):
        """Compute the desired state of the robot's end-effector and the gripper.

        Args:
            ee_pose: Current end-effector pose, shape (num_envs, 7), quaternion in (w, x, y, z).
            handle_pose: Drawer handle pose in the same convention.

        Returns:
            Tensor of shape (num_envs, 8): desired end-effector pose (pos, quat in
            (w, x, y, z)) concatenated with the gripper command.
        """
        # convert all transformations from (w, x, y, z) to (x, y, z, w)
        ee_pose = ee_pose[:, [0, 1, 2, 4, 5, 6, 3]]
        handle_pose = handle_pose[:, [0, 1, 2, 4, 5, 6, 3]]
        # convert to warp (contiguous copies are required for wp.from_torch)
        ee_pose_wp = wp.from_torch(ee_pose.contiguous(), wp.transform)
        handle_pose_wp = wp.from_torch(handle_pose.contiguous(), wp.transform)
        # run state machine: updates the des_ee_pose / des_gripper_state buffers in place
        wp.launch(
            kernel=infer_state_machine,
            dim=self.num_envs,
            inputs=[
                self.sm_dt_wp,
                self.sm_state_wp,
                self.sm_wait_time_wp,
                ee_pose_wp,
                handle_pose_wp,
                self.des_ee_pose_wp,
                self.des_gripper_state_wp,
                self.handle_approach_offset_wp,
                self.handle_grasp_offset_wp,
                self.drawer_opening_rate_wp,
                self.position_threshold,
            ],
            device=self.device,
        )
        # convert transformations back to (w, x, y, z)
        des_ee_pose = self.des_ee_pose[:, [0, 1, 2, 6, 3, 4, 5]]
        # convert to torch
        return torch.cat([des_ee_pose, self.des_gripper_state.unsqueeze(-1)], dim=-1)
def main():
    """Run the drawer-opening state machine on the cabinet environment."""
    # parse configuration
    env_cfg: CabinetEnvCfg = parse_env_cfg(
        "Isaac-Open-Drawer-Franka-IK-Abs-v0",
        device=args_cli.device,
        num_envs=args_cli.num_envs,
        use_fabric=not args_cli.disable_fabric,
    )
    # create environment
    env = gym.make("Isaac-Open-Drawer-Franka-IK-Abs-v0", cfg=env_cfg)
    # reset environment at start
    env.reset()
    # create action buffers (position + quaternion); identity quaternion (w = 1)
    actions = torch.zeros(env.unwrapped.action_space.shape, device=env.unwrapped.device)
    actions[:, 3] = 1.0
    # NOTE: removed an unused `desired_orientation` buffer that was computed but never read.
    # create state machine (dt = physics step * decimation = one env step)
    open_sm = OpenDrawerSm(env_cfg.sim.dt * env_cfg.decimation, env.unwrapped.num_envs, env.unwrapped.device)
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # step environment
            # NOTE(review): index -2 of gymnasium's (obs, reward, terminated, truncated, info)
            # step tuple is the truncation flag — used here as the done signal.
            dones = env.step(actions)[-2]
            # observations
            # -- end-effector frame
            ee_frame_tf: FrameTransformer = env.unwrapped.scene["ee_frame"]
            tcp_rest_position = ee_frame_tf.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins
            tcp_rest_orientation = ee_frame_tf.data.target_quat_w[..., 0, :].clone()
            # -- handle frame
            cabinet_frame_tf: FrameTransformer = env.unwrapped.scene["cabinet_frame"]
            cabinet_position = cabinet_frame_tf.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins
            cabinet_orientation = cabinet_frame_tf.data.target_quat_w[..., 0, :].clone()
            # advance state machine
            actions = open_sm.compute(
                torch.cat([tcp_rest_position, tcp_rest_orientation], dim=-1),
                torch.cat([cabinet_position, cabinet_orientation], dim=-1),
            )
            # reset state machine for environments that finished
            if dones.any():
                open_sm.reset_idx(dones.nonzero(as_tuple=False).squeeze(-1))
    # close the environment
    env.close()
if __name__ == "__main__":
    # run the main execution
    main()
    # close sim app after the main loop exits
    simulation_app.close()

View File

@@ -0,0 +1,143 @@
# scripts/environments/teleoperation/mindrobot_keyboard.py
# Copyright (c) 2022-2026, The Isaac Lab Project Developers.
# SPDX-License-Identifier: BSD-3-Clause
"""Shared keyboard controllers for MindRobot teleoperation and demo recording.
This module contains ONLY class definitions (no argparse, no AppLauncher),
so it is safe to import from both standalone scripts and from record_demos.py.
"""
import weakref
import numpy as np
import torch
import carb
from isaaclab.devices import Se3Keyboard, Se3KeyboardCfg
class WheelKeyboard:
    """Differential-drive (skid-steer) keyboard controller.

    Listens to arrow keys via Carb and produces a 4-D joint-velocity
    command: [right_b, left_b, left_f, right_f] (rad/s).

    Key mappings
    ─────────────────────────────────────────────────────
    ↑ (UP) forward [ v, v, v, v]
    ↓ (DOWN) backward [-v, -v, -v, -v]
    ← (LEFT) left turn [ v, -v, -v, v]
    → (RIGHT) right turn[-v, v, v, -v]
    ─────────────────────────────────────────────────────
    """

    # arrow keys this controller reacts to (the actual bindings live in _key_map)
    _WHEEL_KEYS = {"UP", "DOWN", "LEFT", "RIGHT"}

    def __init__(self, wheel_speed: float = 5.0, sim_device: str = "cuda:0"):
        """Initialize bindings and subscribe to Carb keyboard events.

        Args:
            wheel_speed: Magnitude of each wheel-velocity command in rad/s.
            sim_device: Torch device on which ``advance`` returns the command tensor.
        """
        self.wheel_speed = wheel_speed
        self._sim_device = sim_device
        self._wheel_vel = np.zeros(4)
        self._key_map: dict[str, np.ndarray] = {}
        self._create_bindings()
        import omni

        self._appwindow = omni.appwindow.get_default_app_window()
        self._input = carb.input.acquire_input_interface()
        self._keyboard = self._appwindow.get_keyboard()
        # weakref.proxy keeps the subscription from holding this object alive
        self._keyboard_sub = self._input.subscribe_to_keyboard_events(
            self._keyboard,
            lambda event, *args, obj=weakref.proxy(self): obj._on_keyboard_event(event, *args),
        )

    def __del__(self):
        # unsubscribe from keyboard events when the controller is garbage-collected
        self._input.unsubscribe_to_keyboard_events(self._keyboard, self._keyboard_sub)

    def __str__(self) -> str:
        """Return a human-readable summary of the key bindings."""
        return (
            "WheelKeyboard (skid-steer chassis controller)\n"
            "\t↑ UP — forward\n"
            "\t↓ DOWN — backward\n"
            "\t← LEFT — left turn (in-place)\n"
            "\t→ RIGHT — right turn (in-place)"
        )

    def reset(self):
        """Zero the accumulated wheel-velocity command (e.g. after an env reset)."""
        self._wheel_vel = np.zeros(4)

    def advance(self) -> torch.Tensor:
        """Return the current 4-D wheel-velocity command as a torch tensor.

        The internal buffer is copied so later key events do not mutate the
        returned tensor.
        """
        return torch.tensor(self._wheel_vel.copy(), dtype=torch.float32, device=self._sim_device)

    def _create_bindings(self):
        # velocity pattern per arrow key, ordered [right_b, left_b, left_f, right_f]
        v = self.wheel_speed
        self._key_map = {
            "UP": np.array([ v, v, v, v]),
            "DOWN": np.array([-v, -v, -v, -v]),
            "LEFT": np.array([ v, -v, -v, v]),
            "RIGHT": np.array([-v, v, v, -v]),
        }

    def _on_keyboard_event(self, event, *args, **kwargs):
        # add the key's pattern on press and subtract it on release, so holding
        # several arrows superposes their contributions
        if event.type == carb.input.KeyboardEventType.KEY_PRESS:
            key = event.input.name
            if key in self._key_map:
                self._wheel_vel += self._key_map[key]
        if event.type == carb.input.KeyboardEventType.KEY_RELEASE:
            key = event.input.name
            if key in self._key_map:
                self._wheel_vel -= self._key_map[key]
        # NOTE(review): True presumably lets the event propagate to other
        # subscribers — confirm against Carb's keyboard-event convention.
        return True
class MindRobotCombinedKeyboard:
    """Combined teleop controller: left-arm IK (Se3Keyboard, 7-D) + chassis wheels (WheelKeyboard, 4-D).

    Produces an 11-D action: [arm_IK(6) | wheel_vel(4) | gripper(1)],
    in the same order as declared in MindRobotTeleopActionsCfg.

    Key bindings
    ─────────────────────────────────────────────
    W/S/A/D/Q/E — arm IK translate X/Y/Z
    Z/X T/G C/V — arm IK rotate roll/pitch/yaw
    K — gripper toggle open/close
    ↑↓←→ — chassis forward/backward/turn
    R — reset
    ─────────────────────────────────────────────
    """

    def __init__(
        self,
        pos_sensitivity: float = 0.05,
        rot_sensitivity: float = 0.05,
        wheel_speed: float = 5.0,
        sim_device: str = "cuda:0",
    ):
        """Create the arm (Se3Keyboard) and chassis (WheelKeyboard) sub-controllers.

        Args:
            pos_sensitivity: Translation sensitivity for the arm IK command.
            rot_sensitivity: Rotation sensitivity for the arm IK command.
            wheel_speed: Chassis wheel velocity magnitude in rad/s.
            sim_device: Torch device for the returned action tensors.
        """
        self._arm = Se3Keyboard(
            Se3KeyboardCfg(
                pos_sensitivity=pos_sensitivity,
                rot_sensitivity=rot_sensitivity,
                sim_device=sim_device,
            )
        )
        self._wheel = WheelKeyboard(wheel_speed=wheel_speed, sim_device=sim_device)

    def reset(self):
        """Reset both sub-controllers."""
        self._arm.reset()
        self._wheel.reset()

    def add_callback(self, key: str, callback):
        """Delegate callbacks to the arm keyboard (Se3Keyboard supports arbitrary key callbacks)."""
        self._arm.add_callback(key, callback)

    def advance(self) -> torch.Tensor:
        """Assemble and return the combined 11-D action tensor."""
        arm_cmd = self._arm.advance()  # (7,): [dx,dy,dz,rx,ry,rz, gripper]
        arm_ik = arm_cmd[:6]  # (6,)
        gripper = arm_cmd[6:7]  # (1,)
        wheel_vel = self._wheel.advance()  # (4,)
        return torch.cat([arm_ik, wheel_vel, gripper])  # (11,)

    def __str__(self) -> str:
        """Return a human-readable summary of the combined key bindings."""
        return (
            "MindRobotCombinedKeyboard\n"
            " Arm (Se3Keyboard): W/S/A/D/Q/E + Z/X/T/G/C/V, K=gripper\n"
            " Wheel (arrows): ↑↓←→\n"
            " R = reset | L = save-success-and-reset"
        )

View File

@@ -0,0 +1,124 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers.
# SPDX-License-Identifier: BSD-3-Clause
"""Script to run teleoperation for MindRobot with chassis (wheel) control.
Extends teleop_se3_agent.py with differential-drive keyboard control:
↑ / ↓ — chassis forward / backward
← / → — chassis left turn / right turn (in-place, skid-steer)
W/S/A/D/Q/E — left arm IK: translate X/Y/Z
Z/X T/G C/V — left arm IK: rotate roll/pitch/yaw
K — left gripper toggle open/close
R — reset environment
"""
import argparse
import torch
from isaaclab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="MindRobot teleoperation with wheel control.")
parser.add_argument("--num_envs", type=int, default=1)
parser.add_argument("--task", type=str, default=None)
parser.add_argument("--sensitivity", type=float, default=1.0)
parser.add_argument("--wheel_speed", type=float, default=5.0,
                    help="Wheel velocity in rad/s (default: 5.0)")
# append AppLauncher cli args and launch the simulator before other isaaclab imports
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
# ---------- Rest everything follows ----------
import logging
import gymnasium as gym
from isaaclab.devices import Se3Keyboard, Se3KeyboardCfg
from isaaclab.envs import ManagerBasedRLEnvCfg
import mindbot.tasks # noqa: F401
from isaaclab_tasks.utils import parse_env_cfg
# mindrobot_keyboard.py lives in the same directory as this script.
# Python adds the script's own directory to sys.path, so a direct import works.
from mindrobot_keyboard import WheelKeyboard, MindRobotCombinedKeyboard # noqa: E402
logger = logging.getLogger(__name__)
# ─────────────────────────────────────────────────────────────
# main
# ─────────────────────────────────────────────────────────────
def main() -> None:
    """Run keyboard teleoperation (arm IK + chassis wheels) on a MindRobot task."""
    env_cfg = parse_env_cfg(args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs)
    env_cfg.env_name = args_cli.task
    if not isinstance(env_cfg, ManagerBasedRLEnvCfg):
        raise ValueError(f"Unsupported env config type: {type(env_cfg).__name__}")
    # disable the time-out termination so teleoperation can run indefinitely
    env_cfg.terminations.time_out = None
    env = gym.make(args_cli.task, cfg=env_cfg).unwrapped
    should_reset = False

    def on_reset():
        # flag a reset; performed at the end of the current sim step
        nonlocal should_reset
        should_reset = True
        print("[teleop] Reset triggered.")

    # ── arm IK controller (Se3Keyboard, 7-D output) ──
    arm_keyboard = Se3Keyboard(
        Se3KeyboardCfg(
            pos_sensitivity=0.05 * args_cli.sensitivity,
            rot_sensitivity=0.05 * args_cli.sensitivity,
            sim_device=args_cli.device,
        )
    )
    arm_keyboard.add_callback("R", on_reset)
    # ── chassis wheel-speed controller (WheelKeyboard, 4-D output) ──
    wheel_keyboard = WheelKeyboard(
        wheel_speed=args_cli.wheel_speed,
        sim_device=args_cli.device,
    )
    print(arm_keyboard)
    print(wheel_keyboard)
    print("\nAction layout: [arm_IK(6) | wheel_vel(4) | gripper(1)] = 11 dims")
    print("Press R to reset. Press Ctrl+C to exit.\n")
    env.reset()
    arm_keyboard.reset()
    wheel_keyboard.reset()
    while simulation_app.is_running():
        with torch.inference_mode():
            # ── 1. arm delta pose + gripper command (7-D) ──
            arm_cmd = arm_keyboard.advance()  # [dx,dy,dz,rx,ry,rz, gripper]
            arm_ik = arm_cmd[:6]  # (6,)
            gripper = arm_cmd[6:7]  # (1,)
            # ── 2. chassis wheel velocities (4-D) ──
            wheel_vel = wheel_keyboard.advance()  # [right_b, left_b, left_f, right_f]
            # ── 3. concatenate the full action (11-D) ──
            # order matches the declaration order in MindRobotTeleopActionsCfg:
            # arm_action(6) → wheel_action(4) → gripper_action(1)
            action = torch.cat([arm_ik, wheel_vel, gripper])  # (11,)
            actions = action.unsqueeze(0).repeat(env.num_envs, 1)
            env.step(actions)
            if should_reset:
                env.reset()
                arm_keyboard.reset()
                wheel_keyboard.reset()
                should_reset = False
                print("[teleop] Environment reset complete.")
    env.close()
if __name__ == "__main__":
    # run teleoperation, then shut the simulator down cleanly
    main()
    simulation_app.close()

View File

@@ -0,0 +1,371 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to run teleoperation with Isaac Lab manipulation environments.
Supports multiple input devices (e.g., keyboard, spacemouse, gamepad) and devices
configured within the environment (including OpenXR-based hand tracking or motion
controllers)."""
"""Launch Isaac Sim Simulator first."""
import argparse
from collections.abc import Callable

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(
    description="Teleoperation for Isaac Lab environments."
)
parser.add_argument(
    "--num_envs", type=int, default=1, help="Number of environments to simulate."
)
parser.add_argument(
    "--teleop_device",
    type=str,
    default="keyboard",
    help=(
        "Teleop device. Set here (legacy) or via the environment config. If using the environment config, pass the"
        " device key/name defined under 'teleop_devices' (it can be a custom name, not necessarily 'handtracking')."
        " Built-ins: keyboard, spacemouse, gamepad. Not all tasks support all built-ins."
    ),
)
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument(
    "--sensitivity", type=float, default=1.0, help="Sensitivity factor."
)
parser.add_argument(
    "--enable_pinocchio",
    action="store_true",
    default=False,
    help="Enable Pinocchio.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
app_launcher_args = vars(args_cli)
if args_cli.enable_pinocchio:
    # Import pinocchio before AppLauncher to force the use of the version installed by IsaacLab and
    # not the one installed by Isaac Sim pinocchio is required by the Pink IK controllers and the
    # GR1T2 retargeter
    import pinocchio  # noqa: F401
if "handtracking" in args_cli.teleop_device.lower():
    # hand-tracking devices require the XR runtime; enable it before launch
    app_launcher_args["xr"] = True
# launch omniverse app
app_launcher = AppLauncher(app_launcher_args)
simulation_app = app_launcher.app
"""Rest everything follows."""
import logging
import gymnasium as gym
import torch
from isaaclab.devices import (
Se3Gamepad,
Se3GamepadCfg,
Se3Keyboard,
Se3KeyboardCfg,
Se3SpaceMouse,
Se3SpaceMouseCfg,
)
from isaaclab.devices.openxr import remove_camera_configs
from isaaclab.devices.teleop_device_factory import create_teleop_device
from isaaclab.envs import ManagerBasedRLEnvCfg
from isaaclab.managers import TerminationTermCfg as DoneTerm
import isaaclab_tasks # noqa: F401
import mindbot.tasks # noqa: F401
from isaaclab_tasks.manager_based.manipulation.lift import mdp
from isaaclab_tasks.utils import parse_env_cfg
if args_cli.enable_pinocchio:
import isaaclab_tasks.manager_based.locomanipulation.pick_place # noqa: F401
import isaaclab_tasks.manager_based.manipulation.pick_place # noqa: F401
# =====================================================================
# Robot Camera Viewport Utilities (pure GUI — no --enable_cameras needed)
# =====================================================================
# Resolved prim paths for env_0.
# These match the camera prims embedded in mindrobot_cd2.usd.
# Run with the diagnostic block once to confirm exact paths.
# camera-name → USD prim path, resolved for env_0
_ROBOT_CAM_PRIMS: dict[str, str] = {
    "Left Hand": "/World/envs/env_0/Robot/rm_65_fb_left/Link_6/robot___left_8_02/cam_left_hand",
    "Right Hand": "/World/envs/env_0/Robot/rm_65_b_right/Link6/robot___right_8_2/cam_right_hand",
    "Head": "/World/envs/env_0/Robot/robot_head/cam_head",
    "Chest": "/World/envs/env_0/Robot/robot_trunk/cam_chest",
}
# viewport windows created by create_robot_viewports(), keyed by camera name
_robot_viewports: dict[str, object] = {}
def create_robot_viewports() -> None:
    """Create 4 viewport windows bound to robot camera prims (GUI only)."""
    # the viewport utility is only present when the Kit GUI is running
    try:
        import omni.kit.viewport.utility as viewport_utility
    except ImportError:
        logger.warning("[Viewport] omni.kit.viewport.utility not available.")
        return
    # one window per configured robot camera, bound by prim path
    for name, cam_path in _ROBOT_CAM_PRIMS.items():
        window = viewport_utility.create_viewport_window(f"Robot {name} View", width=640, height=360)
        window.viewport_api.camera_path = cam_path
        _robot_viewports[name] = window
        print(f"[INFO] Viewport 'Robot {name} View'{cam_path}")
def rebind_robot_viewports() -> None:
    """Re-bind viewports to camera paths after env reset."""
    # re-apply the configured camera prim path to every tracked viewport window
    for name in _robot_viewports:
        _robot_viewports[name].viewport_api.camera_path = _ROBOT_CAM_PRIMS[name]
# import logger
logger = logging.getLogger(__name__)
def main() -> None:
    """
    Run teleoperation with an Isaac Lab manipulation environment.

    Creates the environment, sets up teleoperation interfaces and callbacks,
    and runs the main simulation loop until the application is closed.

    Returns:
        None
    """
    # parse configuration
    env_cfg = parse_env_cfg(
        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs
    )
    env_cfg.env_name = args_cli.task
    if not isinstance(env_cfg, ManagerBasedRLEnvCfg):
        raise ValueError(
            "Teleoperation is only supported for ManagerBasedRLEnv environments. "
            f"Received environment config type: {type(env_cfg).__name__}"
        )
    # modify configuration: disable time-out so teleop can run indefinitely
    env_cfg.terminations.time_out = None
    if "Lift" in args_cli.task:
        # set the resampling time range to large number to avoid resampling
        env_cfg.commands.object_pose.resampling_time_range = (1.0e9, 1.0e9)
        # add termination condition for reaching the goal otherwise the environment won't reset
        env_cfg.terminations.object_reached_goal = DoneTerm(
            func=mdp.object_reached_goal
        )
    if args_cli.xr:
        # XR rendering: strip camera sensor configs and switch anti-aliasing
        env_cfg = remove_camera_configs(env_cfg)
        env_cfg.sim.render.antialiasing_mode = "DLSS"
    try:
        # create environment
        env = gym.make(args_cli.task, cfg=env_cfg).unwrapped
        # check environment name (for reach , we don't allow the gripper)
        if "Reach" in args_cli.task:
            logger.warning(
                f"The environment '{args_cli.task}' does not support gripper control. The device command will be"
                " ignored."
            )
    except Exception as e:
        logger.error(f"Failed to create environment: {e}")
        simulation_app.close()
        return
    # Flags for controlling teleoperation flow
    should_reset_recording_instance = False
    teleoperation_active = True

    # Callback handlers
    def reset_recording_instance() -> None:
        """
        Reset the environment to its initial state.

        Sets a flag to reset the environment on the next simulation step.

        Returns:
            None
        """
        nonlocal should_reset_recording_instance
        should_reset_recording_instance = True
        print("Reset triggered - Environment will reset on next step")

    def start_teleoperation() -> None:
        """
        Activate teleoperation control of the robot.

        Enables the application of teleoperation commands to the environment.

        Returns:
            None
        """
        nonlocal teleoperation_active
        teleoperation_active = True
        print("Teleoperation activated")

    def stop_teleoperation() -> None:
        """
        Deactivate teleoperation control of the robot.

        Disables the application of teleoperation commands to the environment.

        Returns:
            None
        """
        nonlocal teleoperation_active
        teleoperation_active = False
        print("Teleoperation deactivated")

    # Create device config if not already in env_cfg
    teleoperation_callbacks: dict[str, Callable[[], None]] = {
        "R": reset_recording_instance,
        "START": start_teleoperation,
        "STOP": stop_teleoperation,
        "RESET": reset_recording_instance,
    }
    # For hand tracking devices, add additional callbacks
    if args_cli.xr:
        # Default to inactive for hand tracking
        teleoperation_active = False
    else:
        # Always active for other devices
        teleoperation_active = True
    # Create teleop device from config if present, otherwise create manually
    teleop_interface = None
    try:
        if (
            hasattr(env_cfg, "teleop_devices")
            and args_cli.teleop_device in env_cfg.teleop_devices.devices
        ):
            teleop_interface = create_teleop_device(
                args_cli.teleop_device,
                env_cfg.teleop_devices.devices,
                teleoperation_callbacks,
            )
        else:
            logger.warning(
                f"No teleop device '{args_cli.teleop_device}' found in environment config. Creating default."
            )
            # Create fallback teleop device
            sensitivity = args_cli.sensitivity
            if args_cli.teleop_device.lower() == "keyboard":
                teleop_interface = Se3Keyboard(
                    Se3KeyboardCfg(
                        pos_sensitivity=0.05 * sensitivity,
                        rot_sensitivity=0.05 * sensitivity,
                    )
                )
            elif args_cli.teleop_device.lower() == "spacemouse":
                teleop_interface = Se3SpaceMouse(
                    Se3SpaceMouseCfg(
                        pos_sensitivity=0.05 * sensitivity,
                        rot_sensitivity=0.05 * sensitivity,
                    )
                )
            elif args_cli.teleop_device.lower() == "gamepad":
                teleop_interface = Se3Gamepad(
                    Se3GamepadCfg(
                        pos_sensitivity=0.1 * sensitivity,
                        rot_sensitivity=0.1 * sensitivity,
                    )
                )
            else:
                logger.error(f"Unsupported teleop device: {args_cli.teleop_device}")
                logger.error("Configure the teleop device in the environment config.")
                env.close()
                simulation_app.close()
                return
            # Add callbacks to fallback device
            for key, callback in teleoperation_callbacks.items():
                try:
                    teleop_interface.add_callback(key, callback)
                except (ValueError, TypeError) as e:
                    logger.warning(f"Failed to add callback for key {key}: {e}")
    except Exception as e:
        logger.error(f"Failed to create teleop device: {e}")
        env.close()
        simulation_app.close()
        return
    if teleop_interface is None:
        logger.error("Failed to create teleop interface")
        env.close()
        simulation_app.close()
        return
    print(f"Using teleop device: {teleop_interface}")
    # reset environment
    env.reset()
    teleop_interface.reset()
    # -- Diagnostic: print all Camera prims in the stage --
    try:
        import omni.usd
        from pxr import UsdGeom

        stage = omni.usd.get_context().get_stage()
        print("\n[DIAG] All Camera prims in stage:")
        for prim in stage.Traverse():
            if UsdGeom.Camera(prim):
                print(f" {prim.GetPath()}")
        print("[DIAG] Done.\n")
    except Exception as _diag_e:
        print(f"[DIAG] Could not traverse stage: {_diag_e}")
    # -- End diagnostic --
    # Create viewport windows bound to robot cameras (no --enable_cameras needed)
    create_robot_viewports()
    print("Teleoperation started. Press 'R' to reset the environment.")
    # simulate environment
    while simulation_app.is_running():
        try:
            # run everything in inference mode
            with torch.inference_mode():
                # get device command
                action = teleop_interface.advance()
                # Only apply teleop commands when active
                if teleoperation_active:
                    # process actions
                    actions = action.repeat(env.num_envs, 1)
                    # apply actions
                    env.step(actions)
                else:
                    # keep the GUI responsive without stepping physics
                    env.sim.render()
                if should_reset_recording_instance:
                    env.reset()
                    teleop_interface.reset()
                    # viewport camera bindings may be lost on reset; re-apply them
                    rebind_robot_viewports()
                    should_reset_recording_instance = False
                    print("Environment reset complete")
        except Exception as e:
            logger.error(f"Error during simulation step: {e}")
            break
    # close the simulator
    env.close()
    print("Environment closed")
# Script entry point: run the teleop loop, then shut the simulator down.
if __name__ == "__main__":
    # run the main function (blocks until the simulation window is closed)
    main()
    # close sim app so the Omniverse process exits cleanly
    simulation_app.close()

View File

@@ -0,0 +1,588 @@
#!/usr/bin/env python3
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to run teleoperation with Isaac Lab manipulation environments using PICO XR Controllers.
This script uses XRoboToolkit to fetch XR controller poses and maps them to differential IK actions.
"""
import argparse
import logging
import sys
import os
from collections.abc import Callable
# Ensure xr_utils (next to this script) is importable when running directly
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from isaaclab.app import AppLauncher
logger = logging.getLogger(__name__)
# add argparse arguments
parser = argparse.ArgumentParser(
    description="Teleoperation for Isaac Lab environments with PICO XR Controller."
)
parser.add_argument(
    "--num_envs", type=int, default=1, help="Number of environments to simulate."
)
parser.add_argument(
    "--task",
    type=str,
    default="Isaac-MindRobot-LeftArm-IK-Rel-v0",
    help="Name of the task.",
)
# Scales both positional and rotational teleop deltas (see XrTeleopController).
parser.add_argument(
    "--sensitivity", type=float, default=5.0, help="Sensitivity factor for pos/rot."
)
parser.add_argument(
    "--arm",
    type=str,
    default="left",
    choices=["left", "right"],
    help="Which arm/controller to use.",
)
# Base (wheel) control gains read by get_wheel_action() in main().
parser.add_argument(
    "--base_speed", type=float, default=3.0,
    help="Max wheel speed (rad/s) for joystick full forward.",
)
parser.add_argument(
    "--base_turn", type=float, default=2.0,
    help="Max wheel differential (rad/s) for joystick full left/right.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()
app_launcher_args = vars(args_cli)
# Disable some rendering settings to speed up
# NOTE(review): forces the AppLauncher 'xr' flag off — presumably because this
# script talks to the headset via XRoboToolkit instead of the native OpenXR
# pipeline; confirm against AppLauncher docs.
app_launcher_args["xr"] = False
# launch omniverse app
app_launcher = AppLauncher(app_launcher_args)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import numpy as np
import torch
from scipy.spatial.transform import Rotation as R
from isaaclab.envs import ManagerBasedRLEnvCfg
import isaaclab_tasks # noqa: F401
import mindbot.tasks # noqa: F401
from isaaclab_tasks.utils import parse_env_cfg
from xr_utils import XrClient, transform_xr_pose, quat_diff_as_rotvec_xyzw, is_valid_quaternion
from xr_utils.geometry import R_HEADSET_TO_WORLD
# =====================================================================
# Robot Camera Viewport Utilities
# =====================================================================
# Resolved prim paths for env_0 — must match CameraCfg prim_path with
# {ENV_REGEX_NS} → /World/envs/env_0
# Keys are human-readable view names; they become the viewport window
# titles ("Robot <name> View") in create_robot_viewports().
_ROBOT_CAM_PRIMS: dict[str, str] = {
    "Left Hand": "/World/envs/env_0/Robot/rm_65_fb_left/Link_6/robot___left_8_02/cam_left_hand",
    "Right Hand": "/World/envs/env_0/Robot/rm_65_b_right/Link6/robot___right_8_2/cam_right_hand",
    "Head": "/World/envs/env_0/Robot/robot_head/cam_head",
    "Chest": "/World/envs/env_0/Robot/robot_trunk/cam_chest",
}
# Stores created viewport window objects (keyed by the same view names),
# so rebind_robot_viewports() can re-attach cameras after an env reset.
_robot_viewports: dict[str, object] = {}
def create_robot_viewports() -> None:
    """Create 4 viewport windows and bind each to a robot camera prim.

    Must be called after env.reset() so all prims exist on the USD stage.
    Each camera is handled independently: a failure to create or bind one
    viewport is logged and skipped instead of aborting the remaining views
    (previously a single bad camera path raised out of the loop and
    propagated to the caller).
    """
    try:
        import omni.kit.viewport.utility as vp_util
    except ImportError:
        logger.warning("[Viewport] omni.kit.viewport.utility not available — skipping viewport creation.")
        return
    for name, cam_path in _ROBOT_CAM_PRIMS.items():
        # Isolate per-camera failures so one missing prim or window error
        # does not prevent the other viewports from being created.
        try:
            vp_win = vp_util.create_viewport_window(
                f"Robot {name} View", width=640, height=360
            )
            vp_win.viewport_api.camera_path = cam_path
        except Exception as e:
            logger.warning(f"[Viewport] Failed to create 'Robot {name} View' for {cam_path}: {e}")
            continue
        # Only record successfully bound windows so rebind stays consistent.
        _robot_viewports[name] = vp_win
        print(f"[INFO] Viewport 'Robot {name} View' bound to: {cam_path}")
def rebind_robot_viewports() -> None:
    """Re-attach every tracked viewport window to its robot camera prim.

    Invoke after each environment reset so the viewports remain locked to
    the robot cameras rather than reverting to a default view.
    """
    for view_name in _robot_viewports:
        window = _robot_viewports[view_name]
        window.viewport_api.camera_path = _ROBOT_CAM_PRIMS[view_name]
# =====================================================================
# Teleoperation Interface for XR
# =====================================================================
def _quat_wxyz_to_rotation(quat_wxyz: np.ndarray) -> R:
"""Convert Isaac-style wxyz quaternion to scipy Rotation."""
return R.from_quat([quat_wxyz[1], quat_wxyz[2], quat_wxyz[3], quat_wxyz[0]])
def _rotation_to_quat_wxyz(rot: R) -> np.ndarray:
"""Convert scipy Rotation quaternion to Isaac-style wxyz."""
quat_xyzw = rot.as_quat()
return np.array([quat_xyzw[3], quat_xyzw[0], quat_xyzw[1], quat_xyzw[2]])
def world_pose_to_root_frame(
    pos_w: np.ndarray,
    quat_wxyz: np.ndarray,
    root_pos_w: np.ndarray,
    root_quat_wxyz: np.ndarray,
) -> tuple[np.ndarray, np.ndarray]:
    """Express a world-frame pose in the robot root frame.

    Args:
        pos_w: Pose position in world frame, shape (3,).
        quat_wxyz: Pose orientation in world frame, (w, x, y, z).
        root_pos_w: Robot root position in world frame, shape (3,).
        root_quat_wxyz: Robot root orientation in world frame, (w, x, y, z).

    Returns:
        Tuple of (position, quaternion wxyz) expressed in the root frame.
    """
    root_rot_inv = _quat_wxyz_to_rotation(root_quat_wxyz).inv()
    world_rot = _quat_wxyz_to_rotation(quat_wxyz)
    # Translate to the root origin, then rotate into the root axes.
    pos_root = root_rot_inv.apply(pos_w - root_pos_w)
    quat_root = _rotation_to_quat_wxyz(root_rot_inv * world_rot)
    return pos_root, quat_root
class XrTeleopController:
    """Teleop controller for PICO XR headset.

    Reads one PICO controller (left or right) through :class:`XrClient` and
    maps its motion to arm actions. The grip button acts as a clutch: motion
    is only accumulated while the grip is held. Two output modes:

    - relative (``is_absolute=False``): 7D action
      ``[dx, dy, dz, drx, dry, drz, gripper]``
    - absolute (``is_absolute=True``): 8D action
      ``[x, y, z, qw, qx, qy, qz, gripper]``, expressed in WORLD frame.
    """
    def __init__(self, arm="left", pos_sensitivity=1.0, rot_sensitivity=0.3, is_absolute=False):
        """Connect to the XR runtime and initialize clutch/target state.

        Args:
            arm: "left" or "right"; selects which controller/grip/trigger to read.
            pos_sensitivity: Scale applied to positional deltas.
            rot_sensitivity: Scale applied to rotational deltas.
            is_absolute: If True, emit absolute 8D pose targets; else 6D deltas.
        """
        self.xr_client = XrClient()
        self.pos_sensitivity = pos_sensitivity
        self.rot_sensitivity = rot_sensitivity
        self.arm = arm
        self.is_absolute = is_absolute
        # Device channel names as understood by XrClient, derived from the arm.
        self.controller_name = "left_controller" if arm == "left" else "right_controller"
        self.grip_name = "left_grip" if arm == "left" else "right_grip"
        self.trigger_name = "left_trigger" if arm == "left" else "right_trigger"
        # Coordinate transform matrix
        self.R_headset_world = R_HEADSET_TO_WORLD
        # Raw XR tracking space poses
        self.prev_xr_pos = None
        self.prev_xr_quat = None
        # Absolute target states
        self.target_eef_pos = None
        self.target_eef_quat = None # wxyz
        # Clutch state: True while the grip is engaged and deltas accumulate.
        self.grip_active = False
        self.frame_count = 0
        # Edge-detect the reset button so holding it fires RESET only once.
        self.reset_button_latched = False
        # When True, the user must fully release the grip before teleop resumes.
        self.require_grip_reengage = False
        # Hysteresis thresholds for the analog grip value (engage high, release low).
        self.grip_engage_threshold = 0.8
        self.grip_release_threshold = 0.2
        # Callbacks (like reset, etc)
        self.callbacks = {}
    def add_callback(self, name: str, func: Callable):
        """Register a named callback (e.g. "RESET") invoked from advance()."""
        self.callbacks[name] = func
    def reset(self):
        """Clear clutch, frame counter, and cached targets after an env reset."""
        self.prev_xr_pos = None
        self.prev_xr_quat = None
        self.grip_active = False
        self.frame_count = 0
        self.target_eef_pos = None
        self.target_eef_quat = None
        # Require one grip release after reset so stale controller motion
        # cannot immediately drive the robot back toward the previous pose.
        self.require_grip_reengage = True
    def close(self):
        """Shut down the underlying XR SDK connection."""
        self.xr_client.close()
    def get_zero_action(self, trigger, current_eef_pos=None, current_eef_quat=None):
        """Build a "hold still" action that only drives the gripper.

        In absolute mode the pose part repeats the current/cached EEF pose so
        the IK target does not move; in relative mode all deltas are zero.
        """
        if self.is_absolute:
            action = torch.zeros(8)
            # Stay at the current valid pose, or fallback to the cached target
            if current_eef_pos is not None and current_eef_quat is not None:
                action[:3] = torch.tensor(current_eef_pos.copy())
                action[3:7] = torch.tensor(current_eef_quat.copy())
            elif self.target_eef_pos is not None and self.target_eef_quat is not None:
                action[:3] = torch.tensor(self.target_eef_pos.copy())
                action[3:7] = torch.tensor(self.target_eef_quat.copy())
            else:
                action[3] = 1.0 # default w=1 for quat
            action[7] = 1.0 if trigger > 0.5 else -1.0
        else:
            action = torch.zeros(7)
            action[6] = 1.0 if trigger > 0.5 else -1.0
        return action
    def advance(self, current_eef_pos=None, current_eef_quat=None) -> torch.Tensor:
        """
        Reads the XR controller.
        Relative bounds return 7D action tensor: [dx, dy, dz, drx, dry, drz, gripper]
        Absolute bounds return 8D action tensor: [x, y, z, qw, qx, qy, qz, gripper]
        Note: in absolute mode current_eef_* and the returned target are in WORLD frame.
        The caller is responsible for converting to root frame before sending to IK.
        """
        # XR buttons check (e.g. A or B for reset)
        try:
            reset_pressed = self.xr_client.get_button("B") or self.xr_client.get_button("Y")
            # Rising edge only: fire RESET once per press, not every frame held.
            if reset_pressed and not self.reset_button_latched:
                if "RESET" in self.callbacks:
                    self.callbacks["RESET"]()
            self.reset_button_latched = reset_pressed
        except Exception:
            pass
        try:
            raw_pose = self.xr_client.get_pose(self.controller_name)
            grip = self.xr_client.get_key_value(self.grip_name)
            trigger = self.xr_client.get_key_value(self.trigger_name)
        # NOTE(review): exception `e` is unused and the failure is silent —
        # consider logging it; behavior kept as-is here.
        except Exception as e:
            return self.get_zero_action(0.0, current_eef_pos, current_eef_quat)
        # Skip transformation if quaternion is invalid
        if not is_valid_quaternion(raw_pose[3:]):
            return self.get_zero_action(trigger, current_eef_pos, current_eef_quat)
        # After reset(), hold position until the grip has been fully released once.
        if self.require_grip_reengage:
            if grip <= self.grip_release_threshold:
                self.require_grip_reengage = False
            else:
                if self.is_absolute and current_eef_pos is not None and current_eef_quat is not None:
                    self.target_eef_pos = current_eef_pos.copy()
                    self.target_eef_quat = current_eef_quat.copy()
                return self.get_zero_action(trigger, current_eef_pos, current_eef_quat)
        # Use hysteresis so noisy analog grip values do not accidentally re-enable teleop.
        if self.grip_active:
            grip_pressed = grip > self.grip_release_threshold
        else:
            grip_pressed = grip >= self.grip_engage_threshold
        # Grip button acts as a clutch.
        if not grip_pressed:
            self.prev_xr_pos = None
            self.prev_xr_quat = None
            self.grip_active = False
            # Ensure target tracks the current pose while we are not grabbing
            if self.is_absolute and current_eef_pos is not None and current_eef_quat is not None:
                self.target_eef_pos = current_eef_pos.copy()
                self.target_eef_quat = current_eef_quat.copy() # wxyz
            return self.get_zero_action(trigger, current_eef_pos, current_eef_quat)
        # First frame of a new grab: latch the reference pose, emit no motion yet.
        if not self.grip_active:
            self.prev_xr_pos = raw_pose[:3].copy()
            self.prev_xr_quat = raw_pose[3:].copy()
            if self.is_absolute and current_eef_pos is not None and current_eef_quat is not None:
                self.target_eef_pos = current_eef_pos.copy()
                self.target_eef_quat = current_eef_quat.copy() # wxyz
            self.grip_active = True
            return self.get_zero_action(trigger, current_eef_pos, current_eef_quat)
        # Since OpenXR headset zero is not guaranteed to match robot zero, we compute the
        # raw transformation in World Frame, but apply it relatively to the stored target.
        # 1. First, find current XR pose projected into World frame
        xr_world_pos, xr_world_quat_xyzw = transform_xr_pose(raw_pose[:3], raw_pose[3:])
        prev_xr_world_pos, prev_xr_world_quat_xyzw = transform_xr_pose(self.prev_xr_pos, self.prev_xr_quat)
        # 2. Extract Delta POS in World frame
        world_delta_pos = (xr_world_pos - prev_xr_world_pos) * self.pos_sensitivity
        # 3. Extract Delta ROT in World frame
        world_delta_rot = quat_diff_as_rotvec_xyzw(prev_xr_world_quat_xyzw, xr_world_quat_xyzw) * self.rot_sensitivity
        # 4. Gripper
        gripper_action = 1.0 if trigger > 0.5 else -1.0
        if self.is_absolute:
            if self.target_eef_pos is None:
                self.target_eef_pos = np.zeros(3)
                self.target_eef_quat = np.array([1.0, 0.0, 0.0, 0.0])
            # Accumulate in world frame so VR direction always matches sim direction.
            self.target_eef_pos += world_delta_pos
            target_r = _quat_wxyz_to_rotation(self.target_eef_quat)
            delta_r = R.from_rotvec(world_delta_rot)
            # Pre-multiply: apply the world-frame delta rotation to the target.
            self.target_eef_quat = _rotation_to_quat_wxyz(delta_r * target_r)
            action = torch.tensor([
                self.target_eef_pos[0], self.target_eef_pos[1], self.target_eef_pos[2],
                self.target_eef_quat[0], self.target_eef_quat[1], self.target_eef_quat[2], self.target_eef_quat[3],
                gripper_action], dtype=torch.float32)
            self.prev_xr_pos = raw_pose[:3].copy()
            self.prev_xr_quat = raw_pose[3:].copy()
        else:
            # Clamp per-step deltas to bound the commanded velocity in relative mode.
            max_pos_delta = 0.05
            world_pos_norm = np.linalg.norm(world_delta_pos)
            if world_pos_norm > max_pos_delta:
                world_delta_pos = world_delta_pos * (max_pos_delta / world_pos_norm)
            max_rot_delta = 0.15
            world_rot_norm = np.linalg.norm(world_delta_rot)
            if world_rot_norm > max_rot_delta:
                world_delta_rot = world_delta_rot * (max_rot_delta / world_rot_norm)
            action = torch.tensor([
                world_delta_pos[0], world_delta_pos[1], world_delta_pos[2],
                world_delta_rot[0], world_delta_rot[1], world_delta_rot[2],
                gripper_action], dtype=torch.float32)
            self.prev_xr_pos = raw_pose[:3].copy()
            self.prev_xr_quat = raw_pose[3:].copy()
        self.frame_count += 1
        # Periodic debug dump (every 30 calls).
        if self.frame_count % 30 == 0:
            np.set_printoptions(precision=4, suppress=True, floatmode='fixed')
            print("\n====================== [VR TELEOP DEBUG] ======================")
            print(f"| Task Mode: {'ABSOLUTE' if self.is_absolute else 'RELATIVE'}")
            print(f"| Raw VR Pos (OpenXR): {np.array(raw_pose[:3])}")
            print(f"| Raw VR Quat (xyzw): {np.array(raw_pose[3:])}")
            print(f"| XR Delta Pos (world): {world_delta_pos} (norm={np.linalg.norm(world_delta_pos):.4f})")
            print(f"| XR Delta Rot (world): {world_delta_rot} (norm={np.linalg.norm(world_delta_rot):.4f})")
            if self.is_absolute:
                print(f"| Targ Pos (world): {action[:3].numpy()}")
                print(f"| Targ Quat (world, wxyz): {action[3:7].numpy()}")
            else:
                print(f"| Sent Action Pos (dx,dy,dz): {action[:3].numpy()}")
                print(f"| Sent Action Rot (rx,ry,rz): {action[3:6].numpy()}")
            print(f"| Gripper & Trigger: Grip={grip:.2f}, Trig={trigger:.2f}")
            print("===============================================================")
        return action
# =====================================================================
# Main Execution Loop
# =====================================================================
def main() -> None:
    """Run teleoperation with PICO XR Controller against Isaac Lab environment."""
    # 1. Configuration parsing
    env_cfg = parse_env_cfg(args_cli.task, num_envs=args_cli.num_envs)
    env_cfg.env_name = args_cli.task
    if not isinstance(env_cfg, ManagerBasedRLEnvCfg):
        raise ValueError(f"Teleoperation requires ManagerBasedRLEnvCfg. Got: {type(env_cfg)}")
    # Disable the time-out termination so episodes never end on their own.
    env_cfg.terminations.time_out = None
    # 2. Environment creation
    try:
        env = gym.make(args_cli.task, cfg=env_cfg).unwrapped
    except Exception as e:
        logger.error(f"Failed to create environment '{args_cli.task}': {e}")
        simulation_app.close()
        return
    # 3. Teleoperation Interface Initialization
    # Mode is inferred from the task name ("Abs" substring → absolute IK).
    is_abs_mode = "Abs" in args_cli.task
    print(f"\n[INFO] Connecting to PICO XR Headset using {args_cli.arm} controller...")
    print(f"[INFO] Using IK Mode: {'ABSOLUTE' if is_abs_mode else 'RELATIVE'}")
    teleop_interface = XrTeleopController(
        arm=args_cli.arm,
        pos_sensitivity=args_cli.sensitivity,
        rot_sensitivity=args_cli.sensitivity * (1.0 if is_abs_mode else 0.3), # Absolute rotation handles 1:1 better than relative
        is_absolute=is_abs_mode
    )
    should_reset = False
    def request_reset():
        # Invoked by the XR B/Y button via the "RESET" callback.
        nonlocal should_reset
        should_reset = True
        print("[INFO] Reset requested via XR button.")
    teleop_interface.add_callback("RESET", request_reset)
    def get_arm_action_term():
        # NOTE(review): reaches into ActionManager._terms (private attribute);
        # may break across Isaac Lab versions — confirm a public accessor exists.
        return env.action_manager._terms["arm_action"]
    def clear_ik_target_state():
        """Clear the internal IK target so reset does not reuse the previous pose command."""
        if not is_abs_mode:
            return
        arm_action_term = get_arm_action_term()
        ee_pos_b, ee_quat_b = arm_action_term._compute_frame_pose()
        # Zero cached actions/commands, then pin the desired EE pose to the
        # current measured pose so the arm holds still after reset.
        arm_action_term._raw_actions.zero_()
        arm_action_term._processed_actions.zero_()
        arm_action_term._ik_controller._command.zero_()
        arm_action_term._ik_controller.ee_pos_des[:] = ee_pos_b
        arm_action_term._ik_controller.ee_quat_des[:] = ee_quat_b
    def convert_action_world_to_root(action_tensor: torch.Tensor) -> torch.Tensor:
        """Convert an absolute IK action from world frame to robot root frame."""
        robot = env.unwrapped.scene["robot"]
        root_pos_w = robot.data.root_pos_w[0].detach().cpu().numpy()
        root_quat_w = robot.data.root_quat_w[0].detach().cpu().numpy() # wxyz
        target_pos_w = action_tensor[:3].numpy()
        target_quat_w = action_tensor[3:7].numpy()
        pos_root, quat_root = world_pose_to_root_frame(
            target_pos_w, target_quat_w, root_pos_w, root_quat_w,
        )
        out = action_tensor.clone()
        out[:3] = torch.tensor(pos_root, dtype=torch.float32)
        out[3:7] = torch.tensor(quat_root, dtype=torch.float32)
        return out
    def get_wheel_action() -> torch.Tensor:
        """Read left joystick and return 4-DOF wheel velocity command.

        Skid-steer differential drive.
        Joystick: Y-axis (+1 = forward), X-axis (+1 = right turn).
        Joint order from articulation (terminal log):
            [right_b, left_b, left_f, right_f]
        Right/left sign convention assumes both sides' joints have the same
        axis direction (positive velocity = forward). If the robot drives
        backward when pushing forward, negate base_speed in the launch command.
        """
        try:
            joy = teleop_interface.xr_client.get_joystick("left")
            jy = float(joy[1]) # forward / backward
            jx = float(joy[0]) # right / left
        except Exception:
            # Best-effort: no joystick data → stop the base.
            return torch.zeros(4)
        v = jy * args_cli.base_speed
        omega = jx * args_cli.base_turn
        # Positive omega = turn right → left wheels faster, right wheels slower
        right_vel = v - omega
        left_vel = v + omega
        return torch.tensor(
            [right_vel, left_vel, left_vel, right_vel], dtype=torch.float32
        )
    env.reset()
    clear_ik_target_state()
    teleop_interface.reset()
    # Create 4 viewport windows bound to the robot cameras
    create_robot_viewports()
    print("\n" + "=" * 50)
    print(" 🚀 Teleoperation Started!")
    print(" 🎮 Use the TRIGGER to open/close gripper.")
    print(" ✊ Hold GRIP and move the controller to move the arm.")
    print(" 🕹️ Left joystick: Y=forward/back, X=turn left/right.")
    print(" 🔄 Press B or Y to reset the environment.")
    print("=" * 50 + "\n")
    device = env.unwrapped.device
    sim_frame = 0
    # NOTE(review): env is reset a second time here (first reset above) to
    # capture the initial observation dict — confirm the double reset is intended.
    obs, _ = env.reset()
    clear_ik_target_state()
    while simulation_app.is_running():
        try:
            with torch.inference_mode():
                if should_reset:
                    obs, _ = env.reset()
                    clear_ik_target_state()
                    teleop_interface.reset()
                    rebind_robot_viewports()
                    should_reset = False
                    sim_frame = 0
                    continue
                # Read current EEF in world frame from observations.
                policy_obs = obs["policy"]
                eef_pos = policy_obs["eef_pos"][0].cpu().numpy()
                eef_quat = policy_obs["eef_quat"][0].cpu().numpy()
                # Get action from XR Controller (world frame for absolute mode).
                action_np = teleop_interface.advance(
                    current_eef_pos=eef_pos, current_eef_quat=eef_quat,
                )
                # IK expects root-frame commands; convert just before sending.
                if is_abs_mode:
                    action_np = convert_action_world_to_root(action_np)
                # Action manager order: arm_action | wheel_action | gripper_action
                # arm=7, wheel=4, gripper=1 → total 12 dims.
                wheel_np = get_wheel_action()
                action_np = torch.cat([action_np[:7], wheel_np, action_np[7:]])
                actions = action_np.unsqueeze(0).repeat(env.num_envs, 1).to(device)
                # Step environment
                obs, _, _, _, _ = env.step(actions)
                # Print robot state every 30 frames
                sim_frame += 1
                if sim_frame % 30 == 0:
                    np.set_printoptions(precision=4, suppress=True, floatmode='fixed')
                    policy_obs = obs["policy"]
                    joint_pos = policy_obs["joint_pos"][0].cpu().numpy()
                    eef_pos = policy_obs["eef_pos"][0].cpu().numpy()
                    eef_quat = policy_obs["eef_quat"][0].cpu().numpy()
                    last_act = policy_obs["actions"][0].cpu().numpy()
                    # On first print, dump ALL joint names + positions to identify indices
                    if sim_frame == 30:
                        robot = env.unwrapped.scene["robot"]
                        jnames = robot.joint_names
                        print(f"\n{'='*70}")
                        print(f" ALL {len(jnames)} JOINT NAMES AND POSITIONS (relative)")
                        print(f"{'='*70}")
                        for i, name in enumerate(jnames):
                            print(f" [{i:2d}] {name:30s} = {joint_pos[i]:+.4f}")
                        print(f"{'='*70}")
                        # Find arm joint indices dynamically by looking at the first 6-7 joints that aren't fingers or hands
                        arm_idx = [i for i, n in enumerate(jnames) if n.startswith("l_joint")]
                        print(f" Deduced left arm indices: {arm_idx}")
                        print(f"{'='*70}\n")
                    # Get arm indices (cache-friendly: find once)
                    if not hasattr(env, '_arm_idx_cache'):
                        robot = env.unwrapped.scene["robot"]
                        jnames = robot.joint_names
                        env._arm_idx_cache = [i for i, n in enumerate(jnames) if n.startswith("l_joint")]
                    arm_idx = env._arm_idx_cache
                    arm_joints = joint_pos[arm_idx]
                    try:
                        joy_dbg = teleop_interface.xr_client.get_joystick("left")
                        joy_str = f"[{joy_dbg[0]:+.2f}, {joy_dbg[1]:+.2f}]"
                    except Exception:
                        joy_str = "N/A"
                    print(f"\n---------------- [ROBOT STATE frame={sim_frame}] ----------------")
                    print(f"| Left Arm Joints (rad): {arm_joints}")
                    print(f"| EEF Pos (world): {eef_pos}")
                    print(f"| EEF Quat (world, wxyz): {eef_quat}")
                    print(f"| Last Action Sent: {last_act}")
                    print(f"| Joystick (x=turn,y=fwd): {joy_str}")
                    print(f"----------------------------------------------------------------")
        except Exception as e:
            logger.error(f"Error during simulation step: {e}")
            break
    # Shut down XR SDK first, then the environment.
    teleop_interface.close()
    env.close()
# Entry point: run the teleop loop, then shut the simulator down.
if __name__ == "__main__":
    main()
    # Release Omniverse resources once the teleop loop returns.
    simulation_app.close()

View File

@@ -0,0 +1,33 @@
# Copyright (c) 2022-2026, The Mindbot Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""XR teleoperation utilities for PICO 4 Ultra / XRoboToolkit integration.
This package provides coordinate transformation, XR device communication,
and teleoperation control utilities adapted from XRoboToolkit for use
with Isaac Lab environments.
"""
from .geometry import (
    R_HEADSET_TO_WORLD,
    apply_delta_pose,
    is_valid_quaternion,
    quat_diff_as_angle_axis,
    quat_diff_as_rotvec_xyzw,
    quaternion_to_angle_axis,
    transform_xr_pose,
)
from .xr_client import XrClient
# Public API of the xr_utils package: geometry helpers plus the XR client.
__all__ = [
    "R_HEADSET_TO_WORLD",
    "apply_delta_pose",
    "is_valid_quaternion",
    "quat_diff_as_angle_axis",
    "quat_diff_as_rotvec_xyzw",
    "quaternion_to_angle_axis",
    "transform_xr_pose",
    "XrClient",
]

View File

@@ -0,0 +1,203 @@
# Copyright (c) 2022-2026, The Mindbot Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Adapted from XRoboToolkit (https://github.com/NVIDIA-AI-IOT/XRoboToolkit-Teleop-Sample-Python)
# Original geometry utilities by Zhigen Zhao.
"""Geometry utilities for XR-to-World coordinate transformations.
This module provides the core coordinate transformation matrices and functions
needed to convert PICO 4 Ultra (OpenXR) data into Isaac Sim world coordinates.
Coordinate Systems:
- OpenXR (PICO 4): +X Right, +Y Up, -Z Forward
- Isaac Sim (World): +X Forward, +Y Left, +Z Up
"""
import numpy as np
from scipy.spatial.transform import Rotation as R
# =====================================================================
# Core Coordinate Transform: OpenXR -> Isaac Sim World
# =====================================================================
# Signed axis permutation (determinant +1, orthonormal), so it is a proper
# rotation and its transpose is its inverse.
R_HEADSET_TO_WORLD = np.array(
    [
        [0, 0, -1], # XR -Z (forward) -> World +X (forward)
        [-1, 0, 0], # XR -X (left) -> World +Y (left)
        [0, 1, 0], # XR +Y (up) -> World +Z (up)
    ],
    dtype=np.float64,
)
"""3x3 rotation matrix that maps OpenXR coordinates to Isaac Sim world coordinates."""
# =====================================================================
# Quaternion Utilities
# =====================================================================
def is_valid_quaternion(quat: np.ndarray, tol: float = 1e-6) -> bool:
    """Check whether ``quat`` is a finite, unit-norm quaternion.

    Args:
        quat: Quaternion array of length 4 (any component convention).
        tol: Allowed deviation of the norm from 1.0.

    Returns:
        True only for a 4-element sequence of finite values with unit norm.
    """
    # Reject anything that is not a 4-element list/tuple/array up front.
    if not isinstance(quat, (list, tuple, np.ndarray)) or len(quat) != 4:
        return False
    values = np.asarray(quat, dtype=np.float64)
    if not np.isfinite(values).all():
        return False
    return abs(np.linalg.norm(values) - 1.0) <= tol
def quaternion_to_angle_axis(quat_wxyz: np.ndarray, eps: float = 1e-6) -> np.ndarray:
    """Convert a (w, x, y, z) quaternion to an angle-axis (rotation) vector.

    Args:
        quat_wxyz: Quaternion in (w, x, y, z) format.
        eps: Threshold below which the rotation is treated as zero.

    Returns:
        Rotation vector ``axis * angle``, shape (3,).
    """
    # Canonicalize to the w >= 0 hemisphere so the angle lands in [0, pi].
    q = np.array(quat_wxyz, dtype=np.float64)
    if q[0] < 0.0:
        q = -q
    # angle = 2 * acos(w); the clip guards against tiny numeric overshoot.
    angle = 2.0 * np.arccos(np.clip(q[0], -1.0, 1.0))
    sin_half = np.sin(0.5 * angle)
    # Near-identity rotations (or a degenerate sine) map to the zero vector.
    if angle < eps or sin_half < eps:
        return np.zeros(3, dtype=np.float64)
    return (q[1:] / sin_half) * angle
def quat_diff_as_angle_axis(
    q1_wxyz: np.ndarray, q2_wxyz: np.ndarray, eps: float = 1e-6
) -> np.ndarray:
    """Rotation taking q1 to q2, expressed as an angle-axis vector.

    Computes DeltaQ such that q2 = DeltaQ * q1.

    Args:
        q1_wxyz: Source quaternion (w, x, y, z).
        q2_wxyz: Target quaternion (w, x, y, z).
        eps: Rotations whose norm falls below this are returned as zero.

    Returns:
        Angle-axis vector representing DeltaQ, shape (3,).
    """
    # scipy wants scalar-last (x, y, z, w); np.roll(-1) moves w to the back.
    r_from = R.from_quat(np.roll(np.asarray(q1_wxyz, dtype=np.float64), -1))
    r_to = R.from_quat(np.roll(np.asarray(q2_wxyz, dtype=np.float64), -1))
    rotvec = (r_to * r_from.inv()).as_rotvec()
    return rotvec if np.linalg.norm(rotvec) >= eps else np.zeros(3, dtype=np.float64)
def quat_diff_as_rotvec_xyzw(
    q1_xyzw: np.ndarray, q2_xyzw: np.ndarray, eps: float = 1e-6
) -> np.ndarray:
    """Rotation taking q1 to q2 as a rotation vector (scalar-last inputs).

    Identical contract to :func:`quat_diff_as_angle_axis`, but accepts
    (x, y, z, w) quaternions directly in scipy's convention.

    Args:
        q1_xyzw: Source quaternion (x, y, z, w).
        q2_xyzw: Target quaternion (x, y, z, w).
        eps: Rotations whose norm falls below this are returned as zero.

    Returns:
        Rotation vector (angle * axis), shape (3,).
    """
    relative = R.from_quat(q2_xyzw) * R.from_quat(q1_xyzw).inv()
    rotvec = relative.as_rotvec()
    return np.zeros(3, dtype=np.float64) if np.linalg.norm(rotvec) < eps else rotvec
def apply_delta_pose(
    source_pos: np.ndarray,
    source_quat_xyzw: np.ndarray,
    delta_pos: np.ndarray,
    delta_rot: np.ndarray,
    eps: float = 1.0e-6,
) -> tuple:
    """Compose a pose displacement onto a source pose.

    Args:
        source_pos: Position of source frame, shape (3,).
        source_quat_xyzw: Quaternion of source frame (x, y, z, w), shape (4,).
        delta_pos: Position displacement, shape (3,).
        delta_rot: Rotation displacement in angle-axis format, shape (3,).
        eps: Rotations smaller than this are treated as the identity.

    Returns:
        Tuple of (target_pos, target_quat_xyzw), both np.ndarray.
    """
    rot_magnitude = np.linalg.norm(delta_rot)
    # Skip building a rotation for numerically-zero displacements.
    increment = R.from_rotvec(delta_rot) if rot_magnitude > eps else R.identity()
    # Pre-multiply: the delta rotation is expressed in the outer frame.
    combined = increment * R.from_quat(source_quat_xyzw)
    return source_pos + delta_pos, combined.as_quat()
# =====================================================================
# XR Pose Transform
# =====================================================================
def transform_xr_pose(
    pos_xr: np.ndarray, quat_xyzw_xr: np.ndarray
) -> tuple:
    """Map an OpenXR-frame pose into the Isaac Sim world frame.

    Applies ``R_HEADSET_TO_WORLD`` to the position and conjugates the
    orientation by the same rotation (R_world = T * R_xr * T^-1), following
    the XRoboToolkit approach.

    Args:
        pos_xr: Position in OpenXR frame, shape (3,).
        quat_xyzw_xr: Quaternion in (x, y, z, w) format from the XR SDK, shape (4,).

    Returns:
        Tuple of (pos_world, quat_xyzw_world):
            - pos_world: Position in world frame, shape (3,).
            - quat_xyzw_world: Quaternion in (x, y, z, w) for scipy, shape (4,).
    """
    basis_change = R.from_matrix(R_HEADSET_TO_WORLD)
    world_pos = R_HEADSET_TO_WORLD @ pos_xr
    # Conjugation re-expresses the XR rotation in world-frame axes.
    world_rot = basis_change * R.from_quat(quat_xyzw_xr) * basis_change.inv()
    return world_pos, world_rot.as_quat()

View File

@@ -0,0 +1,169 @@
# Copyright (c) 2022-2026, The Mindbot Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Adapted from XRoboToolkit (https://github.com/NVIDIA-AI-IOT/XRoboToolkit-Teleop-Sample-Python)
# Original XR client by Zhigen Zhao.
"""XR device client for PICO 4 Ultra via XRoboToolkit SDK.
This module wraps the ``xrobotoolkit_sdk`` C++ extension to provide a
Pythonic interface for retrieving pose, button, joystick, and hand-tracking
data from PICO XR headsets over the local network.
Usage::
from xr_utils import XrClient
client = XrClient()
headset_pose = client.get_pose("headset")
left_trigger = client.get_key_value("left_trigger")
client.close()
"""
import numpy as np
import xrobotoolkit_sdk as xrt
class XrClient:
"""Client for PICO XR devices via the XRoboToolkit SDK."""
def __init__(self):
"""Initialize the SDK and start listening for XR data."""
xrt.init()
print("[XrClient] XRoboToolkit SDK initialized.")
# ------------------------------------------------------------------
# Pose
# ------------------------------------------------------------------
def get_pose(self, name: str) -> np.ndarray:
"""Return the 7-DoF pose [x, y, z, qx, qy, qz, qw] of a device.
Args:
name: One of ``"left_controller"``, ``"right_controller"``, ``"headset"``.
Returns:
np.ndarray of shape (7,).
"""
_POSE_FUNCS = {
"left_controller": xrt.get_left_controller_pose,
"right_controller": xrt.get_right_controller_pose,
"headset": xrt.get_headset_pose,
}
if name not in _POSE_FUNCS:
raise ValueError(
f"Invalid pose name: {name}. Choose from {list(_POSE_FUNCS)}"
)
return np.asarray(_POSE_FUNCS[name](), dtype=np.float64)
# ------------------------------------------------------------------
# Analog Keys (trigger / grip)
# ------------------------------------------------------------------
def get_key_value(self, name: str) -> float:
"""Return an analog value (0.0~1.0) for a trigger or grip.
Args:
name: One of ``"left_trigger"``, ``"right_trigger"``,
``"left_grip"``, ``"right_grip"``.
"""
_KEY_FUNCS = {
"left_trigger": xrt.get_left_trigger,
"right_trigger": xrt.get_right_trigger,
"left_grip": xrt.get_left_grip,
"right_grip": xrt.get_right_grip,
}
if name not in _KEY_FUNCS:
raise ValueError(
f"Invalid key name: {name}. Choose from {list(_KEY_FUNCS)}"
)
return float(_KEY_FUNCS[name]())
# ------------------------------------------------------------------
# Buttons
# ------------------------------------------------------------------
def get_button(self, name: str) -> bool:
"""Return boolean state of a face / menu button.
Args:
name: One of ``"A"``, ``"B"``, ``"X"``, ``"Y"``,
``"left_menu_button"``, ``"right_menu_button"``,
``"left_axis_click"``, ``"right_axis_click"``.
"""
_BTN_FUNCS = {
"A": xrt.get_A_button,
"B": xrt.get_B_button,
"X": xrt.get_X_button,
"Y": xrt.get_Y_button,
"left_menu_button": xrt.get_left_menu_button,
"right_menu_button": xrt.get_right_menu_button,
"left_axis_click": xrt.get_left_axis_click,
"right_axis_click": xrt.get_right_axis_click,
}
if name not in _BTN_FUNCS:
raise ValueError(
f"Invalid button name: {name}. Choose from {list(_BTN_FUNCS)}"
)
return bool(_BTN_FUNCS[name]())
# ------------------------------------------------------------------
# Joystick
# ------------------------------------------------------------------
def get_joystick(self, side: str) -> np.ndarray:
"""Return joystick (x, y) for left or right controller.
Args:
side: ``"left"`` or ``"right"``.
Returns:
np.ndarray of shape (2,).
"""
if side == "left":
return np.asarray(xrt.get_left_axis(), dtype=np.float64)
elif side == "right":
return np.asarray(xrt.get_right_axis(), dtype=np.float64)
raise ValueError(f"Invalid side: {side}. Choose 'left' or 'right'.")
# ------------------------------------------------------------------
# Hand Tracking
# ------------------------------------------------------------------
def get_hand_tracking(self, side: str) -> np.ndarray | None:
    """Read the hand-tracking joint poses for one hand, if tracking is active.

    Args:
        side: ``"left"`` or ``"right"``.

    Returns:
        np.ndarray of shape (27, 7), or None when the hand tracker is inactive.

    Raises:
        ValueError: If ``side`` is neither 'left' nor 'right'.
    """
    if side == "left":
        is_active, read_state = xrt.get_left_hand_is_active, xrt.get_left_hand_tracking_state
    elif side == "right":
        is_active, read_state = xrt.get_right_hand_is_active, xrt.get_right_hand_tracking_state
    else:
        raise ValueError(f"Invalid side: {side}. Choose 'left' or 'right'.")
    if not is_active():
        return None
    return np.asarray(read_state(), dtype=np.float64)
# ------------------------------------------------------------------
# Timestamp
# ------------------------------------------------------------------
def get_timestamp_ns(self) -> int:
    """Return the current XR system clock reading, in nanoseconds."""
    timestamp = xrt.get_time_stamp_ns()
    return int(timestamp)
# ------------------------------------------------------------------
# Lifecycle
# ------------------------------------------------------------------
def close(self):
    """Tear down the connection to the XR SDK and log the shutdown."""
    xrt.close()
    print("[XrClient] SDK connection closed.")

View File

@@ -0,0 +1,72 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to run an environment with zero action agent."""
"""Launch Isaac Sim Simulator first."""
import argparse
from isaaclab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Zero agent for Isaac Lab environments.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
# append AppLauncher cli args (adds launcher options such as --device, used below in main)
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app -- must happen before any simulation modules are imported
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import gymnasium as gym
import torch
import isaaclab_tasks # noqa: F401
from isaaclab_tasks.utils import parse_env_cfg
# PLACEHOLDER: Extension template (do not remove this comment)
def main():
    """Run an Isaac Lab environment while feeding it all-zero actions every step."""
    # build the environment configuration from the CLI selections
    env_cfg = parse_env_cfg(
        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    # instantiate the (vectorized) environment
    env = gym.make(args_cli.task, cfg=env_cfg)
    # report the gym spaces of the vectorized environment
    print(f"[INFO]: Gym observation space: {env.observation_space}")
    print(f"[INFO]: Gym action space: {env.action_space}")
    env.reset()
    # step with zero actions until the simulator window is closed
    while simulation_app.is_running():
        with torch.inference_mode():
            env.step(torch.zeros(env.action_space.shape, device=env.unwrapped.device))
    # close the simulator
    env.close()
if __name__ == "__main__":
    # run the zero-action agent loop
    main()
    # close sim app after the loop exits
    simulation_app.close()

View File

@@ -0,0 +1,100 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Convert a mesh file to `.obj` using blender.
This file processes a given dae mesh file and saves the resulting mesh file in obj format.
It needs to be called using the python packaged with blender, i.e.:
blender --background --python blender_obj.py -- -in_file FILE -out_file FILE
For more information: https://docs.blender.org/api/current/index.html
The script was tested on Blender 3.2 on Ubuntu 20.04LTS.
"""
import os
import sys
import bpy
def parse_cli_args():
    """Parse the command line arguments passed to blender after the ``--`` separator.

    Returns:
        The parsed arguments namespace, or None when no arguments were given
        (in which case the usage text is printed instead).
    """
    import argparse

    # get the args passed to blender after "--", all of which are ignored by
    # blender so scripts may receive their own arguments
    argv = sys.argv
    if "--" not in argv:
        argv = []  # as if no args are passed
    else:
        argv = argv[argv.index("--") + 1 :]  # get all args after "--"
    # When --help or no args are given, print this help
    usage_text = (
        f"Run blender in background mode with this script:\n\tblender --background --python {__file__} -- [options]"
    )
    parser = argparse.ArgumentParser(description=usage_text)
    # Add arguments
    # fix: the input is a DAE/STL mesh (see convert_to_obj), not an OBJ file
    parser.add_argument("-i", "--in_file", metavar="FILE", type=str, required=True, help="Path to input DAE/STL file.")
    parser.add_argument("-o", "--out_file", metavar="FILE", type=str, required=True, help="Path to output OBJ file.")
    args = parser.parse_args(argv)
    # Check if any arguments provided
    if not argv or not args.in_file or not args.out_file:
        parser.print_help()
        return None
    # return arguments
    return args
def convert_to_obj(in_file: str, out_file: str, save_usd: bool = False):
    """Convert a mesh file (.dae or .stl) to `.obj` using blender.

    Args:
        in_file: Input mesh file to process.
        out_file: Path to store output obj file.
        save_usd: Whether to additionally export the scene as a ``.usd`` file
            next to the obj output. Defaults to False.

    Raises:
        FileNotFoundError: If ``in_file`` does not exist.
        ValueError: If ``in_file`` is not in dae/stl format.
    """
    # check valid input file
    if not os.path.exists(in_file):
        raise FileNotFoundError(in_file)
    # add ending of file format
    if not out_file.endswith(".obj"):
        out_file += ".obj"
    # create directory if it doesn't exist for destination file
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file), exist_ok=True)
    # reset scene to empty
    bpy.ops.wm.read_factory_settings(use_empty=True)
    # load object into scene
    if in_file.endswith(".dae"):
        bpy.ops.wm.collada_import(filepath=in_file)
    elif in_file.endswith(".stl") or in_file.endswith(".STL"):
        bpy.ops.import_mesh.stl(filepath=in_file)
    else:
        raise ValueError(f"Input file not in dae/stl format: {in_file}")
    # convert to obj format and store with z up
    # TODO: Read the convention from dae file instead of manually fixing it.
    # Reference: https://docs.blender.org/api/2.79/bpy.ops.export_scene.html
    bpy.ops.export_scene.obj(
        filepath=out_file, check_existing=False, axis_forward="Y", axis_up="Z", global_scale=1, path_mode="RELATIVE"
    )
    # save it as usd as well
    if save_usd:
        # fix: swap only the file extension; the previous `replace("obj", "usd")`
        # also rewrote any "obj" substring elsewhere in the path (e.g. ".../objects/...")
        out_file = os.path.splitext(out_file)[0] + ".usd"
        bpy.ops.wm.usd_export(filepath=out_file, check_existing=False)
if __name__ == "__main__":
    # read arguments
    cli_args = parse_cli_args()
    # exit quietly when no arguments were supplied (usage text already printed)
    if cli_args is None:
        sys.exit()
    # process via blender
    convert_to_obj(cli_args.in_file, cli_args.out_file)

View File

@@ -0,0 +1,134 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script uses the cloner API to check if asset has been instanced properly.
Usage with different inputs (replace `<Asset-Path>` and `<Asset-Path-Instanced>` with the path to the
original asset and the instanced asset respectively):
```bash
./isaaclab.sh -p source/tools/check_instanceable.py <Asset-Path> -n 4096 --headless --physics
./isaaclab.sh -p source/tools/check_instanceable.py <Asset-Path-Instanced> -n 4096 --headless --physics
./isaaclab.sh -p source/tools/check_instanceable.py <Asset-Path> -n 4096 --headless
./isaaclab.sh -p source/tools/check_instanceable.py <Asset-Path-Instanced> -n 4096 --headless
```
Output from the above commands:
```bash
>>> Cloning time (cloner.clone): 0.648198 seconds
>>> Setup time (sim.reset): : 5.843589 seconds
[#clones: 4096, physics: True] Asset: <Asset-Path-Instanced> : 6.491870 seconds
>>> Cloning time (cloner.clone): 0.693133 seconds
>>> Setup time (sim.reset): 50.860526 seconds
[#clones: 4096, physics: True] Asset: <Asset-Path> : 51.553743 seconds
>>> Cloning time (cloner.clone) : 0.687201 seconds
>>> Setup time (sim.reset) : 6.302215 seconds
[#clones: 4096, physics: False] Asset: <Asset-Path-Instanced> : 6.989500 seconds
>>> Cloning time (cloner.clone) : 0.678150 seconds
>>> Setup time (sim.reset) : 52.854054 seconds
[#clones: 4096, physics: False] Asset: <Asset-Path> : 53.532287 seconds
```
"""
"""Launch Isaac Sim Simulator first."""
import argparse
import contextlib
import os
from isaaclab.app import AppLauncher
# add argparse arguments
# fix: typo in the parser description ("asset in instanced" -> "asset is instanced")
parser = argparse.ArgumentParser("Utility to empirically check if asset is instanced properly.")
parser.add_argument("input", type=str, help="The path to the USD file.")
parser.add_argument("-n", "--num_clones", type=int, default=128, help="Number of clones to spawn.")
parser.add_argument("-s", "--spacing", type=float, default=1.5, help="Spacing between instances in a grid.")
parser.add_argument("-p", "--physics", action="store_true", default=False, help="Clone assets using physics cloner.")
# append AppLauncher cli args (adds launcher options such as --headless, read below in main)
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app -- must happen before any simulation modules are imported
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
from isaacsim.core.cloner import GridCloner
import isaaclab.sim as sim_utils
from isaaclab.sim import SimulationCfg, SimulationContext
from isaaclab.utils import Timer
from isaaclab.utils.assets import check_file_path
def main():
    """Spawns the USD asset robot and clones it using Isaac Gym Cloner API.

    Reads the asset path and cloning options from the module-level ``args_cli``
    and prints cloning/setup timings via ``Timer``.
    """
    # check valid file path
    if not check_file_path(args_cli.input):
        raise ValueError(f"Invalid file path: {args_cli.input}")
    # Load kit helper
    sim = SimulationContext(SimulationCfg(dt=0.01))
    # get stage handle
    stage = sim_utils.get_current_stage()
    # enable fabric which avoids passing data over to USD structure
    # this speeds up the read-write operation of GPU buffers
    if sim.get_physics_context().use_gpu_pipeline:
        sim.get_physics_context().enable_fabric(True)
    # increase GPU buffer dimensions
    sim.get_physics_context().set_gpu_found_lost_aggregate_pairs_capacity(2**25)
    sim.get_physics_context().set_gpu_total_aggregate_pairs_capacity(2**21)
    # enable hydra scene-graph instancing
    # this is needed to visualize the scene when fabric is enabled
    sim._settings.set_bool("/persistent/omnihydra/useSceneGraphInstancing", True)
    # Create interface to clone the scene
    cloner = GridCloner(spacing=args_cli.spacing, stage=stage)
    cloner.define_base_env("/World/envs")
    stage.DefinePrim("/World/envs/env_0", "Xform")
    # Spawn things into stage
    sim_utils.create_prim("/World/Light", "DistantLight")
    # Everything under the namespace "/World/envs/env_0" will be cloned
    sim_utils.create_prim("/World/envs/env_0/Asset", "Xform", usd_path=os.path.abspath(args_cli.input))
    # Clone the scene
    num_clones = args_cli.num_clones
    # Create a timer to measure the cloning time (outer timer covers clone + reset)
    with Timer(f"[#clones: {num_clones}, physics: {args_cli.physics}] Asset: {args_cli.input}"):
        # Clone the scene
        with Timer(">>> Cloning time (cloner.clone)"):
            cloner.define_base_env("/World/envs")
            envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_clones)
            _ = cloner.clone(
                source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=args_cli.physics
            )
        # Play the simulator
        with Timer(">>> Setup time (sim.reset)"):
            sim.reset()
    # Simulate scene (if not headless) until the user interrupts with Ctrl+C
    if not args_cli.headless:
        with contextlib.suppress(KeyboardInterrupt):
            while sim.is_playing():
                # perform step
                sim.step()
if __name__ == "__main__":
    # run the cloning benchmark
    main()
    # close sim app after the benchmark finishes
    simulation_app.close()

View File

@@ -0,0 +1,160 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Utility to bulk convert URDFs or mesh files into instanceable USD format.
Unified Robot Description Format (URDF) is an XML file format used in ROS to describe all elements of
a robot. For more information, see: http://wiki.ros.org/urdf
This script uses the URDF importer extension from Isaac Sim (``omni.isaac.urdf_importer``) to convert a
URDF asset into USD format. It is designed as a convenience script for command-line use. For more
information on the URDF importer, see the documentation for the extension:
https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/ext_omni_isaac_urdf.html
positional arguments:
input The path to the input directory containing URDFs and Meshes.
output The path to directory to store the instanceable files.
optional arguments:
-h, --help Show this help message and exit
--conversion-type Select file type to convert, urdf or mesh. (default: urdf)
--merge-joints Consolidate links that are connected by fixed joints. (default: False)
--fix-base Fix the base to where it is imported. (default: False)
--make-instanceable Make the asset instanceable for efficient cloning. (default: False)
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from isaaclab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Utility to convert a URDF or mesh into an Instanceable asset.")
parser.add_argument("input", type=str, help="The path to the input directory.")
parser.add_argument("output", type=str, help="The path to directory to store converted instanceable files.")
parser.add_argument(
"--conversion-type", type=str, default="both", help="Select file type to convert, urdf, mesh, or both."
)
parser.add_argument(
"--merge-joints",
action="store_true",
default=False,
help="Consolidate links that are connected by fixed joints.",
)
parser.add_argument("--fix-base", action="store_true", default=False, help="Fix the base to where it is imported.")
parser.add_argument(
"--make-instanceable",
action="store_true",
default=True,
help="Make the asset instanceable for efficient cloning.",
)
parser.add_argument(
"--collision-approximation",
type=str,
default="convexDecomposition",
choices=["convexDecomposition", "convexHull", "none"],
help=(
'The method used for approximating collision mesh. Set to "none" '
"to not add a collision mesh to the converted mesh."
),
)
parser.add_argument(
"--mass",
type=float,
default=None,
help="The mass (in kg) to assign to the converted asset. If not provided, then no mass is added.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import os
from isaaclab.sim.converters import MeshConverter, MeshConverterCfg, UrdfConverter, UrdfConverterCfg
from isaaclab.sim.schemas import schemas_cfg
def main():
    """Walk the input directory and convert every URDF and/or mesh file found.

    The conversion type, output directory, and converter options are read from
    the module-level ``args_cli``.

    Raises:
        ValueError: If the conversion type is not 'urdf', 'mesh', or 'both'.
    """
    # Define conversion type given
    conversion_type = args_cli.conversion_type.lower()
    # fix: the original raised the `Warning` class; a bad option value is a ValueError
    if conversion_type not in ("urdf", "mesh", "both"):
        raise ValueError("Conversion type is not valid, please select either 'urdf', 'mesh', or 'both'.")
    if not os.path.exists(args_cli.input):
        # fix: previously only printed and then fell through to os.walk on a
        # nonexistent path (a silent no-op); bail out explicitly instead
        print(f"Error: The directory {args_cli.input} does not exist.")
        return
    # For each file and subsequent sub-directory
    for root, dirs, files in os.walk(args_cli.input):
        # For each file
        for filename in files:
            # Check for URDF extensions
            if conversion_type in ("urdf", "both") and filename.lower().endswith(".urdf"):
                # URDF converter call
                urdf_converter_cfg = UrdfConverterCfg(
                    # fix: convert the file actually being visited
                    asset_path=f"{root}/{filename}",
                    usd_dir=f"{args_cli.output}/{filename[:-5]}",
                    usd_file_name=f"{filename[:-5]}.usd",
                    fix_base=args_cli.fix_base,
                    merge_fixed_joints=args_cli.merge_joints,
                    force_usd_conversion=True,
                    make_instanceable=args_cli.make_instanceable,
                )
                # Create Urdf converter and import the file
                urdf_converter = UrdfConverter(urdf_converter_cfg)
                print(f"Generated USD file: {urdf_converter.usd_path}")
            elif conversion_type in ("mesh", "both") and filename.lower().endswith((".fbx", ".obj", ".dae", ".stl")):
                # Mass properties: only attach rigid-body/mass schemas when a mass is given
                if args_cli.mass is not None:
                    mass_props = schemas_cfg.MassPropertiesCfg(mass=args_cli.mass)
                    rigid_props = schemas_cfg.RigidBodyPropertiesCfg()
                else:
                    mass_props = None
                    rigid_props = None
                # Collision properties
                collision_props = schemas_cfg.CollisionPropertiesCfg(
                    collision_enabled=args_cli.collision_approximation != "none"
                )
                # Mesh converter call
                mesh_converter_cfg = MeshConverterCfg(
                    mass_props=mass_props,
                    rigid_props=rigid_props,
                    collision_props=collision_props,
                    # fix: convert the file actually being visited
                    asset_path=f"{root}/{filename}",
                    force_usd_conversion=True,
                    usd_dir=f"{args_cli.output}/{filename[:-4]}",
                    usd_file_name=f"{filename[:-4]}.usd",
                    make_instanceable=args_cli.make_instanceable,
                    collision_approximation=args_cli.collision_approximation,
                )
                # Create mesh converter and import the file
                mesh_converter = MeshConverter(mesh_converter_cfg)
                print(f"Generated USD file: {mesh_converter.usd_path}")
if __name__ == "__main__":
    # run the bulk conversion
    main()
    # close sim app after all files have been processed
    simulation_app.close()

View File

@@ -0,0 +1,205 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Utility to convert a OBJ/STL/FBX into USD format.
The OBJ file format is a simple data-format that represents 3D geometry alone — namely, the position
of each vertex, the UV position of each texture coordinate vertex, vertex normals, and the faces that
make each polygon defined as a list of vertices, and texture vertices.
An STL file describes a raw, unstructured triangulated surface by the unit normal and vertices (ordered
by the right-hand rule) of the triangles using a three-dimensional Cartesian coordinate system.
FBX files are a type of 3D model file created using the Autodesk FBX software. They can be designed and
modified in various modeling applications, such as Maya, 3ds Max, and Blender. Moreover, FBX files typically
contain mesh, material, texture, and skeletal animation data.
Link: https://www.autodesk.com/products/fbx/overview
This script uses the asset converter extension from Isaac Sim (``omni.kit.asset_converter``) to convert a
OBJ/STL/FBX asset into USD format. It is designed as a convenience script for command-line use.
positional arguments:
input The path to the input mesh (.OBJ/.STL/.FBX) file.
output The path to store the USD file.
optional arguments:
-h, --help Show this help message and exit
--make-instanceable, Make the asset instanceable for efficient cloning. (default: False)
--collision-approximation The method used for approximating collision mesh. Defaults to convexDecomposition.
Set to \"none\" to not add a collision mesh to the converted mesh.
(default: convexDecomposition)
--mass The mass (in kg) to assign to the converted asset. (default: None)
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from isaaclab.app import AppLauncher
# Define collision approximation choices (must be defined before parser)
_valid_collision_approx = [
    "convexDecomposition",
    "convexHull",
    "triangleMesh",
    "meshSimplification",
    "sdf",
    "boundingCube",
    "boundingSphere",
    "none",
]
# add argparse arguments
parser = argparse.ArgumentParser(description="Utility to convert a mesh file into USD format.")
parser.add_argument("input", type=str, help="The path to the input mesh file.")
parser.add_argument("output", type=str, help="The path to store the USD file.")
parser.add_argument(
    "--make-instanceable",
    action="store_true",
    default=False,
    help="Make the asset instanceable for efficient cloning.",
)
parser.add_argument(
    "--collision-approximation",
    type=str,
    default="convexDecomposition",
    choices=_valid_collision_approx,
    help="The method used for approximating the collision mesh. Set to 'none' to disable collision mesh generation.",
)
parser.add_argument(
    "--mass",
    type=float,
    default=None,
    help="The mass (in kg) to assign to the converted asset. If not provided, then no mass is added.",
)
# append AppLauncher cli args (adds launcher options such as --headless)
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app -- must happen before any simulation modules are imported
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import contextlib
import os
import carb
import omni.kit.app
import isaaclab.sim as sim_utils
from isaaclab.sim.converters import MeshConverter, MeshConverterCfg
from isaaclab.sim.schemas import schemas_cfg
from isaaclab.utils.assets import check_file_path
from isaaclab.utils.dict import print_dict
# Map each CLI collision-approximation choice to its mesh-collision properties
# config class; "none" maps to no collision config (handled in main()).
collision_approximation_map = {
    "convexDecomposition": schemas_cfg.ConvexDecompositionPropertiesCfg,
    "convexHull": schemas_cfg.ConvexHullPropertiesCfg,
    "triangleMesh": schemas_cfg.TriangleMeshPropertiesCfg,
    "meshSimplification": schemas_cfg.TriangleMeshSimplificationPropertiesCfg,
    "sdf": schemas_cfg.SDFMeshPropertiesCfg,
    "boundingCube": schemas_cfg.BoundingCubePropertiesCfg,
    "boundingSphere": schemas_cfg.BoundingSpherePropertiesCfg,
    "none": None,
}
def main():
    """Convert the input mesh (OBJ/STL/FBX) to USD and optionally view it in the GUI.

    All options (paths, mass, collision approximation, instanceability) are read
    from the module-level ``args_cli``.

    Raises:
        ValueError: If the input path is invalid or the collision approximation
            type is not one of the supported choices.
    """
    # check valid file path
    mesh_path = args_cli.input
    if not os.path.isabs(mesh_path):
        mesh_path = os.path.abspath(mesh_path)
    if not check_file_path(mesh_path):
        raise ValueError(f"Invalid mesh file path: {mesh_path}")
    # create destination path
    dest_path = args_cli.output
    if not os.path.isabs(dest_path):
        dest_path = os.path.abspath(dest_path)
    # Mass properties: only attach rigid-body/mass schemas when a mass is given
    if args_cli.mass is not None:
        mass_props = schemas_cfg.MassPropertiesCfg(mass=args_cli.mass)
        rigid_props = schemas_cfg.RigidBodyPropertiesCfg()
    else:
        mass_props = None
        rigid_props = None
    # Collision properties
    collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=args_cli.collision_approximation != "none")
    # Create Mesh converter config: look up the mesh-collision config class for the chosen approximation
    cfg_class = collision_approximation_map.get(args_cli.collision_approximation)
    if cfg_class is None and args_cli.collision_approximation != "none":
        valid_keys = ", ".join(sorted(collision_approximation_map.keys()))
        raise ValueError(
            f"Invalid collision approximation type '{args_cli.collision_approximation}'. "
            f"Valid options are: {valid_keys}."
        )
    collision_cfg = cfg_class() if cfg_class is not None else None
    mesh_converter_cfg = MeshConverterCfg(
        mass_props=mass_props,
        rigid_props=rigid_props,
        collision_props=collision_props,
        asset_path=mesh_path,
        force_usd_conversion=True,
        usd_dir=os.path.dirname(dest_path),
        usd_file_name=os.path.basename(dest_path),
        make_instanceable=args_cli.make_instanceable,
        mesh_collision_props=collision_cfg,
    )
    # Print info
    print("-" * 80)
    print("-" * 80)
    print(f"Input Mesh file: {mesh_path}")
    print("Mesh importer config:")
    print_dict(mesh_converter_cfg.to_dict(), nesting=0)
    print("-" * 80)
    print("-" * 80)
    # Create Mesh converter and import the file
    mesh_converter = MeshConverter(mesh_converter_cfg)
    # print output
    print("Mesh importer output:")
    print(f"Generated USD file: {mesh_converter.usd_path}")
    print("-" * 80)
    print("-" * 80)
    # Determine if there is a GUI to update:
    # acquire settings interface
    carb_settings_iface = carb.settings.get_settings()
    # read flag for whether a local GUI is enabled
    local_gui = carb_settings_iface.get("/app/window/enabled")
    # read flag for whether livestreaming GUI is enabled
    livestream_gui = carb_settings_iface.get("/app/livestream/enabled")
    # Simulate scene (if not headless) until the user interrupts with Ctrl+C
    if local_gui or livestream_gui:
        # Open the stage with USD
        sim_utils.open_stage(mesh_converter.usd_path)
        # Reinitialize the simulation
        app = omni.kit.app.get_app_interface()
        # Run simulation
        with contextlib.suppress(KeyboardInterrupt):
            while app.is_running():
                # perform step
                app.update()
if __name__ == "__main__":
    # run the mesh conversion
    main()
    # close sim app after conversion (and optional viewing) completes
    simulation_app.close()

View File

@@ -0,0 +1,139 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Utility to convert a MJCF into USD format.
MuJoCo XML Format (MJCF) is an XML file format used in MuJoCo to describe all elements of a robot.
For more information, see: http://www.mujoco.org/book/XMLreference.html
This script uses the MJCF importer extension from Isaac Sim (``isaacsim.asset.importer.mjcf``) to convert
a MJCF asset into USD format. It is designed as a convenience script for command-line use. For more information
on the MJCF importer, see the documentation for the extension:
https://docs.isaacsim.omniverse.nvidia.com/latest/robot_setup/ext_isaacsim_asset_importer_mjcf.html
positional arguments:
input The path to the input URDF file.
output The path to store the USD file.
optional arguments:
-h, --help Show this help message and exit
--fix-base Fix the base to where it is imported. (default: False)
--import-sites Import sites by parse <site> tag. (default: True)
--make-instanceable Make the asset instanceable for efficient cloning. (default: False)
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from isaaclab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Utility to convert a MJCF into USD format.")
parser.add_argument("input", type=str, help="The path to the input MJCF file.")
parser.add_argument("output", type=str, help="The path to store the USD file.")
parser.add_argument("--fix-base", action="store_true", default=False, help="Fix the base to where it is imported.")
parser.add_argument(
"--import-sites", action="store_true", default=False, help="Import sites by parsing the <site> tag."
)
parser.add_argument(
"--make-instanceable",
action="store_true",
default=False,
help="Make the asset instanceable for efficient cloning.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import contextlib
import os
import carb
import omni.kit.app
import isaaclab.sim as sim_utils
from isaaclab.sim.converters import MjcfConverter, MjcfConverterCfg
from isaaclab.utils.assets import check_file_path
from isaaclab.utils.dict import print_dict
def main():
    """Convert the input MJCF file to USD and optionally view it in the simulator GUI.

    All options (paths, fix-base, import-sites, instanceability) are read from
    the module-level ``args_cli``.

    Raises:
        ValueError: If the input MJCF path is invalid.
    """
    # check valid file path
    mjcf_path = args_cli.input
    if not os.path.isabs(mjcf_path):
        mjcf_path = os.path.abspath(mjcf_path)
    if not check_file_path(mjcf_path):
        raise ValueError(f"Invalid file path: {mjcf_path}")
    # create destination path
    dest_path = args_cli.output
    if not os.path.isabs(dest_path):
        dest_path = os.path.abspath(dest_path)
    # create the converter configuration
    mjcf_converter_cfg = MjcfConverterCfg(
        asset_path=mjcf_path,
        usd_dir=os.path.dirname(dest_path),
        usd_file_name=os.path.basename(dest_path),
        fix_base=args_cli.fix_base,
        import_sites=args_cli.import_sites,
        force_usd_conversion=True,
        make_instanceable=args_cli.make_instanceable,
    )
    # Print info
    print("-" * 80)
    print("-" * 80)
    print(f"Input MJCF file: {mjcf_path}")
    print("MJCF importer config:")
    print_dict(mjcf_converter_cfg.to_dict(), nesting=0)
    print("-" * 80)
    print("-" * 80)
    # Create mjcf converter and import the file
    mjcf_converter = MjcfConverter(mjcf_converter_cfg)
    # print output
    print("MJCF importer output:")
    print(f"Generated USD file: {mjcf_converter.usd_path}")
    print("-" * 80)
    print("-" * 80)
    # Determine if there is a GUI to update:
    # acquire settings interface
    carb_settings_iface = carb.settings.get_settings()
    # read flag for whether a local GUI is enabled
    local_gui = carb_settings_iface.get("/app/window/enabled")
    # read flag for whether livestreaming GUI is enabled
    livestream_gui = carb_settings_iface.get("/app/livestream/enabled")
    # Simulate scene (if not headless) until the user interrupts with Ctrl+C
    if local_gui or livestream_gui:
        # Open the stage with USD
        sim_utils.open_stage(mjcf_converter.usd_path)
        # Reinitialize the simulation
        app = omni.kit.app.get_app_interface()
        # Run simulation
        with contextlib.suppress(KeyboardInterrupt):
            while app.is_running():
                # perform step
                app.update()
if __name__ == "__main__":
    # run the MJCF conversion
    main()
    # close sim app after conversion (and optional viewing) completes
    simulation_app.close()

View File

@@ -0,0 +1,163 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Utility to convert a URDF into USD format.
Unified Robot Description Format (URDF) is an XML file format used in ROS to describe all elements of
a robot. For more information, see: http://wiki.ros.org/urdf
This script uses the URDF importer extension from Isaac Sim (``isaacsim.asset.importer.urdf``) to convert a
URDF asset into USD format. It is designed as a convenience script for command-line use. For more
information on the URDF importer, see the documentation for the extension:
https://docs.isaacsim.omniverse.nvidia.com/latest/robot_setup/ext_isaacsim_asset_importer_urdf.html
positional arguments:
input The path to the input URDF file.
output The path to store the USD file.
optional arguments:
-h, --help Show this help message and exit
--merge-joints Consolidate links that are connected by fixed joints. (default: False)
--fix-base Fix the base to where it is imported. (default: False)
--joint-stiffness The stiffness of the joint drive. (default: 100.0)
--joint-damping The damping of the joint drive. (default: 1.0)
--joint-target-type The type of control to use for the joint drive. (default: "position")
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from isaaclab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Utility to convert a URDF into USD format.")
parser.add_argument("input", type=str, help="The path to the input URDF file.")
parser.add_argument("output", type=str, help="The path to store the USD file.")
parser.add_argument(
    "--merge-joints",
    action="store_true",
    default=False,
    help="Consolidate links that are connected by fixed joints.",
)
parser.add_argument("--fix-base", action="store_true", default=False, help="Fix the base to where it is imported.")
parser.add_argument(
    "--joint-stiffness",
    type=float,
    default=100.0,
    help="The stiffness of the joint drive.",
)
parser.add_argument(
    "--joint-damping",
    type=float,
    default=1.0,
    help="The damping of the joint drive.",
)
parser.add_argument(
    "--joint-target-type",
    type=str,
    default="position",
    choices=["position", "velocity", "none"],
    help="The type of control to use for the joint drive.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app -- must happen before any simulation modules are imported
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import contextlib
import os
import carb
import omni.kit.app
import isaaclab.sim as sim_utils
from isaaclab.sim.converters import UrdfConverter, UrdfConverterCfg
from isaaclab.utils.assets import check_file_path
from isaaclab.utils.dict import print_dict
def main():
    """Convert the input URDF file to USD and optionally view it in the simulator GUI.

    All options (paths, fix-base, merge-joints, joint drive gains and target type)
    are read from the module-level ``args_cli``.

    Raises:
        ValueError: If the input URDF path is invalid.
    """
    # check valid file path
    urdf_path = args_cli.input
    if not os.path.isabs(urdf_path):
        urdf_path = os.path.abspath(urdf_path)
    if not check_file_path(urdf_path):
        raise ValueError(f"Invalid file path: {urdf_path}")
    # create destination path
    dest_path = args_cli.output
    if not os.path.isabs(dest_path):
        dest_path = os.path.abspath(dest_path)
    # Create Urdf converter config
    urdf_converter_cfg = UrdfConverterCfg(
        asset_path=urdf_path,
        usd_dir=os.path.dirname(dest_path),
        usd_file_name=os.path.basename(dest_path),
        fix_base=args_cli.fix_base,
        merge_fixed_joints=args_cli.merge_joints,
        force_usd_conversion=True,
        joint_drive=UrdfConverterCfg.JointDriveCfg(
            gains=UrdfConverterCfg.JointDriveCfg.PDGainsCfg(
                stiffness=args_cli.joint_stiffness,
                damping=args_cli.joint_damping,
            ),
            target_type=args_cli.joint_target_type,
        ),
    )
    # Print info
    print("-" * 80)
    print("-" * 80)
    print(f"Input URDF file: {urdf_path}")
    print("URDF importer config:")
    print_dict(urdf_converter_cfg.to_dict(), nesting=0)
    print("-" * 80)
    print("-" * 80)
    # Create Urdf converter and import the file
    urdf_converter = UrdfConverter(urdf_converter_cfg)
    # print output
    print("URDF importer output:")
    print(f"Generated USD file: {urdf_converter.usd_path}")
    print("-" * 80)
    print("-" * 80)
    # Determine if there is a GUI to update:
    # acquire settings interface
    carb_settings_iface = carb.settings.get_settings()
    # read flag for whether a local GUI is enabled
    local_gui = carb_settings_iface.get("/app/window/enabled")
    # read flag for whether livestreaming GUI is enabled
    livestream_gui = carb_settings_iface.get("/app/livestream/enabled")
    # Simulate scene (if not headless) until the user interrupts with Ctrl+C
    if local_gui or livestream_gui:
        # Open the stage with USD
        sim_utils.open_stage(urdf_converter.usd_path)
        # Reinitialize the simulation
        app = omni.kit.app.get_app_interface()
        # Run simulation
        with contextlib.suppress(KeyboardInterrupt):
            while app.is_running():
                # perform step
                app.update()
if __name__ == "__main__":
    # run the URDF conversion
    main()
    # close sim app after conversion (and optional viewing) completes
    simulation_app.close()

View File

@@ -0,0 +1,85 @@
# Copyright (c) 2024-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to construct prompts to control the Cosmos model's generation.
Required arguments:
--templates_path Path to the file containing templates for the prompts.
Optional arguments:
--num_prompts Number of prompts to generate (default: 1).
--output_path Path to the output file to write generated prompts (default: prompts.txt).
"""
import argparse
import json
import random
def parse_args():
    """Build the CLI parser and return the parsed command line arguments."""
    cli = argparse.ArgumentParser(description="Generate prompts for controlling Cosmos model's generation.")
    # Path to the template file is the only mandatory option.
    cli.add_argument(
        "--templates_path", type=str, required=True, help="Path to the JSON file containing prompt templates"
    )
    cli.add_argument("--num_prompts", type=int, default=1, help="Number of prompts to generate (default: 1)")
    cli.add_argument(
        "--output_path", type=str, default="prompts.txt", help="Path to the output file to write generated prompts"
    )
    return cli.parse_args()
def generate_prompt(templates_path: str):
    """Generate a random prompt for controlling the Cosmos model's visual augmentation.

    The prompt describes the scene and desired visual variations, which the model
    uses to guide the augmentation process while preserving the core robotic actions.
    One option is drawn at random from each list-valued section of the template
    file; sections that are empty or not lists are skipped.

    Args:
        templates_path (str): Path to the JSON file containing prompt templates.

    Returns:
        str: Generated prompt string that specifies visual aspects to modify in the video.

    Raises:
        FileNotFoundError: If the template file does not exist.
        ValueError: If the template file is not valid JSON.
    """
    try:
        with open(templates_path) as f:
            templates = json.load(f)
    except FileNotFoundError as exc:
        # Chain the original exception so the full traceback is preserved.
        raise FileNotFoundError(f"Prompt templates file not found: {templates_path}") from exc
    except json.JSONDecodeError as exc:
        raise ValueError(f"Invalid JSON in prompt templates file: {templates_path}") from exc

    prompt_parts = []
    for section_options in templates.values():
        # Skip malformed (non-list) or empty sections instead of failing on them.
        if isinstance(section_options, list) and section_options:
            prompt_parts.append(random.choice(section_options))
    return " ".join(prompt_parts)
def main():
    """Generate the requested number of prompts and write them to the output file."""
    args = parse_args()
    generated = [generate_prompt(args.templates_path) for _ in range(args.num_prompts)]
    try:
        # One prompt per line in the output file.
        with open(args.output_path, "w") as out_file:
            for prompt in generated:
                out_file.write(prompt + "\n")
    except Exception as exc:
        # Best-effort reporting: surface the failure without crashing.
        print(f"Failed to write to {args.output_path}: {exc}")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,96 @@
{
"env": [
"A robotic arm is picking up and stacking cubes inside a foggy industrial scrapyard at dawn, surrounded by piles of old robotic parts and twisted metal. The background includes large magnetic cranes, rusted conveyor belts, and flickering yellow floodlights struggling to penetrate the fog.",
"A robotic arm is picking up and stacking cubes inside a luxury penthouse showroom during sunset. The background includes minimalist designer furniture, a panoramic view of a glowing city skyline, and hovering autonomous drones offering refreshments.",
"A robotic arm is picking up and stacking cubes within an ancient temple-themed robotics exhibit in a museum. The background includes stone columns with hieroglyphic-style etchings, interactive display panels, and a few museum visitors observing silently from behind glass barriers.",
"A robotic arm is picking up and stacking cubes inside a futuristic daycare facility for children. The background includes robotic toys, soft padded walls, holographic storybooks floating in mid-air, and tiny humanoid robots assisting toddlers.",
"A robotic arm is picking up and stacking cubes inside a deep underwater laboratory where pressure-resistant glass panels reveal a shimmering ocean outside. The background includes jellyfish drifting outside the windows, robotic submarines gliding by, and walls lined with wet-surface equipment panels.",
"A robotic arm is picking up and stacking cubes inside a post-apocalyptic lab, partially collapsed and exposed to the open sky. The background includes ruined machinery, exposed rebar, and a distant city skyline covered in ash and fog.",
"A robotic arm is picking up and stacking cubes in a biotech greenhouse surrounded by lush plant life. The background includes rows of bio-engineered plants, misting systems, and hovering inspection drones checking crop health.",
"A robotic arm is picking up and stacking cubes inside a dark, volcanic research outpost. The background includes robotic arms encased in heat-resistant suits, seismic monitors, and distant lava fountains occasionally illuminating the space.",
"A robotic arm is picking up and stacking cubes inside an icy arctic base, with frost-covered walls and equipment glinting under bright artificial white lights. The background includes heavy-duty heaters, control consoles wrapped in thermal insulation, and a large window looking out onto a frozen tundra with polar winds swirling snow outside.",
"A robotic arm is picking up and stacking cubes inside a zero-gravity chamber on a rotating space habitat. The background includes floating lab instruments, panoramic windows showing stars and Earth in rotation, and astronauts monitoring data.",
"A robotic arm is picking up and stacking cubes inside a mystical tech-art installation blending robotics with generative art. The background includes sculptural robotics, shifting light patterns on the walls, and visitors interacting with the exhibit using gestures.",
"A robotic arm is picking up and stacking cubes in a Martian colony dome, under a terraformed red sky filtering through thick glass. The background includes pressure-locked entry hatches, Martian rovers parked outside, and domed hydroponic farms stretching into the distance.",
"A robotic arm is picking up and stacking cubes inside a high-security military robotics testing bunker, with matte green steel walls and strict order. The background includes surveillance cameras, camouflage netting over equipment racks, and military personnel observing from a secure glass-walled control room.",
"A robotic arm is picking up and stacking cubes inside a retro-futuristic robotics lab from the 1980s with checkered floors and analog computer panels. The background includes CRT monitors with green code, rotary dials, printed schematics on the walls, and operators in lab coats typing on clunky terminals.",
"A robotic arm is picking up and stacking cubes inside a sunken ancient ruin repurposed for modern robotics experiments. The background includes carved pillars, vines creeping through gaps in stone, and scattered crates of modern equipment sitting on ancient floors.",
"A robotic arm is picking up and stacking cubes on a luxury interstellar yacht cruising through deep space. The background includes elegant furnishings, ambient synth music systems, and holographic butlers attending to other passengers.",
"A robotic arm is picking up and stacking cubes in a rebellious underground cybernetic hacker hideout. The background includes graffiti-covered walls, tangled wires, makeshift workbenches, and anonymous figures hunched over terminals with scrolling code.",
"A robotic arm is picking up and stacking cubes inside a dense jungle outpost where technology is being tested in extreme organic environments. The background includes humid control panels, vines creeping onto the robotics table, and occasional wildlife observed from a distance by researchers in camo gear.",
"A robotic arm is picking up and stacking cubes in a minimalist Zen tech temple. The background includes bonsai trees on floating platforms, robotic monks sweeping floors silently, and smooth stone pathways winding through digital meditation alcoves."
],
"robot": [
"The robot arm is matte dark green with yellow diagonal hazard stripes along the upper arm; the joints are rugged and chipped, and the hydraulics are exposed with faded red tubing.",
"The robot arm is worn orange with black caution tape markings near the wrist; the elbow joint is dented and the pistons have visible scarring from long use.",
"The robot arm is steel gray with smooth curved panels and subtle blue stripes running down the length; the joints are sealed tight and the hydraulics have a glossy black casing.",
"The robot arm is bright yellow with alternating black bands around each segment; the joints show minor wear, and the hydraulics gleam with fresh lubrication.",
"The robot arm is navy blue with white serial numbers stenciled along the arm; the joints are well-maintained and the hydraulic shafts are matte silver with no visible dirt.",
"The robot arm is deep red with a matte finish and faint white grid lines across the panels; the joints are squared off and the hydraulic units look compact and embedded.",
"The robot arm is dirty white with dark gray speckled patches from wear; the joints are squeaky with exposed rivets, and the hydraulics are rusted at the base.",
"The robot arm is olive green with chipped paint and a black triangle warning icon near the shoulder; the joints are bulky and the hydraulics leak slightly around the seals.",
"The robot arm is bright teal with a glossy surface and silver stripes on the outer edges; the joints rotate smoothly and the pistons reflect a pale cyan hue.",
"The robot arm is orange-red with carbon fiber textures and white racing-style stripes down the forearm; the joints have minimal play and the hydraulics are tightly sealed in synthetic tubing.",
"The robot arm is flat black with uneven camouflage blotches in dark gray; the joints are reinforced and the hydraulic tubes are dusty and loose-fitting.",
"The robot arm is dull maroon with vertical black grooves etched into the panels; the joints show corrosion on the bolts and the pistons are thick and slow-moving.",
"The robot arm is powder blue with repeating geometric patterns printed in light gray; the joints are square and the hydraulic systems are internal and silent.",
"The robot arm is brushed silver with high-gloss finish and blue LED strips along the seams; the joints are shiny and tight, and the hydraulics hiss softly with every movement.",
"The robot arm is lime green with paint faded from sun exposure and white warning labels near each joint; the hydraulics are scraped and the fittings show heat marks.",
"The robot arm is dusty gray with chevron-style black stripes pointing toward the claw; the joints have uneven wear, and the pistons are dented and slightly bent.",
"The robot arm is cobalt blue with glossy texture and stylized angular black patterns across each segment; the joints are clean and the hydraulics show new flexible tubing.",
"The robot arm is industrial brown with visible welded seams and red caution tape wrapped loosely around the middle section; the joints are clunky and the hydraulics are slow and loud.",
"The robot arm is flat tan with dark green splotches and faint stencil text across the forearm; the joints have dried mud stains and the pistons are partially covered in grime.",
"The robot arm is light orange with chrome hexagon detailing and black number codes on the side; the joints are smooth and the hydraulic actuators shine under the lab lights."
],
"table": [
"The robot arm is mounted on a table that is dull gray metal with scratches and scuff marks across the surface; faint rust rings are visible where older machinery used to be mounted.",
"The robot arm is mounted on a table that is smooth black plastic with a matte finish and faint fingerprint smudges near the edges; corners are slightly worn from regular use.",
"The robot arm is mounted on a table that is light oak wood with a natural grain pattern and a glossy varnish that reflects overhead lights softly; small burn marks dot one corner.",
"The robot arm is mounted on a table that is rough concrete with uneven texture and visible air bubbles; some grease stains and faded yellow paint markings suggest heavy usage.",
"The robot arm is mounted on a table that is brushed aluminum with a clean silver tone and very fine linear grooves; surface reflects light evenly, giving a soft glow.",
"The robot arm is mounted on a table that is pale green composite with chipped corners and scratches revealing darker material beneath; tape residue is stuck along the edges.",
"The robot arm is mounted on a table that is dark brown with a slightly cracked synthetic coating; patches of discoloration suggest exposure to heat or chemicals over time.",
"The robot arm is mounted on a table that is polished steel with mirror-like reflections; every small movement of the robot is mirrored faintly across the surface.",
"The robot arm is mounted on a table that is white with a slightly textured ceramic top, speckled with tiny black dots; the surface is clean but the edges are chipped.",
"The robot arm is mounted on a table that is glossy black glass with a deep shine and minimal dust; any lights above are clearly reflected, and fingerprints are visible under certain angles.",
"The robot arm is mounted on a table that is matte red plastic with wide surface scuffs and paint transfer from other objects; faint gridlines are etched into one side.",
"The robot arm is mounted on a table that is dark navy laminate with a low-sheen surface and subtle wood grain texture; the edge banding is slightly peeling off.",
"The robot arm is mounted on a table that is yellow-painted steel with diagonal black warning stripes running along one side; the paint is scratched and faded in high-contact areas.",
"The robot arm is mounted on a table that is translucent pale blue polymer with internal striations and slight glow under overhead lights; small bubbles are frozen inside the material.",
"The robot arm is mounted on a table that is cold concrete with embedded metal panels bolted into place; the surface has oil stains, welding marks, and tiny debris scattered around.",
"The robot arm is mounted on a table that is shiny chrome with heavy smudging and streaks; the table reflects distorted shapes of everything around it, including the arm itself.",
"The robot arm is mounted on a table that is matte forest green with shallow dents and drag marks from prior mechanical operations; a small sticker label is half-torn in one corner.",
"The robot arm is mounted on a table that is textured black rubber with slight give under pressure; scratches from the robot's base and clamp marks are clearly visible.",
"The robot arm is mounted on a table that is medium gray ceramic tile with visible grout lines and chips along the edges; some tiles have tiny cracks or stains.",
"The robot arm is mounted on a table that is old dark wood with faded polish and visible circular stains from spilled liquids; a few deep grooves are carved into the surface near the center."
],
"cubes": [
"The arm is connected to the base mounted on the table. The bottom cube is deep blue, the second cube is bright red, and the top cube is vivid green, maintaining their correct order after stacking."
],
"light": [
"The lighting is soft and diffused from large windows, allowing daylight to fill the room, creating gentle shadows that elongate throughout the space, with a natural warmth due to the sunlight streaming in.",
"Bright fluorescent tubes overhead cast a harsh, even light across the scene, creating sharp, well-defined shadows under the arm and cubes, with a sterile, clinical feel due to the cold white light.",
"Warm tungsten lights in the ceiling cast a golden glow over the table, creating long, soft shadows and a cozy, welcoming atmosphere. The light contrasts with cool blue tones from the robot arm.",
"The lighting comes from several intense spotlights mounted above, each casting focused beams of light that create stark, dramatic shadows around the cubes and the robotic arm, producing a high-contrast look.",
"A single adjustable desk lamp with a soft white bulb casts a directional pool of light over the cubes, causing deep, hard shadows and a quiet, intimate feel in the dimly lit room.",
"The space is illuminated with bright daylight filtering in through a skylight above, casting diffused, soft shadows and giving the scene a clean and natural look, with a cool tint from the daylight.",
"Soft, ambient lighting from hidden LEDs embedded in the ceiling creates a halo effect around the robotic arm, while subtle, elongated shadows stretch across the table surface, giving a sleek modern vibe.",
"Neon strip lights line the walls, casting a cool blue and purple glow across the scene. The robot and table are bathed in this colored light, producing sharp-edged shadows with a futuristic feel.",
"Bright artificial lights overhead illuminate the scene in a harsh white, with scattered, uneven shadows across the table and robot arm. There's a slight yellow hue to the light, giving it an industrial ambiance.",
"Soft morning sunlight spills through a large open window, casting long shadows across the floor and the robot arm. The warm, golden light creates a peaceful, natural atmosphere with a slight coolness in the shadows.",
"Dim ambient lighting with occasional flashes of bright blue light from overhead digital screens creates a high-tech, slightly eerie atmosphere. The shadows are soft, stretching in an almost surreal manner.",
"Lighting from tall lamps outside the room filters in through large glass doors, casting angled shadows across the table and robot arm. The ambient light creates a relaxing, slightly diffused atmosphere.",
"Artificial overhead lighting casts a harsh, stark white light with little warmth, producing sharply defined, almost clinical shadows on the robot arm and cubes. The space feels cold and industrial.",
"Soft moonlight from a large window at night creates a cool, ethereal glow on the table and arm. The shadows are long and faint, and the lighting provides a calm and serene atmosphere.",
"Bright overhead LED panels illuminate the scene with clean, white light, casting neutral shadows that give the environment a modern, sleek feel with minimal distortion or softness in the shadows.",
"A floodlight positioned outside casts bright, almost blinding natural light through an open door, creating high-contrast, sharp-edged shadows across the table and robot arm, adding dramatic tension to the scene.",
"Dim lighting from vintage tungsten bulbs hanging from the ceiling gives the room a warm, nostalgic glow, casting elongated, soft shadows that provide a cozy atmosphere around the robotic arm.",
"Bright fluorescent lights directly above produce a harsh, clinical light that creates sharp, defined shadows on the table and robotic arm, enhancing the industrial feel of the scene.",
"Neon pink and purple lights flicker softly from the walls, illuminating the robot arm with an intense glow that produces sharp, angular shadows across the cubes. The atmosphere feels futuristic and edgy.",
"Sunlight pouring in from a large, open window bathes the table and robotic arm in a warm golden light. The shadows are soft, and the scene feels natural and inviting with a slight contrast between light and shadow."
]
}

View File

@@ -0,0 +1,208 @@
# Copyright (c) 2024-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to convert HDF5 demonstration files to MP4 videos.
This script converts camera frames stored in HDF5 demonstration files to MP4 videos.
It supports multiple camera modalities including RGB, segmentation, and normal maps.
The output videos are saved in the specified directory with appropriate naming.
required arguments:
--input_file Path to the input HDF5 file.
--output_dir Directory to save the output MP4 files.
optional arguments:
--input_keys List of input keys to process from the HDF5 file.
(default: ["table_cam", "wrist_cam", "table_cam_segmentation",
"table_cam_normals", "table_cam_shaded_segmentation"])
--video_height Height of the output video in pixels. (default: 704)
--video_width Width of the output video in pixels. (default: 1280)
--framerate Frames per second for the output video. (default: 30)
"""
import argparse
import os
import cv2
import h5py
import numpy as np
# Constants
# Default output resolution of the generated videos (pixels).
DEFAULT_VIDEO_HEIGHT = 704
DEFAULT_VIDEO_WIDTH = 1280
# Observation keys extracted from the HDF5 demos by default.
DEFAULT_INPUT_KEYS = [
    "table_cam",
    "wrist_cam",
    "table_cam_segmentation",
    "table_cam_normals",
    "table_cam_shaded_segmentation",
    "table_cam_depth",
]
DEFAULT_FRAMERATE = 30
# Directional light used to shade segmentation masks (points along +Z).
LIGHT_SOURCE = np.array([0.0, 0.0, 1.0])
# Depth range used for normalizing depth frames into [0, 1].
MIN_DEPTH = 0.0
MAX_DEPTH = 1.5


def parse_args():
    """Parse command line arguments for the HDF5-to-MP4 conversion."""
    cli = argparse.ArgumentParser(description="Convert HDF5 demonstration files to MP4 videos.")
    cli.add_argument(
        "--input_file",
        type=str,
        required=True,
        help="Path to the input HDF5 file containing demonstration data.",
    )
    cli.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Directory path where the output MP4 files will be saved.",
    )
    cli.add_argument(
        "--input_keys",
        type=str,
        nargs="+",
        default=DEFAULT_INPUT_KEYS,
        help="List of input keys to process.",
    )
    cli.add_argument(
        "--video_height",
        type=int,
        default=DEFAULT_VIDEO_HEIGHT,
        help="Height of the output video in pixels.",
    )
    cli.add_argument(
        "--video_width",
        type=int,
        default=DEFAULT_VIDEO_WIDTH,
        help="Width of the output video in pixels.",
    )
    cli.add_argument(
        "--framerate",
        type=int,
        default=DEFAULT_FRAMERATE,
        help="Frames per second for the output video.",
    )
    return cli.parse_args()
def write_demo_to_mp4(
    hdf5_file,
    demo_id,
    frames_path,
    input_key,
    output_dir,
    video_height,
    video_width,
    framerate=DEFAULT_FRAMERATE,
):
    """Convert frames from an HDF5 file to an MP4 video.

    The output is written to ``<output_dir>/demo_<demo_id>_<input_key>.mp4``.
    The key name drives the per-frame processing: "normals" keys are scaled to
    uint8, "shaded_segmentation" keys are synthesized from the segmentation and
    normals streams, and "depth" keys are normalized to a grayscale video.

    Args:
        hdf5_file (str): Path to the HDF5 file containing the frames.
        demo_id (int): ID of the demonstration to convert.
        frames_path (str): Path to the frames data in the HDF5 file.
        input_key (str): Name of the input key to convert.
        output_dir (str): Directory to save the output MP4 file.
        video_height (int): Height of the output video in pixels.
        video_width (int): Width of the output video in pixels.
        framerate (int, optional): Frames per second for the output video. Defaults to 30.
    """
    with h5py.File(hdf5_file, "r") as f:
        # Get frames based on input key type. Shaded segmentation is not stored
        # in the file; it is derived below from the plain segmentation frames.
        if "shaded_segmentation" in input_key:
            temp_key = input_key.replace("shaded_segmentation", "segmentation")
            frames = f[f"data/demo_{demo_id}/obs/{temp_key}"]
        else:
            frames = f[frames_path + "/" + input_key]
        # Setup video writer; depth videos are single-channel (grayscale).
        output_path = os.path.join(output_dir, f"demo_{demo_id}_{input_key}.mp4")
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        if "depth" in input_key:
            video = cv2.VideoWriter(output_path, fourcc, framerate, (video_width, video_height), isColor=False)
        else:
            video = cv2.VideoWriter(output_path, fourcc, framerate, (video_width, video_height))
        # Process and write frames
        for ix, frame in enumerate(frames):
            # Convert normal maps to uint8 if needed
            # assumes normals are stored as floats in [0, 1] — TODO confirm
            if "normals" in input_key:
                frame = (frame * 255.0).astype(np.uint8)
            # Process shaded segmentation frames: apply Lambertian-style shading
            # from the matching normals stream onto the segmentation colors.
            elif "shaded_segmentation" in input_key:
                # assumes the last channel is alpha and is kept unshaded — TODO confirm
                seg = frame[..., :-1]
                normals_key = input_key.replace("shaded_segmentation", "normals")
                normals = f[f"data/demo_{demo_id}/obs/{normals_key}"][ix]
                # Shade factor in [0, 1]: dot product of normals with LIGHT_SOURCE, remapped.
                shade = 0.5 + (normals * LIGHT_SOURCE[None, None, :]).sum(axis=-1) * 0.5
                shaded_seg = (shade[..., None] * seg).astype(np.uint8)
                frame = np.concatenate((shaded_seg, frame[..., -1:]), axis=-1)
            # Convert RGB to BGR (OpenCV's writer expects BGR ordering).
            if "depth" not in input_key:
                frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            else:
                # Normalize depth to [0, 1] over [MIN_DEPTH, MAX_DEPTH], then
                # invert so near objects appear bright. Near-zero readings are
                # treated as invalid and mapped to the far value first.
                frame = (frame[..., 0] - MIN_DEPTH) / (MAX_DEPTH - MIN_DEPTH)
                frame = np.where(frame < 0.01, 1.0, frame)
                frame = 1.0 - frame
                frame = (frame * 255.0).astype(np.uint8)
            # Resize to video resolution
            frame = cv2.resize(frame, (video_width, video_height), interpolation=cv2.INTER_CUBIC)
            video.write(frame)
        video.release()
def get_num_demos(hdf5_file):
    """Count the demonstrations stored under the top-level "data" group.

    Args:
        hdf5_file (str): Path to the HDF5 file.

    Returns:
        int: Number of demonstrations found in the file.
    """
    with h5py.File(hdf5_file, "r") as data_file:
        # Each child of the "data" group corresponds to one demonstration.
        return len(data_file["data"].keys())
def main():
    """Convert every demonstration in the input file to MP4 videos."""
    args = parse_args()
    # Ensure the destination directory exists before writing any videos.
    os.makedirs(args.output_dir, exist_ok=True)
    num_demos = get_num_demos(args.input_file)
    print(f"Found {num_demos} demonstrations in {args.input_file}")
    # One video per (demonstration, observation key) pair.
    for demo_id in range(num_demos):
        obs_path = f"data/demo_{demo_id}/obs"
        for key in args.input_keys:
            write_demo_to_mp4(
                args.input_file,
                demo_id,
                obs_path,
                key,
                args.output_dir,
                args.video_height,
                args.video_width,
                args.framerate,
            )


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,47 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import os
import h5py
# Command-line interface: the dataset files to merge and the destination path.
parser = argparse.ArgumentParser(description="Merge a set of HDF5 datasets.")
parser.add_argument(
    "--input_files",
    type=str,
    nargs="+",
    default=[],
    help="A list of paths to HDF5 files to merge.",
)
parser.add_argument("--output_file", type=str, default="merged_dataset.hdf5", help="File path to merged output.")
# Parsed at import time so `merge_datasets` can read the options as a module global.
args_cli = parser.parse_args()
def merge_datasets():
    """Merge the episodes of all input HDF5 datasets into a single output file.

    Episodes are renumbered sequentially (``demo_0``, ``demo_1``, ...) in the
    order the input files are listed on the command line. The ``env_args``
    attribute is copied from the first input file only, assuming all datasets
    originate from the same environment configuration.

    Raises:
        FileNotFoundError: If any of the input dataset files does not exist.
    """
    # Validate all inputs up front so we fail before creating the output file.
    for filepath in args_cli.input_files:
        if not os.path.exists(filepath):
            raise FileNotFoundError(f"The dataset file {filepath} does not exist.")
    with h5py.File(args_cli.output_file, "w") as output:
        # Create the "data" group explicitly so attribute writes below cannot
        # fail when the first input file contains no episodes.
        output.require_group("data")
        episode_idx = 0
        copy_attributes = True
        for filepath in args_cli.input_files:
            # Renamed from `input` to avoid shadowing the builtin of that name.
            with h5py.File(filepath, "r") as source:
                # Copy each episode under a new, globally unique demo index.
                for episode in source["data"]:
                    source.copy(f"data/{episode}", output, f"data/demo_{episode_idx}")
                    episode_idx += 1
                if copy_attributes:
                    # Carry over environment metadata from the first dataset only.
                    output["data"].attrs["env_args"] = source["data"].attrs["env_args"]
                    copy_attributes = False
    print(f"Merged dataset saved to {args_cli.output_file}")


if __name__ == "__main__":
    merge_datasets()

View File

@@ -0,0 +1,169 @@
# Copyright (c) 2024-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to create a new dataset by combining existing HDF5 demonstrations with visually augmented MP4 videos.
This script takes an existing HDF5 dataset containing demonstrations and a directory of MP4 videos
that are visually augmented versions of the original demonstration videos (e.g., with different lighting,
color schemes, or visual effects). It creates a new HDF5 dataset that preserves all the original
demonstration data (actions, robot state, etc.) but replaces the video frames with the augmented versions.
required arguments:
--input_file Path to the input HDF5 file containing original demonstrations.
--output_file Path to save the new HDF5 file with augmented videos.
--videos_dir Directory containing the visually augmented MP4 videos.
"""
import argparse
import glob
import os
import cv2
import h5py
import numpy as np
def parse_args():
    """Build and evaluate the command-line interface for the augmentation script."""
    cli = argparse.ArgumentParser(description="Create a new dataset with visually augmented videos.")
    # All three paths are mandatory: source demos, augmented videos, destination file.
    cli.add_argument(
        "--input_file",
        type=str,
        required=True,
        help="Path to the input HDF5 file containing original demonstrations.",
    )
    cli.add_argument(
        "--videos_dir",
        type=str,
        required=True,
        help="Directory containing the visually augmented MP4 videos.",
    )
    cli.add_argument(
        "--output_file",
        type=str,
        required=True,
        help="Path to save the new HDF5 file with augmented videos.",
    )
    return cli.parse_args()
def get_frames_from_mp4(video_path, target_height=None, target_width=None):
    """Extract frames from an MP4 video file.

    Args:
        video_path (str): Path to the MP4 video file.
        target_height (int, optional): Target height for resizing frames. If None, no resizing is done.
        target_width (int, optional): Target width for resizing frames. If None, no resizing is done.

    Returns:
        np.ndarray: Array of frames from the video in RGB format (uint8).
    """
    capture = cv2.VideoCapture(video_path)
    # The container reports how many frames it claims to hold.
    reported_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    resize = target_height is not None and target_width is not None

    collected = []
    for _ in range(reported_count):
        ok, bgr_frame = capture.read()
        if not ok:
            # The reported count can overestimate the decodable frames.
            break
        # OpenCV decodes as BGR; convert to RGB for storage.
        rgb_frame = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)
        if resize:
            rgb_frame = cv2.resize(rgb_frame, (target_width, target_height), interpolation=cv2.INTER_LINEAR)
        collected.append(rgb_frame)

    capture.release()
    return np.array(collected).astype(np.uint8)
def process_video_and_demo(f_in, f_out, video_path, orig_demo_id, new_demo_id):
    """Process a single video and create a new demo with augmented video frames.

    The new demo reuses the original demo's non-visual data (actions, end-effector
    state, gripper state, wrist camera) verbatim; only the table camera stream is
    replaced with the frames decoded from ``video_path``.

    Args:
        f_in (h5py.File): Input HDF5 file.
        f_out (h5py.File): Output HDF5 file.
        video_path (str): Path to the augmented video file.
        orig_demo_id (int): ID of the original demo to copy.
        new_demo_id (int): ID for the new demo.
    """
    # Get original demo data to copy into the new demo unchanged.
    actions = f_in[f"data/demo_{str(orig_demo_id)}/actions"]
    eef_pos = f_in[f"data/demo_{str(orig_demo_id)}/obs/eef_pos"]
    eef_quat = f_in[f"data/demo_{str(orig_demo_id)}/obs/eef_quat"]
    gripper_pos = f_in[f"data/demo_{str(orig_demo_id)}/obs/gripper_pos"]
    wrist_cam = f_in[f"data/demo_{str(orig_demo_id)}/obs/wrist_cam"]
    # Get original video resolution so the augmented frames match it exactly.
    # assumes table_cam is stored as (num_frames, height, width, channels) — TODO confirm
    orig_video = f_in[f"data/demo_{str(orig_demo_id)}/obs/table_cam"]
    target_height, target_width = orig_video.shape[1:3]
    # Extract frames from video with original resolution
    frames = get_frames_from_mp4(video_path, target_height, target_width)
    # Create new datasets under the new demo ID.
    f_out.create_dataset(f"data/demo_{str(new_demo_id)}/actions", data=actions, compression="gzip")
    f_out.create_dataset(f"data/demo_{str(new_demo_id)}/obs/eef_pos", data=eef_pos, compression="gzip")
    f_out.create_dataset(f"data/demo_{str(new_demo_id)}/obs/eef_quat", data=eef_quat, compression="gzip")
    f_out.create_dataset(f"data/demo_{str(new_demo_id)}/obs/gripper_pos", data=gripper_pos, compression="gzip")
    f_out.create_dataset(
        f"data/demo_{str(new_demo_id)}/obs/table_cam", data=frames.astype(np.uint8), compression="gzip"
    )
    f_out.create_dataset(f"data/demo_{str(new_demo_id)}/obs/wrist_cam", data=wrist_cam, compression="gzip")
    # Copy attributes — NOTE(review): video frame count is presumably expected to
    # match the original demo's num_samples; no check is performed here.
    f_out[f"data/demo_{str(new_demo_id)}"].attrs["num_samples"] = f_in[f"data/demo_{str(orig_demo_id)}"].attrs[
        "num_samples"
    ]
def main():
    """Create a new HDF5 dataset that appends demos with augmented table-camera videos.

    Copies every original demo into the output file, then adds one new demo per
    augmented MP4, numbered after the largest existing demo ID.
    """
    # Parse command line arguments
    args = parse_args()
    # Get list of MP4 videos in a deterministic (sorted) order.
    video_paths = sorted(glob.glob(os.path.join(args.videos_dir, "*.mp4")))
    print(f"Found {len(video_paths)} MP4 videos in {args.videos_dir}")
    # Create output directory if it doesn't exist. `os.path.dirname` returns ""
    # for a bare filename, and `os.makedirs("")` raises — guard against that.
    output_dir = os.path.dirname(args.output_file)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    with h5py.File(args.input_file, "r") as f_in, h5py.File(args.output_file, "w") as f_out:
        # Copy all data from input to output
        f_in.copy("data", f_out)
        # Get the largest demo ID to start new demos from (keys are "demo_<N>").
        demo_ids = [int(key.split("_")[1]) for key in f_in["data"].keys()]
        next_demo_id = max(demo_ids) + 1
        print(f"Starting new demos from ID: {next_demo_id}")
        # Process each video and create new demo
        for video_path in video_paths:
            # Extract original demo ID from the "demo_<id>_<key>.mp4" filename.
            video_filename = os.path.basename(video_path)
            orig_demo_id = int(video_filename.split("_")[1])
            process_video_and_demo(f_in, f_out, video_path, orig_demo_id, next_demo_id)
            next_demo_id += 1
    print(f"Augmented data saved to {args.output_file}")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,90 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Convert all mesh files to `.obj` in given folders."""
import argparse
import os
import shutil
import subprocess
# Constants
# Path to the blender executable resolved from PATH.
# NOTE(review): `shutil.which` returns None when blender is not installed —
# callers should verify this before building a command line.
BLENDER_EXE_PATH = shutil.which("blender")
def parse_cli_args():
    """Parse the command-line arguments for the mesh-conversion utility."""
    parser = argparse.ArgumentParser("Utility to convert all mesh files to `.obj` in given folders.")
    parser.add_argument("input_dir", type=str, help="The input directory from which to load meshes.")
    parser.add_argument(
        "-o",
        "--output_dir",
        type=str,
        default=None,
        help="The output directory to save converted meshes into. Default is same as input directory.",
    )
    parsed = parser.parse_args()
    # In-place conversion when no explicit output directory was requested.
    parsed.output_dir = parsed.output_dir if parsed.output_dir is not None else parsed.input_dir
    return parsed
def run_blender_convert2obj(in_file: str, out_file: str):
    """Convert a single mesh file to OBJ by running the Blender helper script.

    Args:
        in_file: Input mesh file.
        out_file: Output obj file.

    Raises:
        RuntimeError: If no ``blender`` executable was found on the PATH.
    """
    # fail loudly instead of building a command that starts with "None"
    if BLENDER_EXE_PATH is None:
        raise RuntimeError("Could not find the 'blender' executable on the PATH.")
    # resolve the helper script that lives next to this file
    tools_dirname = os.path.dirname(os.path.abspath(__file__))
    script_file = os.path.join(tools_dirname, "blender_obj.py")
    # build the command as an argument list so paths containing spaces survive intact
    # (the original split a single string on " ", which breaks such paths)
    command_exe_list = [BLENDER_EXE_PATH, "--background", "--python", script_file, "--", "-i", in_file, "-o", out_file]
    # run command
    subprocess.run(command_exe_list)
def convert_meshes(source_folders: list[str], destination_folders: list[str]):
    """Convert every supported mesh file in the source folders to OBJ via blender.

    Args:
        source_folders: List of directories to search for meshes.
        destination_folders: List of directories to dump converted files.
    """
    # make sure every destination exists up front
    for folder in destination_folders:
        os.makedirs(folder, exist_ok=True)
    # walk source/destination pairs
    for in_folder, out_folder in zip(source_folders, destination_folders):
        # gather convertible files, grouped by suffix (dae first, then stl/STL)
        mesh_filenames = []
        for suffix in ("dae", "stl", "STL"):
            mesh_filenames += [f for f in os.listdir(in_folder) if f.endswith(suffix)]
        # print status
        print(f"Found {len(mesh_filenames)} files to process in directory: {in_folder}")
        # convert each mesh file in turn
        for mesh_file in mesh_filenames:
            stem = os.path.splitext(mesh_file)[0]
            src_path = os.path.join(in_folder, mesh_file)
            dst_path = os.path.join(out_folder, stem + ".obj")
            print("Processing: ", src_path)
            run_blender_convert2obj(src_path, dst_path)
# Entry point: convert every mesh found in the input directory.
if __name__ == "__main__":
    # Parse command line arguments
    args = parse_cli_args()
    # Run conversion (single source folder -> single destination folder)
    convert_meshes([args.input_dir], [args.output_dir])

View File

@@ -0,0 +1,593 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Script to record demonstrations with Isaac Lab environments using human teleoperation.
This script allows users to record demonstrations operated by human teleoperation for a specified task.
The recorded demonstrations are stored as episodes in a hdf5 file. Users can specify the task, teleoperation
device, dataset directory, and environment stepping rate through command-line arguments.
required arguments:
--task Name of the task.
optional arguments:
-h, --help Show this help message and exit
--teleop_device Device for interacting with environment. (default: keyboard)
--dataset_file File path to export recorded demos. (default: "./datasets/dataset.hdf5")
--step_hz Environment stepping rate in Hz. (default: 30)
--num_demos Number of demonstrations to record. (default: 0)
--num_success_steps Number of continuous steps with task success for concluding a demo as successful.
(default: 10)
"""
"""Launch Isaac Sim Simulator first."""
# Standard library imports
import argparse
import contextlib
# Isaac Lab AppLauncher
from isaaclab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Record demonstrations for Isaac Lab environments.")
parser.add_argument("--task", type=str, required=True, help="Name of the task.")
parser.add_argument(
"--teleop_device",
type=str,
default="keyboard",
help=(
"Teleop device. Set here (legacy) or via the environment config. If using the environment config, pass the"
" device key/name defined under 'teleop_devices' (it can be a custom name, not necessarily 'handtracking')."
" Built-ins: keyboard, spacemouse, gamepad. Not all tasks support all built-ins."
),
)
parser.add_argument(
"--dataset_file", type=str, default="./datasets/dataset.hdf5", help="File path to export recorded demos."
)
parser.add_argument("--step_hz", type=int, default=30, help="Environment stepping rate in Hz.")
parser.add_argument(
"--num_demos", type=int, default=0, help="Number of demonstrations to record. Set to 0 for infinite."
)
parser.add_argument(
"--num_success_steps",
type=int,
default=10,
help="Number of continuous steps with task success for concluding a demo as successful. Default is 10.",
)
parser.add_argument(
"--enable_pinocchio",
action="store_true",
default=False,
help="Enable Pinocchio.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# Validate required arguments
if args_cli.task is None:
parser.error("--task is required")
app_launcher_args = vars(args_cli)
if args_cli.enable_pinocchio:
# Import pinocchio before AppLauncher to force the use of the version
# installed by IsaacLab and not the one installed by Isaac Sim.
# pinocchio is required by the Pink IK controllers and the GR1T2 retargeter
import pinocchio # noqa: F401
if "handtracking" in args_cli.teleop_device.lower():
app_launcher_args["xr"] = True
# launch the simulator
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
# Third-party imports
import logging
import os
import sys
import time
import gymnasium as gym
import torch
import omni.ui as ui
from isaaclab.devices import Se3Keyboard, Se3KeyboardCfg, Se3SpaceMouse, Se3SpaceMouseCfg
from isaaclab.devices.openxr import remove_camera_configs
from isaaclab.devices.teleop_device_factory import create_teleop_device
import isaaclab_mimic.envs # noqa: F401
from isaaclab_mimic.ui.instruction_display import InstructionDisplay, show_subtask_instructions
if args_cli.enable_pinocchio:
import isaaclab_tasks.manager_based.locomanipulation.pick_place # noqa: F401
import isaaclab_tasks.manager_based.manipulation.pick_place # noqa: F401
from collections.abc import Callable
from isaaclab.envs import DirectRLEnvCfg, ManagerBasedRLEnvCfg
from isaaclab.envs.mdp.recorders.recorders_cfg import ActionStateRecorderManagerCfg
from isaaclab.envs.ui import EmptyWindow
from isaaclab.managers import DatasetExportMode
# Add workspace root to sys.path so mindrobot_keyboard is importable as a package path.
# record_demos.py lives at scripts/tools/, workspace root is three levels up.
_ws_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if _ws_root not in sys.path:
sys.path.insert(0, _ws_root)
from scripts.environments.teleoperation.mindrobot_keyboard import MindRobotCombinedKeyboard # noqa: E402
import isaaclab_tasks # noqa: F401
import mindbot.tasks # noqa: F401 — registers Isaac-MindRobot-* environments
from isaaclab_tasks.utils.parse_cfg import parse_env_cfg
# import logger
logger = logging.getLogger(__name__)
class RateLimiter:
    """Helper that throttles a loop to a fixed frequency while keeping rendering alive."""

    def __init__(self, hz: int):
        """Set up the limiter for a target loop frequency.

        Args:
            hz: Frequency to enforce in Hertz.
        """
        self.hz = hz
        self.sleep_duration = 1.0 / hz
        # Render at least every 33 ms so the viewport stays responsive while waiting.
        self.render_period = min(0.033, self.sleep_duration)
        self.last_time = time.time()

    def sleep(self, env: gym.Env):
        """Block until the next loop tick, rendering the sim while waiting.

        Args:
            env: Environment to render during sleep periods.
        """
        deadline = self.last_time + self.sleep_duration
        while time.time() < deadline:
            time.sleep(self.render_period)
            env.sim.render()
        self.last_time += self.sleep_duration
        # If the loop fell behind, skip ticks forward instead of trying to catch up.
        if self.last_time < time.time():
            while self.last_time < time.time():
                self.last_time += self.sleep_duration
def setup_output_directories() -> tuple[str, str]:
    """Resolve and prepare the dataset output location.

    Splits ``--dataset_file`` into its directory and extension-less file name,
    creating the directory if it does not already exist.

    Returns:
        tuple[str, str]: A tuple containing:
            - output_dir: The directory path where the dataset will be saved
            - output_file_name: The filename (without extension) for the dataset
    """
    dataset_path = args_cli.dataset_file
    output_dir = os.path.dirname(dataset_path)
    output_file_name = os.path.splitext(os.path.basename(dataset_path))[0]
    # create directory if it does not exist
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        print(f"Created output directory: {output_dir}")
    return output_dir, output_file_name
def create_environment_config(
    output_dir: str, output_file_name: str
) -> tuple[ManagerBasedRLEnvCfg | DirectRLEnvCfg, object | None]:
    """Create and configure the environment configuration.

    Parses the environment configuration and makes necessary adjustments for demo recording.
    Extracts the success termination function and configures the recorder manager.

    Args:
        output_dir: Directory where recorded demonstrations will be saved
        output_file_name: Name of the file to store the demonstrations

    Returns:
        tuple[isaaclab_tasks.utils.parse_cfg.EnvCfg, Optional[object]]: A tuple containing:
            - env_cfg: The configured environment configuration
            - success_term: The success termination object or None if not available

    Raises:
        Exception: If parsing the environment configuration fails
    """
    # parse configuration
    try:
        env_cfg = parse_env_cfg(args_cli.task, device=args_cli.device, num_envs=1)
        # keep only the task name when a "module:Task-Name" spec was passed
        env_cfg.env_name = args_cli.task.split(":")[-1]
    except Exception as e:
        logger.error(f"Failed to parse environment configuration: {e}")
        exit(1)
    # extract success checking function to invoke in the main loop; the term is
    # removed from the terminations so success does not auto-terminate episodes
    success_term = None
    if hasattr(env_cfg.terminations, "success"):
        success_term = env_cfg.terminations.success
        env_cfg.terminations.success = None
    else:
        logger.warning(
            "No success termination term was found in the environment."
            " Will not be able to mark recorded demos as successful."
        )
    if args_cli.xr:
        # If cameras are not enabled and XR is enabled, remove camera configs
        if not args_cli.enable_cameras:
            env_cfg = remove_camera_configs(env_cfg)
        env_cfg.sim.render.antialiasing_mode = "DLSS"
    # modify configuration such that the environment runs indefinitely until
    # the goal is reached or other termination conditions are met
    env_cfg.terminations.time_out = None
    # keep observation terms separate so the recorder can store them individually
    env_cfg.observations.policy.concatenate_terms = False
    # route recorded episodes to the requested dataset file; export succeeded demos only
    env_cfg.recorders: ActionStateRecorderManagerCfg = ActionStateRecorderManagerCfg()
    env_cfg.recorders.dataset_export_dir_path = output_dir
    env_cfg.recorders.dataset_filename = output_file_name
    env_cfg.recorders.dataset_export_mode = DatasetExportMode.EXPORT_SUCCEEDED_ONLY
    return env_cfg, success_term
def create_environment(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg) -> gym.Env:
    """Instantiate the gym environment for the configured task.

    Args:
        env_cfg: Environment configuration produced by ``parse_env_cfg()``.

    Returns:
        gym.Env: The unwrapped environment for ``args_cli.task``.

    Raises:
        Exception: Logged and converted to ``exit(1)`` if creation fails.
    """
    try:
        return gym.make(args_cli.task, cfg=env_cfg).unwrapped
    except Exception as e:
        logger.error(f"Failed to create environment: {e}")
        exit(1)
def setup_teleop_device(callbacks: dict[str, Callable]) -> object:
    """Build the teleoperation device for the session.

    Prefers a device defined in the environment config's ``teleop_devices``;
    otherwise falls back to a built-in device matching ``--teleop_device``.
    Note: reads the module-level ``env_cfg`` set by ``main()``.

    Args:
        callbacks: Mapping from callback keys to functions attached to the device.

    Returns:
        object: The configured teleoperation device interface.
    """
    device_name = args_cli.teleop_device
    teleop_interface = None
    try:
        if hasattr(env_cfg, "teleop_devices") and device_name in env_cfg.teleop_devices.devices:
            # Device fully described by the environment config.
            teleop_interface = create_teleop_device(device_name, env_cfg.teleop_devices.devices, callbacks)
        else:
            logger.warning(f"No teleop device '{device_name}' found in environment config. Creating default.")
            lowered = device_name.lower()
            if lowered == "keyboard":
                teleop_interface = Se3Keyboard(Se3KeyboardCfg(pos_sensitivity=0.2, rot_sensitivity=0.5))
            elif lowered == "spacemouse":
                teleop_interface = Se3SpaceMouse(Se3SpaceMouseCfg(pos_sensitivity=0.2, rot_sensitivity=0.5))
            elif lowered == "mindrobot_keyboard":
                teleop_interface = MindRobotCombinedKeyboard(
                    pos_sensitivity=0.05,
                    rot_sensitivity=0.05,
                    wheel_speed=5.0,
                    sim_device=args_cli.device,
                )
            else:
                logger.error(f"Unsupported teleop device: {device_name}")
                logger.error("Supported devices: keyboard, spacemouse, handtracking")
                exit(1)
            # Fallback devices do not register callbacks themselves; attach them here.
            for key, callback in callbacks.items():
                teleop_interface.add_callback(key, callback)
    except Exception as e:
        logger.error(f"Failed to create teleop device: {e}")
        exit(1)
    if teleop_interface is None:
        logger.error("Failed to create teleop interface")
        exit(1)
    return teleop_interface
def setup_ui(label_text: str, env: gym.Env) -> InstructionDisplay:
    """Create the instruction display and, outside XR, a UI window with labels.

    Args:
        label_text: Text showing the current recording status.
        env: Environment the UI window is attached to.

    Returns:
        InstructionDisplay: The configured display object.
    """
    instruction_display = InstructionDisplay(args_cli.xr)
    # In XR the display handles its own presentation; no desktop window needed.
    if args_cli.xr:
        return instruction_display
    window = EmptyWindow(env, "Instruction")
    with window.ui_window_elements["main_vstack"]:
        demo_label = ui.Label(label_text)
        subtask_label = ui.Label("")
        instruction_display.set_labels(subtask_label, demo_label)
    return instruction_display
def process_success_condition(env: gym.Env, success_term: object | None, success_step_count: int) -> tuple[int, bool]:
    """Update the consecutive-success counter and export the episode once it is long enough.

    Args:
        env: The environment instance to check.
        success_term: The success termination object or None if not available.
        success_step_count: Current count of consecutive successful steps.

    Returns:
        tuple[int, bool]: The updated consecutive-success count and whether a
        reset is needed because the episode was exported as successful.
    """
    if success_term is None:
        return success_step_count, False
    # Evaluate the success term for env 0 on this step.
    step_succeeded = bool(success_term.func(env, **success_term.params)[0])
    if not step_succeeded:
        return 0, False
    success_step_count += 1
    if success_step_count < args_cli.num_success_steps:
        return success_step_count, False
    # Enough consecutive successes: mark the episode successful and export it.
    env.recorder_manager.record_pre_reset([0], force_export_or_skip=False)
    env.recorder_manager.set_success_to_episodes([0], torch.tensor([[True]], dtype=torch.bool, device=env.device))
    env.recorder_manager.export_episodes([0])
    print("Success condition met! Recording completed.")
    return success_step_count, True
def handle_reset(
    env: gym.Env, success_step_count: int, instruction_display: InstructionDisplay, label_text: str
) -> int:
    """Reset the simulation, recorder, and environment, then refresh the display.

    Args:
        env: The environment instance to reset.
        success_step_count: Current consecutive-success counter (always reset to 0).
        instruction_display: The display object to update.
        label_text: Status text shown after the reset.

    Returns:
        int: The reset success-step counter, always 0.
    """
    print("Resetting environment...")
    env.sim.reset()
    env.recorder_manager.reset()
    env.reset()
    instruction_display.show_demo(label_text)
    return 0
def run_simulation_loop(
    env: gym.Env,
    teleop_interface: object | None,
    success_term: object | None,
    rate_limiter: RateLimiter | None,
) -> int:
    """Run the main demo-collection loop.

    Sets up teleop callbacks and the UI, then repeatedly reads teleop commands,
    steps the environment, tracks the success condition, and exports episodes.

    Args:
        env: The environment instance.
        teleop_interface: Optional teleop interface (created here when None).
        success_term: The success termination object or None if not available.
        rate_limiter: Optional rate limiter to control simulation speed.

    Returns:
        int: Number of successful demonstrations recorded.
    """
    current_recorded_demo_count = 0
    success_step_count = 0
    should_reset_recording_instance = False
    # In XR the user explicitly starts recording; otherwise record immediately.
    running_recording_instance = not args_cli.xr

    # Callback closures for the teleop device
    def reset_recording_instance():
        nonlocal should_reset_recording_instance
        should_reset_recording_instance = True
        print("Recording instance reset requested")

    def start_recording_instance():
        nonlocal running_recording_instance
        running_recording_instance = True
        print("Recording started")

    def stop_recording_instance():
        nonlocal running_recording_instance
        running_recording_instance = False
        print("Recording paused")

    def save_success_and_reset():
        """'L' key: manually mark the current episode as successful, export it, then reset."""
        nonlocal should_reset_recording_instance
        print("[record] Manually marking episode as success and saving...")
        env.recorder_manager.record_pre_reset([0], force_export_or_skip=False)
        env.recorder_manager.set_success_to_episodes(
            [0], torch.tensor([[True]], dtype=torch.bool, device=env.device)
        )
        env.recorder_manager.export_episodes([0])
        should_reset_recording_instance = True
        print("[record] Episode saved successfully!")

    # Set up teleoperation callbacks
    teleoperation_callbacks = {
        "R": reset_recording_instance,
        "L": save_success_and_reset,
        "START": start_recording_instance,
        "STOP": stop_recording_instance,
        "RESET": reset_recording_instance,
    }
    # FIX: honor a caller-supplied interface; only build one when none was given
    # (the original unconditionally overwrote the parameter).
    if teleop_interface is None:
        teleop_interface = setup_teleop_device(teleoperation_callbacks)

    # Reset before starting
    env.sim.reset()
    env.reset()
    teleop_interface.reset()

    label_text = f"Recorded {current_recorded_demo_count} successful demonstrations."
    instruction_display = setup_ui(label_text, env)
    subtasks = {}

    # BUGFIX: the original wrote `suppress(KeyboardInterrupt) and torch.inference_mode()`,
    # which enters only the second context manager, so Ctrl-C was never suppressed.
    with contextlib.suppress(KeyboardInterrupt), torch.inference_mode():
        while simulation_app.is_running():
            # Get teleop command and broadcast it to all envs.
            action = teleop_interface.advance()
            actions = action.repeat(env.num_envs, 1)
            if running_recording_instance:
                obv = env.step(actions)
                # Lazily capture the subtask terms once, then display instructions.
                if subtasks is not None:
                    if subtasks == {}:
                        subtasks = obv[0].get("subtask_terms")
                    elif subtasks:
                        show_subtask_instructions(instruction_display, subtasks, obv, env.cfg)
            else:
                env.sim.render()

            # Check for success condition
            success_step_count, success_reset_needed = process_success_condition(env, success_term, success_step_count)
            if success_reset_needed:
                should_reset_recording_instance = True

            # Update demo count if it has changed
            if env.recorder_manager.exported_successful_episode_count > current_recorded_demo_count:
                current_recorded_demo_count = env.recorder_manager.exported_successful_episode_count
                label_text = f"Recorded {current_recorded_demo_count} successful demonstrations."
                print(label_text)

            # Stop once the requested number of demos has been recorded.
            if args_cli.num_demos > 0 and env.recorder_manager.exported_successful_episode_count >= args_cli.num_demos:
                label_text = f"All {current_recorded_demo_count} demonstrations recorded.\nExiting the app."
                instruction_display.show_demo(label_text)
                print(label_text)
                # Keep rendering briefly so the final message is visible.
                target_time = time.time() + 0.8
                while time.time() < target_time:
                    if rate_limiter:
                        rate_limiter.sleep(env)
                    else:
                        env.sim.render()
                break

            # Handle reset if requested
            if should_reset_recording_instance:
                success_step_count = handle_reset(env, success_step_count, instruction_display, label_text)
                should_reset_recording_instance = False

            # Check if simulation is stopped
            if env.sim.is_stopped():
                break

            # Rate limiting
            if rate_limiter:
                rate_limiter.sleep(env)

    return current_recorded_demo_count
def main() -> None:
    """Orchestrate a teleoperated demo-recording session.

    Configures rate limiting, output paths, the environment, and the teleop
    loop, then cleans up once recording is finished.

    Raises:
        Exception: Propagates exceptions from any of the called functions.
    """
    global env_cfg  # setup_teleop_device reads this module-level name

    if args_cli.xr:
        # With hand tracking, pacing comes from OpenXR itself; no explicit limiter.
        rate_limiter = None
        from isaaclab.ui.xr_widgets import TeleopVisualizationManager, XRVisualization

        # Assign the teleop visualization manager to the visualization system
        XRVisualization.assign_manager(TeleopVisualizationManager)
    else:
        rate_limiter = RateLimiter(args_cli.step_hz)

    # Prepare output location, environment config, and the environment itself.
    output_dir, output_file_name = setup_output_directories()
    env_cfg, success_term = create_environment_config(output_dir, output_file_name)
    env = create_environment(env_cfg)

    recorded = run_simulation_loop(env, None, success_term, rate_limiter)

    env.close()
    print(f"Recording session completed with {recorded} successful demonstrations")
    print(f"Demonstrations saved to: {args_cli.dataset_file}")
if __name__ == "__main__":
    # run the main recording loop
    main()
    # close sim app once recording has finished
    simulation_app.close()

View File

@@ -0,0 +1,315 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to replay demonstrations with Isaac Lab environments."""
"""Launch Isaac Sim Simulator first."""
import argparse
from isaaclab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Replay demonstrations in Isaac Lab environments.")
parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to replay episodes.")
parser.add_argument("--task", type=str, default=None, help="Force to use the specified task.")
parser.add_argument(
"--select_episodes",
type=int,
nargs="+",
default=[],
help="A list of episode indices to be replayed. Keep empty to replay all in the dataset file.",
)
parser.add_argument("--dataset_file", type=str, default="datasets/dataset.hdf5", help="Dataset file to be replayed.")
parser.add_argument(
"--validate_states",
action="store_true",
default=False,
help=(
"Validate if the states, if available, match between loaded from datasets and replayed. Only valid if"
" --num_envs is 1."
),
)
parser.add_argument(
"--validate_success_rate",
action="store_true",
default=False,
help="Validate the replay success rate using the task environment termination criteria",
)
parser.add_argument(
"--enable_pinocchio",
action="store_true",
default=False,
help="Enable Pinocchio.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# args_cli.headless = True
if args_cli.enable_pinocchio:
# Import pinocchio before AppLauncher to force the use of the version
# installed by IsaacLab and not the one installed by Isaac Sim.
# pinocchio is required by the Pink IK controllers and the GR1T2 retargeter
import pinocchio # noqa: F401
# launch the simulator
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import contextlib
import os
import gymnasium as gym
import torch
from isaaclab.devices import Se3Keyboard, Se3KeyboardCfg
from isaaclab.utils.datasets import EpisodeData, HDF5DatasetFileHandler
if args_cli.enable_pinocchio:
import isaaclab_tasks.manager_based.locomanipulation.pick_place # noqa: F401
import isaaclab_tasks.manager_based.manipulation.pick_place # noqa: F401
import isaaclab_tasks # noqa: F401
import mindbot.tasks # noqa: F401 — registers Isaac-MindRobot-* environments
from isaaclab_tasks.utils.parse_cfg import parse_env_cfg
# Shared pause flag toggled by the keyboard callbacks below.
is_paused = False


def play_cb():
    """Resume the replay; bound to the "N" key."""
    global is_paused
    is_paused = False


def pause_cb():
    """Pause the replay; bound to the "B" key."""
    global is_paused
    is_paused = True
def compare_states(state_from_dataset, runtime_state, runtime_env_index) -> tuple[bool, str]:
    """Compare states from dataset and runtime.

    Args:
        state_from_dataset: State from dataset (no environment dimension).
        runtime_state: State from runtime (indexed per environment).
        runtime_env_index: Index of the environment in the runtime states to be compared.

    Returns:
        tuple[bool, str]: Whether every state value matched within 0.01, and a
        log message describing any mismatches.

    Raises:
        ValueError: If a state vector's length differs between dataset and runtime.
    """
    # NOTE: the return annotation was `(bool, str)`, which is a tuple object and
    # not a valid type expression; fixed to `tuple[bool, str]`.
    states_matched = True
    output_log = ""
    for asset_type in ["articulation", "rigid_object"]:
        for asset_name in runtime_state[asset_type].keys():
            for state_name in runtime_state[asset_type][asset_name].keys():
                runtime_asset_state = runtime_state[asset_type][asset_name][state_name][runtime_env_index]
                dataset_asset_state = state_from_dataset[asset_type][asset_name][state_name]
                if len(dataset_asset_state) != len(runtime_asset_state):
                    raise ValueError(f"State shape of {state_name} for asset {asset_name} don't match")
                for i in range(len(dataset_asset_state)):
                    # absolute tolerance of 0.01 per scalar value
                    if abs(dataset_asset_state[i] - runtime_asset_state[i]) > 0.01:
                        states_matched = False
                        output_log += f'\tState ["{asset_type}"]["{asset_name}"]["{state_name}"][{i}] don\'t match\r\n'
                        output_log += f"\t Dataset:\t{dataset_asset_state[i]}\r\n"
                        output_log += f"\t Runtime: \t{runtime_asset_state[i]}\r\n"
    return states_matched, output_log
def main():
    """Replay episodes loaded from a file.

    Loads the dataset, rebuilds the environment it was recorded in, and feeds
    the recorded actions back, optionally validating states and success rate.
    """
    global is_paused

    # Load dataset
    if not os.path.exists(args_cli.dataset_file):
        raise FileNotFoundError(f"The dataset file {args_cli.dataset_file} does not exist.")
    dataset_file_handler = HDF5DatasetFileHandler()
    dataset_file_handler.open(args_cli.dataset_file)
    env_name = dataset_file_handler.get_env_name()
    episode_count = dataset_file_handler.get_num_episodes()
    if episode_count == 0:
        print("No episodes found in the dataset.")
        exit()

    # Default to replaying every episode when no selection was given.
    episode_indices_to_replay = args_cli.select_episodes
    if len(episode_indices_to_replay) == 0:
        episode_indices_to_replay = list(range(episode_count))

    # An explicit --task overrides the env name stored in the dataset.
    if args_cli.task is not None:
        env_name = args_cli.task.split(":")[-1]
    if env_name is None:
        raise ValueError("Task/env name was not specified nor found in the dataset.")

    num_envs = args_cli.num_envs
    env_cfg = parse_env_cfg(env_name, device=args_cli.device, num_envs=num_envs)

    # extract success checking function to invoke in the main loop
    success_term = None
    if args_cli.validate_success_rate:
        if hasattr(env_cfg.terminations, "success"):
            success_term = env_cfg.terminations.success
            env_cfg.terminations.success = None
        else:
            print(
                "No success termination term was found in the environment."
                " Will not be able to mark recorded demos as successful."
            )

    # Disable all recorders and terminations
    env_cfg.recorders = {}
    env_cfg.terminations = {}

    # create environment from loaded config
    # BUGFIX: use the resolved env_name (which may come from the dataset) rather than
    # args_cli.task, which is None when --task is not passed.
    env = gym.make(env_name, cfg=env_cfg).unwrapped

    teleop_interface = Se3Keyboard(Se3KeyboardCfg(pos_sensitivity=0.1, rot_sensitivity=0.1))
    teleop_interface.add_callback("N", play_cb)
    teleop_interface.add_callback("B", pause_cb)
    print('Press "B" to pause and "N" to resume the replayed actions.')

    # State validation only works with a single environment.
    state_validation_enabled = False
    if args_cli.validate_states and num_envs == 1:
        state_validation_enabled = True
    elif args_cli.validate_states and num_envs > 1:
        print("Warning: State validation is only supported with a single environment. Skipping state validation.")

    # Get idle action (idle actions are applied to envs without next action)
    if hasattr(env_cfg, "idle_action"):
        idle_action = env_cfg.idle_action.repeat(num_envs, 1)
    else:
        idle_action = torch.zeros(env.action_space.shape)

    # reset before starting
    env.reset()
    teleop_interface.reset()

    episode_names = list(dataset_file_handler.get_episode_names())
    replayed_episode_count = 0
    recorded_episode_count = 0
    # Track current episode indices for each environment
    current_episode_indices = [None] * num_envs
    # Track failed demo IDs
    failed_demo_ids = []

    # BUGFIX: the original wrote `suppress(KeyboardInterrupt) and torch.inference_mode()`,
    # which enters only the second context manager, so Ctrl-C was never suppressed.
    with contextlib.suppress(KeyboardInterrupt), torch.inference_mode():
        while simulation_app.is_running() and not simulation_app.is_exiting():
            env_episode_data_map = {index: EpisodeData() for index in range(num_envs)}
            first_loop = True
            has_next_action = True
            episode_ended = [False] * num_envs
            while has_next_action:
                # BUGFIX: clone the idle action so the per-env writes below do not
                # mutate the shared tensor and leak stale actions into later steps.
                actions = idle_action.clone()
                has_next_action = False
                for env_id in range(num_envs):
                    env_next_action = env_episode_data_map[env_id].get_next_action()
                    if env_next_action is None:
                        # The episode just finished: check success once per episode.
                        if (
                            (success_term is not None)
                            and (current_episode_indices[env_id]) is not None
                            and (not episode_ended[env_id])
                        ):
                            if bool(success_term.func(env, **success_term.params)[env_id]):
                                recorded_episode_count += 1
                                plural_trailing_s = "s" if recorded_episode_count > 1 else ""
                                print(
                                    f"Successfully replayed {recorded_episode_count} episode{plural_trailing_s} out"
                                    f" of {replayed_episode_count} demos."
                                )
                            else:
                                # if not successful, add to failed demo IDs list
                                if (
                                    current_episode_indices[env_id] is not None
                                    and current_episode_indices[env_id] not in failed_demo_ids
                                ):
                                    failed_demo_ids.append(current_episode_indices[env_id])
                            episode_ended[env_id] = True
                        # Pull the next requested episode index, skipping out-of-range ones.
                        next_episode_index = None
                        while episode_indices_to_replay:
                            next_episode_index = episode_indices_to_replay.pop(0)
                            if next_episode_index < episode_count:
                                episode_ended[env_id] = False
                                break
                            next_episode_index = None
                        if next_episode_index is not None:
                            replayed_episode_count += 1
                            current_episode_indices[env_id] = next_episode_index
                            print(f"{replayed_episode_count:4}: Loading #{next_episode_index} episode to env_{env_id}")
                            episode_data = dataset_file_handler.load_episode(
                                episode_names[next_episode_index], env.device
                            )
                            env_episode_data_map[env_id] = episode_data
                            # Set initial state for the new episode
                            initial_state = episode_data.get_initial_state()
                            env.reset_to(initial_state, torch.tensor([env_id], device=env.device), is_relative=True)
                            # Get the first action for the new episode
                            env_next_action = env_episode_data_map[env_id].get_next_action()
                            has_next_action = True
                        else:
                            continue
                    else:
                        has_next_action = True
                    actions[env_id] = env_next_action
                if first_loop:
                    first_loop = False
                else:
                    # Spin on rendering while paused (toggled via the "B"/"N" keys).
                    while is_paused:
                        env.sim.render()
                env.step(actions)
                if state_validation_enabled:
                    state_from_dataset = env_episode_data_map[0].get_next_state()
                    if state_from_dataset is not None:
                        print(
                            f"Validating states at action-index: {env_episode_data_map[0].next_state_index - 1:4}",
                            end="",
                        )
                        current_runtime_state = env.scene.get_state(is_relative=True)
                        states_matched, comparison_log = compare_states(state_from_dataset, current_runtime_state, 0)
                        if states_matched:
                            print("\t- matched.")
                        else:
                            print("\t- mismatched.")
                            print(comparison_log)
            # All selected episodes have been replayed; leave the app loop.
            break

    # Close environment after replay is complete
    plural_trailing_s = "s" if replayed_episode_count > 1 else ""
    print(f"Finished replaying {replayed_episode_count} episode{plural_trailing_s}.")
    # Print success statistics only if validation was enabled
    if success_term is not None:
        print(f"Successfully replayed: {recorded_episode_count}/{replayed_episode_count}")
        # Print failed demo IDs if any
        if failed_demo_ids:
            print(f"\nFailed demo IDs ({len(failed_demo_ids)} total):")
            print(f"  {sorted(failed_demo_ids)}")
    env.close()
if __name__ == "__main__":
    # run the replay loop
    main()
    # close sim app once the replay has finished
    simulation_app.close()

View File

@@ -0,0 +1,169 @@
# Copyright (c) 2024-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Test cases for Cosmos prompt generation script."""
import json
import os
import tempfile
import pytest
from scripts.tools.cosmos.cosmos_prompt_gen import generate_prompt, main
@pytest.fixture(scope="class")
def temp_templates_file():
    """Yield the path to a temporary templates JSON file shared by the class.

    The file contains valid sections plus an empty and an invalid section so
    tests can exercise both the happy path and the error branches. It is
    deleted after the last test in the class finishes.
    """
    # mkstemp + close avoids leaking the OS-level handle that
    # NamedTemporaryFile(delete=False) kept open for the process lifetime.
    fd, path = tempfile.mkstemp(suffix=".json")
    os.close(fd)
    # Create test templates
    test_templates = {
        "lighting": ["with bright lighting", "with dim lighting", "with natural lighting"],
        "color": ["in warm colors", "in cool colors", "in vibrant colors"],
        "style": ["in a realistic style", "in an artistic style", "in a minimalist style"],
        "empty_section": [],  # Test empty section
        "invalid_section": "not a list",  # Test invalid section
    }
    # Write templates to file
    with open(path, "w") as f:
        json.dump(test_templates, f)
    yield path
    # Cleanup
    os.remove(path)
@pytest.fixture
def temp_output_file():
    """Yield the path to an empty temporary .txt output file; delete it afterwards."""
    # mkstemp + close avoids leaking the OS-level handle that
    # NamedTemporaryFile(delete=False) kept open for the process lifetime.
    fd, path = tempfile.mkstemp(suffix=".txt")
    os.close(fd)
    yield path
    # Cleanup
    os.remove(path)
class TestCosmosPromptGen:
    """Test cases for Cosmos prompt generation functionality.

    Relies on the class-scoped ``temp_templates_file`` fixture and the
    per-test ``temp_output_file`` fixture defined above. ``main()`` is driven
    by temporarily patching ``sys.argv``.
    """

    def test_generate_prompt_valid_templates(self, temp_templates_file):
        """Test generating a prompt with valid templates."""
        prompt = generate_prompt(temp_templates_file)
        # Check that prompt is a string
        assert isinstance(prompt, str)
        # Check that prompt contains at least one word
        assert len(prompt.split()) > 0
        # Check that prompt contains valid sections
        # NOTE(review): assumes generate_prompt embeds the section names
        # themselves in the output text — confirm against cosmos_prompt_gen.
        valid_sections = ["lighting", "color", "style"]
        found_sections = [section for section in valid_sections if section in prompt.lower()]
        assert len(found_sections) > 0

    def test_generate_prompt_invalid_file(self):
        """Test generating a prompt with invalid file path."""
        with pytest.raises(FileNotFoundError):
            generate_prompt("nonexistent_file.json")

    def test_generate_prompt_invalid_json(self):
        """Test generating a prompt with invalid JSON file."""
        # Create a temporary file with invalid JSON (delete=False so it
        # survives the `with` block; removed manually below).
        with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as temp_file:
            temp_file.write(b"invalid json content")
            temp_file.flush()
        try:
            with pytest.raises(ValueError):
                generate_prompt(temp_file.name)
        finally:
            os.remove(temp_file.name)

    def test_main_function_single_prompt(self, temp_templates_file, temp_output_file):
        """Test main function with single prompt generation."""
        # Mock command line arguments
        import sys

        original_argv = sys.argv
        sys.argv = [
            "cosmos_prompt_gen.py",
            "--templates_path",
            temp_templates_file,
            "--num_prompts",
            "1",
            "--output_path",
            temp_output_file,
        ]
        try:
            main()
            # Check if output file was created
            assert os.path.exists(temp_output_file)
            # Check content of output file: exactly one non-empty line
            with open(temp_output_file) as f:
                content = f.read().strip()
                assert len(content) > 0
                assert len(content.split("\n")) == 1
        finally:
            # Restore original argv
            sys.argv = original_argv

    def test_main_function_multiple_prompts(self, temp_templates_file, temp_output_file):
        """Test main function with multiple prompt generation."""
        # Mock command line arguments
        import sys

        original_argv = sys.argv
        sys.argv = [
            "cosmos_prompt_gen.py",
            "--templates_path",
            temp_templates_file,
            "--num_prompts",
            "3",
            "--output_path",
            temp_output_file,
        ]
        try:
            main()
            # Check if output file was created
            assert os.path.exists(temp_output_file)
            # Check content of output file: one line per requested prompt
            with open(temp_output_file) as f:
                content = f.read().strip()
                assert len(content) > 0
                assert len(content.split("\n")) == 3
                # Check that each line is a valid (non-empty) prompt
                for line in content.split("\n"):
                    assert len(line) > 0
        finally:
            # Restore original argv
            sys.argv = original_argv

    def test_main_function_default_output(self, temp_templates_file):
        """Test main function with default output path."""
        # Mock command line arguments
        import sys

        original_argv = sys.argv
        sys.argv = ["cosmos_prompt_gen.py", "--templates_path", temp_templates_file, "--num_prompts", "1"]
        try:
            main()
            # Check if default output file was created in the CWD
            assert os.path.exists("prompts.txt")
            # Clean up default output file
            # NOTE(review): if the assert above fails, prompts.txt is left
            # behind in the CWD — consider moving removal to the finally block.
            os.remove("prompts.txt")
        finally:
            # Restore original argv
            sys.argv = original_argv

View File

@@ -0,0 +1,173 @@
# Copyright (c) 2024-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Test cases for HDF5 to MP4 conversion script."""
import os
import tempfile
import h5py
import numpy as np
import pytest
from scripts.tools.hdf5_to_mp4 import get_num_demos, main, write_demo_to_mp4
@pytest.fixture(scope="class")
def temp_hdf5_file():
    """Yield the path to a temporary HDF5 file with two synthetic demos.

    Each demo has an ``obs`` group with RGB, segmentation, normal and depth
    streams of 2 frames at 704x1280. Deleted after the last test in the class.
    """
    # mkstemp + close avoids leaking the OS-level handle that
    # NamedTemporaryFile(delete=False) kept open for the process lifetime.
    fd, path = tempfile.mkstemp(suffix=".h5")
    os.close(fd)
    with h5py.File(path, "w") as h5f:
        # Create test data structure
        for demo_id in range(2):  # Create 2 demos
            demo_group = h5f.create_group(f"data/demo_{demo_id}/obs")
            # Create RGB frames (2 frames per demo)
            rgb_data = np.random.randint(0, 255, (2, 704, 1280, 3), dtype=np.uint8)
            demo_group.create_dataset("table_cam", data=rgb_data)
            # Create segmentation frames (RGBA)
            seg_data = np.random.randint(0, 255, (2, 704, 1280, 4), dtype=np.uint8)
            demo_group.create_dataset("table_cam_segmentation", data=seg_data)
            # Create normal maps
            normals_data = np.random.rand(2, 704, 1280, 3).astype(np.float32)
            demo_group.create_dataset("table_cam_normals", data=normals_data)
            # Create depth maps
            depth_data = np.random.rand(2, 704, 1280, 1).astype(np.float32)
            demo_group.create_dataset("table_cam_depth", data=depth_data)
    yield path
    # Cleanup
    os.remove(path)
@pytest.fixture
def temp_output_dir():
    """Yield a fresh temporary directory; remove it and its contents afterwards."""
    import shutil  # local import: only needed for cleanup

    temp_dir = tempfile.mkdtemp()
    yield temp_dir
    # rmtree handles nested files and subdirectories; the previous
    # per-file os.remove + os.rmdir failed if a test created a subdirectory.
    shutil.rmtree(temp_dir, ignore_errors=True)
class TestHDF5ToMP4:
    """Test cases for HDF5 to MP4 conversion functionality.

    Uses the class-scoped ``temp_hdf5_file`` fixture (two demos, 2 frames
    each at 704x1280) and the per-test ``temp_output_dir`` fixture.
    """

    def test_get_num_demos(self, temp_hdf5_file):
        """Test the get_num_demos function."""
        num_demos = get_num_demos(temp_hdf5_file)
        # The fixture writes exactly two demo groups
        assert num_demos == 2

    def test_write_demo_to_mp4_rgb(self, temp_hdf5_file, temp_output_dir):
        """Test writing RGB frames to MP4."""
        write_demo_to_mp4(temp_hdf5_file, 0, "data/demo_0/obs", "table_cam", temp_output_dir, 704, 1280)
        # Only a smoke check: the file exists and is non-empty
        output_file = os.path.join(temp_output_dir, "demo_0_table_cam.mp4")
        assert os.path.exists(output_file)
        assert os.path.getsize(output_file) > 0

    def test_write_demo_to_mp4_segmentation(self, temp_hdf5_file, temp_output_dir):
        """Test writing segmentation frames to MP4."""
        write_demo_to_mp4(temp_hdf5_file, 0, "data/demo_0/obs", "table_cam_segmentation", temp_output_dir, 704, 1280)
        output_file = os.path.join(temp_output_dir, "demo_0_table_cam_segmentation.mp4")
        assert os.path.exists(output_file)
        assert os.path.getsize(output_file) > 0

    def test_write_demo_to_mp4_normals(self, temp_hdf5_file, temp_output_dir):
        """Test writing normal maps to MP4."""
        write_demo_to_mp4(temp_hdf5_file, 0, "data/demo_0/obs", "table_cam_normals", temp_output_dir, 704, 1280)
        output_file = os.path.join(temp_output_dir, "demo_0_table_cam_normals.mp4")
        assert os.path.exists(output_file)
        assert os.path.getsize(output_file) > 0

    def test_write_demo_to_mp4_shaded_segmentation(self, temp_hdf5_file, temp_output_dir):
        """Test writing shaded_segmentation frames to MP4.

        NOTE(review): the fixture never creates a "table_cam_shaded_segmentation"
        dataset — presumably write_demo_to_mp4 derives it from the RGB and
        segmentation streams; confirm against hdf5_to_mp4.
        """
        write_demo_to_mp4(
            temp_hdf5_file,
            0,
            "data/demo_0/obs",
            "table_cam_shaded_segmentation",
            temp_output_dir,
            704,
            1280,
        )
        output_file = os.path.join(temp_output_dir, "demo_0_table_cam_shaded_segmentation.mp4")
        assert os.path.exists(output_file)
        assert os.path.getsize(output_file) > 0

    def test_write_demo_to_mp4_depth(self, temp_hdf5_file, temp_output_dir):
        """Test writing depth maps to MP4."""
        write_demo_to_mp4(temp_hdf5_file, 0, "data/demo_0/obs", "table_cam_depth", temp_output_dir, 704, 1280)
        output_file = os.path.join(temp_output_dir, "demo_0_table_cam_depth.mp4")
        assert os.path.exists(output_file)
        assert os.path.getsize(output_file) > 0

    def test_write_demo_to_mp4_invalid_demo(self, temp_hdf5_file, temp_output_dir):
        """Test writing with invalid demo ID."""
        with pytest.raises(KeyError):
            write_demo_to_mp4(
                temp_hdf5_file,
                999,  # Invalid demo ID
                "data/demo_999/obs",
                "table_cam",
                temp_output_dir,
                704,
                1280,
            )

    def test_write_demo_to_mp4_invalid_key(self, temp_hdf5_file, temp_output_dir):
        """Test writing with invalid input key."""
        with pytest.raises(KeyError):
            write_demo_to_mp4(temp_hdf5_file, 0, "data/demo_0/obs", "invalid_key", temp_output_dir, 704, 1280)

    def test_main_function(self, temp_hdf5_file, temp_output_dir):
        """Test the main function."""
        # Mock command line arguments
        import sys

        original_argv = sys.argv
        sys.argv = [
            "hdf5_to_mp4.py",
            "--input_file",
            temp_hdf5_file,
            "--output_dir",
            temp_output_dir,
            "--input_keys",
            "table_cam",
            "table_cam_segmentation",
            "--video_height",
            "704",
            "--video_width",
            "1280",
            "--framerate",
            "30",
        ]
        try:
            main()
            # Check if output files were created: one per (demo, key) pair
            expected_files = [
                "demo_0_table_cam.mp4",
                "demo_0_table_cam_segmentation.mp4",
                "demo_1_table_cam.mp4",
                "demo_1_table_cam_segmentation.mp4",
            ]
            for file in expected_files:
                output_file = os.path.join(temp_output_dir, file)
                assert os.path.exists(output_file)
                assert os.path.getsize(output_file) > 0
        finally:
            # Restore original argv
            sys.argv = original_argv

View File

@@ -0,0 +1,181 @@
# Copyright (c) 2024-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Test cases for MP4 to HDF5 conversion script."""
import os
import tempfile
import cv2
import h5py
import numpy as np
import pytest
from scripts.tools.mp4_to_hdf5 import get_frames_from_mp4, main, process_video_and_demo
@pytest.fixture(scope="class")
def temp_hdf5_file():
    """Yield the path to a temporary HDF5 file with two synthetic demos.

    Each demo carries actions, end-effector pose, gripper state and two
    camera streams (10 samples each) plus a ``num_samples`` attribute.
    Deleted after the last test in the class.
    """
    # mkstemp + close avoids leaking the OS-level handle that
    # NamedTemporaryFile(delete=False) kept open for the process lifetime.
    fd, path = tempfile.mkstemp(suffix=".h5")
    os.close(fd)
    with h5py.File(path, "w") as h5f:
        # Create test data structure for 2 demos
        for demo_id in range(2):
            demo_group = h5f.create_group(f"data/demo_{demo_id}")
            obs_group = demo_group.create_group("obs")
            # Create actions data
            actions_data = np.random.rand(10, 7).astype(np.float32)
            demo_group.create_dataset("actions", data=actions_data)
            # Create robot state data
            eef_pos_data = np.random.rand(10, 3).astype(np.float32)
            eef_quat_data = np.random.rand(10, 4).astype(np.float32)
            gripper_pos_data = np.random.rand(10, 1).astype(np.float32)
            obs_group.create_dataset("eef_pos", data=eef_pos_data)
            obs_group.create_dataset("eef_quat", data=eef_quat_data)
            obs_group.create_dataset("gripper_pos", data=gripper_pos_data)
            # Create camera data
            table_cam_data = np.random.randint(0, 255, (10, 704, 1280, 3), dtype=np.uint8)
            wrist_cam_data = np.random.randint(0, 255, (10, 704, 1280, 3), dtype=np.uint8)
            obs_group.create_dataset("table_cam", data=table_cam_data)
            obs_group.create_dataset("wrist_cam", data=wrist_cam_data)
            # Set attributes
            demo_group.attrs["num_samples"] = 10
    yield path
    # Cleanup
    os.remove(path)
@pytest.fixture(scope="class")
def temp_videos_dir():
    """Create two synthetic MP4s; yield ``(directory, [video_paths])``.

    Each video has 10 random 704x1280 BGR frames at 30 fps. The whole
    directory is removed after the last test in the class.
    """
    import shutil  # local import: only needed for cleanup

    temp_dir = tempfile.mkdtemp()
    video_paths = []
    for demo_id in range(2):
        video_path = os.path.join(temp_dir, f"demo_{demo_id}_table_cam.mp4")
        video_paths.append(video_path)
        # Create a test video
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        video = cv2.VideoWriter(video_path, fourcc, 30, (1280, 704))
        # Write some random frames
        for _ in range(10):
            frame = np.random.randint(0, 255, (704, 1280, 3), dtype=np.uint8)
            video.write(frame)
        video.release()
    yield temp_dir, video_paths
    # rmtree also removes any extra files a test may have written into the
    # directory; the previous per-file remove + rmdir failed in that case.
    shutil.rmtree(temp_dir, ignore_errors=True)
@pytest.fixture
def temp_output_file():
    """Yield the path to an empty temporary .h5 output file; delete it afterwards."""
    # mkstemp + close avoids leaking the OS-level handle that
    # NamedTemporaryFile(delete=False) kept open for the process lifetime.
    fd, path = tempfile.mkstemp(suffix=".h5")
    os.close(fd)
    yield path
    # Cleanup
    os.remove(path)
class TestMP4ToHDF5:
    """Test cases for MP4 to HDF5 conversion functionality.

    Uses the class-scoped ``temp_hdf5_file`` (two demos of 10 samples) and
    ``temp_videos_dir`` (two 10-frame MP4s) fixtures defined above.
    """

    def test_get_frames_from_mp4(self, temp_videos_dir):
        """Test extracting frames from MP4 video."""
        _, video_paths = temp_videos_dir
        frames = get_frames_from_mp4(video_paths[0])
        # Check frame properties
        assert frames.shape[0] == 10  # Number of frames
        assert frames.shape[1:] == (704, 1280, 3)  # Frame dimensions
        assert frames.dtype == np.uint8  # Data type

    def test_get_frames_from_mp4_resize(self, temp_videos_dir):
        """Test extracting frames with resizing."""
        _, video_paths = temp_videos_dir
        target_height, target_width = 352, 640
        frames = get_frames_from_mp4(video_paths[0], target_height, target_width)
        # Check resized frame properties
        assert frames.shape[0] == 10  # Number of frames
        assert frames.shape[1:] == (target_height, target_width, 3)  # Resized dimensions
        assert frames.dtype == np.uint8  # Data type

    def test_process_video_and_demo(self, temp_hdf5_file, temp_videos_dir, temp_output_file):
        """Test processing a single video and creating a new demo."""
        _, video_paths = temp_videos_dir
        # Args (per call below): source file, destination file, video path,
        # source demo id 0, new demo id 2 — confirm against mp4_to_hdf5.
        with h5py.File(temp_hdf5_file, "r") as f_in, h5py.File(temp_output_file, "w") as f_out:
            process_video_and_demo(f_in, f_out, video_paths[0], 0, 2)
            # Check if new demo was created with correct data
            assert "data/demo_2" in f_out
            assert "data/demo_2/actions" in f_out
            assert "data/demo_2/obs/eef_pos" in f_out
            assert "data/demo_2/obs/eef_quat" in f_out
            assert "data/demo_2/obs/gripper_pos" in f_out
            assert "data/demo_2/obs/table_cam" in f_out
            assert "data/demo_2/obs/wrist_cam" in f_out
            # Check data shapes
            assert f_out["data/demo_2/actions"].shape == (10, 7)
            assert f_out["data/demo_2/obs/eef_pos"].shape == (10, 3)
            assert f_out["data/demo_2/obs/eef_quat"].shape == (10, 4)
            assert f_out["data/demo_2/obs/gripper_pos"].shape == (10, 1)
            assert f_out["data/demo_2/obs/table_cam"].shape == (10, 704, 1280, 3)
            assert f_out["data/demo_2/obs/wrist_cam"].shape == (10, 704, 1280, 3)
            # Check attributes
            assert f_out["data/demo_2"].attrs["num_samples"] == 10

    def test_main_function(self, temp_hdf5_file, temp_videos_dir, temp_output_file):
        """Test the main function."""
        # Mock command line arguments
        import sys

        original_argv = sys.argv
        sys.argv = [
            "mp4_to_hdf5.py",
            "--input_file",
            temp_hdf5_file,
            "--videos_dir",
            temp_videos_dir[0],  # fixture yields (dir, paths); [0] is the dir
            "--output_file",
            temp_output_file,
        ]
        try:
            main()
            # Check if output file was created with correct data
            with h5py.File(temp_output_file, "r") as f:
                # Check if original demos were copied
                assert "data/demo_0" in f
                assert "data/demo_1" in f
                # Check if new demos were created (one per input video)
                assert "data/demo_2" in f
                assert "data/demo_3" in f
                # Check data in new demos
                for demo_id in [2, 3]:
                    assert f"data/demo_{demo_id}/actions" in f
                    assert f"data/demo_{demo_id}/obs/eef_pos" in f
                    assert f"data/demo_{demo_id}/obs/eef_quat" in f
                    assert f"data/demo_{demo_id}/obs/gripper_pos" in f
                    assert f"data/demo_{demo_id}/obs/table_cam" in f
                    assert f"data/demo_{demo_id}/obs/wrist_cam" in f
        finally:
            # Restore original argv
            sys.argv = original_argv

View File

@@ -0,0 +1,414 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Script to manage pretrained checkpoints for Isaac Lab environments.
This script is used to train and publish pretrained checkpoints for Isaac Lab environments.
It supports multiple workflows: rl_games, rsl_rl, sb3, and skrl.
* To train an agent using the rl_games workflow on the Isaac-Cartpole-v0 environment:
.. code-block:: shell
python scripts/tools/train_and_publish_checkpoints.py --train rl_games:Isaac-Cartpole-v0
* To train and publish the checkpoints for all workflows on only the direct Cartpole environments:
.. code-block:: shell
python scripts/tools/train_and_publish_checkpoints.py \
-tp "*:Isaac-Cartpole-*Direct-v0" \
--/persistent/isaaclab/asset_root/pretrained_checkpoints="/some/path"
* To review all repose cube jobs, excluding the 'Play' tasks and 'skrl' workflows:
.. code-block:: shell
python scripts/tools/train_and_publish_checkpoints.py \
-r "*:*Repose-Cube*" \
--exclude "*:*Play*" \
--exclude skrl:*
* To publish all results (that have been reviewed and approved).
.. code-block:: shell
python scripts/tools/train_and_publish_checkpoints.py \
--publish --all \
--/persistent/isaaclab/asset_root/pretrained_checkpoints="/some/path"
"""
import argparse
from isaaclab.app import AppLauncher
# Initialize the parser
parser = argparse.ArgumentParser(
description="""
Script for training and publishing pre-trained checkpoints in Isaac Lab.
Examples:
# Train an agent using the rl_games workflow for the Isaac-Cartpole-v0 environment.
train_and_publish_checkpoints.py --train rl_games:Isaac-Cartpole-v0
# Train and publish checkpoints for all workflows, targeting only direct Cartpole environments.
train_and_publish_checkpoints.py -tp "*:Isaac-Cartpole-*Direct-v0" \\
--/persistent/isaaclab/asset_root/pretrained_checkpoints="/some/path"
# Review all Repose Cube jobs, excluding Play tasks and skrl jobs.
train_and_publish_checkpoints.py -r "*:*Repose-Cube*" --exclude "*:*Play*" --exclude skrl:*
# Publish all results that have been reviewed and approved.
train_and_publish_checkpoints.py --publish --all \\
--/persistent/isaaclab/asset_root/pretrained_checkpoints="/some/path"
""",
formatter_class=argparse.RawTextHelpFormatter,
)
# Add positional arguments that can accept zero or more values
parser.add_argument(
"jobs",
nargs="*",
help="""
A job consists of a workflow and a task name, separated by a colon (wildcards are optional). Examples:
rl_games:Isaac-Humanoid-*v0 # Wildcard for any Humanoid version
rsl_rl:Isaac-Ant-*-v0 # Wildcard for any Ant environment
*:Isaac-Velocity-Flat-Spot-v0 # Wildcard for any workflow, specific task
Wildcards can be used in either the workflow or task name to match multiple entries.
""",
)
parser.add_argument("-t", "--train", action="store_true", help="Train checkpoints for later publishing.")
parser.add_argument("-p", "--publish_checkpoint", action="store_true", help="Publish pre-trained checkpoints.")
parser.add_argument("-r", "--review", action="store_true", help="Review checkpoints.")
parser.add_argument("-l", "--list", action="store_true", help="List all available environments and workflows.")
parser.add_argument("-f", "--force", action="store_true", help="Force training when results already exist.")
parser.add_argument("-a", "--all", action="store_true", help="Run all valid workflow task pairs.")
parser.add_argument(
"-E",
"--exclude",
action="append",
type=str,
default=[],
help="Excludes jobs matching the argument, with wildcard support.",
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--force_review", action="store_true", help="Forces review when one already exists.")
parser.add_argument("--force_publish", action="store_true", help="Publish checkpoints without review.")
parser.add_argument("--headless", action="store_true", help="Run training without the UI.")
args, _ = parser.parse_known_args()
# Need something to do
if len(args.jobs) == 0 and not args.all:
parser.error("Jobs must be provided, or --all.")
# Must train, publish, review or list
if not (args.train or args.publish_checkpoint or args.review or args.list):
parser.error("A train, publish, review or list flag must be given.")
# List excludes train and publish
if args.list and (args.train or args.publish_checkpoint):
parser.error("Can't train or publish when listing.")
# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
import csv
# Now everything else
import fnmatch
import json
import os
import subprocess
import sys
import gymnasium as gym
import numpy as np
import omni.client
from omni.client._omniclient import CopyBehavior
from isaaclab_rl.utils.pretrained_checkpoint import (
WORKFLOW_EXPERIMENT_NAME_VARIABLE,
WORKFLOW_PLAYER,
WORKFLOW_TRAINER,
WORKFLOWS,
get_log_root_path,
get_pretrained_checkpoint_path,
get_pretrained_checkpoint_publish_path,
get_pretrained_checkpoint_review,
get_pretrained_checkpoint_review_path,
has_pretrained_checkpoint_job_finished,
has_pretrained_checkpoint_job_run,
has_pretrained_checkpoints_asset_root_dir,
)
# Need somewhere to publish
if args.publish_checkpoint and not has_pretrained_checkpoints_asset_root_dir():
raise Exception("A /persistent/isaaclab/asset_root/pretrained_checkpoints setting is required to publish.")
def train_job(workflow, task_name, headless=False, force=False, num_envs=None):
    """Launch the workflow's train.py for a task in a subprocess.

    The experiment name is overridden (where the workflow supports it) so each
    task logs to a unique directory. By default the job is skipped if a
    previous experiment already exists in the log root.

    Args:
        workflow: The workflow.
        task_name: The task name.
        headless: Should the training run without the UI.
        force: Run training even if previous experiments have been run.
        num_envs: How many simultaneous environments to simulate, overriding the config.
    """
    log_dir = get_log_root_path(workflow, task_name)
    # Skip when a prior run left results behind (unless forced).
    if not force and os.path.exists(log_dir) and len(os.listdir(log_dir)) > 0:
        print(f"Skipping training of {workflow}:{task_name}, already has been run")
        return
    print(f"Training {workflow}:{task_name}")
    # Assemble the trainer command line.
    command = [sys.executable, WORKFLOW_TRAINER[workflow], "--task", task_name, "--enable_cameras"]
    # Redirect the experiment/log directory name when the workflow exposes a variable for it.
    experiment_var = WORKFLOW_EXPERIMENT_NAME_VARIABLE[workflow]
    if experiment_var:
        command.append(f"{experiment_var}={task_name}")
    if headless:
        command.append("--headless")
    if num_envs:
        command += ["--num_envs", str(num_envs)]
    print("Running : " + " ".join(command))
    subprocess.run(command)
def review_pretrained_checkpoint(workflow, task_name, force_review=False, num_envs=None):
    """Interactively review a pretrained checkpoint.

    The play.py script for the workflow is run so the user can inspect the
    policy. When the simulator is closed, the user is prompted for a verdict
    (accepted / rejected / undetermined) and optional notes, which are saved
    to the checkpoint's review JSON file.

    Args:
        workflow: The workflow.
        task_name: The task name.
        force_review: Performs the review even if a review already exists.
        num_envs: How many simultaneous environments to simulate, overriding the config.
    """
    # This workflow task pair hasn't been trained
    if not has_pretrained_checkpoint_job_run(workflow, task_name):
        print(f"Skipping review of {workflow}:{task_name}, hasn't been trained yet")
        return
    # Couldn't find the checkpoint
    if not has_pretrained_checkpoint_job_finished(workflow, task_name):
        print(f"Training not complete for {workflow}:{task_name}")
        return
    review = get_pretrained_checkpoint_review(workflow, task_name)
    if not force_review and review and review["reviewed"]:
        print(f"Review already complete for {workflow}:{task_name}")
        return
    print(f"Reviewing {workflow}:{task_name}")
    # Construct our command
    cmd = [
        sys.executable,
        WORKFLOW_PLAYER[workflow],
        "--task",
        task_name,
        "--checkpoint",
        get_pretrained_checkpoint_path(workflow, task_name),
        "--enable_cameras",
    ]
    if num_envs:
        cmd.extend(["--num_envs", str(num_envs)])
    print("Running : " + " ".join(cmd))
    subprocess.run(cmd)
    # Give user a chance to keep the old review
    if force_review and review and review["reviewed"]:
        result = review["result"]
        notes = review.get("notes")
        print(f"A review already exists for {workflow}:{task_name}, it was marked as '{result}'.")
        print(f" Notes: {notes}")
        answer = input("Would you like to replace it? Please answer yes or no (y/n) [n]: ").strip().lower()
        if answer != "y":
            return
    # Get the verdict from the user
    print(f"Do you accept this checkpoint for {workflow}:{task_name}?")
    answer = input("Please answer yes, no or undetermined (y/n/u) [u]: ").strip().lower()
    if answer not in {"y", "n", "u"}:
        answer = "u"
    answer_map = {
        "y": "accepted",
        "n": "rejected",
        "u": "undetermined",
    }
    # Create the review dict
    review = {
        "reviewed": True,
        "result": answer_map[answer],
    }
    # Maybe add some notes.
    # BUGFIX: do not lower-case free-text notes — only the y/n/u answers need
    # case normalization; the previous `.lower()` destroyed the user's casing.
    notes = input("Please add notes or hit enter: ").strip()
    if notes:
        review["notes"] = notes
    # Save the review JSON file
    path = get_pretrained_checkpoint_review_path(workflow, task_name)
    if not path:
        raise Exception("This shouldn't be possible, something went very wrong.")
    with open(path, "w") as f:
        json.dump(review, f, indent=4)
def publish_pretrained_checkpoint(workflow, task_name, force_publish=False):
    """Copy a pretrained checkpoint to the Nucleus asset root.

    The destination is derived from the
    /persistent/isaaclab/asset_root/pretrained_checkpoints Carb variable.
    Unless forced, only checkpoints whose review verdict is "accepted" are
    published.

    Args:
        workflow: The workflow.
        task_name: The task name.
        force_publish: Publish without review.
    """
    # Nothing to publish if the job never ran.
    if not has_pretrained_checkpoint_job_run(workflow, task_name):
        print(f"Skipping publishing of {workflow}:{task_name}, hasn't been trained yet")
        return
    # Nothing to publish if training never produced a checkpoint.
    if not has_pretrained_checkpoint_job_finished(workflow, task_name):
        print(f"Training not complete for {workflow}:{task_name}")
        return
    # Resolve the local checkpoint to copy.
    local_path = get_pretrained_checkpoint_path(workflow, task_name)
    if not local_path:
        raise Exception("This shouldn't be possible, something went very wrong.")
    # Gate on the review verdict unless the caller forces publication.
    if not force_publish:
        review = get_pretrained_checkpoint_review(workflow, task_name)
        if not review or not review["reviewed"]:
            print(f"Skipping publishing of {workflow}:{task_name}, hasn't been reviewed yet")
            return
        result = review["result"]
        if result != "accepted":
            print(f'Skipping publishing of {workflow}:{task_name}, review result was "{result}"')
            return
    print(f"Publishing {workflow}:{task_name}")
    # Copy to Nucleus, overwriting any previously published checkpoint.
    destination = get_pretrained_checkpoint_publish_path(workflow, task_name)
    omni.client.copy_file(local_path, destination, CopyBehavior.OVERWRITE)
def get_job_summary_row(workflow, task_name):
    """Return one summary row: [workflow, task, ran, finished, review result, notes]."""
    review = get_pretrained_checkpoint_review(workflow, task_name)
    # An absent review shows up as empty result/notes columns.
    result = review.get("result", "undetermined") if review else ""
    notes = review.get("notes", "") if review else ""
    return [
        workflow,
        task_name,
        has_pretrained_checkpoint_job_run(workflow, task_name),
        has_pretrained_checkpoint_job_finished(workflow, task_name),
        result,
        notes,
    ]
def main():
    """Dispatch list / train / review / publish over all matching workflow:task jobs.

    Iterates every (workflow, registered task) pair, filters it against the
    job patterns and --exclude patterns from the command line, and runs the
    requested action(s). With --list, prints a CSV summary instead.
    """
    # Figure out what workflows and tasks we'll be using
    if args.all:
        jobs = ["*:*"]
    else:
        jobs = args.jobs
    if args.list:
        print()
        print("# Workflow, Task, Ran, Finished, Review, Notes")
    summary_rows = []
    # Could be implemented more efficiently, but the performance gain would be inconsequential
    for workflow in WORKFLOWS:
        for task_spec in sorted(gym.registry.values(), key=lambda t: t.id):
            job_id = f"{workflow}:{task_spec.id}"
            # We've excluded this job
            if any(fnmatch.fnmatch(job_id, e) for e in args.exclude):
                continue
            # None of our jobs match this pair.
            # (builtin any() over a generator — short-circuits and avoids the
            # previous needless numpy round-trip through np.any/np.array)
            if not any(fnmatch.fnmatch(job_id, job) for job in jobs):
                continue
            # No config for this workflow
            if workflow + "_cfg_entry_point" not in task_spec.kwargs:
                continue
            if args.list:
                summary_rows.append(get_job_summary_row(workflow, task_spec.id))
                continue
            # Training, reviewing and publishing
            if args.train:
                train_job(workflow, task_spec.id, args.headless, args.force, args.num_envs)
            if args.review:
                review_pretrained_checkpoint(workflow, task_spec.id, args.force_review, args.num_envs)
            if args.publish_checkpoint:
                publish_pretrained_checkpoint(workflow, task_spec.id, args.force_publish)
    if args.list:
        writer = csv.writer(sys.stdout, quotechar='"', quoting=csv.QUOTE_MINIMAL)
        writer.writerows(summary_rows)
if __name__ == "__main__":
    # `except Exception as e: raise e` was a no-op that rewrote the traceback
    # origin; a plain try/finally propagates the exception untouched while
    # still guaranteeing the app is closed.
    try:
        # Run the main function
        main()
    finally:
        # Close the app
        simulation_app.close()

View File

@@ -0,0 +1,39 @@
import isaaclab.sim as sim_utils
from isaaclab.actuators import ImplicitActuatorCfg
from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg
from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR
from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg
from isaaclab.sim.schemas.schemas_cfg import (
RigidBodyPropertiesCfg,
CollisionPropertiesCfg,
)
from mindbot.utils.assets import MINDBOT_ASSETS_DIR
# Articulation configuration for the drying-box device asset.
DRYINGBOX_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        # FIXME(review): hard-coded absolute Windows path — breaks on any other
        # machine. Should be derived from an asset-root constant (see
        # MINDBOT_ASSETS_DIR imported above); note the root here is
        # "maic_usd_assets_moudle", not the same tree as MINDBOT_ASSETS_DIR —
        # confirm the correct location before switching.
        usd_path=r"c:\Users\PC\workpalce\maic_usd_assets_moudle\devices\DryingBox\Equipment_BB_13.usd",
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=True, max_depenetration_velocity=1.0
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=False,
            solver_position_iteration_count=32,
            solver_velocity_iteration_count=16,
            stabilization_threshold=1e-6,
        ),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        pos=[1.95, -0.45, 0.9], rot=[-0.7071, 0.0, 0.0, 0.7071]
    ),
    # actuators={}
    actuators={
        "passive_damper": ImplicitActuatorCfg(
            # Matches only the joint literally named "RevoluteJoint" in the USD.
            # (The original note said ".*" would match every joint in the file —
            # wheels, screen hinges, etc. — but a single explicit name is used.)
            joint_names_expr=["RevoluteJoint"],
            stiffness=10000.0,
            damping=1000.0,
            effort_limit_sim=10000.0,
            velocity_limit_sim=100.0,
        ),
    },
)

View File

@@ -0,0 +1,18 @@
import isaaclab.sim as sim_utils
from isaaclab.actuators import ImplicitActuatorCfg
from isaaclab.assets import ArticulationCfg, AssetBaseCfg
from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR
from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg
from isaaclab.sim.schemas.schemas_cfg import (
RigidBodyPropertiesCfg,
CollisionPropertiesCfg,
)
from mindbot.utils.assets import MINDBOT_ASSETS_DIR
# Static lab-room scene asset (visual scene only; no rigid/articulation props).
ROOM_CFG = AssetBaseCfg(
    # prim_path="{ENV_REGEX_NS}/Room",
    spawn=UsdFileCfg(
        # Forward slashes keep the path portable (the previous "\\" separators
        # were Windows-only) and match the separator convention used by the
        # other asset configs in this package.
        usd_path=f"{MINDBOT_ASSETS_DIR}/sences/Lab/lab.usd",
    ),
)

View File

@@ -0,0 +1,32 @@
import isaaclab.sim as sim_utils
from isaaclab.actuators import ImplicitActuatorCfg
from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg
from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR
from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg
from isaaclab.sim.schemas.schemas_cfg import (
RigidBodyPropertiesCfg,
CollisionPropertiesCfg,
)
from mindbot.utils.assets import MINDBOT_ASSETS_DIR
# Static table asset with rigid-body physics and collisions enabled.
TABLE_CFG = AssetBaseCfg(
    # prim_path="{ENV_REGEX_NS}/Table",
    init_state=AssetBaseCfg.InitialStateCfg(
        pos=[1.95, -0.3, 0.005],
        rot=[0.7071, 0.0, 0.0, -0.7071],
    ),
    spawn=UsdFileCfg(
        # Forward slashes keep the path portable (the previous "\\" separators
        # were Windows-only) and match the separator convention used by the
        # other asset configs in this package.
        usd_path=f"{MINDBOT_ASSETS_DIR}/sences/Table_C/Table_C.usd",
        rigid_props=RigidBodyPropertiesCfg(
            rigid_body_enabled=True,
            solver_position_iteration_count=32,
            solver_velocity_iteration_count=16,
            disable_gravity=False,
        ),
        collision_props=CollisionPropertiesCfg(
            collision_enabled=True,
            contact_offset=0.0005,  # original 0.02
            rest_offset=0,
        ),
    ),
)

View File

@@ -11,16 +11,15 @@ from isaaclab.actuators import ImplicitActuatorCfg
from isaaclab.assets import ArticulationCfg
from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR
from mindbot.utils.assets import MINDBOT_ASSETS_DIR
##
# Configuration
##
MINDBOT_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
# 1. UPDATE THE USD PATH to your local file
# C:\Users\PC\workpalce\maic_usd_assets\twinlab\MIC_sim-3.0\pupet_arm_sim_product\sim_data_acquisition\usds\Collected_robot_2_3\robot_2_4.usd
usd_path="C:/Users/PC/workpalce/maic_usd_assets/robots/mindrobot-1105-lab/mindrobot_cd2.usd",
# usd_path="C:/Users/PC/workpalce/maic_usd_assets/twinlab/MIC_sim-3.0/pupet_arm_sim_product/sim_data_acquisition/usds/Collected_robot_2_3/robot_2_6.usd",
usd_path=f"{MINDBOT_ASSETS_DIR}/robots/mindrobot-1105-lab/mindrobot_cd2.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=1.0,
@@ -36,8 +35,8 @@ MINDBOT_CFG = ArticulationCfg(
),
collision_props=sim_utils.CollisionPropertiesCfg(
collision_enabled=True,
contact_offset=0.0005, # 尝试从默认值增大到 0.01 或 0.02
rest_offset=0, # 保持略小于 contact_offset
contact_offset=0.0005, # 尝试从默认值增大到 0.01 或 0.02
rest_offset=0, # 保持略小于 contact_offset
),
),
init_state=ArticulationCfg.InitialStateCfg(
@@ -45,7 +44,7 @@ MINDBOT_CFG = ArticulationCfg(
# All are set to 0.0 here, but you should adjust them for a suitable "home" position.
joint_pos={
# Left arm joints
# Left arm joints
# Left arm joints
# 135° -> 2.3562
"l_joint1": 2.3562,
# -70° -> -1.2217
@@ -65,7 +64,7 @@ MINDBOT_CFG = ArticulationCfg(
# "l_joint4": 0.0,
# "l_joint5": 0.0,
# "l_joint6": 0.0,
# Right arm joints
# Right arm joints
# # -45° -> -0.7854
# "r_joint1": -0.7854,
# # 70° -> 1.2217
@@ -95,15 +94,15 @@ MINDBOT_CFG = ArticulationCfg(
# left gripper
# 注意:如果希望初始状态为完全打开,应该设置为 left=0.0, right=0.0
# 当前设置为 right=0.01 表示略微闭合状态
"left_hand_joint_left": 0.0, # 范围:(-0.03, 0.0)0.0=打开,-0.03=闭合
"left_hand_joint_left": 0.0, # 范围:(-0.03, 0.0)0.0=打开,-0.03=闭合
"left_hand_joint_right": 0.0, # 范围:(0.0, 0.03)0.0=打开0.03=闭合
# right gripper
"right_hand_joint_left": 0.0,
"right_hand_joint_right": 0.0,
# trunk
"PrismaticJoint": 0.23, # 0.30
"PrismaticJoint": 0.23, # 0.30
# head
"head_revoluteJoint": 0.0 #0.5236
"head_revoluteJoint": 0.0, # 0.5236
},
# The initial (x, y, z) position of the robot's base in the world
pos=(0, 0, 0.05),
@@ -112,40 +111,46 @@ MINDBOT_CFG = ArticulationCfg(
# 3. DEFINE ACTUATOR GROUPS FOR THE ARMS
# Group for the 6 left arm joints using a regular expression
"left_arm": ImplicitActuatorCfg(
joint_names_expr=["l_joint[1-6]"], # This matches l_joint1, l_joint2, ..., l_joint6
effort_limit_sim=100.0, # Note: Tune this based on your robot's specs
velocity_limit_sim=100.0, # Note: Tune this based on your robot's specs
stiffness=0.0, #10000.0, # Note: Tune this for desired control performance
damping=1000.0, #10000.0, # Note: Tune this for desired control performance
joint_names_expr=[
"l_joint[1-6]"
], # This matches l_joint1, l_joint2, ..., l_joint6
effort_limit_sim=100.0, # Note: Tune this based on your robot's specs
velocity_limit_sim=100.0, # Note: Tune this based on your robot's specs
stiffness=0.0, # 10000.0, # Note: Tune this for desired control performance
damping=1000.0, # 10000.0, # Note: Tune this for desired control performance
),
# Group for the 6 right arm joints using a regular expression
"right_arm": ImplicitActuatorCfg(
joint_names_expr=["r_joint[1-6]"], # This matches r_joint1, r_joint2, ..., r_joint6
effort_limit_sim=100.0, # Note: Tune this based on your robot's specs
velocity_limit_sim=100.0, # Note: Tune this based on your robot's specs
stiffness=10000.0, #10000.0, # Note: Tune this for desired control performance1
damping=1000.0, #10000.0, # Note: Tune this for desired control performance
joint_names_expr=[
"r_joint[1-6]"
], # This matches r_joint1, r_joint2, ..., r_joint6
effort_limit_sim=100.0, # Note: Tune this based on your robot's specs
velocity_limit_sim=100.0, # Note: Tune this based on your robot's specs
stiffness=10000.0, # 10000.0, # Note: Tune this for desired control performance1
damping=1000.0, # 10000.0, # Note: Tune this for desired control performance
),
"head": ImplicitActuatorCfg(
joint_names_expr=["head_revoluteJoint"], stiffness=10000.0, damping=1000.0
),
"head": ImplicitActuatorCfg(joint_names_expr=["head_revoluteJoint"], stiffness=10000.0, damping=1000.0),
"trunk": ImplicitActuatorCfg(
joint_names_expr=["PrismaticJoint"],
stiffness=40000.0, # 从 10000 增:强持位
damping=4000.0, # 从 1000 增:抗振荡
stiffness=40000.0, # 从 10000 增:强持位
damping=4000.0, # 从 1000 增:抗振荡
effort_limit_sim=10000.0,
velocity_limit_sim=0.2,
),
"wheels": ImplicitActuatorCfg(
joint_names_expr=[".*_revolute_Joint"],
stiffness=0.0,
joint_names_expr=[".*_revolute_Joint"],
stiffness=0.0,
damping=100.0,
effort_limit_sim=200.0, # <<<<<< 新增:力矩上限,足够驱动轮子
velocity_limit_sim=50.0, # <<<<<< 新增:速度上限,防止过快
velocity_limit_sim=50.0, # <<<<<< 新增:速度上限,防止过快
),
"grippers": ImplicitActuatorCfg(
joint_names_expr=[".*_hand_joint.*"],
stiffness=10000.0, #10000.0
joint_names_expr=[".*_hand_joint.*"],
stiffness=10000.0, # 10000.0
damping=1000.0,
effort_limit_sim=50.0, # default is 10, gemini said it is too small, so I set it to 50.
effort_limit_sim=50.0, # default is 10, gemini said it is too small, so I set it to 50.
velocity_limit_sim=50.0,
),
},

View File

@@ -0,0 +1,8 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Imitation learning tasks."""
import gymnasium as gym # noqa: F401

View File

@@ -0,0 +1,43 @@
# IsaacLab 官方 Demo 代码
> ⚠️ 本目录提供**可运行的官方 Demo**,不仅可以作为只读参考,更可以直接用来运行和验证,加深对 IsaacLab 各种机制的理解。
> 所有的环境配置均直接复用了 `/home/tangger/IsaacLab/source/isaaclab_tasks` 里的模块。我们给这些任务加上了 `Mindbot-Demo-` 前缀重新注册,以便直接在此运行。
## 目录说明
### `franka_stack/` — Franka 机械臂 Stack 任务 (IK-Rel)
- **注册名称**: `Mindbot-Demo-Stack-Cube-Franka-IK-Rel-v0`
- **参考价值**:
- 完整的 ManagerBased IL/RL env cfg 结构(scene / actions / observations / events / rewards / terminations)
- `events/` 里的 `reset_scene_to_default` + `reset_robot_joints` 写法 → **R 键重置参考**
- 这是所有机械臂操作任务(比如你正在写的 open_drybox)的最佳对标参考。
- **如何运行它**:
```bash
python scripts/environments/teleoperation/teleop_se3_agent.py \
--task Mindbot-Demo-Stack-Cube-Franka-IK-Rel-v0 \
--num_envs 1 --teleop_device keyboard
```
### `h1_locomotion/` — 宇树 H1 人形机器人运动控制
- **注册名称**:
- `Mindbot-Demo-Velocity-Flat-H1-v0`
- `Mindbot-Demo-Velocity-Rough-H1-v0`
- **参考价值**:
- 人形双足机器人(或移动底盘)的关节配置、观测空间设计
- 运动控制的 rewards / curriculum / terminations 完整示例
- **如何运行它**:
```bash
# 跑一个随机策略看样子
python scripts/environments/random_agent.py \
--task Mindbot-Demo-Velocity-Flat-H1-v0 \
--num_envs 8
# 或者测试基于 RL 的训练 (如果有装 rsl_rl)
python scripts/reinforcement_learning/rsl_rl/train.py \
--task Mindbot-Demo-Velocity-Flat-H1-v0 \
--num_envs 4096 --headless
```
## 对 AI 辅助的意义
当你进行新任务的开发时,AI 会将这些 Demo 当作**强有力的本地示例和知识库**,提供准确、符合 IsaacLab 最佳实践的代码修改意见,避免凭空猜测 API。

View File

@@ -0,0 +1,6 @@
# Copyright (c) 2024, MindRobot Project
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym # noqa: F401
from . import franka_stack, h1_locomotion

View File

@@ -0,0 +1,34 @@
# Copyright (c) 2024, MindRobot Project
# SPDX-License-Identifier: BSD-3-Clause
#
# Demo: Franka Stack-Cube IK-Rel
# 直接复用 IsaacLab 官方的 Franka Stack 任务,注册为 Mindbot-Demo 前缀。
# 参考原始注册: Isaac-Stack-Cube-Franka-IK-Rel-v0
#
# 运行指令:
# python scripts/environments/teleoperation/teleop_se3_agent.py \
# --task Mindbot-Demo-Stack-Cube-Franka-IK-Rel-v0 \
# --num_envs 1 --teleop_device keyboard
import gymnasium as gym
##
# Register Gym environments.
##
gym.register(
id="Mindbot-Demo-Stack-Cube-Franka-IK-Rel-v0",
entry_point="isaaclab.envs:ManagerBasedRLEnv",
kwargs={
# 直接引用 isaaclab_tasks 中的原始 cfg无需 copy 依赖链
"env_cfg_entry_point": (
"isaaclab_tasks.manager_based.manipulation.stack.config.franka"
".stack_ik_rel_env_cfg:FrankaCubeStackEnvCfg"
),
"robomimic_bc_cfg_entry_point": (
"isaaclab_tasks.manager_based.manipulation.stack.config.franka"
".agents:robomimic/bc_rnn_low_dim.json"
),
},
disable_env_checker=True,
)

View File

@@ -0,0 +1,11 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This sub-module contains the functions that are specific to the lift environments."""
from isaaclab.envs.mdp import * # noqa: F401, F403
from .observations import * # noqa: F401, F403
from .terminations import * # noqa: F401, F403

View File

@@ -0,0 +1,315 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import math
import random
from typing import TYPE_CHECKING
import torch
from isaacsim.core.utils.extensions import enable_extension
import isaaclab.utils.math as math_utils
from isaaclab.assets import Articulation, AssetBase
from isaaclab.managers import SceneEntityCfg
if TYPE_CHECKING:
from isaaclab.envs import ManagerBasedEnv
def set_default_joint_pose(
    env: ManagerBasedEnv,
    env_ids: torch.Tensor,
    default_pose: torch.Tensor,
    asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
    """Overwrite the asset's default joint positions with ``default_pose``.

    Note:
        ``env_ids`` is accepted for event-term signature compatibility but is
        ignored: the pose is replicated to ALL ``env.num_envs`` environments.
    """
    # Set the default pose for robots in all envs
    asset = env.scene[asset_cfg.name]
    asset.data.default_joint_pos = torch.tensor(default_pose, device=env.device).repeat(env.num_envs, 1)
def randomize_joint_by_gaussian_offset(
    env: ManagerBasedEnv,
    env_ids: torch.Tensor,
    mean: float,
    std: float,
    asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
    """Reset joints of ``env_ids`` to the default pose plus Gaussian noise.

    Noise is applied to joint positions only (velocities stay at their
    defaults), positions are clamped to the soft joint limits, and both the
    control targets and the simulated state are overwritten.

    Args:
        env: The environment whose scene holds the articulation.
        env_ids: Environment indices to reset.
        mean: Mean of the Gaussian noise added to joint positions.
        std: Standard deviation of the Gaussian noise.
        asset_cfg: Scene entity pointing at the articulation (default "robot").
    """
    asset: Articulation = env.scene[asset_cfg.name]
    # Add gaussian noise to joint states
    joint_pos = asset.data.default_joint_pos[env_ids].clone()
    joint_vel = asset.data.default_joint_vel[env_ids].clone()
    joint_pos += math_utils.sample_gaussian(mean, std, joint_pos.shape, joint_pos.device)
    # Clamp joint pos to limits
    joint_pos_limits = asset.data.soft_joint_pos_limits[env_ids]
    joint_pos = joint_pos.clamp_(joint_pos_limits[..., 0], joint_pos_limits[..., 1])
    # Don't noise the gripper poses
    # NOTE(review): assumes the LAST TWO joints are the gripper fingers — TODO
    # confirm this ordering holds for the robot used here.
    joint_pos[:, -2:] = asset.data.default_joint_pos[env_ids, -2:]
    # Set into the physics simulation
    asset.set_joint_position_target(joint_pos, env_ids=env_ids)
    asset.set_joint_velocity_target(joint_vel, env_ids=env_ids)
    asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids)
def sample_random_color(base=(0.75, 0.75, 0.75), variation=0.1):
    """Sample a color near ``base`` while keeping overall brightness constant.

    Three per-channel offsets are drawn uniformly from
    ``[-variation, variation]`` and then shifted by their mean so they sum to
    zero, which preserves the R+G+B total before clamping.

    Args:
        base (tuple): Base RGB color, each channel in [0, 1].
        variation (float): Maximum per-channel deviation sampled before
            balancing.

    Returns:
        tuple: The perturbed RGB color, each channel clamped to [0, 1].
    """
    raw_offsets = [random.uniform(-variation, variation) for _ in range(3)]
    mean_shift = sum(raw_offsets) / 3
    # Zero-sum offsets keep brightness unchanged (up to clamping).
    color = []
    for channel, offset in zip(base, raw_offsets):
        shifted = channel + (offset - mean_shift)
        color.append(max(0, min(1, shifted)))
    return tuple(color)
def randomize_scene_lighting_domelight(
    env: ManagerBasedEnv,
    env_ids: torch.Tensor,
    intensity_range: tuple[float, float],
    color_variation: float,
    textures: list[str],
    default_intensity: float = 3000.0,
    default_color: tuple[float, float, float] = (0.75, 0.75, 0.75),
    default_texture: str = "",
    asset_cfg: SceneEntityCfg = SceneEntityCfg("light"),
):
    """Reset the dome light to its defaults, then (in eval mode) randomize it.

    The light's intensity, color, and texture USD attributes are always reset
    to the given defaults first. Randomization only happens when the env cfg
    has ``eval_mode`` set truthy; which attributes get randomized is selected
    by ``env.cfg.eval_type`` ("light_intensity", "light_color",
    "light_texture", or "all").

    Note:
        ``env_ids`` is unused — the single light prim is shared by all envs.
    """
    asset: AssetBase = env.scene[asset_cfg.name]
    # Operate on the first (and only) USD prim backing the light asset.
    light_prim = asset.prims[0]
    intensity_attr = light_prim.GetAttribute("inputs:intensity")
    intensity_attr.Set(default_intensity)
    color_attr = light_prim.GetAttribute("inputs:color")
    color_attr.Set(default_color)
    texture_file_attr = light_prim.GetAttribute("inputs:texture:file")
    texture_file_attr.Set(default_texture)
    # Not in evaluation mode: keep the defaults that were just applied.
    if not hasattr(env.cfg, "eval_mode") or not env.cfg.eval_mode:
        return
    if env.cfg.eval_type in ["light_intensity", "all"]:
        # Sample new light intensity
        new_intensity = random.uniform(intensity_range[0], intensity_range[1])
        # Set light intensity to light prim
        intensity_attr.Set(new_intensity)
    if env.cfg.eval_type in ["light_color", "all"]:
        # Sample new light color
        new_color = sample_random_color(base=default_color, variation=color_variation)
        # Set light color to light prim
        color_attr.Set(new_color)
    if env.cfg.eval_type in ["light_texture", "all"]:
        # Sample new light texture (background)
        new_texture = random.sample(textures, 1)[0]
        # Set light texture to light prim
        texture_file_attr.Set(new_texture)
def sample_object_poses(
num_objects: int,
min_separation: float = 0.0,
pose_range: dict[str, tuple[float, float]] = {},
max_sample_tries: int = 5000,
):
range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]]
pose_list = []
for i in range(num_objects):
for j in range(max_sample_tries):
sample = [random.uniform(range[0], range[1]) for range in range_list]
# Accept pose if it is the first one, or if reached max num tries
if len(pose_list) == 0 or j == max_sample_tries - 1:
pose_list.append(sample)
break
# Check if pose of object is sufficiently far away from all other objects
separation_check = [math.dist(sample[:3], pose[:3]) > min_separation for pose in pose_list]
if False not in separation_check:
pose_list.append(sample)
break
return pose_list
def randomize_object_pose(
    env: ManagerBasedEnv,
    env_ids: torch.Tensor,
    asset_cfgs: list[SceneEntityCfg],
    min_separation: float = 0.0,
    pose_range: dict[str, tuple[float, float]] | None = None,
    max_sample_tries: int = 5000,
):
    """Randomize the root poses of several rigid assets in each environment.

    For every environment in ``env_ids``, one pose per asset is sampled with
    :func:`sample_object_poses`, offset by that environment's origin, and
    written to simulation with zero root velocity.

    Args:
        env: The environment whose scene holds the assets.
        env_ids: Environment indices to randomize; ``None`` is a no-op.
        asset_cfgs: One scene-entity cfg per asset to reposition.
        min_separation: Minimum xyz distance between sampled positions.
        pose_range: Per-component (min, max) sampling bounds; ``None``
            behaves like an empty dict (all components 0).
        max_sample_tries: Maximum rejection-sampling attempts per object.
    """
    if env_ids is None:
        return
    # Fix the mutable-default-argument pitfall of the original signature.
    if pose_range is None:
        pose_range = {}
    # Randomize poses in each environment independently
    for cur_env in env_ids.tolist():
        pose_list = sample_object_poses(
            num_objects=len(asset_cfgs),
            min_separation=min_separation,
            pose_range=pose_range,
            max_sample_tries=max_sample_tries,
        )
        # Randomize pose for each object
        for asset_cfg, pose in zip(asset_cfgs, pose_list):
            asset = env.scene[asset_cfg.name]
            # Write pose to simulation: shift by the env origin and convert
            # the sampled Euler angles to a quaternion.
            pose_tensor = torch.tensor([pose], device=env.device)
            positions = pose_tensor[:, 0:3] + env.scene.env_origins[cur_env, 0:3]
            orientations = math_utils.quat_from_euler_xyz(pose_tensor[:, 3], pose_tensor[:, 4], pose_tensor[:, 5])
            asset.write_root_pose_to_sim(
                torch.cat([positions, orientations], dim=-1), env_ids=torch.tensor([cur_env], device=env.device)
            )
            # Zero the root velocity so the object starts at rest.
            asset.write_root_velocity_to_sim(
                torch.zeros(1, 6, device=env.device), env_ids=torch.tensor([cur_env], device=env.device)
            )
def randomize_rigid_objects_in_focus(
    env: ManagerBasedEnv,
    env_ids: torch.Tensor,
    asset_cfgs: list[SceneEntityCfg],
    out_focus_state: torch.Tensor,
    min_separation: float = 0.0,
    pose_range: dict[str, tuple[float, float]] = {},
    max_sample_tries: int = 5000,
):
    """Bring one random instance per object collection into a sampled pose.

    For each environment, one object is picked at random from every collection
    in ``asset_cfgs`` and placed at a freshly sampled pose; all other objects
    in that collection are parked at ``out_focus_state``. The chosen indices
    are recorded on ``env.rigid_objects_in_focus`` (one list of ids per env)
    for later use by the instance-aware observation terms.

    Args:
        env: The environment whose scene holds the object collections.
        env_ids: Environment indices to randomize; ``None`` is a no-op.
        asset_cfgs: One scene-entity cfg per rigid-object collection.
        out_focus_state: Full root state assigned to the non-selected objects.
        min_separation: Minimum xyz distance between sampled in-focus poses.
        pose_range: Per-component (min, max) sampling bounds.
        max_sample_tries: Maximum rejection-sampling attempts per object.
    """
    if env_ids is None:
        return
    # List of rigid objects in focus for each env (dim = [num_envs, num_rigid_objects])
    env.rigid_objects_in_focus = []
    for cur_env in env_ids.tolist():
        # Sample in focus object poses
        pose_list = sample_object_poses(
            num_objects=len(asset_cfgs),
            min_separation=min_separation,
            pose_range=pose_range,
            max_sample_tries=max_sample_tries,
        )
        selected_ids = []
        for asset_idx in range(len(asset_cfgs)):
            asset_cfg = asset_cfgs[asset_idx]
            asset = env.scene[asset_cfg.name]
            # Randomly select an object to bring into focus
            object_id = random.randint(0, asset.num_objects - 1)
            selected_ids.append(object_id)
            # Create object state tensor: every instance starts out-of-focus,
            # then the selected one is overwritten with the sampled pose.
            object_states = torch.stack([out_focus_state] * asset.num_objects).to(device=env.device)
            pose_tensor = torch.tensor([pose_list[asset_idx]], device=env.device)
            positions = pose_tensor[:, 0:3] + env.scene.env_origins[cur_env, 0:3]
            orientations = math_utils.quat_from_euler_xyz(pose_tensor[:, 3], pose_tensor[:, 4], pose_tensor[:, 5])
            object_states[object_id, 0:3] = positions
            object_states[object_id, 3:7] = orientations
            asset.write_object_state_to_sim(
                object_state=object_states, env_ids=torch.tensor([cur_env], device=env.device)
            )
        env.rigid_objects_in_focus.append(selected_ids)
def randomize_visual_texture_material(
    env: ManagerBasedEnv,
    env_ids: torch.Tensor,
    asset_cfg: SceneEntityCfg,
    textures: list[str],
    default_texture: str = "",
    texture_rotation: tuple[float, float] = (0.0, 0.0),
):
    """Randomize the visual texture of bodies on an asset using Replicator API.
    This function randomizes the visual texture of the bodies of the asset using the Replicator API.
    The function samples random textures from the given texture paths and applies them to the bodies
    of the asset. The textures are projected onto the bodies and rotated by the given angles.
    .. note::
        The function assumes that the asset follows the prim naming convention as:
        "{asset_prim_path}/{body_name}/visuals" where the body name is the name of the body to
        which the texture is applied. This is the default prim ordering when importing assets
        from the asset converters in Isaac Lab.
    .. note::
        When randomizing the texture of individual assets, please make sure to set
        :attr:`isaaclab.scene.InteractiveSceneCfg.replicate_physics` to False. This ensures that physics
        parser will parse the individual asset properties separately.
    """
    # Skip entirely when eval_mode exists but randomization of this asset's
    # texture is not requested by eval_type.
    if hasattr(env.cfg, "eval_mode") and (
        not env.cfg.eval_mode or env.cfg.eval_type not in [f"{asset_cfg.name}_texture", "all"]
    ):
        return
    # textures = [default_texture]
    # enable replicator extension if not already enabled
    enable_extension("omni.replicator.core")
    # we import the module here since we may not always need the replicator
    import omni.replicator.core as rep
    # check to make sure replicate_physics is set to False, else raise error
    # note: We add an explicit check here since texture randomization can happen outside of 'prestartup' mode
    # and the event manager doesn't check in that case.
    if env.cfg.scene.replicate_physics:
        raise RuntimeError(
            "Unable to randomize visual texture material with scene replication enabled."
            " For stable USD-level randomization, please disable scene replication"
            " by setting 'replicate_physics' to False in 'InteractiveSceneCfg'."
        )
    # convert from radians to degrees
    texture_rotation = tuple(math.degrees(angle) for angle in texture_rotation)
    # obtain the asset entity
    asset = env.scene[asset_cfg.name]
    # join all bodies in the asset
    body_names = asset_cfg.body_names
    if isinstance(body_names, str):
        body_names_regex = body_names
    elif isinstance(body_names, list):
        body_names_regex = "|".join(body_names)
    else:
        body_names_regex = ".*"
    # Assets without a cfg attribute expose their prim paths directly;
    # otherwise build the pattern from the configured prim path + body regex.
    if not hasattr(asset, "cfg"):
        prims_group = rep.get.prims(path_pattern=f"{asset.prim_paths[0]}/visuals")
    else:
        prims_group = rep.get.prims(path_pattern=f"{asset.cfg.prim_path}/{body_names_regex}/visuals")
    with prims_group:
        rep.randomizer.texture(
            textures=textures, project_uvw=True, texture_rotate=rep.distribution.uniform(*texture_rotation)
        )

View File

@@ -0,0 +1,534 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from typing import TYPE_CHECKING, Literal
import torch
import isaaclab.utils.math as math_utils
from isaaclab.assets import Articulation, RigidObject, RigidObjectCollection
from isaaclab.managers import SceneEntityCfg
from isaaclab.sensors import FrameTransformer
if TYPE_CHECKING:
from isaaclab.envs import ManagerBasedRLEnv
def cube_positions_in_world_frame(
    env: ManagerBasedRLEnv,
    cube_1_cfg: SceneEntityCfg = SceneEntityCfg("cube_1"),
    cube_2_cfg: SceneEntityCfg = SceneEntityCfg("cube_2"),
    cube_3_cfg: SceneEntityCfg = SceneEntityCfg("cube_3"),
) -> torch.Tensor:
    """World-frame root positions of the three cubes, concatenated along dim=1."""
    positions = [
        env.scene[cfg.name].data.root_pos_w
        for cfg in (cube_1_cfg, cube_2_cfg, cube_3_cfg)
    ]
    return torch.cat(positions, dim=1)
def instance_randomize_cube_positions_in_world_frame(
    env: ManagerBasedRLEnv,
    cube_1_cfg: SceneEntityCfg = SceneEntityCfg("cube_1"),
    cube_2_cfg: SceneEntityCfg = SceneEntityCfg("cube_2"),
    cube_3_cfg: SceneEntityCfg = SceneEntityCfg("cube_3"),
) -> torch.Tensor:
    """World-frame positions of the in-focus instance of each cube collection."""
    # No focus bookkeeping yet (no randomization event has run): return a
    # placeholder tensor filled with -1.
    if not hasattr(env, "rigid_objects_in_focus"):
        return torch.full((env.num_envs, 9), fill_value=-1)
    collections = [env.scene[cfg.name] for cfg in (cube_1_cfg, cube_2_cfg, cube_3_cfg)]
    per_cube = []
    for slot, collection in enumerate(collections):
        rows = [
            collection.data.object_pos_w[env_id, env.rigid_objects_in_focus[env_id][slot], :3]
            for env_id in range(env.num_envs)
        ]
        per_cube.append(torch.stack(rows))
    return torch.cat(per_cube, dim=1)
def cube_orientations_in_world_frame(
    env: ManagerBasedRLEnv,
    cube_1_cfg: SceneEntityCfg = SceneEntityCfg("cube_1"),
    cube_2_cfg: SceneEntityCfg = SceneEntityCfg("cube_2"),
    cube_3_cfg: SceneEntityCfg = SceneEntityCfg("cube_3"),
):
    """World-frame root orientations of the three cubes, concatenated along dim=1."""
    orientations = [
        env.scene[cfg.name].data.root_quat_w
        for cfg in (cube_1_cfg, cube_2_cfg, cube_3_cfg)
    ]
    return torch.cat(orientations, dim=1)
def instance_randomize_cube_orientations_in_world_frame(
    env: ManagerBasedRLEnv,
    cube_1_cfg: SceneEntityCfg = SceneEntityCfg("cube_1"),
    cube_2_cfg: SceneEntityCfg = SceneEntityCfg("cube_2"),
    cube_3_cfg: SceneEntityCfg = SceneEntityCfg("cube_3"),
) -> torch.Tensor:
    """World-frame orientations of the in-focus instance of each cube collection."""
    # No focus bookkeeping yet (no randomization event has run): return a
    # placeholder tensor filled with -1.
    if not hasattr(env, "rigid_objects_in_focus"):
        return torch.full((env.num_envs, 9), fill_value=-1)
    collections = [env.scene[cfg.name] for cfg in (cube_1_cfg, cube_2_cfg, cube_3_cfg)]
    per_cube = []
    for slot, collection in enumerate(collections):
        rows = [
            collection.data.object_quat_w[env_id, env.rigid_objects_in_focus[env_id][slot], :4]
            for env_id in range(env.num_envs)
        ]
        per_cube.append(torch.stack(rows))
    return torch.cat(per_cube, dim=1)
def object_obs(
    env: ManagerBasedRLEnv,
    cube_1_cfg: SceneEntityCfg = SceneEntityCfg("cube_1"),
    cube_2_cfg: SceneEntityCfg = SceneEntityCfg("cube_2"),
    cube_3_cfg: SceneEntityCfg = SceneEntityCfg("cube_3"),
    ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame"),
):
    """
    Object observations (in world frame), concatenated along dim=1:
        cube_1 pos (w.r.t. env origin), cube_1 quat,
        cube_2 pos (w.r.t. env origin), cube_2 quat,
        cube_3 pos (w.r.t. env origin), cube_3 quat,
        gripper-to-cube_1, gripper-to-cube_2, gripper-to-cube_3,
        cube_1-to-cube_2, cube_2-to-cube_3, cube_1-to-cube_3
    """
    cubes = [env.scene[cfg.name] for cfg in (cube_1_cfg, cube_2_cfg, cube_3_cfg)]
    ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name]
    ee_pos_w = ee_frame.data.target_pos_w[:, 0, :]
    pos_w = [cube.data.root_pos_w for cube in cubes]
    quat_w = [cube.data.root_quat_w for cube in cubes]
    pieces = [
        pos_w[0] - env.scene.env_origins,
        quat_w[0],
        pos_w[1] - env.scene.env_origins,
        quat_w[1],
        pos_w[2] - env.scene.env_origins,
        quat_w[2],
        pos_w[0] - ee_pos_w,  # gripper to cube_1
        pos_w[1] - ee_pos_w,  # gripper to cube_2
        pos_w[2] - ee_pos_w,  # gripper to cube_3
        pos_w[0] - pos_w[1],  # cube_1 to cube_2
        pos_w[1] - pos_w[2],  # cube_2 to cube_3
        pos_w[0] - pos_w[2],  # cube_1 to cube_3
    ]
    return torch.cat(pieces, dim=1)
def instance_randomize_object_obs(
    env: ManagerBasedRLEnv,
    cube_1_cfg: SceneEntityCfg = SceneEntityCfg("cube_1"),
    cube_2_cfg: SceneEntityCfg = SceneEntityCfg("cube_2"),
    cube_3_cfg: SceneEntityCfg = SceneEntityCfg("cube_3"),
    ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame"),
):
    """
    Object observations (in world frame):
        cube_1 pos,
        cube_1 quat,
        cube_2 pos,
        cube_2 quat,
        cube_3 pos,
        cube_3 quat,
        gripper to cube_1,
        gripper to cube_2,
        gripper to cube_3,
        cube_1 to cube_2,
        cube_2 to cube_3,
        cube_1 to cube_3,
    Instance-aware variant: reads the pose of the in-focus instance of each
    cube collection (selected by ``env.rigid_objects_in_focus``).
    """
    # NOTE(review): this fallback is 9 columns wide while the full observation
    # concatenates 39 — presumably callers never consume it before the first
    # randomization event; confirm downstream shape expectations.
    if not hasattr(env, "rigid_objects_in_focus"):
        return torch.full((env.num_envs, 9), fill_value=-1)
    cube_1: RigidObjectCollection = env.scene[cube_1_cfg.name]
    cube_2: RigidObjectCollection = env.scene[cube_2_cfg.name]
    cube_3: RigidObjectCollection = env.scene[cube_3_cfg.name]
    ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name]
    # Gather the pose of the selected (in-focus) instance per environment.
    cube_1_pos_w = []
    cube_2_pos_w = []
    cube_3_pos_w = []
    cube_1_quat_w = []
    cube_2_quat_w = []
    cube_3_quat_w = []
    for env_id in range(env.num_envs):
        cube_1_pos_w.append(cube_1.data.object_pos_w[env_id, env.rigid_objects_in_focus[env_id][0], :3])
        cube_2_pos_w.append(cube_2.data.object_pos_w[env_id, env.rigid_objects_in_focus[env_id][1], :3])
        cube_3_pos_w.append(cube_3.data.object_pos_w[env_id, env.rigid_objects_in_focus[env_id][2], :3])
        cube_1_quat_w.append(cube_1.data.object_quat_w[env_id, env.rigid_objects_in_focus[env_id][0], :4])
        cube_2_quat_w.append(cube_2.data.object_quat_w[env_id, env.rigid_objects_in_focus[env_id][1], :4])
        cube_3_quat_w.append(cube_3.data.object_quat_w[env_id, env.rigid_objects_in_focus[env_id][2], :4])
    cube_1_pos_w = torch.stack(cube_1_pos_w)
    cube_2_pos_w = torch.stack(cube_2_pos_w)
    cube_3_pos_w = torch.stack(cube_3_pos_w)
    cube_1_quat_w = torch.stack(cube_1_quat_w)
    cube_2_quat_w = torch.stack(cube_2_quat_w)
    cube_3_quat_w = torch.stack(cube_3_quat_w)
    # Relative vectors from the end-effector and between cubes.
    ee_pos_w = ee_frame.data.target_pos_w[:, 0, :]
    gripper_to_cube_1 = cube_1_pos_w - ee_pos_w
    gripper_to_cube_2 = cube_2_pos_w - ee_pos_w
    gripper_to_cube_3 = cube_3_pos_w - ee_pos_w
    cube_1_to_2 = cube_1_pos_w - cube_2_pos_w
    cube_2_to_3 = cube_2_pos_w - cube_3_pos_w
    cube_1_to_3 = cube_1_pos_w - cube_3_pos_w
    return torch.cat(
        (
            cube_1_pos_w - env.scene.env_origins,
            cube_1_quat_w,
            cube_2_pos_w - env.scene.env_origins,
            cube_2_quat_w,
            cube_3_pos_w - env.scene.env_origins,
            cube_3_quat_w,
            gripper_to_cube_1,
            gripper_to_cube_2,
            gripper_to_cube_3,
            cube_1_to_2,
            cube_2_to_3,
            cube_1_to_3,
        ),
        dim=1,
    )
def ee_frame_pos(env: ManagerBasedRLEnv, ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame")) -> torch.Tensor:
    """End-effector target-frame position relative to each environment's origin."""
    frame: FrameTransformer = env.scene[ee_frame_cfg.name]
    return frame.data.target_pos_w[:, 0, :] - env.scene.env_origins[:, 0:3]
def ee_frame_quat(env: ManagerBasedRLEnv, ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame")) -> torch.Tensor:
    """World-frame orientation of the first end-effector target frame."""
    frame: FrameTransformer = env.scene[ee_frame_cfg.name]
    return frame.data.target_quat_w[:, 0, :]
def gripper_pos(
    env: ManagerBasedRLEnv,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
) -> torch.Tensor:
    """
    Obtain the versatile gripper position of both Gripper and Suction Cup.

    If the scene exposes surface grippers, their states are returned (one
    column per gripper). Otherwise, for a parallel-jaw gripper, the two finger
    joint positions are returned (second finger negated).

    Raises:
        NotImplementedError: If neither surface grippers nor
            ``env.cfg.gripper_joint_names`` are available.
    """
    robot: Articulation = env.scene[robot_cfg.name]
    if hasattr(env.scene, "surface_grippers") and len(env.scene.surface_grippers) > 0:
        # Handle multiple surface grippers by concatenating their states
        gripper_states = []
        for gripper_name, surface_gripper in env.scene.surface_grippers.items():
            gripper_states.append(surface_gripper.state.view(-1, 1))
        if len(gripper_states) == 1:
            return gripper_states[0]
        else:
            return torch.cat(gripper_states, dim=1)
    else:
        if hasattr(env.cfg, "gripper_joint_names"):
            gripper_joint_ids, _ = robot.find_joints(env.cfg.gripper_joint_names)
            assert len(gripper_joint_ids) == 2, "Observation gripper_pos only support parallel gripper for now"
            # Second finger is negated — presumably the joints are mirrored so
            # both columns share a sign convention; confirm against the robot.
            finger_joint_1 = robot.data.joint_pos[:, gripper_joint_ids[0]].clone().unsqueeze(1)
            finger_joint_2 = -1 * robot.data.joint_pos[:, gripper_joint_ids[1]].clone().unsqueeze(1)
            return torch.cat((finger_joint_1, finger_joint_2), dim=1)
        else:
            raise NotImplementedError("[Error] Cannot find gripper_joint_names in the environment config")
def object_grasped(
    env: ManagerBasedRLEnv,
    robot_cfg: SceneEntityCfg,
    ee_frame_cfg: SceneEntityCfg,
    object_cfg: SceneEntityCfg,
    diff_threshold: float = 0.06,
) -> torch.Tensor:
    """Check if an object is grasped by the specified robot.

    An object counts as grasped when the end-effector is within
    ``diff_threshold`` of the object's root AND the gripper is closed: for a
    suction cup, its state equals 1; for a parallel gripper, both finger
    joints deviate from ``env.cfg.gripper_open_val`` by more than
    ``env.cfg.gripper_threshold``.

    Returns:
        torch.Tensor: Boolean per-environment grasp flags.
    """
    robot: Articulation = env.scene[robot_cfg.name]
    ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name]
    object: RigidObject = env.scene[object_cfg.name]
    # Distance between the end-effector target frame and the object root.
    object_pos = object.data.root_pos_w
    end_effector_pos = ee_frame.data.target_pos_w[:, 0, :]
    pose_diff = torch.linalg.vector_norm(object_pos - end_effector_pos, dim=1)
    if hasattr(env.scene, "surface_grippers") and len(env.scene.surface_grippers) > 0:
        surface_gripper = env.scene.surface_grippers["surface_gripper"]
        suction_cup_status = surface_gripper.state.view(-1, 1)  # 1: closed, 0: closing, -1: open
        suction_cup_is_closed = (suction_cup_status == 1).to(torch.float32)
        grasped = torch.logical_and(suction_cup_is_closed, pose_diff < diff_threshold)
    else:
        if hasattr(env.cfg, "gripper_joint_names"):
            gripper_joint_ids, _ = robot.find_joints(env.cfg.gripper_joint_names)
            assert len(gripper_joint_ids) == 2, "Observations only support parallel gripper for now"
            # First finger: closed enough (far from the open value) AND near the object.
            grasped = torch.logical_and(
                pose_diff < diff_threshold,
                torch.abs(
                    robot.data.joint_pos[:, gripper_joint_ids[0]]
                    - torch.tensor(env.cfg.gripper_open_val, dtype=torch.float32).to(env.device)
                )
                > env.cfg.gripper_threshold,
            )
            # Second finger must also be away from the open position.
            grasped = torch.logical_and(
                grasped,
                torch.abs(
                    robot.data.joint_pos[:, gripper_joint_ids[1]]
                    - torch.tensor(env.cfg.gripper_open_val, dtype=torch.float32).to(env.device)
                )
                > env.cfg.gripper_threshold,
            )
    return grasped
def object_stacked(
    env: ManagerBasedRLEnv,
    robot_cfg: SceneEntityCfg,
    upper_object_cfg: SceneEntityCfg,
    lower_object_cfg: SceneEntityCfg,
    xy_threshold: float = 0.05,
    height_threshold: float = 0.005,
    height_diff: float = 0.0468,
) -> torch.Tensor:
    """Check if an object is stacked by the specified robot.

    Two geometric conditions must hold: the xy distance between the two
    objects' roots is below ``xy_threshold``, and their height separation is
    within ``height_threshold`` of the expected ``height_diff``. In addition,
    the gripper must have released the object (suction cup open, or both
    parallel-gripper fingers back at ``env.cfg.gripper_open_val``).

    Returns:
        torch.Tensor: Boolean per-environment stacked flags.
    """
    robot: Articulation = env.scene[robot_cfg.name]
    upper_object: RigidObject = env.scene[upper_object_cfg.name]
    lower_object: RigidObject = env.scene[lower_object_cfg.name]
    # Geometric alignment: xy proximity + expected vertical offset.
    pos_diff = upper_object.data.root_pos_w - lower_object.data.root_pos_w
    height_dist = torch.linalg.vector_norm(pos_diff[:, 2:], dim=1)
    xy_dist = torch.linalg.vector_norm(pos_diff[:, :2], dim=1)
    stacked = torch.logical_and(xy_dist < xy_threshold, (height_dist - height_diff) < height_threshold)
    if hasattr(env.scene, "surface_grippers") and len(env.scene.surface_grippers) > 0:
        surface_gripper = env.scene.surface_grippers["surface_gripper"]
        suction_cup_status = surface_gripper.state.view(-1, 1)  # 1: closed, 0: closing, -1: open
        suction_cup_is_open = (suction_cup_status == -1).to(torch.float32)
        stacked = torch.logical_and(suction_cup_is_open, stacked)
    else:
        if hasattr(env.cfg, "gripper_joint_names"):
            gripper_joint_ids, _ = robot.find_joints(env.cfg.gripper_joint_names)
            assert len(gripper_joint_ids) == 2, "Observations only support parallel gripper for now"
            # Both fingers must be back at the open position (released).
            stacked = torch.logical_and(
                torch.isclose(
                    robot.data.joint_pos[:, gripper_joint_ids[0]],
                    torch.tensor(env.cfg.gripper_open_val, dtype=torch.float32).to(env.device),
                    atol=1e-4,
                    rtol=1e-4,
                ),
                stacked,
            )
            stacked = torch.logical_and(
                torch.isclose(
                    robot.data.joint_pos[:, gripper_joint_ids[1]],
                    torch.tensor(env.cfg.gripper_open_val, dtype=torch.float32).to(env.device),
                    atol=1e-4,
                    rtol=1e-4,
                ),
                stacked,
            )
        else:
            raise ValueError("No gripper_joint_names found in environment config")
    return stacked
def cube_poses_in_base_frame(
    env: ManagerBasedRLEnv,
    cube_1_cfg: SceneEntityCfg = SceneEntityCfg("cube_1"),
    cube_2_cfg: SceneEntityCfg = SceneEntityCfg("cube_2"),
    cube_3_cfg: SceneEntityCfg = SceneEntityCfg("cube_3"),
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    return_key: Literal["pos", "quat", None] = None,
) -> torch.Tensor:
    """Cube poses expressed in the robot base frame.

    Args:
        return_key: "pos" returns only the stacked positions, "quat" only the
            stacked orientations, ``None`` returns positions followed by
            orientations, all concatenated along dim=1.
    """
    robot: Articulation = env.scene[robot_cfg.name]
    base_pos_w = robot.data.root_pos_w
    base_quat_w = robot.data.root_quat_w
    pos_parts = []
    quat_parts = []
    for cfg in (cube_1_cfg, cube_2_cfg, cube_3_cfg):
        cube: RigidObject = env.scene[cfg.name]
        pos_b, quat_b = math_utils.subtract_frame_transforms(
            base_pos_w, base_quat_w, cube.data.root_pos_w, cube.data.root_quat_w
        )
        pos_parts.append(pos_b)
        quat_parts.append(quat_b)
    pos_cubes_base = torch.cat(pos_parts, dim=1)
    quat_cubes_base = torch.cat(quat_parts, dim=1)
    if return_key == "pos":
        return pos_cubes_base
    if return_key == "quat":
        return quat_cubes_base
    return torch.cat((pos_cubes_base, quat_cubes_base), dim=1)
def object_abs_obs_in_base_frame(
    env: ManagerBasedRLEnv,
    cube_1_cfg: SceneEntityCfg = SceneEntityCfg("cube_1"),
    cube_2_cfg: SceneEntityCfg = SceneEntityCfg("cube_2"),
    cube_3_cfg: SceneEntityCfg = SceneEntityCfg("cube_3"),
    ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame"),
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
    """Absolute object observations in the robot base frame.

    Replaces relative observations with absolute poses: for each cube its
    (position, quaternion) pair in the base frame, followed by the end-effector
    (position, quaternion) in the base frame.

    Layout (concatenated along dim 1):
        cube_1 pos, cube_1 quat,
        cube_2 pos, cube_2 quat,
        cube_3 pos, cube_3 quat,
        gripper pos, gripper quat
    """
    robot: Articulation = env.scene[robot_cfg.name]
    base_pos_w = robot.data.root_pos_w
    base_quat_w = robot.data.root_quat_w
    obs_terms = []
    # cube poses, world frame -> base frame, appended as (pos, quat) per cube
    for cube_cfg in (cube_1_cfg, cube_2_cfg, cube_3_cfg):
        cube: RigidObject = env.scene[cube_cfg.name]
        pos_b, quat_b = math_utils.subtract_frame_transforms(
            base_pos_w, base_quat_w, cube.data.root_pos_w, cube.data.root_quat_w
        )
        obs_terms.extend((pos_b, quat_b))
    # end-effector pose, world frame -> base frame; index 0 is the first target frame
    ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name]
    ee_pos_b, ee_quat_b = math_utils.subtract_frame_transforms(
        base_pos_w, base_quat_w, ee_frame.data.target_pos_w[:, 0, :], ee_frame.data.target_quat_w[:, 0, :]
    )
    obs_terms.extend((ee_pos_b, ee_quat_b))
    return torch.cat(obs_terms, dim=1)
def ee_frame_pose_in_base_frame(
    env: ManagerBasedRLEnv,
    ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame"),
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    return_key: Literal["pos", "quat", None] = None,
) -> torch.Tensor:
    """End-effector pose expressed in the robot base frame.

    Args:
        env: The environment instance.
        ee_frame_cfg: Scene entity config of the end-effector frame transformer.
        robot_cfg: Scene entity config of the robot.
        return_key: ``"pos"`` for position only, ``"quat"`` for orientation only,
            ``None`` for both concatenated.
    """
    robot: Articulation = env.scene[robot_cfg.name]
    ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name]
    # the first target frame of the transformer is taken as the end effector
    pos_b, quat_b = math_utils.subtract_frame_transforms(
        robot.data.root_pos_w,
        robot.data.root_quat_w,
        ee_frame.data.target_pos_w[:, 0, :],
        ee_frame.data.target_quat_w[:, 0, :],
    )
    if return_key == "pos":
        return pos_b
    if return_key == "quat":
        return quat_b
    return torch.cat((pos_b, quat_b), dim=1)

View File

@@ -0,0 +1,93 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Common functions that can be used to activate certain terminations for the lift task.
The functions can be passed to the :class:`isaaclab.managers.TerminationTermCfg` object to enable
the termination introduced by the function.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import torch
from isaaclab.assets import Articulation, RigidObject
from isaaclab.managers import SceneEntityCfg
if TYPE_CHECKING:
from isaaclab.envs import ManagerBasedRLEnv
def cubes_stacked(
    env: ManagerBasedRLEnv,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    cube_1_cfg: SceneEntityCfg = SceneEntityCfg("cube_1"),
    cube_2_cfg: SceneEntityCfg = SceneEntityCfg("cube_2"),
    cube_3_cfg: SceneEntityCfg = SceneEntityCfg("cube_3"),
    xy_threshold: float = 0.04,
    height_threshold: float = 0.005,
    height_diff: float = 0.0468,
    atol=0.0001,
    rtol=0.0001,
):
    """Check whether cube_2 is stacked on cube_1 and cube_3 on cube_2, with the gripper released.

    A pair counts as stacked when the cubes are aligned in the x-y plane, their
    vertical separation matches ``height_diff`` within ``height_threshold``, and the
    lower cube's z is indeed below the upper one's. Additionally, the gripper must be
    open: either a suction cup reporting state -1, or both parallel-gripper joints at
    ``env.cfg.gripper_open_val``.

    Raises:
        ValueError: If no surface gripper exists and the env config does not declare
            ``gripper_joint_names``.
    """
    robot: Articulation = env.scene[robot_cfg.name]
    cubes = [env.scene[cfg.name] for cfg in (cube_1_cfg, cube_2_cfg, cube_3_cfg)]

    def pair_stacked(lower: RigidObject, upper: RigidObject) -> torch.Tensor:
        # diff follows the original convention: lower pose minus upper pose
        diff = lower.data.root_pos_w - upper.data.root_pos_w
        xy_aligned = torch.norm(diff[:, :2], dim=1) < xy_threshold
        height_ok = torch.norm(diff[:, 2:], dim=1) - height_diff < height_threshold
        lower_is_below = diff[:, 2] < 0.0
        return xy_aligned & height_ok & lower_is_below

    stacked = pair_stacked(cubes[0], cubes[1]) & pair_stacked(cubes[1], cubes[2])
    # Check gripper positions
    if hasattr(env.scene, "surface_grippers") and len(env.scene.surface_grippers) > 0:
        # suction-cup state convention: 1 = closed, 0 = closing, -1 = open
        gripper_state = env.scene.surface_grippers["surface_gripper"].state.view(-1)
        stacked = torch.logical_and((gripper_state == -1).to(torch.float32), stacked)
    else:
        if not hasattr(env.cfg, "gripper_joint_names"):
            raise ValueError("No gripper_joint_names found in environment config")
        gripper_joint_ids, _ = robot.find_joints(env.cfg.gripper_joint_names)
        assert len(gripper_joint_ids) == 2, "Terminations only support parallel gripper for now"
        open_val = torch.tensor(env.cfg.gripper_open_val, dtype=torch.float32).to(env.device)
        # both fingers must sit at the open position (within atol/rtol)
        for joint_id in gripper_joint_ids:
            finger_open = torch.isclose(robot.data.joint_pos[:, joint_id], open_val, atol=atol, rtol=rtol)
            stacked = torch.logical_and(finger_open, stacked)
    return stacked

View File

@@ -0,0 +1,69 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from isaaclab.devices.device_base import DeviceBase, DevicesCfg
from isaaclab.devices.keyboard import Se3KeyboardCfg
from isaaclab.devices.openxr.openxr_device import OpenXRDeviceCfg
from isaaclab.devices.openxr.retargeters.manipulator.gripper_retargeter import GripperRetargeterCfg
from isaaclab.devices.openxr.retargeters.manipulator.se3_rel_retargeter import Se3RelRetargeterCfg
from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg
from isaaclab.utils import configclass
from . import stack_joint_pos_env_cfg
##
# Pre-defined configs
##
from isaaclab_assets.robots.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip
@configclass
class FrankaCubeStackEnvCfg(stack_joint_pos_env_cfg.FrankaCubeStackEnvCfg):
    """Franka cube-stacking environment driven by task-space (differential IK) actions.

    Extends the joint-position variant: swaps in the high-PD Franka config, replaces
    the arm action with a relative-mode differential-IK action, and declares XR
    hand-tracking and keyboard teleoperation devices.
    """
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # Set Franka as robot
        # We switch here to a stiffer PD controller for IK tracking to be better.
        self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
        # Set actions for the specific robot type (franka)
        # Relative-mode IK on "panda_hand" with damped-least-squares; commands are pose
        # deltas scaled by 0.5. The 0.107 m z-offset shifts the control frame along the
        # hand axis (presumably to the gripper pinch point — TODO confirm).
        self.actions.arm_action = DifferentialInverseKinematicsActionCfg(
            asset_name="robot",
            joint_names=["panda_joint.*"],
            body_name="panda_hand",
            controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"),
            scale=0.5,
            body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]),
        )
        # Teleop devices: right-hand XR tracking drives SE(3) deltas (wrist position
        # only, xy-rotation zeroed) plus the gripper; keyboard is the fallback device.
        self.teleop_devices = DevicesCfg(
            devices={
                "handtracking": OpenXRDeviceCfg(
                    retargeters=[
                        Se3RelRetargeterCfg(
                            bound_hand=DeviceBase.TrackingTarget.HAND_RIGHT,
                            zero_out_xy_rotation=True,
                            use_wrist_rotation=False,
                            use_wrist_position=True,
                            delta_pos_scale_factor=10.0,
                            delta_rot_scale_factor=10.0,
                            sim_device=self.sim.device,
                        ),
                        GripperRetargeterCfg(
                            bound_hand=DeviceBase.TrackingTarget.HAND_RIGHT, sim_device=self.sim.device
                        ),
                    ],
                    sim_device=self.sim.device,
                    xr_cfg=self.xr,
                ),
                "keyboard": Se3KeyboardCfg(
                    pos_sensitivity=0.05,
                    rot_sensitivity=0.05,
                    sim_device=self.sim.device,
                ),
            }
        )

View File

@@ -0,0 +1,9 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configurations for the object stack environments."""
# We leave this file empty since we don't want to expose any configs in this package directly.
# We still need this file to import the "config" module in the parent package.

View File

@@ -0,0 +1,55 @@
# Copyright (c) 2024, MindRobot Project
# SPDX-License-Identifier: BSD-3-Clause
#
# Demo: Unitree H1 Humanoid Locomotion (Flat terrain)
# Reuses the official IsaacLab H1 locomotion tasks directly, re-registered under the Mindbot-Demo prefix.
# Original registration for reference: Isaac-Velocity-Flat-H1-v0
#
# Run command (visualize):
#   python scripts/environments/random_agent.py \
#     --task Mindbot-Demo-Velocity-Flat-H1-v0 \
#     --num_envs 32
#
# Train command (RSL-RL):
#   python scripts/reinforcement_learning/rsl_rl/train.py \
#     --task Mindbot-Demo-Velocity-Flat-H1-v0 \
#     --num_envs 4096 --headless
import gymnasium as gym
##
# Register Gym environments.
##
# Register both H1 locomotion variants under the Mindbot-Demo prefix, reusing the
# upstream IsaacLab H1 velocity-task configurations verbatim.
_H1_CFG_MODULE = "isaaclab_tasks.manager_based.locomotion.velocity.config.h1"
for _task_id, _env_cfg_suffix, _agent_cfg_suffix in (
    (
        "Mindbot-Demo-Velocity-Flat-H1-v0",
        ".flat_env_cfg:H1FlatEnvCfg",
        ".agents.rsl_rl_ppo_cfg:H1FlatPPORunnerCfg",
    ),
    (
        "Mindbot-Demo-Velocity-Rough-H1-v0",
        ".rough_env_cfg:H1RoughEnvCfg",
        ".agents.rsl_rl_ppo_cfg:H1RoughPPORunnerCfg",
    ),
):
    gym.register(
        id=_task_id,
        entry_point="isaaclab.envs:ManagerBasedRLEnv",
        disable_env_checker=True,
        kwargs={
            # point directly at the original cfg classes shipped with isaaclab_tasks
            "env_cfg_entry_point": _H1_CFG_MODULE + _env_cfg_suffix,
            "rsl_rl_cfg_entry_point": _H1_CFG_MODULE + _agent_cfg_suffix,
        },
    )

View File

@@ -0,0 +1,41 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from isaaclab.utils import configclass
from .rough_env_cfg import H1RoughEnvCfg
@configclass
class H1FlatEnvCfg(H1RoughEnvCfg):
    """H1 velocity-tracking environment on flat ground.

    Derives from the rough-terrain config and strips the terrain-specific pieces:
    plane terrain, no height scanner, no terrain curriculum, retuned air-time reward.
    """
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # change terrain to flat
        self.scene.terrain.terrain_type = "plane"
        self.scene.terrain.terrain_generator = None
        # no height scan
        self.scene.height_scanner = None
        self.observations.policy.height_scan = None
        # no terrain curriculum
        self.curriculum.terrain_levels = None
        # retune the air-time reward for flat ground: full weight, longer step threshold
        self.rewards.feet_air_time.weight = 1.0
        self.rewards.feet_air_time.params["threshold"] = 0.6
class H1FlatEnvCfg_PLAY(H1FlatEnvCfg):
    """Small-scale, deterministic variant of ``H1FlatEnvCfg`` for interactive playback.

    NOTE(review): unlike ``H1FlatEnvCfg`` this class carries no ``@configclass``
    decorator — confirm that is intentional.
    """
    def __post_init__(self) -> None:
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
        # remove random pushing
        self.events.base_external_force_torque = None
        self.events.push_robot = None

View File

@@ -0,0 +1,12 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This sub-module contains the functions that are specific to the locomotion environments."""
from isaaclab.envs.mdp import * # noqa: F401, F403
from .curriculums import * # noqa: F401, F403
from .rewards import * # noqa: F401, F403
from .terminations import * # noqa: F401, F403

View File

@@ -0,0 +1,56 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Common functions that can be used to create curriculum for the learning environment.
The functions can be passed to the :class:`isaaclab.managers.CurriculumTermCfg` object to enable
the curriculum introduced by the function.
"""
from __future__ import annotations
from collections.abc import Sequence
from typing import TYPE_CHECKING
import torch
from isaaclab.assets import Articulation
from isaaclab.managers import SceneEntityCfg
from isaaclab.terrains import TerrainImporter
if TYPE_CHECKING:
from isaaclab.envs import ManagerBasedRLEnv
def terrain_levels_vel(
    env: ManagerBasedRLEnv, env_ids: Sequence[int], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Terrain curriculum driven by distance walked under the velocity command.

    Robots that traverse at least half a sub-terrain are promoted to harder terrain;
    robots that cover less than half the distance implied by their commanded velocity
    over the episode are demoted.

    .. note::
        Requires the ``generator`` terrain type; see
        :class:`isaaclab.terrains.TerrainImporter` for details.

    Returns:
        The mean terrain level over the given environment ids.
    """
    asset: Articulation = env.scene[asset_cfg.name]
    terrain: TerrainImporter = env.scene.terrain
    commanded = env.command_manager.get_command("base_velocity")
    # planar distance travelled from the environment origin
    walked = torch.norm(asset.data.root_pos_w[env_ids, :2] - env.scene.env_origins[env_ids, :2], dim=1)
    # promote robots that crossed half of a sub-terrain cell
    promote = walked > terrain.cfg.terrain_generator.size[0] / 2
    # demote robots that covered less than half of the commanded distance
    required = torch.norm(commanded[env_ids, :2], dim=1) * env.max_episode_length_s * 0.5
    demote = (walked < required) & ~promote
    terrain.update_env_origins(env_ids, promote, demote)
    return torch.mean(terrain.terrain_levels.float())

View File

@@ -0,0 +1,119 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Common functions that can be used to define rewards for the learning environment.
The functions can be passed to the :class:`isaaclab.managers.RewardTermCfg` object to
specify the reward function and its parameters.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import torch
from isaaclab.envs import mdp
from isaaclab.managers import SceneEntityCfg
from isaaclab.sensors import ContactSensor
from isaaclab.utils.math import quat_apply_inverse, yaw_quat
if TYPE_CHECKING:
from isaaclab.envs import ManagerBasedRLEnv
def feet_air_time(
    env: ManagerBasedRLEnv, command_name: str, sensor_cfg: SceneEntityCfg, threshold: float
) -> torch.Tensor:
    """Reward steps whose air time exceeds ``threshold``.

    For every foot that just touched down, the excess of its last air time over the
    threshold is summed. The reward is gated to zero when the commanded planar
    velocity is negligible, so standing still is not penalized for not stepping.
    """
    contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
    touched_down = contact_sensor.compute_first_contact(env.step_dt)[:, sensor_cfg.body_ids]
    airborne_duration = contact_sensor.data.last_air_time[:, sensor_cfg.body_ids]
    # credit only feet that just made contact, proportional to air time beyond the threshold
    reward = torch.sum((airborne_duration - threshold) * touched_down, dim=1)
    # zero the reward when the planar velocity command is (near) zero
    command_mag = torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1)
    return reward * (command_mag > 0.1)
def feet_air_time_positive_biped(env, command_name: str, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor:
    """Reward single-stance phases of a biped, capped at ``threshold`` seconds.

    While exactly one foot is in contact, the reward equals the shorter of the two
    feet's current phase durations (contact time for the stance foot, air time for
    the swing foot), clamped at the threshold. A near-zero planar command zeroes
    the reward.
    """
    contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
    air_time = contact_sensor.data.current_air_time[:, sensor_cfg.body_ids]
    contact_time = contact_sensor.data.current_contact_time[:, sensor_cfg.body_ids]
    in_contact = contact_time > 0.0
    # per-foot duration of its current mode (stance -> contact time, swing -> air time)
    mode_time = torch.where(in_contact, contact_time, air_time)
    one_foot_down = torch.sum(in_contact.int(), dim=1) == 1
    # reward the shortest mode time, but only while in single stance
    reward = torch.min(torch.where(one_foot_down.unsqueeze(-1), mode_time, 0.0), dim=1)[0]
    reward = reward.clamp(max=threshold)
    command_mag = torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1)
    return reward * (command_mag > 0.1)
def feet_slide(env, sensor_cfg: SceneEntityCfg, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
    """Penalize feet that translate while bearing load.

    Sums the planar speed of each foot, counted only while that foot is in contact,
    so the agent is penalized for dragging feet on the ground but not for swinging
    them in the air.
    """
    contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
    # a foot counts as loaded if its peak contact-force norm over the history exceeds 1 N
    force_history = contact_sensor.data.net_forces_w_history[:, :, sensor_cfg.body_ids, :]
    loaded = force_history.norm(dim=-1).max(dim=1)[0] > 1.0
    asset = env.scene[asset_cfg.name]
    planar_speed = asset.data.body_lin_vel_w[:, asset_cfg.body_ids, :2].norm(dim=-1)
    return torch.sum(planar_speed * loaded, dim=1)
def track_lin_vel_xy_yaw_frame_exp(
    env, std: float, command_name: str, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Exponential-kernel reward for tracking the commanded x-y linear velocity.

    The velocity error is measured in the yaw-only (gravity-aligned) base frame,
    and mapped through ``exp(-error^2 / std^2)``.
    """
    asset = env.scene[asset_cfg.name]
    # rotate the world-frame linear velocity into the heading (yaw-only) frame
    vel_in_yaw_frame = quat_apply_inverse(yaw_quat(asset.data.root_quat_w), asset.data.root_lin_vel_w[:, :3])
    target_xy = env.command_manager.get_command(command_name)[:, :2]
    error_sq = torch.sum(torch.square(target_xy - vel_in_yaw_frame[:, :2]), dim=1)
    return torch.exp(-error_sq / std**2)
def track_ang_vel_z_world_exp(
    env, command_name: str, std: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Exponential-kernel reward for tracking the commanded yaw rate in the world frame."""
    asset = env.scene[asset_cfg.name]
    target_yaw_rate = env.command_manager.get_command(command_name)[:, 2]
    # squared error mapped through exp(-err^2 / std^2)
    error_sq = torch.square(target_yaw_rate - asset.data.root_ang_vel_w[:, 2])
    return torch.exp(-error_sq / std**2)
def stand_still_joint_deviation_l1(
    env, command_name: str, command_threshold: float = 0.06, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Penalize deviation from the default joint pose while the planar command is near zero."""
    command_mag = torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1)
    # apply the L1 joint-deviation penalty only when the robot should be standing still
    return mdp.joint_deviation_l1(env, asset_cfg) * (command_mag < command_threshold)

View File

@@ -0,0 +1,10 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Symmetry functions for the velocity tasks.
These functions are used to augment the observations and actions of the environment.
They are specific to the velocity task and the choice of the robot.
"""

View File

@@ -0,0 +1,261 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Functions to specify the symmetry in the observation and action space for ANYmal."""
from __future__ import annotations
from typing import TYPE_CHECKING
import torch
from tensordict import TensorDict
if TYPE_CHECKING:
from isaaclab.envs import ManagerBasedRLEnv
# specify the functions that are available for import
__all__ = ["compute_symmetric_states"]
@torch.no_grad()
def compute_symmetric_states(
    env: ManagerBasedRLEnv,
    obs: TensorDict | None = None,
    actions: torch.Tensor | None = None,
):
    """Augments the given observations and actions by applying symmetry transformations.
    This function creates augmented versions of the provided observations and actions by applying
    four symmetrical transformations: original, left-right, front-back, and diagonal. The symmetry
    transformations are beneficial for reinforcement learning tasks by providing additional
    diverse data without requiring additional data collection.
    The augmented batch is 4x the input batch, ordered as
    [original, left-right, front-back, diagonal] along dim 0 for both outputs.
    Args:
        env: The environment instance.
        obs: The original observation tensor dictionary. Defaults to None.
        actions: The original actions tensor. Defaults to None.
    Returns:
        Augmented observations and actions tensors, or None if the respective input was None.
    """
    # observations
    if obs is not None:
        batch_size = obs.batch_size[0]
        # since we have 4 different symmetries, we need to augment the batch size by 4
        obs_aug = obs.repeat(4)
        # policy observation group
        # -- original
        obs_aug["policy"][:batch_size] = obs["policy"][:]
        # -- left-right
        obs_aug["policy"][batch_size : 2 * batch_size] = _transform_policy_obs_left_right(env.unwrapped, obs["policy"])
        # -- front-back
        obs_aug["policy"][2 * batch_size : 3 * batch_size] = _transform_policy_obs_front_back(
            env.unwrapped, obs["policy"]
        )
        # -- diagonal
        # note: reads the left-right segment written above, i.e. front-back(left-right(obs))
        obs_aug["policy"][3 * batch_size :] = _transform_policy_obs_front_back(
            env.unwrapped, obs_aug["policy"][batch_size : 2 * batch_size]
        )
    else:
        obs_aug = None
    # actions
    if actions is not None:
        batch_size = actions.shape[0]
        # since we have 4 different symmetries, we need to augment the batch size by 4
        actions_aug = torch.zeros(batch_size * 4, actions.shape[1], device=actions.device)
        # -- original
        actions_aug[:batch_size] = actions[:]
        # -- left-right
        actions_aug[batch_size : 2 * batch_size] = _transform_actions_left_right(actions)
        # -- front-back
        actions_aug[2 * batch_size : 3 * batch_size] = _transform_actions_front_back(actions)
        # -- diagonal
        # note: composes left-right then front-back, mirroring the observation ordering
        actions_aug[3 * batch_size :] = _transform_actions_front_back(actions_aug[batch_size : 2 * batch_size])
    else:
        actions_aug = None
    return obs_aug, actions_aug
"""
Symmetry functions for observations.
"""
def _transform_policy_obs_left_right(env: ManagerBasedRLEnv, obs: torch.Tensor) -> torch.Tensor:
    """Mirror a policy observation about the robot's left-right axis.

    Column layout assumed: lin vel 0:3, ang vel 3:6, projected gravity 6:9,
    velocity command 9:12, joint pos 12:24, joint vel 24:36, last actions 36:48,
    optional height scan 48:235. Base-frame quantities receive per-component sign
    flips; joint segments are remapped via the ANYmal left-right joint switch;
    the height scan grid is flipped along its first grid dimension.

    Args:
        env: The environment instance from which the observation is obtained.
        obs: The observation tensor to be transformed.

    Returns:
        The transformed observation tensor with left-right symmetry applied.
    """
    mirrored = obs.clone()
    device = obs.device
    # per-component sign flips for the base-frame segments
    sign_flips = {
        (0, 3): [1, -1, 1],    # linear velocity: negate y
        (3, 6): [-1, 1, -1],   # angular velocity: negate x and z rates
        (6, 9): [1, -1, 1],    # projected gravity: negate y
        (9, 12): [1, -1, -1],  # velocity command: negate lateral and yaw commands
    }
    for (start, stop), signs in sign_flips.items():
        mirrored[:, start:stop] = mirrored[:, start:stop] * torch.tensor(signs, device=device)
    # swap left/right legs for joint positions, joint velocities and last actions
    for start in (12, 24, 36):
        mirrored[:, start : start + 12] = _switch_anymal_joints_left_right(mirrored[:, start : start + 12])
    # note: this is hard-coded for grid-pattern of ordering "xy" and size (1.6, 1.0)
    if "height_scan" in env.observation_manager.active_terms["policy"]:
        mirrored[:, 48:235] = mirrored[:, 48:235].view(-1, 11, 17).flip(dims=[1]).view(-1, 11 * 17)
    return mirrored
def _transform_policy_obs_front_back(env: ManagerBasedRLEnv, obs: torch.Tensor) -> torch.Tensor:
    """Mirror a policy observation about the robot's front-back axis.

    Column layout assumed: lin vel 0:3, ang vel 3:6, projected gravity 6:9,
    velocity command 9:12, joint pos 12:24, joint vel 24:36, last actions 36:48,
    optional height scan 48:235. Base-frame quantities receive per-component sign
    flips; joint segments are remapped via the ANYmal front-back joint switch;
    the height scan grid is flipped along its second grid dimension.

    Args:
        env: The environment instance from which the observation is obtained.
        obs: The observation tensor to be transformed.

    Returns:
        The transformed observation tensor with front-back symmetry applied.
    """
    mirrored = obs.clone()
    device = obs.device
    # per-component sign flips for the base-frame segments
    sign_flips = {
        (0, 3): [-1, 1, 1],    # linear velocity: negate x
        (3, 6): [1, -1, -1],   # angular velocity: negate y and z rates
        (6, 9): [-1, 1, 1],    # projected gravity: negate x
        (9, 12): [-1, 1, -1],  # velocity command: negate forward and yaw commands
    }
    for (start, stop), signs in sign_flips.items():
        mirrored[:, start:stop] = mirrored[:, start:stop] * torch.tensor(signs, device=device)
    # swap front/hind legs for joint positions, joint velocities and last actions
    for start in (12, 24, 36):
        mirrored[:, start : start + 12] = _switch_anymal_joints_front_back(mirrored[:, start : start + 12])
    # note: this is hard-coded for grid-pattern of ordering "xy" and size (1.6, 1.0)
    if "height_scan" in env.observation_manager.active_terms["policy"]:
        mirrored[:, 48:235] = mirrored[:, 48:235].view(-1, 11, 17).flip(dims=[2]).view(-1, 11 * 17)
    return mirrored
"""
Symmetry functions for actions.
"""
def _transform_actions_left_right(actions: torch.Tensor) -> torch.Tensor:
    """Mirror an ANYmal action tensor about the left-right axis.

    Delegates to the joint left-right switch, which returns a new tensor and does
    not modify its input.

    Args:
        actions: The actions tensor to be transformed.

    Returns:
        The transformed actions tensor with left-right symmetry applied.
    """
    return _switch_anymal_joints_left_right(actions.clone())
def _transform_actions_front_back(actions: torch.Tensor) -> torch.Tensor:
    """Mirror an ANYmal action tensor about the front-back axis.

    Delegates to the joint front-back switch, which returns a new tensor and does
    not modify its input.

    Args:
        actions: The actions tensor to be transformed.

    Returns:
        The transformed actions tensor with front-back symmetry applied.
    """
    return _switch_anymal_joints_front_back(actions.clone())
"""
Helper functions for symmetry.
In Isaac Sim, the joint ordering is as follows:
[
'LF_HAA', 'LH_HAA', 'RF_HAA', 'RH_HAA',
'LF_HFE', 'LH_HFE', 'RF_HFE', 'RH_HFE',
'LF_KFE', 'LH_KFE', 'RF_KFE', 'RH_KFE'
]
Correspondingly, the joint ordering for the ANYmal robot is:
* LF = left front --> [0, 4, 8]
* LH = left hind --> [1, 5, 9]
* RF = right front --> [2, 6, 10]
* RH = right hind --> [3, 7, 11]
"""
def _switch_anymal_joints_left_right(joint_data: torch.Tensor) -> torch.Tensor:
"""Applies a left-right symmetry transformation to the joint data tensor."""
joint_data_switched = torch.zeros_like(joint_data)
# left <-- right
joint_data_switched[..., [0, 4, 8, 1, 5, 9]] = joint_data[..., [2, 6, 10, 3, 7, 11]]
# right <-- left
joint_data_switched[..., [2, 6, 10, 3, 7, 11]] = joint_data[..., [0, 4, 8, 1, 5, 9]]
# Flip the sign of the HAA joints
joint_data_switched[..., [0, 1, 2, 3]] *= -1.0
return joint_data_switched
def _switch_anymal_joints_front_back(joint_data: torch.Tensor) -> torch.Tensor:
"""Applies a front-back symmetry transformation to the joint data tensor."""
joint_data_switched = torch.zeros_like(joint_data)
# front <-- hind
joint_data_switched[..., [0, 4, 8, 2, 6, 10]] = joint_data[..., [1, 5, 9, 3, 7, 11]]
# hind <-- front
joint_data_switched[..., [1, 5, 9, 3, 7, 11]] = joint_data[..., [0, 4, 8, 2, 6, 10]]
# Flip the sign of the HFE and KFE joints
joint_data_switched[..., 4:] *= -1
return joint_data_switched

View File

@@ -0,0 +1,54 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Common functions that can be used to activate certain terminations.
The functions can be passed to the :class:`isaaclab.managers.TerminationTermCfg` object to enable
the termination introduced by the function.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import torch
from isaaclab.assets import RigidObject
from isaaclab.managers import SceneEntityCfg
if TYPE_CHECKING:
from isaaclab.envs import ManagerBasedRLEnv
def terrain_out_of_bounds(
    env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), distance_buffer: float = 3.0
) -> torch.Tensor:
    """Terminate actors that wander within ``distance_buffer`` of the terrain border.

    For a ``plane`` terrain this never terminates (the plane is unbounded). For a
    ``generator`` terrain the map extent is derived from the sub-terrain grid plus
    its border, and an actor terminates once its |x| or |y| exceeds the half-extent
    minus the buffer.

    Raises:
        ValueError: If the terrain type is neither ``"plane"`` nor ``"generator"``.
    """
    terrain_type = env.scene.cfg.terrain.terrain_type
    if terrain_type == "plane":
        # an infinite plane has no border to fall off
        return torch.zeros(env.num_envs, dtype=torch.bool, device=env.device)
    if terrain_type == "generator":
        gen_cfg = env.scene.terrain.cfg.terrain_generator
        cell_width, cell_length = gen_cfg.size
        # total map extents: sub-terrain grid plus the border on both sides
        map_width = gen_cfg.num_rows * cell_width + 2 * gen_cfg.border_width
        map_height = gen_cfg.num_cols * cell_length + 2 * gen_cfg.border_width
        asset: RigidObject = env.scene[asset_cfg.name]
        root_xy = asset.data.root_pos_w[:, :2]
        out_x = torch.abs(root_xy[:, 0]) > 0.5 * map_width - distance_buffer
        out_y = torch.abs(root_xy[:, 1]) > 0.5 * map_height - distance_buffer
        return torch.logical_or(out_x, out_y)
    raise ValueError("Received unsupported terrain type, must be either 'plane' or 'generator'.")

View File

@@ -0,0 +1,142 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from isaaclab.managers import RewardTermCfg as RewTerm
from isaaclab.managers import SceneEntityCfg
from isaaclab.utils import configclass
import isaaclab_tasks.manager_based.locomotion.velocity.mdp as mdp
from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg, RewardsCfg
##
# Pre-defined configs
##
from isaaclab_assets import H1_MINIMAL_CFG # isort: skip
@configclass
class H1Rewards(RewardsCfg):
    """Reward terms for the MDP."""
    # Large penalty on any episode termination.
    termination_penalty = RewTerm(func=mdp.is_terminated, weight=-200.0)
    # Drop the inherited vertical-base-velocity penalty term.
    lin_vel_z_l2 = None
    # Velocity tracking: yaw-frame linear and world-frame yaw-rate variants, std 0.5.
    track_lin_vel_xy_exp = RewTerm(
        func=mdp.track_lin_vel_xy_yaw_frame_exp,
        weight=1.0,
        params={"command_name": "base_velocity", "std": 0.5},
    )
    track_ang_vel_z_exp = RewTerm(
        func=mdp.track_ang_vel_z_world_exp, weight=1.0, params={"command_name": "base_velocity", "std": 0.5}
    )
    # Gait shaping on the ankle bodies: reward single-stance air time, penalize sliding.
    feet_air_time = RewTerm(
        func=mdp.feet_air_time_positive_biped,
        weight=0.25,
        params={
            "command_name": "base_velocity",
            "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*ankle_link"),
            "threshold": 0.4,
        },
    )
    feet_slide = RewTerm(
        func=mdp.feet_slide,
        weight=-0.25,
        params={
            "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*ankle_link"),
            "asset_cfg": SceneEntityCfg("robot", body_names=".*ankle_link"),
        },
    )
    # Penalize ankle joint limits
    dof_pos_limits = RewTerm(
        func=mdp.joint_pos_limits, weight=-1.0, params={"asset_cfg": SceneEntityCfg("robot", joint_names=".*_ankle")}
    )
    # Penalize deviation from default of the joints that are not essential for locomotion
    joint_deviation_hip = RewTerm(
        func=mdp.joint_deviation_l1,
        weight=-0.2,
        params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*_hip_yaw", ".*_hip_roll"])},
    )
    joint_deviation_arms = RewTerm(
        func=mdp.joint_deviation_l1,
        weight=-0.2,
        params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*_shoulder_.*", ".*_elbow"])},
    )
    joint_deviation_torso = RewTerm(
        func=mdp.joint_deviation_l1, weight=-0.1, params={"asset_cfg": SceneEntityCfg("robot", joint_names="torso")}
    )
@configclass
class H1RoughEnvCfg(LocomotionVelocityRoughEnvCfg):
    """Rough-terrain velocity-tracking environment for the Unitree H1 biped."""

    # Use the H1-specific reward set instead of the generic one.
    rewards: H1Rewards = H1Rewards()

    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # Scene: swap in the H1 robot and mount the height scanner on the torso.
        self.scene.robot = H1_MINIMAL_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
        if self.scene.height_scanner:
            self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/torso_link"
        # Randomization: disable pushes and mass randomization; keep joint reset at
        # the default pose (range (1.0, 1.0) scales the default position exactly).
        self.events.push_robot = None
        self.events.add_base_mass = None
        self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0)
        self.events.base_external_force_torque.params["asset_cfg"].body_names = [".*torso_link"]
        # Reset base with a small planar offset and random yaw, zero velocity.
        self.events.reset_base.params = {
            "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
            "velocity_range": {
                "x": (0.0, 0.0),
                "y": (0.0, 0.0),
                "z": (0.0, 0.0),
                "roll": (0.0, 0.0),
                "pitch": (0.0, 0.0),
                "yaw": (0.0, 0.0),
            },
        }
        self.events.base_com = None
        # Rewards: tune weights inherited from the base config for the biped.
        self.rewards.undesired_contacts = None
        self.rewards.flat_orientation_l2.weight = -1.0
        self.rewards.dof_torques_l2.weight = 0.0
        self.rewards.action_rate_l2.weight = -0.005
        self.rewards.dof_acc_l2.weight = -1.25e-7
        # Commands: forward-only linear velocity, symmetric yaw rate.
        self.commands.base_velocity.ranges.lin_vel_x = (0.0, 1.0)
        self.commands.base_velocity.ranges.lin_vel_y = (0.0, 0.0)
        self.commands.base_velocity.ranges.ang_vel_z = (-1.0, 1.0)
        # Terminations: terminate on torso contact.
        self.terminations.base_contact.params["sensor_cfg"].body_names = ".*torso_link"
@configclass
class H1RoughEnvCfg_PLAY(H1RoughEnvCfg):
    """Play/evaluation variant: smaller scene, fixed command, no randomization."""

    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        self.episode_length_s = 40.0
        # spawn the robot randomly in the grid (instead of their terrain levels)
        self.scene.terrain.max_init_terrain_level = None
        # reduce the number of terrains to save memory
        if self.scene.terrain.terrain_generator is not None:
            self.scene.terrain.terrain_generator.num_rows = 5
            self.scene.terrain.terrain_generator.num_cols = 5
            self.scene.terrain.terrain_generator.curriculum = False
        # Fixed 1 m/s forward command with random yaw rate, zero heading target.
        self.commands.base_velocity.ranges.lin_vel_x = (1.0, 1.0)
        self.commands.base_velocity.ranges.lin_vel_y = (0.0, 0.0)
        self.commands.base_velocity.ranges.ang_vel_z = (-1.0, 1.0)
        self.commands.base_velocity.ranges.heading = (0.0, 0.0)
        # disable randomization for play
        self.observations.policy.enable_corruption = False
        # remove random pushing
        self.events.base_external_force_torque = None
        self.events.push_robot = None

View File

@@ -0,0 +1,12 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Locomotion environments with velocity-tracking commands.
These environments are based on the `legged_gym` environments provided by Rudin et al.
Reference:
https://github.com/leggedrobotics/legged_gym
"""

View File

@@ -0,0 +1,49 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym

# NOTE(review): wildcard import re-exports everything from ``agents``; the
# commented plain import below would be the more conventional form.
# from . import agents
from .agents import *

##
# Register Gym environments.
##

##
# MindRobot Left Arm IK-Rel (keyboard teleop)
##
# Earlier registration kept for reference (same id, explicit entry-point string):
# gym.register(
#     id="Isaac-MindRobot-LeftArm-IK-Rel-v0",
#     entry_point="isaaclab.envs:ManagerBasedRLEnv",
#     kwargs={
#         "env_cfg_entry_point": (
#             "isaaclab_tasks.manager_based.manipulation.mindrobot."
#             "mindrobot_left_arm_ik_env_cfg:MindRobotLeftArmIKRelEnvCfg"
#         ),
#         "robomimic_bc_cfg_entry_point": "isaaclab_tasks.manager_based.manipulation.mindrobot.agents:robomimic/bc_rnn_low_dim.json",
#     },
#     disable_env_checker=True,
# )

# Relative-mode IK teleoperation task (keyboard/spacemouse deltas).
gym.register(
    id="Isaac-MindRobot-LeftArm-IK-Rel-v0",
    entry_point="isaaclab.envs:ManagerBasedRLEnv",
    kwargs={
        "env_cfg_entry_point": f"{__name__}.mindrobot_left_arm_ik_env_cfg:MindRobotLeftArmIKEnvCfg",
        "robomimic_bc_cfg_entry_point": f"{__name__}.agents:robomimic/bc_rnn_low_dim.json",
    },
    disable_env_checker=True,
)
# Absolute-mode IK teleoperation task (e.g. VR controller poses).
gym.register(
    id="Isaac-MindRobot-LeftArm-IK-Abs-v0",
    entry_point="isaaclab.envs:ManagerBasedRLEnv",
    kwargs={
        "env_cfg_entry_point": f"{__name__}.mindrobot_left_arm_ik_env_cfg:MindRobotLeftArmIKAbsEnvCfg",
        "robomimic_bc_cfg_entry_point": f"{__name__}.agents:robomimic/bc_rnn_low_dim.json",
    },
    disable_env_checker=True,
)

View File

@@ -0,0 +1,4 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

View File

@@ -0,0 +1,219 @@
{
"algo_name": "bc",
"experiment": {
"name": "bc_rnn_image_franka_stack",
"validate": false,
"logging": {
"terminal_output_to_txt": true,
"log_tb": true
},
"save": {
"enabled": true,
"every_n_seconds": null,
"every_n_epochs": 20,
"epochs": [],
"on_best_validation": false,
"on_best_rollout_return": false,
"on_best_rollout_success_rate": true
},
"epoch_every_n_steps": 500,
"env": null,
"additional_envs": null,
"render": false,
"render_video": false,
"rollout": {
"enabled": false
}
},
"train": {
"data": null,
"num_data_workers": 4,
"hdf5_cache_mode": "low_dim",
"hdf5_use_swmr": true,
"hdf5_load_next_obs": false,
"hdf5_normalize_obs": false,
"hdf5_filter_key": null,
"hdf5_validation_filter_key": null,
"seq_length": 10,
"pad_seq_length": true,
"frame_stack": 1,
"pad_frame_stack": true,
"dataset_keys": [
"actions",
"rewards",
"dones"
],
"goal_mode": null,
"cuda": true,
"batch_size": 16,
"num_epochs": 600,
"seed": 101
},
"algo": {
"optim_params": {
"policy": {
"optimizer_type": "adam",
"learning_rate": {
"initial": 0.0001,
"decay_factor": 0.1,
"epoch_schedule": [],
"scheduler_type": "multistep"
},
"regularization": {
"L2": 0.0
}
}
},
"loss": {
"l2_weight": 1.0,
"l1_weight": 0.0,
"cos_weight": 0.0
},
"actor_layer_dims": [],
"gaussian": {
"enabled": false,
"fixed_std": false,
"init_std": 0.1,
"min_std": 0.01,
"std_activation": "softplus",
"low_noise_eval": true
},
"gmm": {
"enabled": true,
"num_modes": 5,
"min_std": 0.0001,
"std_activation": "softplus",
"low_noise_eval": true
},
"vae": {
"enabled": false,
"latent_dim": 14,
"latent_clip": null,
"kl_weight": 1.0,
"decoder": {
"is_conditioned": true,
"reconstruction_sum_across_elements": false
},
"prior": {
"learn": false,
"is_conditioned": false,
"use_gmm": false,
"gmm_num_modes": 10,
"gmm_learn_weights": false,
"use_categorical": false,
"categorical_dim": 10,
"categorical_gumbel_softmax_hard": false,
"categorical_init_temp": 1.0,
"categorical_temp_anneal_step": 0.001,
"categorical_min_temp": 0.3
},
"encoder_layer_dims": [
300,
400
],
"decoder_layer_dims": [
300,
400
],
"prior_layer_dims": [
300,
400
]
},
"rnn": {
"enabled": true,
"horizon": 10,
"hidden_dim": 1000,
"rnn_type": "LSTM",
"num_layers": 2,
"open_loop": false,
"kwargs": {
"bidirectional": false
}
},
"transformer": {
"enabled": false,
"context_length": 10,
"embed_dim": 512,
"num_layers": 6,
"num_heads": 8,
"emb_dropout": 0.1,
"attn_dropout": 0.1,
"block_output_dropout": 0.1,
"sinusoidal_embedding": false,
"activation": "gelu",
"supervise_all_steps": false,
"nn_parameter_for_timesteps": true
}
},
"observation": {
"modalities": {
"obs": {
"low_dim": [
"eef_pos",
"eef_quat",
"gripper_pos"
],
"rgb": [
"table_cam",
"wrist_cam"
],
"depth": [],
"scan": []
},
"goal": {
"low_dim": [],
"rgb": [],
"depth": [],
"scan": []
}
},
"encoder": {
"low_dim": {
"core_class": null,
"core_kwargs": {},
"obs_randomizer_class": null,
"obs_randomizer_kwargs": {}
},
"rgb": {
"core_class": "VisualCore",
"core_kwargs": {
"feature_dimension": 64,
"flatten": true,
"backbone_class": "ResNet18Conv",
"backbone_kwargs": {
"pretrained": false,
"input_coord_conv": false
},
"pool_class": "SpatialSoftmax",
"pool_kwargs": {
"num_kp": 32,
"learnable_temperature": false,
"temperature": 1.0,
"noise_std": 0.0,
"output_variance": false
}
},
"obs_randomizer_class": "CropRandomizer",
"obs_randomizer_kwargs": {
"crop_height": 181,
"crop_width": 181,
"num_crops": 1,
"pos_enc": false
}
},
"depth": {
"core_class": "VisualCore",
"core_kwargs": {},
"obs_randomizer_class": null,
"obs_randomizer_kwargs": {}
},
"scan": {
"core_class": "ScanCore",
"core_kwargs": {},
"obs_randomizer_class": null,
"obs_randomizer_kwargs": {}
}
}
}
}

View File

@@ -0,0 +1,218 @@
{
"algo_name": "bc",
"experiment": {
"name": "bc_rnn_image_franka_stack_cosmos",
"validate": false,
"logging": {
"terminal_output_to_txt": true,
"log_tb": true
},
"save": {
"enabled": true,
"every_n_seconds": null,
"every_n_epochs": 20,
"epochs": [],
"on_best_validation": false,
"on_best_rollout_return": false,
"on_best_rollout_success_rate": true
},
"epoch_every_n_steps": 500,
"env": null,
"additional_envs": null,
"render": false,
"render_video": false,
"rollout": {
"enabled": false
}
},
"train": {
"data": null,
"num_data_workers": 4,
"hdf5_cache_mode": "low_dim",
"hdf5_use_swmr": true,
"hdf5_load_next_obs": false,
"hdf5_normalize_obs": false,
"hdf5_filter_key": null,
"hdf5_validation_filter_key": null,
"seq_length": 10,
"pad_seq_length": true,
"frame_stack": 1,
"pad_frame_stack": true,
"dataset_keys": [
"actions",
"rewards",
"dones"
],
"goal_mode": null,
"cuda": true,
"batch_size": 16,
"num_epochs": 600,
"seed": 101
},
"algo": {
"optim_params": {
"policy": {
"optimizer_type": "adam",
"learning_rate": {
"initial": 0.0001,
"decay_factor": 0.1,
"epoch_schedule": [],
"scheduler_type": "multistep"
},
"regularization": {
"L2": 0.0
}
}
},
"loss": {
"l2_weight": 1.0,
"l1_weight": 0.0,
"cos_weight": 0.0
},
"actor_layer_dims": [],
"gaussian": {
"enabled": false,
"fixed_std": false,
"init_std": 0.1,
"min_std": 0.01,
"std_activation": "softplus",
"low_noise_eval": true
},
"gmm": {
"enabled": true,
"num_modes": 5,
"min_std": 0.0001,
"std_activation": "softplus",
"low_noise_eval": true
},
"vae": {
"enabled": false,
"latent_dim": 14,
"latent_clip": null,
"kl_weight": 1.0,
"decoder": {
"is_conditioned": true,
"reconstruction_sum_across_elements": false
},
"prior": {
"learn": false,
"is_conditioned": false,
"use_gmm": false,
"gmm_num_modes": 10,
"gmm_learn_weights": false,
"use_categorical": false,
"categorical_dim": 10,
"categorical_gumbel_softmax_hard": false,
"categorical_init_temp": 1.0,
"categorical_temp_anneal_step": 0.001,
"categorical_min_temp": 0.3
},
"encoder_layer_dims": [
300,
400
],
"decoder_layer_dims": [
300,
400
],
"prior_layer_dims": [
300,
400
]
},
"rnn": {
"enabled": true,
"horizon": 10,
"hidden_dim": 1000,
"rnn_type": "LSTM",
"num_layers": 2,
"open_loop": false,
"kwargs": {
"bidirectional": false
}
},
"transformer": {
"enabled": false,
"context_length": 10,
"embed_dim": 512,
"num_layers": 6,
"num_heads": 8,
"emb_dropout": 0.1,
"attn_dropout": 0.1,
"block_output_dropout": 0.1,
"sinusoidal_embedding": false,
"activation": "gelu",
"supervise_all_steps": false,
"nn_parameter_for_timesteps": true
}
},
"observation": {
"modalities": {
"obs": {
"low_dim": [
"eef_pos",
"eef_quat",
"gripper_pos"
],
"rgb": [
"table_cam"
],
"depth": [],
"scan": []
},
"goal": {
"low_dim": [],
"rgb": [],
"depth": [],
"scan": []
}
},
"encoder": {
"low_dim": {
"core_class": null,
"core_kwargs": {},
"obs_randomizer_class": null,
"obs_randomizer_kwargs": {}
},
"rgb": {
"core_class": "VisualCore",
"core_kwargs": {
"feature_dimension": 64,
"flatten": true,
"backbone_class": "ResNet18Conv",
"backbone_kwargs": {
"pretrained": false,
"input_coord_conv": false
},
"pool_class": "SpatialSoftmax",
"pool_kwargs": {
"num_kp": 32,
"learnable_temperature": false,
"temperature": 1.0,
"noise_std": 0.0,
"output_variance": false
}
},
"obs_randomizer_class": "CropRandomizer",
"obs_randomizer_kwargs": {
"crop_height": 180,
"crop_width": 180,
"num_crops": 1,
"pos_enc": false
}
},
"depth": {
"core_class": "VisualCore",
"core_kwargs": {},
"obs_randomizer_class": null,
"obs_randomizer_kwargs": {}
},
"scan": {
"core_class": "ScanCore",
"core_kwargs": {},
"obs_randomizer_class": null,
"obs_randomizer_kwargs": {}
}
}
}
}

View File

@@ -0,0 +1,101 @@
{
"algo_name": "bc",
"experiment": {
"name": "bc_rnn_low_dim_mindbot_stack",
"validate": false,
"logging": {
"terminal_output_to_txt": true,
"log_tb": true
},
"save": {
"enabled": true,
"every_n_seconds": null,
"every_n_epochs": 100,
"epochs": [],
"on_best_validation": false,
"on_best_rollout_return": false,
"on_best_rollout_success_rate": true
},
"epoch_every_n_steps": 100,
"env": null,
"additional_envs": null,
"render": false,
"render_video": false,
"rollout": {
"enabled": false
}
},
"train": {
"data": null,
"num_data_workers": 4,
"hdf5_cache_mode": "all",
"hdf5_use_swmr": true,
"hdf5_normalize_obs": false,
"hdf5_filter_key": null,
"hdf5_validation_filter_key": null,
"seq_length": 10,
"dataset_keys": [
"actions"
],
"goal_mode": null,
"cuda": true,
"batch_size": 100,
"num_epochs": 2000,
"seed": 101
},
"algo": {
"optim_params": {
"policy": {
"optimizer_type": "adam",
"learning_rate": {
"initial": 0.001,
"decay_factor": 0.1,
"epoch_schedule": [],
"scheduler_type": "multistep"
},
"regularization": {
"L2": 0.0
}
}
},
"loss": {
"l2_weight": 1.0,
"l1_weight": 0.0,
"cos_weight": 0.0
},
"actor_layer_dims": [],
"gmm": {
"enabled": true,
"num_modes": 5,
"min_std": 0.0001,
"std_activation": "softplus",
"low_noise_eval": true
},
"rnn": {
"enabled": true,
"horizon": 10,
"hidden_dim": 400,
"rnn_type": "LSTM",
"num_layers": 2,
"open_loop": false,
"kwargs": {
"bidirectional": false
}
}
},
"observation": {
"modalities": {
"obs": {
"low_dim": [
"eef_pos",
"eef_quat",
"gripper_pos",
"object"
],
"rgb": [],
"depth": [],
"scan": []
}
}
}
}

View File

@@ -0,0 +1,141 @@
# Copyright (c) 2024, MindRobot Project
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for MindRobot dual-arm robot.
Arms are RealMan RM-65 (6-DOF, 5kg payload, 7.2kg self-weight, 610mm reach).
Reference: https://develop.realman-robotics.com/robot4th/robotParameter/RM65OntologyParameters/
The following configurations are available:
* :obj:`MINDBOT_CFG`: Base configuration with gravity enabled (general purpose).
* :obj:`MINDBOT_HIGH_PD_CFG`: Gravity disabled, stiffer PD for IK task-space control.
"""
import isaaclab.sim as sim_utils
from isaaclab.actuators import ImplicitActuatorCfg
from isaaclab.assets import ArticulationCfg
from mindbot.utils.assets import MINDBOT_ASSETS_DIR
MINDBOT_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        # Forward slashes: asset paths elsewhere in this project use "/" and
        # they resolve on both Windows and Linux, unlike escaped backslashes.
        usd_path=f"{MINDBOT_ASSETS_DIR}/robots/mindrobot/mindrobot_cd2.usd",
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            max_depenetration_velocity=5.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            fix_root_link=False,
            enabled_self_collisions=True,
            solver_position_iteration_count=8,
            solver_velocity_iteration_count=0,
        ),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        joint_pos={
            # ====== Left arm (RM-65) — away from singularities ======
            # Elbow singularity: q3=0; Wrist singularity: q5=0.
            # The pose below keeps q3≠0 and q5≠0.
            "l_joint1": 1.5708,  # 90°
            "l_joint2": -1.5708,  # -90°
            "l_joint3": -1.5708,  # -90° (away from elbow singularity q3=0)
            "l_joint4": 1.5708,  # 90°
            "l_joint5": 1.5708,  # 90° (away from wrist singularity q5=0)
            "l_joint6": -1.5708,  # -90°
            # ====== Right arm (RM-65) ======
            "r_joint1": -2.3562,
            "r_joint2": -1.2217,
            "r_joint3": 1.5708,
            "r_joint4": -1.5708,
            "r_joint5": -1.5708,
            "r_joint6": 1.2217,
            # ====== Grippers (0=open) ======
            "left_hand_joint_left": 0.0,
            "left_hand_joint_right": 0.0,
            "right_hand_joint_left": 0.0,
            "right_hand_joint_right": 0.0,
            # ====== Trunk & Head ======
            "PrismaticJoint": 0.26,
            "head_revoluteJoint": 0.0,
        },
        pos=(0.0, 0.0, 0.7),
    ),
    actuators={
        # RM-65 J1-J3: shoulder/elbow — max 180°/s ≈ 3.14 rad/s
        "left_arm_shoulder": ImplicitActuatorCfg(
            joint_names_expr=["l_joint[1-3]"],
            effort_limit_sim=50.0,
            velocity_limit_sim=3.14,
            stiffness=80.0,
            damping=4.0,
        ),
        # RM-65 J4-J6: wrist — max 225°/s ≈ 3.93 rad/s
        "left_arm_wrist": ImplicitActuatorCfg(
            joint_names_expr=["l_joint[4-6]"],
            effort_limit_sim=20.0,
            velocity_limit_sim=3.93,
            stiffness=80.0,
            damping=4.0,
        ),
        "right_arm_shoulder": ImplicitActuatorCfg(
            joint_names_expr=["r_joint[1-3]"],
            effort_limit_sim=50.0,
            velocity_limit_sim=3.14,
            stiffness=80.0,
            damping=4.0,
        ),
        "right_arm_wrist": ImplicitActuatorCfg(
            joint_names_expr=["r_joint[4-6]"],
            effort_limit_sim=20.0,
            velocity_limit_sim=3.93,
            stiffness=80.0,
            damping=4.0,
        ),
        "head": ImplicitActuatorCfg(
            joint_names_expr=["head_revoluteJoint"],
            effort_limit_sim=12.0,
            stiffness=80.0,
            damping=4.0,
        ),
        # Slow, stiff vertical trunk lift.
        "trunk": ImplicitActuatorCfg(
            joint_names_expr=["PrismaticJoint"],
            effort_limit_sim=200.0,
            velocity_limit_sim=0.2,
            stiffness=1000.0,
            damping=100.0,
        ),
        # Wheels are velocity-controlled: zero stiffness, damping only.
        "wheels": ImplicitActuatorCfg(
            # joint_names_expr=[".*_revolute_Joint"],
            joint_names_expr=[
                "right_b_revolute_Joint",
                "left_b_revolute_Joint",
                "left_f_revolute_Joint",
                "right_f_revolute_Joint",
            ],
            effort_limit_sim=200.0,
            stiffness=0.0,
            damping=100.0,
        ),
        "grippers": ImplicitActuatorCfg(
            joint_names_expr=[".*_hand_joint.*"],
            effort_limit_sim=200.0,
            stiffness=2e3,
            damping=1e2,
        ),
    },
    soft_joint_pos_limit_factor=1.0,
)
"""Base configuration for MindRobot. Gravity enabled, low PD gains."""
MINDBOT_HIGH_PD_CFG = MINDBOT_CFG.copy()
# Stiffen both arms (shoulder + wrist actuator groups) for accurate
# task-space tracking under differential-IK control.
for _arm_actuator in (
    "left_arm_shoulder",
    "left_arm_wrist",
    "right_arm_shoulder",
    "right_arm_wrist",
):
    MINDBOT_HIGH_PD_CFG.actuators[_arm_actuator].stiffness = 400.0
    MINDBOT_HIGH_PD_CFG.actuators[_arm_actuator].damping = 80.0
"""IK task-space control configuration: stiffer arm PD gains for tracking.

NOTE(review): this copy does not itself disable gravity (the spawn config
keeps ``disable_gravity=False``); confirm whether gravity is disabled
elsewhere at runtime before relying on it."""

View File

@@ -0,0 +1,406 @@
# Copyright (c) 2024, MindRobot Project
# SPDX-License-Identifier: BSD-3-Clause
"""MindRobot left arm IK-Rel environment config for teleoperation."""
from __future__ import annotations
from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR
import torch
import isaaclab.sim as sim_utils
from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg
from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from isaaclab.devices import DevicesCfg
from isaaclab.devices.keyboard import Se3KeyboardCfg
from isaaclab.devices.spacemouse import Se3SpaceMouseCfg
from isaaclab.devices.gamepad import Se3GamepadCfg
from isaaclab.envs import ManagerBasedRLEnvCfg
from isaaclab.envs.mdp.actions.actions_cfg import (
BinaryJointPositionActionCfg,
DifferentialInverseKinematicsActionCfg,
JointVelocityActionCfg,
)
from isaaclab.managers import EventTermCfg as EventTerm
from isaaclab.managers import ObservationGroupCfg as ObsGroup
from isaaclab.managers import ObservationTermCfg as ObsTerm
from isaaclab.managers import TerminationTermCfg as DoneTerm
from isaaclab.scene import InteractiveSceneCfg
from isaaclab.sensors.frame_transformer.frame_transformer_cfg import FrameTransformerCfg
from isaaclab.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg
from isaaclab.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg
from isaaclab.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg
from isaaclab.utils import configclass
from isaaclab.markers.config import FRAME_MARKER_CFG
from isaaclab.devices.openxr import XrCfg
from isaaclab.sensors import CameraCfg
from isaaclab.envs import ViewerCfg
# 导入基础配置和MDP函数
from isaaclab_tasks.manager_based.manipulation.stack import mdp
# 导入 MindRobot 资产
from .mindrobot_cfg import MINDBOT_HIGH_PD_CFG
# 在文件开头添加
import isaaclab.utils.assets as assets_utils
from mindbot.utils.assets import MINDBOT_ASSETS_DIR
from mindbot.assets.lab import ROOM_CFG
from mindbot.assets.table import TABLE_CFG
from mindbot.assets.dryingbox import DRYINGBOX_CFG
# # 然后在 scene 配置中使用
# spawn=sim_utils.UsdFileCfg(
# usd_path=f"{assets_utils.ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd"
# ),
# =====================================================================
# Scene Configuration
# =====================================================================
@configclass
class MindRobotTeleopSceneCfg(InteractiveSceneCfg):
    """Minimal scene for MindRobot teleoperation: robot + table + optional cube."""

    # Ground plane at the world origin.
    plane = AssetBaseCfg(
        prim_path="/World/GroundPlane",
        init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.0, 0.0]),
        spawn=GroundPlaneCfg(),
    )

    # Previous table asset (Isaac Nucleus SeattleLabTable), kept for reference:
    # table = AssetBaseCfg(
    #     prim_path="{ENV_REGEX_NS}/Table",
    #     init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0, 0], rot=[0.707, 0, 0, 0.707]),
    #     spawn=sim_utils.UsdFileCfg(
    #         usd_path=f"{assets_utils.ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd"
    #     ),
    # )

    # Optional: Single cube for testing (can be removed if not needed)
    cube = RigidObjectCfg(
        prim_path="{ENV_REGEX_NS}/Cube",
        spawn=sim_utils.CuboidCfg(
            size=(0.04, 0.04, 0.04),
            rigid_props=RigidBodyPropertiesCfg(),
            mass_props=sim_utils.MassPropertiesCfg(mass=0.1),
            collision_props=sim_utils.CollisionPropertiesCfg(),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
        ),
        init_state=RigidObjectCfg.InitialStateCfg(pos=(0.5, 0.0, 0.45)),
    )

    # Project assets: lab room, table and drying box, one copy per env.
    room = ROOM_CFG.replace(prim_path="{ENV_REGEX_NS}/Room")
    table = TABLE_CFG.replace(prim_path="{ENV_REGEX_NS}/Table")
    drying_box = DRYINGBOX_CFG.replace(prim_path="{ENV_REGEX_NS}/DryingBox")
    # room = AssetBaseCfg(
    #     prim_path="{ENV_REGEX_NS}/Room",
    #     spawn=sim_utils.UsdFileCfg(
    #         usd_path=f"{MINDBOT_ASSETS_DIR}/twinlab/Collected_lab2/lab.usd",
    #     ),
    # )

    # MindRobot with high-PD arm gains (replaced again in env __post_init__).
    robot: ArticulationCfg = MINDBOT_HIGH_PD_CFG.replace(
        prim_path="{ENV_REGEX_NS}/Robot"
    )

    # Cameras — CameraCfg sensors are only needed when reading pixel data (e.g. IL data collection).
    # For viewport display only, use omni.kit.viewport.utility to bind the camera prim directly.
    # Uncomment below AND add --enable_cameras flag only when sensor data is required.
    # left_hand_camera = CameraCfg(
    #     prim_path="{ENV_REGEX_NS}/Robot/rm_65_fb_left/Link_6/robot___left_8_02/cam_left_hand",
    #     update_period=1 / 30,
    #     height=480,
    #     width=640,
    #     data_types=["rgb"],
    #     spawn=None,
    # )
    # right_hand_camera = CameraCfg(
    #     prim_path="{ENV_REGEX_NS}/Robot/rm_65_b_right/Link6/robot___right_8_2/cam_right_hand",
    #     update_period=1 / 30,
    #     height=480,
    #     width=640,
    #     data_types=["rgb"],
    #     spawn=None,
    # )
    # head_camera = CameraCfg(
    #     prim_path="{ENV_REGEX_NS}/Robot/robot_head/cam_head",
    #     update_period=1 / 30,
    #     height=480,
    #     width=640,
    #     data_types=["rgb"],
    #     spawn=None,
    # )
    # chest_camera = CameraCfg(
    #     prim_path="{ENV_REGEX_NS}/Robot/robot_trunk/cam_chest",
    #     update_period=1 / 30,
    #     height=480,
    #     width=640,
    #     data_types=["rgb"],
    #     spawn=None,
    # )

    # Dome lighting for the whole scene.
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
    )

    # End-effector frame transformer
    ee_frame: FrameTransformerCfg = None  # Will be set in __post_init__
# =====================================================================
# Actions Configuration
# =====================================================================
@configclass
class MindRobotTeleopActionsCfg:
    """Actions for MindRobot left arm IK teleoperation."""

    # Left arm IK control: relative pose deltas on the left-hand body, solved
    # with damped least squares over the six left-arm joints.
    arm_action = DifferentialInverseKinematicsActionCfg(
        asset_name="robot",
        joint_names=["l_joint[1-6]"],
        body_name="left_hand_body",
        controller=DifferentialIKControllerCfg(
            command_type="pose",
            use_relative_mode=True,
            ik_method="dls",
        ),
        scale=1,
        body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(
            pos=[0.0, 0.0, 0.0],
        ),
    )

    # Wheel velocity control for differential drive (skid-steer).
    # Joint order in articulation: right_b, left_b, left_f, right_f
    # (from terminal joint index listing: [2],[3],[4],[5]).
    # Action vector: [right_b_vel, left_b_vel, left_f_vel, right_f_vel] in rad/s.
    # wheel_action = JointVelocityActionCfg(
    #     asset_name="robot",
    #     # joint_names=[".*_revolute_Joint"],
    #     joint_names=[
    #         "right_b_revolute_Joint",
    #         "left_b_revolute_Joint",
    #         "left_f_revolute_Joint",
    #         "right_f_revolute_Joint",
    #     ],
    #     scale=1.0,
    # )

    # Left gripper control (binary: open/close); the two finger joints move in
    # opposite directions when closing.
    gripper_action = BinaryJointPositionActionCfg(
        asset_name="robot",
        joint_names=["left_hand_joint_left", "left_hand_joint_right"],
        open_command_expr={
            "left_hand_joint_left": 0.0,
            "left_hand_joint_right": 0.0,
        },
        close_command_expr={
            "left_hand_joint_left": -0.03,
            "left_hand_joint_right": 0.03,
        },
    )
# =====================================================================
# Observations Configuration
# =====================================================================
@configclass
class MindRobotTeleopObsCfg:
    """Minimal observations for MindRobot teleoperation."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for policy group - only robot state, no cube dependencies."""

        # Previous action, joint state, and end-effector pose + gripper state.
        actions = ObsTerm(func=mdp.last_action)
        joint_pos = ObsTerm(func=mdp.joint_pos_rel)
        joint_vel = ObsTerm(func=mdp.joint_vel_rel)
        eef_pos = ObsTerm(func=mdp.ee_frame_pos)
        eef_quat = ObsTerm(func=mdp.ee_frame_quat)
        gripper_pos = ObsTerm(func=mdp.gripper_pos)

        def __post_init__(self):
            # No noise corruption; keep terms separate (dict obs, not one vector).
            self.enable_corruption = False
            self.concatenate_terms = False

    # observation groups
    policy: PolicyCfg = PolicyCfg()
# =====================================================================
# Terminations Configuration
# =====================================================================
@configclass
class MindRobotTeleopTerminationsCfg:
    """Minimal terminations for teleoperation - only time_out or none."""

    # Optional: Keep time_out for safety, or remove entirely
    time_out = DoneTerm(func=mdp.time_out, time_out=True)
# =====================================================================
# Events Configuration
# =====================================================================
def _disable_arm_gravity(env, env_ids: torch.Tensor):
    """Turn off gravity for every rigid body in both arm subtrees.

    Invoked once at startup. ``modify_rigid_body_properties`` carries the
    ``@apply_nested`` decorator, so it recurses through all rigid-body
    children under each prim path — every arm link, gripper included, is
    covered. Chassis, wheels and trunk keep gravity.

    Arm subtree roots (per env):
        Robot/rm_65_fb_left — left RM-65 arm + gripper
        Robot/rm_65_b_right — right RM-65 arm + gripper
    """
    import isaaclab.sim.schemas as schemas

    no_gravity = RigidBodyPropertiesCfg(disable_gravity=True)
    arm_subtrees = ("Robot/rm_65_fb_left", "Robot/rm_65_b_right")
    for idx in range(env.num_envs):
        for subtree in arm_subtrees:
            schemas.modify_rigid_body_properties(f"/World/envs/env_{idx}/{subtree}", no_gravity)
@configclass
class MindRobotTeleopEventsCfg:
    """Reset events for teleoperation: restore the scene to its initial state on reset (R key)."""

    # Run once at startup: remove gravity from both arm subtrees.
    disable_arm_gravity = EventTerm(
        func=_disable_arm_gravity,
        mode="startup",
    )
    # On every reset, restore all scene assets to their default states.
    reset_scene = EventTerm(
        func=mdp.reset_scene_to_default,
        mode="reset",
    )
# =====================================================================
# Main Environment Configuration
# =====================================================================
@configclass
class EmptyCfg:
    """Empty placeholder config for unused managers (commands/rewards/curriculum)."""
    pass
@configclass
class MindRobotLeftArmIKEnvCfg(ManagerBasedRLEnvCfg):
    """MindRobot left-arm IK-Rel teleoperation environment config (minimal version)."""

    # Scene settings
    scene: MindRobotTeleopSceneCfg = MindRobotTeleopSceneCfg(
        num_envs=1,
        env_spacing=2.5,
    )
    # Basic settings
    observations: MindRobotTeleopObsCfg = MindRobotTeleopObsCfg()
    actions: MindRobotTeleopActionsCfg = MindRobotTeleopActionsCfg()
    # MDP settings
    terminations: MindRobotTeleopTerminationsCfg = MindRobotTeleopTerminationsCfg()
    events: MindRobotTeleopEventsCfg = MindRobotTeleopEventsCfg()
    # Unused managers
    commands: EmptyCfg = EmptyCfg()
    rewards: EmptyCfg = EmptyCfg()
    curriculum: EmptyCfg = EmptyCfg()
    # XR configuration for hand tracking (if needed)
    xr: XrCfg = XrCfg(
        anchor_pos=(-0.1, -0.5, -1.05),
        anchor_rot=(0.866, 0, 0, -0.5),
    )

    def __post_init__(self):
        """Post initialization."""
        super().__post_init__()
        self.viewer = ViewerCfg(
            eye=(2.0, -1.5, 1.8),  # camera position
            lookat=(0.0, 0.0, 0.8),  # look-at point (around robot waist height)
            origin_type="world",
        )
        # General settings: 2 physics steps per env step, 50 s episodes.
        self.decimation = 2
        self.episode_length_s = 50.0
        # Simulation settings
        self.sim.dt = 0.01  # 100Hz
        self.sim.render_interval = 2
        # Keep the base mobile for teleoperation.
        # The absolute arm IK command must therefore be expressed in the robot
        # root frame instead of assuming a fixed world-aligned base.
        robot_cfg = MINDBOT_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
        robot_cfg.spawn.articulation_props.fix_root_link = False
        self.scene.robot = robot_cfg
        # Configure end-effector frame transformer: left-arm base -> hand body.
        marker_cfg = FRAME_MARKER_CFG.copy()
        marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
        marker_cfg.prim_path = "/Visuals/FrameTransformer"
        self.scene.ee_frame = FrameTransformerCfg(
            prim_path="{ENV_REGEX_NS}/Robot/rm_65_fb_left/base_link",
            debug_vis=False,
            visualizer_cfg=marker_cfg,
            target_frames=[
                FrameTransformerCfg.FrameCfg(
                    prim_path="{ENV_REGEX_NS}/Robot/rm_65_fb_left/l_hand_01/left_hand_body",
                    name="end_effector",
                    offset=OffsetCfg(
                        pos=[0.0, 0.0, 0.0],
                    ),
                ),
            ],
        )
        # Configure teleoperation devices (keyboard / spacemouse / gamepad).
        self.teleop_devices = DevicesCfg(
            devices={
                "keyboard": Se3KeyboardCfg(
                    pos_sensitivity=0.05,
                    rot_sensitivity=0.05,
                    sim_device=self.sim.device,
                ),
                "spacemouse": Se3SpaceMouseCfg(
                    pos_sensitivity=0.05,
                    rot_sensitivity=0.05,
                    sim_device=self.sim.device,
                ),
                "gamepad": Se3GamepadCfg(
                    pos_sensitivity=0.1,
                    rot_sensitivity=0.1,
                    sim_device=self.sim.device,
                ),
            }
        )
        # Gripper parameters for status checking
        self.gripper_joint_names = ["left_hand_joint_left", "left_hand_joint_right"]
        self.gripper_open_val = 0.0
        self.gripper_threshold = 0.005
@configclass
class MindRobotLeftArmIKAbsEnvCfg(MindRobotLeftArmIKEnvCfg):
    """MindRobot left-arm IK-Abs teleoperation config (absolute frame, suited to VR controllers)."""

    def __post_init__(self):
        super().__post_init__()
        # Switch controller to absolute mode
        self.actions.arm_action.controller.use_relative_mode = False

View File

@@ -21,7 +21,10 @@ from isaaclab.managers import CurriculumTermCfg as CurrTerm
from isaaclab.scene import InteractiveSceneCfg
from isaaclab.utils import configclass
from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg, BinaryJointPositionActionCfg
from isaaclab.envs.mdp.actions.actions_cfg import (
DifferentialInverseKinematicsActionCfg,
BinaryJointPositionActionCfg,
)
from isaaclab.sensors import CameraCfg
from . import mdp
@@ -32,12 +35,17 @@ from . import mdp
from mindbot.robot.mindbot import MINDBOT_CFG
from mindbot.utils.assets import MINDBOT_ASSETS_DIR
# ====== 其他物体配置 ======
from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg
from isaaclab.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg, CollisionPropertiesCfg
from isaaclab.sim.schemas.schemas_cfg import (
RigidBodyPropertiesCfg,
CollisionPropertiesCfg,
)
TABLE_CFG=RigidObjectCfg(
TABLE_CFG = RigidObjectCfg(
prim_path="{ENV_REGEX_NS}/Table",
init_state=RigidObjectCfg.InitialStateCfg(
pos=[0.95, -0.3, 0.005],
@@ -46,17 +54,17 @@ TABLE_CFG=RigidObjectCfg(
ang_vel=[0.0, 0.0, 0.0],
),
spawn=UsdFileCfg(
usd_path="C:/Users/PC/workpalce/maic_usd_assets/equipment/table/Table_C.usd",
usd_path=f"{MINDBOT_ASSETS_DIR}/equipment/table/Table_C.usd",
rigid_props=RigidBodyPropertiesCfg(
rigid_body_enabled= True,
rigid_body_enabled=True,
solver_position_iteration_count=32,
solver_velocity_iteration_count=16,
disable_gravity=False,
),
collision_props=CollisionPropertiesCfg(
collision_enabled=True,
contact_offset=0.0005,#original 0.02
rest_offset=0
contact_offset=0.0005, # original 0.02
rest_offset=0,
),
),
)
@@ -65,30 +73,30 @@ LID_CFG = RigidObjectCfg(
prim_path="{ENV_REGEX_NS}/Lid",
init_state=RigidObjectCfg.InitialStateCfg(
# pos=[0.9523,-0.2512,1.0923],
pos=[0.803 , -0.25, 1.0282],#(0.9988, -0.2977, 1.0498633)
pos=[0.803, -0.25, 1.0282], # (0.9988, -0.2977, 1.0498633)
# rot=[-0.7071, 0.0, 0.0, 0.7071],
rot=[0.0, 0.0, 0.0, 1.0],
lin_vel=[0.0, 0.0, 0.0],
ang_vel=[0.0, 0.0, 0.0],
),
spawn=UsdFileCfg(
usd_path="C:/Users/PC/workpalce/maic_usd_assets/assets/Collected_equipment001/Equipment/tube1.usd",
usd_path=f"{MINDBOT_ASSETS_DIR}/assets/Collected_equipment001/Equipment/tube1.usd",
# scale=(0.2, 0.2, 0.2),
rigid_props=RigidBodyPropertiesCfg(
rigid_body_enabled= True,
rigid_body_enabled=True,
solver_position_iteration_count=32,
solver_velocity_iteration_count=16,
max_angular_velocity=1000.0,
max_linear_velocity=1000.0,
max_depenetration_velocity=0.5,#original 5.0
linear_damping=5.0, #original 0.5
angular_damping=5.0, #original 0.5
max_depenetration_velocity=0.5, # original 5.0
linear_damping=5.0, # original 0.5
angular_damping=5.0, # original 0.5
disable_gravity=False,
),
collision_props=CollisionPropertiesCfg(
collision_enabled=True,
contact_offset=0.0005,#original 0.02
rest_offset=0
contact_offset=0.0005, # original 0.02
rest_offset=0,
),
),
)
@@ -104,7 +112,7 @@ LID_CFG = RigidObjectCfg(
# ang_vel=[0.0, 0.0, 0.0],
# ),
# spawn=UsdFileCfg(
# usd_path="C:/Users/PC/workpalce/maic_usd_assets/equipment/centrifuge/Lid_B.usd",
# usd_path="/home/maic/LYT/maic_usd_assets/equipment/centrifuge/Lid_B.usd",
# # scale=(0.2, 0.2, 0.2),
# rigid_props=RigidBodyPropertiesCfg(
# rigid_body_enabled= True,
@@ -127,9 +135,9 @@ LID_CFG = RigidObjectCfg(
CENTRIFUGE_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path="C:/Users/PC/workpalce/maic_usd_assets/equipment/centrifuge/centrifuge.usd",
usd_path=f"{MINDBOT_ASSETS_DIR}/equipment/centrifuge/centrifuge.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
disable_gravity=False,
max_depenetration_velocity=1.0,
linear_damping=0.5,
angular_damping=0.5,
@@ -140,45 +148,41 @@ CENTRIFUGE_CFG = ArticulationCfg(
solver_velocity_iteration_count=16,
stabilization_threshold=1e-6,
# 【重要】必须要固定底座,否则机器人按盖子时,离心机会翻倒
# fix_root_link=True,
# fix_root_link=True,
),
collision_props=CollisionPropertiesCfg(
collision_enabled=True,
contact_offset=0.0005,#original 0.02
rest_offset=0
contact_offset=0.0005, # original 0.02
rest_offset=0,
),
),
init_state=ArticulationCfg.InitialStateCfg(
# 1. 参照机器人配置,在这里定义初始关节角度
joint_pos={
"r1": math.radians(0.0), # 转子位置(无关紧要)
"r1": math.radians(0.0), # 转子位置(无关紧要)
# 【关键修改】:初始让盖子处于打开状态
# 您的 USD 限位是 (-100, 0)-100度为最大开启
"r2": math.radians(-100.0),
"r2": math.radians(-100.0),
},
pos=(0.80, -0.25, 0.8085),#(0.95, -0.3, 0.8085)
pos=(0.80, -0.25, 0.8085), # (0.95, -0.3, 0.8085)
rot=[-0.7071, 0.0, 0.0, 0.7071],
# rot=[0.0, 0.0, 0.0, 1.0],
),
actuators={
"lid_passive_mechanism": ImplicitActuatorCfg(
joint_names_expr=["r2"],
# 【关键差异说明】
# 机器人的手臂 stiffness 是 10000.0,因为机器人需要精准定位且不晃动。
#
#
# 但是!离心机盖子如果设为 10000它就会像焊死在空中一样机器人根本按不动。
# 这里设为 200.0 左右:
# 1. 力度足以克服重力,让盖子初始保持打开。
# 2. 力度又足够软,当机器人用力下压时,盖子会顺从地闭合。
stiffness=200.0,
stiffness=200.0,
# 阻尼:给一点阻力,模拟液压杆手感,防止像弹簧一样乱晃
damping=20.0,
damping=20.0,
# 确保力矩上限不要太小,否则托不住盖子
effort_limit_sim=1000.0,
effort_limit_sim=1000.0,
velocity_limit_sim=100.0,
),
# 转子可以硬一点,不需要被机器人按动
@@ -187,14 +191,14 @@ CENTRIFUGE_CFG = ArticulationCfg(
stiffness=0.0,
damping=10.0,
),
}
},
)
ROOM_CFG = AssetBaseCfg(
prim_path="{ENV_REGEX_NS}/Room",
spawn=UsdFileCfg(
usd_path="C:/Users/PC/workpalce/maic_usd_assets/twinlab/MIC_sim-3.0/244_140/room.usd",
usd_path=f"{MINDBOT_ASSETS_DIR}/twinlab/MIC_sim-3.0/244_140/room.usd",
),
)
@@ -217,7 +221,9 @@ class MindbotSceneCfg(InteractiveSceneCfg):
# Room: AssetBaseCfg = ROOM_CFG.replace(prim_path="{ENV_REGEX_NS}/Room")
Mindbot: ArticulationCfg = MINDBOT_CFG.replace(prim_path="{ENV_REGEX_NS}/Mindbot")
table: RigidObjectCfg = TABLE_CFG.replace(prim_path="{ENV_REGEX_NS}/Table")
centrifuge: ArticulationCfg = CENTRIFUGE_CFG.replace(prim_path="{ENV_REGEX_NS}/centrifuge")
centrifuge: ArticulationCfg = CENTRIFUGE_CFG.replace(
prim_path="{ENV_REGEX_NS}/centrifuge"
)
lid: RigidObjectCfg = LID_CFG.replace(prim_path="{ENV_REGEX_NS}/Lid")
# lights
dome_light = AssetBaseCfg(
@@ -236,7 +242,7 @@ class MindbotSceneCfg(InteractiveSceneCfg):
# )
"""
# left_hand_camera=CameraCfg(
# prim_path="{ENV_REGEX_NS}/Mindbot/rm_65_fb_left/Link_6/robot___left_8_02/cam_left_hand",
# prim_path="{ENV_REGEX_NS}/Mindbot/rm_65_fb_left/Link_6/robot___left_8_02/cam_left_hand",
# update_period=1/120,
# height=480,
# width=640,
@@ -244,7 +250,7 @@ class MindbotSceneCfg(InteractiveSceneCfg):
# spawn=None,
# )
# right_hand_camera=CameraCfg(
# prim_path="{ENV_REGEX_NS}/Mindbot/rm_65_b_right/Link6/robot___right_8_2/cam_right_hand",
# prim_path="{ENV_REGEX_NS}/Mindbot/rm_65_b_right/Link6/robot___right_8_2/cam_right_hand",
# update_period=1/120,
# height=480,
# width=640,
@@ -252,7 +258,7 @@ class MindbotSceneCfg(InteractiveSceneCfg):
# spawn=None,
# )
# head_camera=CameraCfg(
# prim_path="{ENV_REGEX_NS}/Mindbot/robot_head/cam_head",
# prim_path="{ENV_REGEX_NS}/Mindbot/robot_head/cam_head",
# update_period=1/120,
# height=480,
# width=640,
@@ -260,18 +266,23 @@ class MindbotSceneCfg(InteractiveSceneCfg):
# spawn=None,
# )
# chest_camera=CameraCfg(
# prim_path="{ENV_REGEX_NS}/Mindbot/robot_trunk/cam_chest",
# prim_path="{ENV_REGEX_NS}/Mindbot/robot_trunk/cam_chest",
# update_period=1/120,
# height=480,
# width=640,
# data_types=["rgb"],
# spawn=None,
# )
##
# MDP settings
##
def left_gripper_pos_w(env, asset_cfg: SceneEntityCfg = SceneEntityCfg("Mindbot")) -> torch.Tensor:
def left_gripper_pos_w(
env, asset_cfg: SceneEntityCfg = SceneEntityCfg("Mindbot")
) -> torch.Tensor:
asset = env.scene[asset_cfg.name]
body_ids, _ = asset.find_bodies(["left_hand_body"], preserve_order=True)
pos_w = asset.data.body_pos_w[:, body_ids[0]] - env.scene.env_origins
@@ -294,14 +305,12 @@ class ActionsCfg:
# ),
# scale=(0.01, 0.01, 0.01, 0.025, 0.025, 0.025),
# )
right_arm_fixed = mdp.JointPositionActionCfg(
asset_name="Mindbot",
joint_names=["r_joint[1-6]"], # 对应 l_joint1 到 l_joint6
joint_names=["r_joint[1-6]"], # 对应 l_joint1 到 l_joint6
# 1. 缩放设为 0这让 RL 策略无法控制它,它不会动
scale=0.0,
scale=0.0,
# 2. 偏移设为你的目标角度(弧度):这告诉电机“永远停在这里”
# 对应 (135, -70, -90, 90, 90, -70)
offset={
@@ -317,34 +326,35 @@ class ActionsCfg:
grippers_position = mdp.BinaryJointPositionActionCfg(
asset_name="Mindbot",
joint_names=["left_hand_joint_left", "left_hand_joint_right"],
# 修正:使用字典格式
# open_command_expr={"关节名正则": 值}
open_command_expr={"left_hand_joint_left": 0.0, "left_hand_joint_right": 0.0},
# close_command_expr={"关节名正则": 值}
close_command_expr={"left_hand_joint_left": -0.03, "left_hand_joint_right": 0.03},
close_command_expr={
"left_hand_joint_left": -0.03,
"left_hand_joint_right": 0.03,
},
)
trunk_position = mdp.JointPositionActionCfg(
asset_name="Mindbot",
joint_names=["PrismaticJoint"],
scale=0.00,
offset=0.24, # 0.3
clip=None
offset=0.24, # 0.3
clip=None,
)
centrifuge_lid_passive = mdp.JointPositionActionCfg(
asset_name="centrifuge", # 对应场景中的名称
asset_name="centrifuge", # 对应场景中的名称
joint_names=["r2"],
# 将 scale 设为 0意味着 RL 算法输出的任何值都会被乘 0即无法干扰它
scale=0.00,
scale=0.00,
# 将 offset 设为目标角度,这会成为 PD 控制器的恒定 Target
offset= -1.7453,
clip=None
offset=-1.7453,
clip=None,
)
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@@ -353,15 +363,29 @@ class ObservationsCfg:
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
mindbot_joint_pos_rel = ObsTerm(func=mdp.joint_pos_rel, params={"asset_cfg": SceneEntityCfg("Mindbot")})
mindbot_root_pos = ObsTerm(func=mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("Mindbot")})
mindbot_root_quat = ObsTerm(func=mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("Mindbot")})
left_gripper_pos = ObsTerm(func=left_gripper_pos_w,
params={"asset_cfg": SceneEntityCfg("Mindbot", body_names=["left_hand_body"])})
lid_pos_abs = ObsTerm(func=mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("lid")})
centrifuge_pos_abs = ObsTerm(func=mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("centrifuge")})
mindbot_joint_pos_rel = ObsTerm(
func=mdp.joint_pos_rel, params={"asset_cfg": SceneEntityCfg("Mindbot")}
)
mindbot_root_pos = ObsTerm(
func=mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("Mindbot")}
)
mindbot_root_quat = ObsTerm(
func=mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("Mindbot")}
)
left_gripper_pos = ObsTerm(
func=left_gripper_pos_w,
params={
"asset_cfg": SceneEntityCfg("Mindbot", body_names=["left_hand_body"])
},
)
lid_pos_abs = ObsTerm(
func=mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("lid")}
)
centrifuge_pos_abs = ObsTerm(
func=mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("centrifuge")}
)
def __post_init__(self) -> None:
self.enable_corruption = False
self.concatenate_terms = True
@@ -409,29 +433,36 @@ class EventCfg:
"velocity_range": (0.0, 0.0),
},
)
reset_table=EventTerm(func=mdp.reset_root_state_uniform,mode="reset",
params={"asset_cfg": SceneEntityCfg("table"), "pose_range": {"x": (0.0, 0.0)}, "velocity_range": {"x": (0.0, 0.0)}})
reset_table = EventTerm(
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("table"),
"pose_range": {"x": (0.0, 0.0)},
"velocity_range": {"x": (0.0, 0.0)},
},
)
reset_centrifuge = EventTerm(
func=mdp.reset_root_state_uniform,
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("centrifuge"),
# "pose_range": {"x": (-0.07, 0.07), "y": (-0.07, 0.07), "yaw": (-0.1, 0.1)},
"pose_range": {"x": (0.0, 0.0), "y": (0.0, 0.0), "z": (0.0, 0.0)},
"velocity_range": {"x": (0.0, 0.0)}
}
)
"asset_cfg": SceneEntityCfg("centrifuge"),
# "pose_range": {"x": (-0.07, 0.07), "y": (-0.07, 0.07), "yaw": (-0.1, 0.1)},
"pose_range": {"x": (0.0, 0.0), "y": (0.0, 0.0), "z": (0.0, 0.0)},
"velocity_range": {"x": (0.0, 0.0)},
},
)
reset_lid = EventTerm(
func=mdp.reset_root_state_uniform,
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("lid"),
# "pose_range": {"x": (-0.03, 0.03), "y": (-0.03, 0.03), "yaw": (-1.5, 1.5)},
"pose_range": {"x": (0.0, 0.0), "y": (0.0, 0.0), "z": (0.0, 0.0)},
"velocity_range": {"x": (0.0, 0.0)}
}
)
"asset_cfg": SceneEntityCfg("lid"),
# "pose_range": {"x": (-0.03, 0.03), "y": (-0.03, 0.03), "yaw": (-1.5, 1.5)},
"pose_range": {"x": (0.0, 0.0), "y": (0.0, 0.0), "z": (0.0, 0.0)},
"velocity_range": {"x": (0.0, 0.0)},
},
)
@configclass
@@ -444,12 +475,12 @@ class RewardsCfg:
"lid_cfg": SceneEntityCfg("lid"),
"robot_cfg": SceneEntityCfg("Mindbot"),
"gripper_body_name": "right_hand_body",
"gripper_forward_axis": (0.0, 0.0, 1.0),
"gripper_width_axis": (0.0, 1.0, 0.0),
"gripper_forward_axis": (0.0, 0.0, 1.0),
"gripper_width_axis": (0.0, 1.0, 0.0),
"lid_handle_axis": (0.0, 1.0, 0.0),
"max_angle_deg": 85.0, # 允许60度内的偏差
},
weight=5.0 #original 5.0
weight=5.0, # original 5.0
)
# stage 2
@@ -462,9 +493,9 @@ class RewardsCfg:
"left_gripper_name": "right_hand_l",
"right_gripper_name": "right_hand__r",
"height_offset": 0.115, # Z方向lid 上方 0.1m
"std": 0.3, # 位置对齐的容忍度
"std": 0.3, # 位置对齐的容忍度
},
weight=3.0 #original 3.0
weight=3.0, # original 3.0
)
approach_lid_penalty = RewTerm(
func=mdp.gripper_lid_distance_penalty,
@@ -477,7 +508,7 @@ class RewardsCfg:
},
# weight 为负数表示惩罚。
# 假设距离 0.5m,惩罚就是 -0.5 * 2.0 = -1.0 分。
weight=-1.0
weight=-1.0,
)
# # 【新增】精细对齐 (引导进入 2cm 圈)
# gripper_lid_fine_alignment = RewTerm(
@@ -493,7 +524,7 @@ class RewardsCfg:
# weight=10.0 # 高权重
# )
# gripper_close_interaction = RewTerm(
# gripper_close_interaction = RewTerm(
# func=mdp.gripper_close_when_near,
# params={
# "lid_cfg": SceneEntityCfg("lid"),
@@ -527,12 +558,10 @@ class RewardsCfg:
# "std": 0.1
# },
# # 权重设大一点,告诉它这是最终目标
# weight=10.0
# weight=10.0
# )
@configclass
class TerminationsCfg:
"""Termination terms for the MDP."""
@@ -543,7 +572,7 @@ class TerminationsCfg:
func=mdp.base_height_failure,
params={"asset_cfg": SceneEntityCfg("Mindbot"), "maximum_height": 2.0},
)
lid_fly_away = DoneTerm(
func=mdp.base_height_failure,
params={"asset_cfg": SceneEntityCfg("lid"), "maximum_height": 2.0},
@@ -552,25 +581,25 @@ class TerminationsCfg:
# 新增:盖子掉落判定
# 如果盖子掉回桌面(高度接近初始高度),且已经运行了一段时间(避开刚开始重置的瞬间)
lid_dropped = DoneTerm(
func=mdp.base_height_failure, # 复用高度判定函数
func=mdp.base_height_failure, # 复用高度判定函数
params={
"asset_cfg": SceneEntityCfg("lid"),
"minimum_height": 0.79, # 假设初始高度是 0.9,低于 0.88 说明掉下去了或者被砸进去了
"asset_cfg": SceneEntityCfg("lid"),
"minimum_height": 0.79, # 假设初始高度是 0.9,低于 0.88 说明掉下去了或者被砸进去了
},
)
##
# Environment configuration
##
@configclass
class CurriculumCfg:
pass
# action_rate = CurrTerm(func=mdp.modify_reward_weight,
# params={"term_name": "action_rate", "weight": -1e-1, "num_steps": 10000})
# joint_vel = CurrTerm(func=mdp.modify_reward_weight,
# params={"term_name": "joint_vel", "weight": -1e-1, "num_steps": 10000})
@@ -579,10 +608,10 @@ class CurriculumCfg:
# params={
# # 路径rewards管理器 -> 你的奖励项名 -> params字典 -> std键
# "address": "rewards.gripper_lid_position_alignment.params.std",
# # 修改逻辑:使用我们刚才写的函数
# "modify_fn": mdp.annealing_std,
# # 传给函数的参数
# "modify_params": {
# "start_step": 00, # 约 600 轮
@@ -593,6 +622,7 @@ class CurriculumCfg:
# }
# )
@configclass
class MindbotEnvCfg(ManagerBasedRLEnvCfg):
# Scene settings
@@ -610,20 +640,20 @@ class MindbotEnvCfg(ManagerBasedRLEnvCfg):
def __post_init__(self) -> None:
"""Post initialization."""
# general settings
self.decimation = 4 #original 2 # 从 2 增加到 4控制频率从 25Hz 降到 12.5Hz
self.decimation = 4 # original 2 # 从 2 增加到 4控制频率从 25Hz 降到 12.5Hz
self.episode_length_s = 10
# viewer settings
self.viewer.eye = (3.5, 0.0, 13.2)#(3.5, 0.0, 3.2)
self.viewer.eye = (3.5, 0.0, 13.2) # (3.5, 0.0, 3.2)
# simulation settings
self.sim.dt = 1 / 120 #original 1 / 60
self.sim.dt = 1 / 120 # original 1 / 60
self.sim.render_interval = self.decimation
# # 1. 基础堆内存
self.sim.physx.gpu_heap_capacity = 256 * 1024 * 1024 # 256MB
self.sim.physx.gpu_collision_stack_size = 1024 * 1024 * 1024
self.sim.physx.gpu_collision_stack_size = 1024 * 1024 * 1024
self.sim.physx.gpu_temp_buffer_capacity = 128 * 1024 * 1024
self.sim.physx.gpu_max_rigid_contact_count = 16 * 1024 * 1024 # 200万个接触点
self.sim.physx.gpu_max_rigid_patch_count = 1024 * 1024 # 32万个 patch
self.sim.physx.gpu_max_rigid_contact_count = 16 * 1024 * 1024 # 200万个接触点
self.sim.physx.gpu_max_rigid_patch_count = 1024 * 1024 # 32万个 patch
# # 5. 聚合对容量 (针对复杂的 Articulation)
self.sim.physx.gpu_found_lost_pairs_capacity = 16 * 1024 * 1024
self.sim.physx.gpu_found_lost_aggregate_pairs_capacity = 16 * 1024 * 1024

View File

@@ -21,7 +21,10 @@ from isaaclab.managers import CurriculumTermCfg as CurrTerm
from isaaclab.scene import InteractiveSceneCfg
from isaaclab.utils import configclass
from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg
from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg, BinaryJointPositionActionCfg
from isaaclab.envs.mdp.actions.actions_cfg import (
DifferentialInverseKinematicsActionCfg,
BinaryJointPositionActionCfg,
)
from . import mdp
@@ -31,12 +34,17 @@ from . import mdp
from mindbot.robot.mindbot import MINDBOT_CFG
from mindbot.utils.assets import MINDBOT_ASSETS_DIR
# ====== 其他物体配置 ======
from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg
from isaaclab.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg, CollisionPropertiesCfg
from isaaclab.sim.schemas.schemas_cfg import (
RigidBodyPropertiesCfg,
CollisionPropertiesCfg,
)
TABLE_CFG=RigidObjectCfg(
TABLE_CFG = RigidObjectCfg(
prim_path="{ENV_REGEX_NS}/Table",
init_state=RigidObjectCfg.InitialStateCfg(
pos=[0.95, -0.3, 0.005],
@@ -45,17 +53,17 @@ TABLE_CFG=RigidObjectCfg(
ang_vel=[0.0, 0.0, 0.0],
),
spawn=UsdFileCfg(
usd_path="C:/Users/PC/workpalce/maic_usd_assets/equipment/table/Table_C.usd",
usd_path=f"{MINDBOT_ASSETS_DIR}/equipment/table/Table_C.usd",
rigid_props=RigidBodyPropertiesCfg(
rigid_body_enabled= True,
rigid_body_enabled=True,
solver_position_iteration_count=32,
solver_velocity_iteration_count=16,
disable_gravity=False,
),
collision_props=CollisionPropertiesCfg(
collision_enabled=True,
contact_offset=0.0005,#original 0.02
rest_offset=0
contact_offset=0.0005, # original 0.02
rest_offset=0,
),
),
)
@@ -69,23 +77,23 @@ LID_CFG = RigidObjectCfg(
ang_vel=[0.0, 0.0, 0.0],
),
spawn=UsdFileCfg(
usd_path="C:/Users/PC/workpalce/maic_usd_assets/equipment/ultrasound/ultrasound_lid.usd",
usd_path=f"{MINDBOT_ASSETS_DIR}/equipment/ultrasound/ultrasound_lid.usd",
# scale=(0.2, 0.2, 0.2),
rigid_props=RigidBodyPropertiesCfg(
rigid_body_enabled= True,
rigid_body_enabled=True,
solver_position_iteration_count=32,
solver_velocity_iteration_count=16,
max_angular_velocity=1000.0,
max_linear_velocity=1000.0,
max_depenetration_velocity=0.5,#original 5.0
linear_damping=5.0, #original 0.5
angular_damping=5.0, #original 0.5
max_depenetration_velocity=0.5, # original 5.0
linear_damping=5.0, # original 0.5
angular_damping=5.0, # original 0.5
disable_gravity=False,
),
collision_props=CollisionPropertiesCfg(
collision_enabled=True,
contact_offset=0.0005,#original 0.02
rest_offset=0
contact_offset=0.0005, # original 0.02
rest_offset=0,
),
),
)
@@ -93,13 +101,12 @@ LID_CFG = RigidObjectCfg(
ULTRASOUND_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path="C:/Users/PC/workpalce/maic_usd_assets/equipment/ultrasound/ultrasound.usd",
usd_path=f"{MINDBOT_ASSETS_DIR}/equipment/ultrasound/ultrasound.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=1.0
disable_gravity=False, max_depenetration_velocity=1.0
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False,#
enabled_self_collisions=False, #
solver_position_iteration_count=32,
solver_velocity_iteration_count=16,
stabilization_threshold=1e-6,
@@ -112,13 +119,12 @@ ULTRASOUND_CFG = ArticulationCfg(
"passive_damper": ImplicitActuatorCfg(
# ".*" 表示匹配该USD文件内的所有关节无论是轮子、屏幕转轴还是其他
joint_names_expr=[".*"],
stiffness=0.0,
damping=1000.0,
effort_limit_sim=10000.0,
stiffness=0.0,
damping=1000.0,
effort_limit_sim=10000.0,
velocity_limit_sim=100.0,
),
}
},
)
##
@@ -139,7 +145,9 @@ class MindbotSceneCfg(InteractiveSceneCfg):
# robot
Mindbot: ArticulationCfg = MINDBOT_CFG.replace(prim_path="{ENV_REGEX_NS}/Mindbot")
table: RigidObjectCfg = TABLE_CFG.replace(prim_path="{ENV_REGEX_NS}/Table")
ultrasound: ArticulationCfg = ULTRASOUND_CFG.replace(prim_path="{ENV_REGEX_NS}/ultrasound")
ultrasound: ArticulationCfg = ULTRASOUND_CFG.replace(
prim_path="{ENV_REGEX_NS}/ultrasound"
)
lid: RigidObjectCfg = LID_CFG.replace(prim_path="{ENV_REGEX_NS}/Lid")
# lights
dome_light = AssetBaseCfg(
@@ -152,7 +160,10 @@ class MindbotSceneCfg(InteractiveSceneCfg):
# MDP settings
##
def left_gripper_pos_w(env, asset_cfg: SceneEntityCfg = SceneEntityCfg("Mindbot")) -> torch.Tensor:
def left_gripper_pos_w(
env, asset_cfg: SceneEntityCfg = SceneEntityCfg("Mindbot")
) -> torch.Tensor:
asset = env.scene[asset_cfg.name]
body_ids, _ = asset.find_bodies(["left_hand_body"], preserve_order=True)
pos_w = asset.data.body_pos_w[:, body_ids[0]] - env.scene.env_origins
@@ -167,11 +178,11 @@ class ActionsCfg:
left_arm_ee = DifferentialInverseKinematicsActionCfg(
asset_name="Mindbot",
joint_names=["l_joint[1-6]"], # 左臂的6个关节
body_name="left_hand_body", # 末端执行器body名称
body_name="left_hand_body", # 末端执行器body名称
controller=DifferentialIKControllerCfg(
command_type="pose", # 控制位置+姿态
use_relative_mode=True, # 相对模式:动作是增量
ik_method="dls", # Damped Least Squares方法
command_type="pose", # 控制位置+姿态
use_relative_mode=True, # 相对模式:动作是增量
ik_method="dls", # Damped Least Squares方法
),
scale=(0.01, 0.01, 0.01, 0.025, 0.025, 0.025),
)
@@ -197,22 +208,22 @@ class ActionsCfg:
grippers_position = mdp.BinaryJointPositionActionCfg(
asset_name="Mindbot",
joint_names=["left_hand_joint_left", "left_hand_joint_right"],
# 修正:使用字典格式
# open_command_expr={"关节名正则": 值}
open_command_expr={"left_hand_joint_left": 0.0, "left_hand_joint_right": 0.0},
# close_command_expr={"关节名正则": 值}
close_command_expr={"left_hand_joint_left": -0.03, "left_hand_joint_right": 0.03},
close_command_expr={
"left_hand_joint_left": -0.03,
"left_hand_joint_right": 0.03,
},
)
trunk_position = mdp.JointPositionActionCfg(
asset_name="Mindbot",
joint_names=["PrismaticJoint"],
scale=0.00,
offset=0.3,
clip=None
clip=None,
)
@@ -224,15 +235,29 @@ class ObservationsCfg:
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
mindbot_joint_pos_rel = ObsTerm(func=mdp.joint_pos_rel, params={"asset_cfg": SceneEntityCfg("Mindbot")})
mindbot_root_pos = ObsTerm(func=mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("Mindbot")})
mindbot_root_quat = ObsTerm(func=mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("Mindbot")})
left_gripper_pos = ObsTerm(func=left_gripper_pos_w,
params={"asset_cfg": SceneEntityCfg("Mindbot", body_names=["left_hand_body"])})
lid_pos_abs = ObsTerm(func=mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("lid")})
ultrasound_pos_abs = ObsTerm(func=mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("ultrasound")})
mindbot_joint_pos_rel = ObsTerm(
func=mdp.joint_pos_rel, params={"asset_cfg": SceneEntityCfg("Mindbot")}
)
mindbot_root_pos = ObsTerm(
func=mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("Mindbot")}
)
mindbot_root_quat = ObsTerm(
func=mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("Mindbot")}
)
left_gripper_pos = ObsTerm(
func=left_gripper_pos_w,
params={
"asset_cfg": SceneEntityCfg("Mindbot", body_names=["left_hand_body"])
},
)
lid_pos_abs = ObsTerm(
func=mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("lid")}
)
ultrasound_pos_abs = ObsTerm(
func=mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("ultrasound")}
)
def __post_init__(self) -> None:
self.enable_corruption = False
self.concatenate_terms = True
@@ -244,6 +269,7 @@ class ObservationsCfg:
@configclass
class EventCfg:
"""Configuration for events."""
# reset_mindbot_root=EventTerm(
# func=mdp.reset_root_state_uniform,
# mode="reset",
@@ -273,18 +299,39 @@ class EventCfg:
"velocity_range": (0.0, 0.0),
},
)
reset_table=EventTerm(func=mdp.reset_root_state_uniform,mode="reset",
params={"asset_cfg": SceneEntityCfg("table"), "pose_range": {"x": (0.0, 0.0)}, "velocity_range": {"x": (0.0, 0.0)}})
reset_ultrasound = EventTerm(func=mdp.reset_root_state_uniform, mode="reset",
params={"asset_cfg": SceneEntityCfg("ultrasound"), "pose_range": {"x": (0.0, 0.0)}, "velocity_range": {"x": (0.0, 0.0)}})
reset_lid = EventTerm(func=mdp.reset_root_state_uniform, mode="reset",
params={"asset_cfg": SceneEntityCfg("lid"), "pose_range": {"x": (0.0, 0.0)}, "velocity_range": {"x": (0.0, 0.0)}})
reset_table = EventTerm(
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("table"),
"pose_range": {"x": (0.0, 0.0)},
"velocity_range": {"x": (0.0, 0.0)},
},
)
reset_ultrasound = EventTerm(
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("ultrasound"),
"pose_range": {"x": (0.0, 0.0)},
"velocity_range": {"x": (0.0, 0.0)},
},
)
reset_lid = EventTerm(
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("lid"),
"pose_range": {"x": (0.0, 0.0)},
"velocity_range": {"x": (0.0, 0.0)},
},
)
@configclass
class RewardsCfg:
"""Reward terms for the MDP."""
# 姿态对齐奖励: stage 1
# gripper_lid_orientation_alignment = RewTerm(
# func=mdp.gripper_lid_orientation_alignment,
@@ -292,8 +339,8 @@ class RewardsCfg:
# "lid_cfg": SceneEntityCfg("lid"),
# "robot_cfg": SceneEntityCfg("Mindbot"),
# "gripper_body_name": "left_hand_body",
# "gripper_forward_axis": (0.0, 0.0, 1.0),
# "gripper_width_axis": (0.0, 1.0, 0.0),
# "gripper_forward_axis": (0.0, 0.0, 1.0),
# "gripper_width_axis": (0.0, 1.0, 0.0),
# "lid_handle_axis": (0.0, 1.0, 0.0)
# },
# weight=1.0
@@ -305,12 +352,12 @@ class RewardsCfg:
"lid_cfg": SceneEntityCfg("lid"),
"robot_cfg": SceneEntityCfg("Mindbot"),
"gripper_body_name": "left_hand_body",
"gripper_forward_axis": (0.0, 0.0, 1.0),
"gripper_width_axis": (0.0, 1.0, 0.0),
"gripper_forward_axis": (0.0, 0.0, 1.0),
"gripper_width_axis": (0.0, 1.0, 0.0),
"lid_handle_axis": (0.0, 1.0, 0.0),
"max_angle_deg": 85.0, # 允许60度内的偏差
},
weight=5
weight=5,
)
# stage 2
@@ -325,7 +372,7 @@ class RewardsCfg:
"height_offset": 0.07, # Z方向lid 上方 0.1m
"std": 0.3, # 位置对齐的容忍度
},
weight=3.0
weight=3.0,
)
# 【新增】精细对齐 (引导进入 2cm 圈)
@@ -336,13 +383,13 @@ class RewardsCfg:
"robot_cfg": SceneEntityCfg("Mindbot"),
"left_gripper_name": "left_hand__l",
"right_gripper_name": "left_hand_r",
"height_offset": 0.07, # 【注意】这个高度必须非常准,和 interaction 判定高度一致
"std": 0.05, # 非常陡峭的梯度,只在 5-10cm 内有效
"height_offset": 0.07, # 【注意】这个高度必须非常准,和 interaction 判定高度一致
"std": 0.05, # 非常陡峭的梯度,只在 5-10cm 内有效
},
weight=10.0 # 高权重
weight=10.0, # 高权重
)
gripper_close_interaction = RewTerm(
gripper_close_interaction = RewTerm(
func=mdp.gripper_close_when_near,
params={
"lid_cfg": SceneEntityCfg("lid"),
@@ -354,34 +401,32 @@ class RewardsCfg:
"height_offset": 0.04,
"grasp_radius": 0.03,
},
weight=10.0
weight=10.0,
)
lid_lifted_reward = RewTerm(
func=mdp.lid_is_lifted, # 使用 rewards.py 中已有的函数
func=mdp.lid_is_lifted, # 使用 rewards.py 中已有的函数
params={
"lid_cfg": SceneEntityCfg("lid"),
"minimal_height": 1.0, # 根据你的场景调整,比初始高度高 5cm 左右
"minimal_height": 1.0, # 根据你的场景调整,比初始高度高 5cm 左右
},
weight=10.0 # 给一个足够大的权重
weight=10.0, # 给一个足够大的权重
)
lid_lifting_reward = RewTerm(
func=mdp.lid_lift_progress_reward,
params={
"lid_cfg": SceneEntityCfg("lid"),
"initial_height": 0.8, # 请务必核实这个值!在 print 里看一下 lid 的 z 坐标
"initial_height": 0.8, # 请务必核实这个值!在 print 里看一下 lid 的 z 坐标
"target_height_lift": 0.2,
"height_offset": 0.07, # 与其他奖励保持一致
"std": 0.1
"height_offset": 0.07, # 与其他奖励保持一致
"std": 0.1,
},
# 权重设大一点,告诉它这是最终目标
weight=10.0
weight=10.0,
)
@configclass
class TerminationsCfg:
"""Termination terms for the MDP."""
@@ -392,7 +437,7 @@ class TerminationsCfg:
func=mdp.base_height_failure,
params={"asset_cfg": SceneEntityCfg("Mindbot"), "maximum_height": 2.0},
)
lid_fly_away = DoneTerm(
func=mdp.base_height_failure,
params={"asset_cfg": SceneEntityCfg("lid"), "maximum_height": 2.0},
@@ -401,25 +446,25 @@ class TerminationsCfg:
# 新增:盖子掉落判定
# 如果盖子掉回桌面(高度接近初始高度),且已经运行了一段时间(避开刚开始重置的瞬间)
lid_dropped = DoneTerm(
func=mdp.base_height_failure, # 复用高度判定函数
func=mdp.base_height_failure, # 复用高度判定函数
params={
"asset_cfg": SceneEntityCfg("lid"),
"minimum_height": 0.79, # 假设初始高度是 0.9,低于 0.88 说明掉下去了或者被砸进去了
"asset_cfg": SceneEntityCfg("lid"),
"minimum_height": 0.79, # 假设初始高度是 0.9,低于 0.88 说明掉下去了或者被砸进去了
},
)
##
# Environment configuration
##
@configclass
class CurriculumCfg:
pass
# action_rate = CurrTerm(func=mdp.modify_reward_weight,
# params={"term_name": "action_rate", "weight": -1e-1, "num_steps": 10000})
# joint_vel = CurrTerm(func=mdp.modify_reward_weight,
# params={"term_name": "joint_vel", "weight": -1e-1, "num_steps": 10000})
@@ -428,10 +473,10 @@ class CurriculumCfg:
# params={
# # 路径rewards管理器 -> 你的奖励项名 -> params字典 -> std键
# "address": "rewards.gripper_lid_position_alignment.params.std",
# # 修改逻辑:使用我们刚才写的函数
# "modify_fn": mdp.annealing_std,
# # 传给函数的参数
# "modify_params": {
# "start_step": 00, # 约 600 轮
@@ -442,6 +487,7 @@ class CurriculumCfg:
# }
# )
@configclass
class MindbotEnvCfg(ManagerBasedRLEnvCfg):
# Scene settings
@@ -459,20 +505,20 @@ class MindbotEnvCfg(ManagerBasedRLEnvCfg):
def __post_init__(self) -> None:
"""Post initialization."""
# general settings
self.decimation = 4 #original 2 # 从 2 增加到 4控制频率从 25Hz 降到 12.5Hz
self.decimation = 4 # original 2 # 从 2 增加到 4控制频率从 25Hz 降到 12.5Hz
self.episode_length_s = 10
# viewer settings
self.viewer.eye = (3.5, 0.0, 3.2)
# simulation settings
self.sim.dt = 1 / 120 #original 1 / 60
self.sim.dt = 1 / 120 # original 1 / 60
self.sim.render_interval = self.decimation
# # 1. 基础堆内存
self.sim.physx.gpu_heap_capacity = 256 * 1024 * 1024 # 256MB
self.sim.physx.gpu_collision_stack_size = 1024 * 1024 * 1024
self.sim.physx.gpu_collision_stack_size = 1024 * 1024 * 1024
self.sim.physx.gpu_temp_buffer_capacity = 128 * 1024 * 1024
self.sim.physx.gpu_max_rigid_contact_count = 16 * 1024 * 1024 # 200万个接触点
self.sim.physx.gpu_max_rigid_patch_count = 1024 * 1024 # 32万个 patch
self.sim.physx.gpu_max_rigid_contact_count = 16 * 1024 * 1024 # 200万个接触点
self.sim.physx.gpu_max_rigid_patch_count = 1024 * 1024 # 32万个 patch
# # 5. 聚合对容量 (针对复杂的 Articulation)
self.sim.physx.gpu_found_lost_pairs_capacity = 16 * 1024 * 1024
self.sim.physx.gpu_found_lost_aggregate_pairs_capacity = 16 * 1024 * 1024

Some files were not shown because too many files have changed in this diff Show More