first commit
72
scripts/list_envs.py
Normal file
@@ -0,0 +1,72 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Script to print all the available environments in Isaac Lab.

The script iterates over all registered environments and stores the details in a table.
It prints the name of the environment, the entry point and the config file.

All the environments are registered in the `mindbot` extension. They start
with `Template-` in their name.
"""

"""Launch Isaac Sim Simulator first."""

import argparse

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="List Isaac Lab environments.")
parser.add_argument("--keyword", type=str, default=None, help="Keyword to filter environments.")
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app


"""Rest everything follows."""

import gymnasium as gym
from prettytable import PrettyTable

import mindbot.tasks  # noqa: F401


def main():
    """Print all environments registered in the `mindbot` extension."""
    # print all the available environments
    table = PrettyTable(["S. No.", "Task Name", "Entry Point", "Config"])
    table.title = "Available Environments in Isaac Lab"
    # set alignment of table columns
    table.align["Task Name"] = "l"
    table.align["Entry Point"] = "l"
    table.align["Config"] = "l"

    # count of environments
    index = 0
    # acquire all registered environment names
    for task_spec in gym.registry.values():
        if "Template-" in task_spec.id and (args_cli.keyword is None or args_cli.keyword in task_spec.id):
            # add details to table
            table.add_row([index + 1, task_spec.id, task_spec.entry_point, task_spec.kwargs["env_cfg_entry_point"]])
            # increment count
            index += 1

    print(table)


if __name__ == "__main__":
    try:
        # run the main function
        main()
    except Exception as e:
        raise e
    finally:
        # close the app
        simulation_app.close()
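The listing above only picks up tasks whose Gymnasium id contains "Template-" and that expose an "env_cfg_entry_point" kwarg. A minimal sketch of a registration that would show up in the table, with a hypothetical task id and entry points, looks like this:

# Minimal sketch (hypothetical names): register a task so that the listing
# script above can find it. The entry points are illustrative strings only;
# gymnasium does not import them until gym.make() is called.
import gymnasium as gym

gym.register(
    id="Template-Mindbot-Reach-v0",  # hypothetical task id containing "Template-"
    entry_point="isaaclab.envs:ManagerBasedRLEnv",
    disable_env_checker=True,
    kwargs={
        # hypothetical config entry point inside the mindbot extension
        "env_cfg_entry_point": "mindbot.tasks.reach.reach_env_cfg:ReachEnvCfg",
    },
)

# the same filter the script uses
for spec in gym.registry.values():
    if "Template-" in spec.id:
        print(spec.id, spec.kwargs["env_cfg_entry_point"])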
72
scripts/random_agent.py
Normal file
@@ -0,0 +1,72 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to run an environment with a random-action agent."""

"""Launch Isaac Sim Simulator first."""

import argparse

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Random agent for Isaac Lab environments.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import torch

import isaaclab_tasks  # noqa: F401
from isaaclab_tasks.utils import parse_env_cfg

import mindbot.tasks  # noqa: F401


def main():
    """Random-actions agent with an Isaac Lab environment."""
    # create environment configuration
    env_cfg = parse_env_cfg(
        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    # create environment
    env = gym.make(args_cli.task, cfg=env_cfg)

    # print info (this is a vectorized environment)
    print(f"[INFO]: Gym observation space: {env.observation_space}")
    print(f"[INFO]: Gym action space: {env.action_space}")
    # reset environment
    env.reset()
    # simulate environment
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # sample actions from -1 to 1
            actions = 2 * torch.rand(env.action_space.shape, device=env.unwrapped.device) - 1
            # apply actions
            env.step(actions)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
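The sampling line above relies on torch.rand drawing from [0, 1), so 2 * rand - 1 is uniform over [-1, 1). A standalone sketch of the same idea, generalized to an arbitrary bounded action range (shapes and bounds here are illustrative only):

# Standalone sketch: uniform action sampling for a bounded action space.
import torch

num_envs, action_dim = 4, 6
low, high = -1.0, 1.0

u = torch.rand(num_envs, action_dim)      # uniform in [0, 1)
actions = low + (high - low) * u          # uniform in [low, high); equals 2*u - 1 for [-1, 1)
assert actions.min() >= low and actions.max() < high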
239
scripts/rl_games/play.py
Normal file
@@ -0,0 +1,239 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to play a checkpoint of an RL agent from RL-Games."""

"""Launch Isaac Sim Simulator first."""

import argparse
import sys

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent from RL-Games.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos while playing.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument(
    "--agent", type=str, default="rl_games_cfg_entry_point", help="Name of the RL agent configuration entry point."
)
parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
parser.add_argument(
    "--use_pretrained_checkpoint",
    action="store_true",
    help="Use the pre-trained checkpoint from Nucleus.",
)
parser.add_argument(
    "--use_last_checkpoint",
    action="store_true",
    help="When no checkpoint provided, use the last saved model. Otherwise use the best saved model.",
)
parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli, hydra_args = parser.parse_known_args()
# always enable cameras to record video
if args_cli.video:
    args_cli.enable_cameras = True

# clear out sys.argv for Hydra
sys.argv = [sys.argv[0]] + hydra_args
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import math
import os
import random
import time
import torch

from rl_games.common import env_configurations, vecenv
from rl_games.common.player import BasePlayer
from rl_games.torch_runner import Runner

from isaaclab.envs import (
    DirectMARLEnv,
    DirectMARLEnvCfg,
    DirectRLEnvCfg,
    ManagerBasedRLEnvCfg,
    multi_agent_to_single_agent,
)
from isaaclab.utils.assets import retrieve_file_path
from isaaclab.utils.dict import print_dict

from isaaclab_rl.rl_games import RlGamesGpuEnv, RlGamesVecEnvWrapper
from isaaclab_rl.utils.pretrained_checkpoint import get_published_pretrained_checkpoint

import isaaclab_tasks  # noqa: F401
from isaaclab_tasks.utils import get_checkpoint_path
from isaaclab_tasks.utils.hydra import hydra_task_config

import mindbot.tasks  # noqa: F401


@hydra_task_config(args_cli.task, args_cli.agent)
def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: dict):
    """Play with RL-Games agent."""
    # grab task name for checkpoint path
    task_name = args_cli.task.split(":")[-1]
    train_task_name = task_name.replace("-Play", "")

    # override configurations with non-hydra CLI arguments
    env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
    env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device

    # randomly sample a seed if seed = -1
    if args_cli.seed == -1:
        args_cli.seed = random.randint(0, 10000)

    agent_cfg["params"]["seed"] = args_cli.seed if args_cli.seed is not None else agent_cfg["params"]["seed"]
    # set the environment seed (after multi-gpu config for updated rank from agent seed)
    # note: certain randomizations occur in the environment initialization so we set the seed here
    env_cfg.seed = agent_cfg["params"]["seed"]

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rl_games", agent_cfg["params"]["config"]["name"])
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Loading experiment from directory: {log_root_path}")
    # find checkpoint
    if args_cli.use_pretrained_checkpoint:
        resume_path = get_published_pretrained_checkpoint("rl_games", train_task_name)
        if not resume_path:
            print("[INFO] Unfortunately a pre-trained checkpoint is currently unavailable for this task.")
            return
    elif args_cli.checkpoint is None:
        # specify directory for logging runs
        run_dir = agent_cfg["params"]["config"].get("full_experiment_name", ".*")
        # specify name of checkpoint
        if args_cli.use_last_checkpoint:
            checkpoint_file = ".*"
        else:
            # this loads the best checkpoint
            checkpoint_file = f"{agent_cfg['params']['config']['name']}.pth"
        # get path to previous checkpoint
        resume_path = get_checkpoint_path(log_root_path, run_dir, checkpoint_file, other_dirs=["nn"])
    else:
        resume_path = retrieve_file_path(args_cli.checkpoint)
    log_dir = os.path.dirname(os.path.dirname(resume_path))

    # set the log directory for the environment (works for all environment types)
    env_cfg.log_dir = log_dir

    # wrap around environment for rl-games
    rl_device = agent_cfg["params"]["config"]["device"]
    clip_obs = agent_cfg["params"]["env"].get("clip_observations", math.inf)
    clip_actions = agent_cfg["params"]["env"].get("clip_actions", math.inf)
    obs_groups = agent_cfg["params"]["env"].get("obs_groups")
    concate_obs_groups = agent_cfg["params"]["env"].get("concate_obs_groups", True)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)

    # convert to single-agent instance if required by the RL algorithm
    if isinstance(env.unwrapped, DirectMARLEnv):
        env = multi_agent_to_single_agent(env)

    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_root_path, log_dir, "videos", "play"),
            "step_trigger": lambda step: step == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during play.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)

    # wrap around environment for rl-games
    env = RlGamesVecEnvWrapper(env, rl_device, clip_obs, clip_actions, obs_groups, concate_obs_groups)

    # register the environment to rl-games registry
    # note: in agents configuration: environment name must be "rlgpu"
    vecenv.register(
        "IsaacRlgWrapper", lambda config_name, num_actors, **kwargs: RlGamesGpuEnv(config_name, num_actors, **kwargs)
    )
    env_configurations.register("rlgpu", {"vecenv_type": "IsaacRlgWrapper", "env_creator": lambda **kwargs: env})

    # load previously trained model
    agent_cfg["params"]["load_checkpoint"] = True
    agent_cfg["params"]["load_path"] = resume_path
    print(f"[INFO]: Loading model checkpoint from: {agent_cfg['params']['load_path']}")

    # set number of actors into agent config
    agent_cfg["params"]["config"]["num_actors"] = env.unwrapped.num_envs
    # create runner from rl-games
    runner = Runner()
    runner.load(agent_cfg)
    # obtain the agent from the runner
    agent: BasePlayer = runner.create_player()
    agent.restore(resume_path)
    agent.reset()

    dt = env.unwrapped.step_dt

    # reset environment
    obs = env.reset()
    if isinstance(obs, dict):
        obs = obs["obs"]
    timestep = 0
    # required: enables the flag for batched observations
    _ = agent.get_batch_size(obs, 1)
    # initialize RNN states if used
    if agent.is_rnn:
        agent.init_rnn()
    # simulate environment
    # note: We simplified the logic in rl-games player.py (:func:`BasePlayer.run()`) function in an
    # attempt to have complete control over environment stepping. However, this removes other
    # operations such as masking that is used for multi-agent learning by RL-Games.
    while simulation_app.is_running():
        start_time = time.time()
        # run everything in inference mode
        with torch.inference_mode():
            # convert obs to agent format
            obs = agent.obs_to_torch(obs)
            # agent stepping
            actions = agent.get_action(obs, is_deterministic=agent.is_deterministic)
            # env stepping
            obs, _, dones, _ = env.step(actions)

            # perform operations for terminated episodes
            if len(dones) > 0:
                # reset rnn state for terminated episodes
                if agent.is_rnn and agent.states is not None:
                    for s in agent.states:
                        s[:, dones, :] = 0.0
        if args_cli.video:
            timestep += 1
            # exit the play loop after recording one video
            if timestep == args_cli.video_length:
                break

        # time delay for real-time evaluation
        sleep_time = dt - (time.time() - start_time)
        if args_cli.real_time and sleep_time > 0:
            time.sleep(sleep_time)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
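The RNN-state reset inside the play loop zeroes the recurrent hidden state only for environments that just terminated. A standalone sketch of that masking, assuming rl-games' usual (num_layers, num_envs, hidden_size) state layout and a boolean done mask (shapes here are illustrative):

# Standalone sketch: zero RNN hidden states for terminated environments.
import torch

num_layers, num_envs, hidden_size = 1, 4, 8
states = [torch.randn(num_layers, num_envs, hidden_size)]  # e.g. one GRU state tensor
dones = torch.tensor([False, True, False, True])           # which envs terminated this step

for s in states:
    s[:, dones, :] = 0.0  # reset only the terminated environments

assert torch.all(states[0][:, dones, :] == 0.0)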
261
scripts/rl_games/train.py
Normal file
@@ -0,0 +1,261 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to train an RL agent with RL-Games."""

"""Launch Isaac Sim Simulator first."""

import argparse
import sys
from distutils.util import strtobool

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RL-Games.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument(
    "--agent", type=str, default="rl_games_cfg_entry_point", help="Name of the RL agent configuration entry point."
)
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
parser.add_argument(
    "--distributed", action="store_true", default=False, help="Run training with multiple GPUs or nodes."
)
parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.")
parser.add_argument("--sigma", type=str, default=None, help="The policy's initial standard deviation.")
parser.add_argument("--max_iterations", type=int, default=None, help="RL Policy training iterations.")
parser.add_argument("--wandb-project-name", type=str, default=None, help="The wandb project name.")
parser.add_argument("--wandb-entity", type=str, default=None, help="The entity (team) of the wandb project.")
parser.add_argument("--wandb-name", type=str, default=None, help="The name of the wandb run.")
parser.add_argument(
    "--track",
    type=lambda x: bool(strtobool(x)),
    default=False,
    nargs="?",
    const=True,
    help="If toggled, this experiment will be tracked with Weights and Biases.",
)
parser.add_argument("--export_io_descriptors", action="store_true", default=False, help="Export IO descriptors.")
parser.add_argument(
    "--ray-proc-id", "-rid", type=int, default=None, help="Automatically configured by Ray integration, otherwise None."
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli, hydra_args = parser.parse_known_args()
# always enable cameras to record video
if args_cli.video:
    args_cli.enable_cameras = True

# clear out sys.argv for Hydra
sys.argv = [sys.argv[0]] + hydra_args

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import logging
import math
import os
import random
import time
from datetime import datetime

from rl_games.common import env_configurations, vecenv
from rl_games.common.algo_observer import IsaacAlgoObserver
from rl_games.torch_runner import Runner

from isaaclab.envs import (
    DirectMARLEnv,
    DirectMARLEnvCfg,
    DirectRLEnvCfg,
    ManagerBasedRLEnvCfg,
    multi_agent_to_single_agent,
)
from isaaclab.utils.assets import retrieve_file_path
from isaaclab.utils.dict import print_dict
from isaaclab.utils.io import dump_yaml

from isaaclab_rl.rl_games import MultiObserver, PbtAlgoObserver, RlGamesGpuEnv, RlGamesVecEnvWrapper

import isaaclab_tasks  # noqa: F401
from isaaclab_tasks.utils.hydra import hydra_task_config

# import logger
logger = logging.getLogger(__name__)

import mindbot.tasks  # noqa: F401


@hydra_task_config(args_cli.task, args_cli.agent)
def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: dict):
    """Train with RL-Games agent."""
    # override configurations with non-hydra CLI arguments
    env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
    env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
    # check for invalid combination of CPU device with distributed training
    if args_cli.distributed and args_cli.device is not None and "cpu" in args_cli.device:
        raise ValueError(
            "Distributed training is not supported when using CPU device. "
            "Please use GPU device (e.g., --device cuda) for distributed training."
        )

    # randomly sample a seed if seed = -1
    if args_cli.seed == -1:
        args_cli.seed = random.randint(0, 10000)

    agent_cfg["params"]["seed"] = args_cli.seed if args_cli.seed is not None else agent_cfg["params"]["seed"]
    agent_cfg["params"]["config"]["max_epochs"] = (
        args_cli.max_iterations if args_cli.max_iterations is not None else agent_cfg["params"]["config"]["max_epochs"]
    )
    if args_cli.checkpoint is not None:
        resume_path = retrieve_file_path(args_cli.checkpoint)
        agent_cfg["params"]["load_checkpoint"] = True
        agent_cfg["params"]["load_path"] = resume_path
        print(f"[INFO]: Loading model checkpoint from: {agent_cfg['params']['load_path']}")
    train_sigma = float(args_cli.sigma) if args_cli.sigma is not None else None

    # multi-gpu training config
    if args_cli.distributed:
        agent_cfg["params"]["seed"] += app_launcher.global_rank
        agent_cfg["params"]["config"]["device"] = f"cuda:{app_launcher.local_rank}"
        agent_cfg["params"]["config"]["device_name"] = f"cuda:{app_launcher.local_rank}"
        agent_cfg["params"]["config"]["multi_gpu"] = True
        # update env config device
        env_cfg.sim.device = f"cuda:{app_launcher.local_rank}"

    # set the environment seed (after multi-gpu config for updated rank from agent seed)
    # note: certain randomizations occur in the environment initialization so we set the seed here
    env_cfg.seed = agent_cfg["params"]["seed"]

    # specify directory for logging experiments
    config_name = agent_cfg["params"]["config"]["name"]
    log_root_path = os.path.join("logs", "rl_games", config_name)
    if "pbt" in agent_cfg and agent_cfg["pbt"]["directory"] != ".":
        log_root_path = os.path.join(agent_cfg["pbt"]["directory"], log_root_path)
    else:
        log_root_path = os.path.abspath(log_root_path)

    print(f"[INFO] Logging experiment in directory: {log_root_path}")
    # specify directory for logging runs
    log_dir = agent_cfg["params"]["config"].get("full_experiment_name", datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    # set directory into agent config
    # logging directory path: <train_dir>/<full_experiment_name>
    agent_cfg["params"]["config"]["train_dir"] = log_root_path
    agent_cfg["params"]["config"]["full_experiment_name"] = log_dir
    wandb_project = config_name if args_cli.wandb_project_name is None else args_cli.wandb_project_name
    experiment_name = log_dir if args_cli.wandb_name is None else args_cli.wandb_name

    # dump the configuration into log-directory
    dump_yaml(os.path.join(log_root_path, log_dir, "params", "env.yaml"), env_cfg)
    dump_yaml(os.path.join(log_root_path, log_dir, "params", "agent.yaml"), agent_cfg)
    print(f"Exact experiment name requested from command line: {os.path.join(log_root_path, log_dir)}")

    # read configurations about the agent-training
    rl_device = agent_cfg["params"]["config"]["device"]
    clip_obs = agent_cfg["params"]["env"].get("clip_observations", math.inf)
    clip_actions = agent_cfg["params"]["env"].get("clip_actions", math.inf)
    obs_groups = agent_cfg["params"]["env"].get("obs_groups")
    concate_obs_groups = agent_cfg["params"]["env"].get("concate_obs_groups", True)

    # set the IO descriptors export flag if requested
    if isinstance(env_cfg, ManagerBasedRLEnvCfg):
        env_cfg.export_io_descriptors = args_cli.export_io_descriptors
    else:
        logger.warning(
            "IO descriptors are only supported for manager based RL environments. No IO descriptors will be exported."
        )

    # set the log directory for the environment (works for all environment types)
    env_cfg.log_dir = os.path.join(log_root_path, log_dir)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)

    # convert to single-agent instance if required by the RL algorithm
    if isinstance(env.unwrapped, DirectMARLEnv):
        env = multi_agent_to_single_agent(env)

    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_root_path, log_dir, "videos", "train"),
            "step_trigger": lambda step: step % args_cli.video_interval == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)

    start_time = time.time()

    # wrap around environment for rl-games
    env = RlGamesVecEnvWrapper(env, rl_device, clip_obs, clip_actions, obs_groups, concate_obs_groups)

    # register the environment to rl-games registry
    # note: in agents configuration: environment name must be "rlgpu"
    vecenv.register(
        "IsaacRlgWrapper", lambda config_name, num_actors, **kwargs: RlGamesGpuEnv(config_name, num_actors, **kwargs)
    )
    env_configurations.register("rlgpu", {"vecenv_type": "IsaacRlgWrapper", "env_creator": lambda **kwargs: env})

    # set number of actors into agent config
    agent_cfg["params"]["config"]["num_actors"] = env.unwrapped.num_envs
    # create runner from rl-games
    if "pbt" in agent_cfg and agent_cfg["pbt"]["enabled"]:
        observers = MultiObserver([IsaacAlgoObserver(), PbtAlgoObserver(agent_cfg, args_cli)])
        runner = Runner(observers)
    else:
        runner = Runner(IsaacAlgoObserver())

    runner.load(agent_cfg)

    # reset the agent and env
    runner.reset()
    # train the agent

    global_rank = int(os.getenv("RANK", "0"))
    if args_cli.track and global_rank == 0:
        if args_cli.wandb_entity is None:
            raise ValueError("Weights and Biases entity must be specified for tracking.")
        import wandb

        wandb.init(
            project=wandb_project,
            entity=args_cli.wandb_entity,
            name=experiment_name,
            sync_tensorboard=True,
            monitor_gym=True,
            save_code=True,
        )
        if not wandb.run.resumed:
            wandb.config.update({"env_cfg": env_cfg.to_dict()})
            wandb.config.update({"agent_cfg": agent_cfg})

    if args_cli.checkpoint is not None:
        runner.run({"train": True, "play": False, "sigma": train_sigma, "checkpoint": resume_path})
    else:
        runner.run({"train": True, "play": False, "sigma": train_sigma})

    print(f"Training time: {round(time.time() - start_time, 2)} seconds")

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
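A pattern shared by all the training and play scripts above is the CLI split: parse_known_args consumes the flags the script owns and returns everything else untouched, which is then handed to Hydra by rewriting sys.argv. A standalone sketch with hypothetical flag values:

# Standalone sketch of the parse_known_args / sys.argv handoff used above.
import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, default=None)

# e.g. invoked as: train.py --task Template-Foo-v0 env.scene.num_envs=64
argv = ["--task", "Template-Foo-v0", "env.scene.num_envs=64"]
args_cli, hydra_args = parser.parse_known_args(argv)

print(args_cli.task)   # "Template-Foo-v0"
print(hydra_args)      # ["env.scene.num_envs=64"], i.e. the overrides forwarded to Hydra
sys.argv = [sys.argv[0]] + hydra_args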
91
scripts/rsl_rl/cli_args.py
Normal file
@@ -0,0 +1,91 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import argparse
import random
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg


def add_rsl_rl_args(parser: argparse.ArgumentParser):
    """Add RSL-RL arguments to the parser.

    Args:
        parser: The parser to add the arguments to.
    """
    # create a new argument group
    arg_group = parser.add_argument_group("rsl_rl", description="Arguments for RSL-RL agent.")
    # -- experiment arguments
    arg_group.add_argument(
        "--experiment_name", type=str, default=None, help="Name of the experiment folder where logs will be stored."
    )
    arg_group.add_argument("--run_name", type=str, default=None, help="Run name suffix to the log directory.")
    # -- load arguments
    arg_group.add_argument("--resume", action="store_true", default=False, help="Whether to resume from a checkpoint.")
    arg_group.add_argument("--load_run", type=str, default=None, help="Name of the run folder to resume from.")
    arg_group.add_argument("--checkpoint", type=str, default=None, help="Checkpoint file to resume from.")
    # -- logger arguments
    arg_group.add_argument(
        "--logger", type=str, default=None, choices={"wandb", "tensorboard", "neptune"}, help="Logger module to use."
    )
    arg_group.add_argument(
        "--log_project_name", type=str, default=None, help="Name of the logging project when using wandb or neptune."
    )


def parse_rsl_rl_cfg(task_name: str, args_cli: argparse.Namespace) -> RslRlBaseRunnerCfg:
    """Parse configuration for RSL-RL agent based on inputs.

    Args:
        task_name: The name of the environment.
        args_cli: The command line arguments.

    Returns:
        The parsed configuration for RSL-RL agent based on inputs.
    """
    from isaaclab_tasks.utils.parse_cfg import load_cfg_from_registry

    # load the default configuration
    rslrl_cfg: RslRlBaseRunnerCfg = load_cfg_from_registry(task_name, "rsl_rl_cfg_entry_point")
    rslrl_cfg = update_rsl_rl_cfg(rslrl_cfg, args_cli)
    return rslrl_cfg


def update_rsl_rl_cfg(agent_cfg: RslRlBaseRunnerCfg, args_cli: argparse.Namespace):
    """Update configuration for RSL-RL agent based on inputs.

    Args:
        agent_cfg: The configuration for RSL-RL agent.
        args_cli: The command line arguments.

    Returns:
        The updated configuration for RSL-RL agent based on inputs.
    """
    # override the default configuration with CLI arguments
    if hasattr(args_cli, "seed") and args_cli.seed is not None:
        # randomly sample a seed if seed = -1
        if args_cli.seed == -1:
            args_cli.seed = random.randint(0, 10000)
        agent_cfg.seed = args_cli.seed
    if args_cli.resume is not None:
        agent_cfg.resume = args_cli.resume
    if args_cli.load_run is not None:
        agent_cfg.load_run = args_cli.load_run
    if args_cli.checkpoint is not None:
        agent_cfg.load_checkpoint = args_cli.checkpoint
    if args_cli.run_name is not None:
        agent_cfg.run_name = args_cli.run_name
    if args_cli.logger is not None:
        agent_cfg.logger = args_cli.logger
    # set the project name for wandb and neptune
    if agent_cfg.logger in {"wandb", "neptune"} and args_cli.log_project_name:
        agent_cfg.wandb_project = args_cli.log_project_name
        agent_cfg.neptune_project = args_cli.log_project_name

    return agent_cfg
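A minimal sketch of how these helpers compose, with a SimpleNamespace standing in for the real RslRlBaseRunnerCfg from isaaclab_rl and made-up field values; it assumes it is run from scripts/rsl_rl so that cli_args is importable:

# Illustrative only: exercise add_rsl_rl_args / update_rsl_rl_cfg without Isaac Lab.
import argparse
from types import SimpleNamespace

import cli_args  # assumes the working directory is scripts/rsl_rl

parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=None)  # --seed is added by the train/play scripts, not here
cli_args.add_rsl_rl_args(parser)
args_cli = parser.parse_args(["--seed", "-1", "--run_name", "smoke_test", "--logger", "tensorboard"])

# stand-in for RslRlBaseRunnerCfg; attribute names mirror the ones touched above
agent_cfg = SimpleNamespace(
    seed=42, resume=False, load_run=".*", load_checkpoint="model_.*.pt",
    run_name="", logger="tensorboard", wandb_project="", neptune_project="",
)
agent_cfg = cli_args.update_rsl_rl_cfg(agent_cfg, args_cli)
print(agent_cfg.seed, agent_cfg.run_name)  # freshly sampled seed, "smoke_test"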
210
scripts/rsl_rl/play.py
Normal file
@@ -0,0 +1,210 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to play a checkpoint of an RL agent from RSL-RL."""

"""Launch Isaac Sim Simulator first."""

import argparse
import sys

from isaaclab.app import AppLauncher

# local imports
import cli_args  # isort: skip

# add argparse arguments
parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent from RSL-RL.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos while playing.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument(
    "--agent", type=str, default="rsl_rl_cfg_entry_point", help="Name of the RL agent configuration entry point."
)
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
parser.add_argument(
    "--use_pretrained_checkpoint",
    action="store_true",
    help="Use the pre-trained checkpoint from Nucleus.",
)
parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli, hydra_args = parser.parse_known_args()
# always enable cameras to record video
if args_cli.video:
    args_cli.enable_cameras = True

# clear out sys.argv for Hydra
sys.argv = [sys.argv[0]] + hydra_args

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import os
import time
import torch

from rsl_rl.runners import DistillationRunner, OnPolicyRunner

from isaaclab.envs import (
    DirectMARLEnv,
    DirectMARLEnvCfg,
    DirectRLEnvCfg,
    ManagerBasedRLEnvCfg,
    multi_agent_to_single_agent,
)
from isaaclab.utils.assets import retrieve_file_path
from isaaclab.utils.dict import print_dict

from isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg, RslRlVecEnvWrapper, export_policy_as_jit, export_policy_as_onnx
from isaaclab_rl.utils.pretrained_checkpoint import get_published_pretrained_checkpoint

import isaaclab_tasks  # noqa: F401
from isaaclab_tasks.utils import get_checkpoint_path
from isaaclab_tasks.utils.hydra import hydra_task_config

import mindbot.tasks  # noqa: F401


@hydra_task_config(args_cli.task, args_cli.agent)
def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: RslRlBaseRunnerCfg):
    """Play with RSL-RL agent."""
    # grab task name for checkpoint path
    task_name = args_cli.task.split(":")[-1]
    train_task_name = task_name.replace("-Play", "")

    # override configurations with non-hydra CLI arguments
    agent_cfg: RslRlBaseRunnerCfg = cli_args.update_rsl_rl_cfg(agent_cfg, args_cli)
    env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs

    # set the environment seed
    # note: certain randomizations occur in the environment initialization so we set the seed here
    env_cfg.seed = agent_cfg.seed
    env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Loading experiment from directory: {log_root_path}")
    if args_cli.use_pretrained_checkpoint:
        resume_path = get_published_pretrained_checkpoint("rsl_rl", train_task_name)
        if not resume_path:
            print("[INFO] Unfortunately a pre-trained checkpoint is currently unavailable for this task.")
            return
    elif args_cli.checkpoint:
        resume_path = retrieve_file_path(args_cli.checkpoint)
    else:
        resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint)

    log_dir = os.path.dirname(resume_path)

    # set the log directory for the environment (works for all environment types)
    env_cfg.log_dir = log_dir

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)

    # convert to single-agent instance if required by the RL algorithm
    if isinstance(env.unwrapped, DirectMARLEnv):
        env = multi_agent_to_single_agent(env)

    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_dir, "videos", "play"),
            "step_trigger": lambda step: step == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during play.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)

    # wrap around environment for rsl-rl
    env = RslRlVecEnvWrapper(env, clip_actions=agent_cfg.clip_actions)

    print(f"[INFO]: Loading model checkpoint from: {resume_path}")
    # load previously trained model
    if agent_cfg.class_name == "OnPolicyRunner":
        runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device)
    elif agent_cfg.class_name == "DistillationRunner":
        runner = DistillationRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device)
    else:
        raise ValueError(f"Unsupported runner class: {agent_cfg.class_name}")
    runner.load(resume_path)

    # obtain the trained policy for inference
    policy = runner.get_inference_policy(device=env.unwrapped.device)

    # extract the neural network module
    # we do this in a try-except to maintain backwards compatibility.
    try:
        # version 2.3 onwards
        policy_nn = runner.alg.policy
    except AttributeError:
        # version 2.2 and below
        policy_nn = runner.alg.actor_critic

    # extract the normalizer
    if hasattr(policy_nn, "actor_obs_normalizer"):
        normalizer = policy_nn.actor_obs_normalizer
    elif hasattr(policy_nn, "student_obs_normalizer"):
        normalizer = policy_nn.student_obs_normalizer
    else:
        normalizer = None

    # export policy to onnx/jit
    export_model_dir = os.path.join(os.path.dirname(resume_path), "exported")
    export_policy_as_jit(policy_nn, normalizer=normalizer, path=export_model_dir, filename="policy.pt")
    export_policy_as_onnx(policy_nn, normalizer=normalizer, path=export_model_dir, filename="policy.onnx")

    dt = env.unwrapped.step_dt

    # reset environment
    obs = env.get_observations()
    timestep = 0
    # simulate environment
    while simulation_app.is_running():
        start_time = time.time()
        # run everything in inference mode
        with torch.inference_mode():
            # agent stepping
            actions = policy(obs)
            # env stepping
            obs, _, dones, _ = env.step(actions)
            # reset recurrent states for episodes that have terminated
            policy_nn.reset(dones)
        if args_cli.video:
            timestep += 1
            # exit the play loop after recording one video
            if timestep == args_cli.video_length:
                break

        # time delay for real-time evaluation
        sleep_time = dt - (time.time() - start_time)
        if args_cli.real_time and sleep_time > 0:
            time.sleep(sleep_time)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
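The --real-time flag above pads each loop iteration so that one environment step takes at least step_dt seconds of wall-clock time. A standalone sketch of that pacing, with the environment step faked by a short sleep and an illustrative dt:

# Standalone sketch: pad each iteration up to a target wall-clock period.
import time

dt = 1.0 / 60.0  # illustrative step_dt

for _ in range(3):
    start_time = time.time()
    time.sleep(0.002)  # stand-in for policy inference + env.step()
    sleep_time = dt - (time.time() - start_time)
    if sleep_time > 0:
        time.sleep(sleep_time)
    print(f"iteration took {time.time() - start_time:.4f} s (target {dt:.4f} s)")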
233
scripts/rsl_rl/train.py
Normal file
@@ -0,0 +1,233 @@
# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to train an RL agent with RSL-RL."""

"""Launch Isaac Sim Simulator first."""

import argparse
import sys

from isaaclab.app import AppLauncher

# local imports
import cli_args  # isort: skip

# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument(
    "--agent", type=str, default="rsl_rl_cfg_entry_point", help="Name of the RL agent configuration entry point."
)
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
parser.add_argument("--max_iterations", type=int, default=None, help="RL Policy training iterations.")
parser.add_argument(
    "--distributed", action="store_true", default=False, help="Run training with multiple GPUs or nodes."
)
parser.add_argument("--export_io_descriptors", action="store_true", default=False, help="Export IO descriptors.")
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli, hydra_args = parser.parse_known_args()

# always enable cameras to record video
if args_cli.video:
    args_cli.enable_cameras = True

# clear out sys.argv for Hydra
sys.argv = [sys.argv[0]] + hydra_args

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Check for minimum supported RSL-RL version."""

import importlib.metadata as metadata
import platform

from packaging import version

# check minimum supported rsl-rl version
RSL_RL_VERSION = "3.0.1"
installed_version = metadata.version("rsl-rl-lib")
if version.parse(installed_version) < version.parse(RSL_RL_VERSION):
    if platform.system() == "Windows":
        cmd = [r".\isaaclab.bat", "-p", "-m", "pip", "install", f"rsl-rl-lib=={RSL_RL_VERSION}"]
    else:
        cmd = ["./isaaclab.sh", "-p", "-m", "pip", "install", f"rsl-rl-lib=={RSL_RL_VERSION}"]
    print(
        f"Please install the correct version of RSL-RL.\nExisting version is: '{installed_version}'"
        f" and required version is: '{RSL_RL_VERSION}'.\nTo install the correct version, run:"
        f"\n\n\t{' '.join(cmd)}\n"
    )
    exit(1)

"""Rest everything follows."""

import gymnasium as gym
import os
import torch
from datetime import datetime

import omni
from rsl_rl.runners import DistillationRunner, OnPolicyRunner

from isaaclab.envs import (
    DirectMARLEnv,
    DirectMARLEnvCfg,
    DirectRLEnvCfg,
    ManagerBasedRLEnvCfg,
    multi_agent_to_single_agent,
)
from isaaclab.utils.dict import print_dict
from isaaclab.utils.io import dump_yaml

from isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg, RslRlVecEnvWrapper

import isaaclab_tasks  # noqa: F401
from isaaclab_tasks.utils import get_checkpoint_path
from isaaclab_tasks.utils.hydra import hydra_task_config

import mindbot.tasks  # noqa: F401

torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False


@hydra_task_config(args_cli.task, args_cli.agent)
def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: RslRlBaseRunnerCfg):
    """Train with RSL-RL agent."""
    # override configurations with non-hydra CLI arguments
    agent_cfg = cli_args.update_rsl_rl_cfg(agent_cfg, args_cli)
    env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
    agent_cfg.max_iterations = (
        args_cli.max_iterations if args_cli.max_iterations is not None else agent_cfg.max_iterations
    )

    # set the environment seed
    # note: certain randomizations occur in the environment initialization so we set the seed here
    env_cfg.seed = agent_cfg.seed
    env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
    # check for invalid combination of CPU device with distributed training
    if args_cli.distributed and args_cli.device is not None and "cpu" in args_cli.device:
        raise ValueError(
            "Distributed training is not supported when using CPU device. "
            "Please use GPU device (e.g., --device cuda) for distributed training."
        )

    # multi-gpu training configuration
    if args_cli.distributed:
        env_cfg.sim.device = f"cuda:{app_launcher.local_rank}"
        agent_cfg.device = f"cuda:{app_launcher.local_rank}"

        # set seed to have diversity in different threads
        seed = agent_cfg.seed + app_launcher.local_rank
        env_cfg.seed = seed
        agent_cfg.seed = seed

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Logging experiment in directory: {log_root_path}")
    # specify directory for logging runs: {time-stamp}_{run_name}
    log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    # The Ray Tune workflow extracts the experiment name from the logging line below; hence, do not change it (see PR #2346, comment-2819298849)
    print(f"Exact experiment name requested from command line: {log_dir}")
    if agent_cfg.run_name:
        log_dir += f"_{agent_cfg.run_name}"
    log_dir = os.path.join(log_root_path, log_dir)

    # set the IO descriptors export flag if requested
    if isinstance(env_cfg, ManagerBasedRLEnvCfg):
        env_cfg.export_io_descriptors = args_cli.export_io_descriptors
    else:
        omni.log.warn(
            "IO descriptors are only supported for manager based RL environments. No IO descriptors will be exported."
        )

    # set the log directory for the environment (works for all environment types)
    env_cfg.log_dir = log_dir

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)

    # convert to single-agent instance if required by the RL algorithm
    if isinstance(env.unwrapped, DirectMARLEnv):
        env = multi_agent_to_single_agent(env)

    # save resume path before creating a new log_dir
    if agent_cfg.resume or agent_cfg.algorithm.class_name == "Distillation":
        resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint)

    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_dir, "videos", "train"),
            "step_trigger": lambda step: step % args_cli.video_interval == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)

    # wrap around environment for rsl-rl
    env = RslRlVecEnvWrapper(env, clip_actions=agent_cfg.clip_actions)

    # # add a pdb breakpoint to print left_arm_ee
    # import pdb
    # original_step = env.step
    # def step_with_debug(action):
    #     result = original_step(action)
    #     # access the underlying environment's action_manager
    #     unwrapped_env = env.unwrapped
    #     if hasattr(unwrapped_env, 'action_manager'):
    #         left_arm_ee_term = unwrapped_env.action_manager.get_term("left_arm_ee")
    #         if left_arm_ee_term is not None:
    #             print(f"left_arm_ee raw_actions: {left_arm_ee_term.raw_actions}")
    #             print(f"left_arm_ee processed_actions: {left_arm_ee_term.processed_actions}")
    #             pdb.set_trace()  # set the breakpoint here
    #     return result
    # env.step = step_with_debug

    # create runner from rsl-rl
    if agent_cfg.class_name == "OnPolicyRunner":
        runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=log_dir, device=agent_cfg.device)
    elif agent_cfg.class_name == "DistillationRunner":
        runner = DistillationRunner(env, agent_cfg.to_dict(), log_dir=log_dir, device=agent_cfg.device)
    else:
        raise ValueError(f"Unsupported runner class: {agent_cfg.class_name}")
    # write git state to logs
    runner.add_git_repo_to_log(__file__)
    # load the checkpoint
    if agent_cfg.resume or agent_cfg.algorithm.class_name == "Distillation":
        print(f"[INFO]: Loading model checkpoint from: {resume_path}")
        # load previously trained model
        runner.load(resume_path)

    # dump the configuration into log-directory
    dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
    dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg)

    # run training
    runner.learn(num_learning_iterations=agent_cfg.max_iterations, init_at_random_ep_len=True)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
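The RSL-RL version gate above compares the installed rsl-rl-lib distribution against a minimum using importlib.metadata and packaging. A standalone sketch of the same check against a package that is likely already installed (the name and minimum version here are only examples):

# Standalone sketch: gate on a minimum installed package version.
import importlib.metadata as metadata

from packaging import version

MIN_VERSION = "20.0"
installed = metadata.version("packaging")
if version.parse(installed) < version.parse(MIN_VERSION):
    print(f"Need at least packaging=={MIN_VERSION}, found {installed}")
else:
    print(f"packaging {installed} satisfies >= {MIN_VERSION}")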
213
scripts/sb3/play.py
Normal file
@@ -0,0 +1,213 @@
|
||||
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
|
||||
# All rights reserved.
|
||||
#
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
"""Script to play a checkpoint if an RL agent from Stable-Baselines3."""
|
||||
|
||||
"""Launch Isaac Sim Simulator first."""
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from isaaclab.app import AppLauncher
|
||||
|
||||
# add argparse arguments
|
||||
parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent from Stable-Baselines3.")
|
||||
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
|
||||
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
|
||||
parser.add_argument(
|
||||
"--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
|
||||
)
|
||||
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
|
||||
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
|
||||
parser.add_argument(
|
||||
"--agent", type=str, default="sb3_cfg_entry_point", help="Name of the RL agent configuration entry point."
|
||||
)
|
||||
parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.")
|
||||
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
|
||||
parser.add_argument(
|
||||
"--use_pretrained_checkpoint",
|
||||
action="store_true",
|
||||
help="Use the pre-trained checkpoint from Nucleus.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--use_last_checkpoint",
|
||||
action="store_true",
|
||||
help="When no checkpoint provided, use the last saved model. Otherwise use the best saved model.",
|
||||
)
|
||||
parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
|
||||
parser.add_argument(
|
||||
"--keep_all_info",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Use a slower SB3 wrapper but keep all the extra training info.",
|
||||
)
|
||||
# append AppLauncher cli args
|
||||
AppLauncher.add_app_launcher_args(parser)
|
||||
# parse the arguments
|
||||
args_cli, hydra_args = parser.parse_known_args()
|
||||
|
||||
# always enable cameras to record video
|
||||
if args_cli.video:
|
||||
args_cli.enable_cameras = True
|
||||
|
||||
# clear out sys.argv for Hydra
|
||||
sys.argv = [sys.argv[0]] + hydra_args
|
||||
# launch omniverse app
|
||||
app_launcher = AppLauncher(args_cli)
|
||||
simulation_app = app_launcher.app
|
||||
|
||||
"""Rest everything follows."""
|
||||
|
||||
import gymnasium as gym
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
import torch
|
||||
|
||||
from stable_baselines3 import PPO
|
||||
from stable_baselines3.common.vec_env import VecNormalize
|
||||
|
||||
from isaaclab.envs import (
|
||||
DirectMARLEnv,
|
||||
DirectMARLEnvCfg,
|
||||
DirectRLEnvCfg,
|
||||
ManagerBasedRLEnvCfg,
|
||||
multi_agent_to_single_agent,
|
||||
)
|
||||
from isaaclab.utils.dict import print_dict
|
||||
|
||||
from isaaclab_rl.sb3 import Sb3VecEnvWrapper, process_sb3_cfg
|
||||
from isaaclab_rl.utils.pretrained_checkpoint import get_published_pretrained_checkpoint
|
||||
|
||||
import isaaclab_tasks # noqa: F401
|
||||
from isaaclab_tasks.utils.hydra import hydra_task_config
|
||||
from isaaclab_tasks.utils.parse_cfg import get_checkpoint_path
|
||||
|
||||
import mindbot.tasks # noqa: F401
|
||||
|
||||
|
||||
@hydra_task_config(args_cli.task, args_cli.agent)
|
||||
def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: dict):
|
||||
"""Play with stable-baselines agent."""
|
||||
# grab task name for checkpoint path
|
||||
task_name = args_cli.task.split(":")[-1]
|
||||
train_task_name = task_name.replace("-Play", "")
|
||||
# randomly sample a seed if seed = -1
|
||||
if args_cli.seed == -1:
|
||||
args_cli.seed = random.randint(0, 10000)
|
||||
|
||||
# override configurations with non-hydra CLI arguments
|
||||
env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
|
||||
agent_cfg["seed"] = args_cli.seed if args_cli.seed is not None else agent_cfg["seed"]
|
||||
# set the environment seed
|
||||
# note: certain randomizations occur in the environment initialization so we set the seed here
|
||||
env_cfg.seed = agent_cfg["seed"]
|
||||
env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
|
||||
|
||||
# directory for logging into
|
||||
log_root_path = os.path.join("logs", "sb3", train_task_name)
|
||||
log_root_path = os.path.abspath(log_root_path)
|
||||
# checkpoint and log_dir stuff
|
||||
if args_cli.use_pretrained_checkpoint:
|
||||
checkpoint_path = get_published_pretrained_checkpoint("sb3", train_task_name)
|
||||
if not checkpoint_path:
|
||||
print("[INFO] Unfortunately a pre-trained checkpoint is currently unavailable for this task.")
|
||||
return
|
||||
elif args_cli.checkpoint is None:
|
||||
# FIXME: last checkpoint doesn't seem to really use the last one'
|
||||
if args_cli.use_last_checkpoint:
|
||||
checkpoint = "model_.*.zip"
|
||||
else:
|
||||
checkpoint = "model.zip"
|
||||
checkpoint_path = get_checkpoint_path(log_root_path, ".*", checkpoint, sort_alpha=False)
|
||||
else:
|
||||
checkpoint_path = args_cli.checkpoint
|
||||
log_dir = os.path.dirname(checkpoint_path)
|
||||

    # set the log directory for the environment (works for all environment types)
    env_cfg.log_dir = log_dir

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)

    # post-process agent configuration
    agent_cfg = process_sb3_cfg(agent_cfg, env.unwrapped.num_envs)

    # convert to single-agent instance if required by the RL algorithm
    if isinstance(env.unwrapped, DirectMARLEnv):
        env = multi_agent_to_single_agent(env)

    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_dir, "videos", "play"),
            "step_trigger": lambda step: step == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during playback.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)
    # wrap around environment for stable baselines
    env = Sb3VecEnvWrapper(env, fast_variant=not args_cli.keep_all_info)

    vec_norm_path = checkpoint_path.replace("/model", "/model_vecnormalize").replace(".zip", ".pkl")
    vec_norm_path = Path(vec_norm_path)
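    # e.g. (illustrative): ".../2026-01-01_12-00-00/model.zip" -> ".../2026-01-01_12-00-00/model_vecnormalize.pkl",
    # intended to match the normalization statistics file saved by the SB3 training script alongside the model.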

    # normalize environment (if needed)
    if vec_norm_path.exists():
        print(f"Loading saved normalization: {vec_norm_path}")
        env = VecNormalize.load(vec_norm_path, env)
        # do not update them at test time
        env.training = False
        # reward normalization is not needed at test time
        env.norm_reward = False
    elif "normalize_input" in agent_cfg:
        env = VecNormalize(
            env,
            training=True,
            norm_obs="normalize_input" in agent_cfg and agent_cfg.pop("normalize_input"),
            clip_obs="clip_obs" in agent_cfg and agent_cfg.pop("clip_obs"),
        )

    # create agent from stable baselines
    print(f"Loading checkpoint from: {checkpoint_path}")
    agent = PPO.load(checkpoint_path, env, print_system_info=True)

    dt = env.unwrapped.step_dt

    # reset environment
    obs = env.reset()
    timestep = 0
    # simulate environment
    while simulation_app.is_running():
        start_time = time.time()
        # run everything in inference mode
        with torch.inference_mode():
            # agent stepping
            actions, _ = agent.predict(obs, deterministic=True)
            # env stepping
            obs, _, _, _ = env.step(actions)
        if args_cli.video:
            timestep += 1
            # Exit the play loop after recording one video
            if timestep == args_cli.video_length:
                break

        # time delay for real-time evaluation
        sleep_time = dt - (time.time() - start_time)
        if args_cli.real_time and sleep_time > 0:
            time.sleep(sleep_time)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
239
scripts/sb3/train.py
Normal file
@@ -0,0 +1,239 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to train RL agent with Stable Baselines3."""
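
# Example usage (illustrative only; the task id is a placeholder, use a task registered by the `mindbot`
# extension, e.g. one listed by `scripts/list_envs.py`, and run with the Isaac Lab Python environment):
#   python scripts/sb3/train.py --task <Your-Task-Id> --num_envs 64 --headless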
"""Launch Isaac Sim Simulator first."""
|
||||
|
||||
import argparse
|
||||
import contextlib
|
||||
import signal
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from isaaclab.app import AppLauncher
|
||||
|
||||
# add argparse arguments
|
||||
parser = argparse.ArgumentParser(description="Train an RL agent with Stable-Baselines3.")
|
||||
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
|
||||
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
|
||||
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
|
||||
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
|
||||
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
|
||||
parser.add_argument(
|
||||
"--agent", type=str, default="sb3_cfg_entry_point", help="Name of the RL agent configuration entry point."
|
||||
)
|
||||
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
|
||||
parser.add_argument("--log_interval", type=int, default=100_000, help="Log data every n timesteps.")
|
||||
parser.add_argument("--checkpoint", type=str, default=None, help="Continue the training from checkpoint.")
|
||||
parser.add_argument("--max_iterations", type=int, default=None, help="RL Policy training iterations.")
|
||||
parser.add_argument("--export_io_descriptors", action="store_true", default=False, help="Export IO descriptors.")
|
||||
parser.add_argument(
|
||||
"--keep_all_info",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Use a slower SB3 wrapper but keep all the extra training info.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--ray-proc-id", "-rid", type=int, default=None, help="Automatically configured by Ray integration, otherwise None."
|
||||
)
|
||||
# append AppLauncher cli args
|
||||
AppLauncher.add_app_launcher_args(parser)
|
||||
# parse the arguments
|
||||
args_cli, hydra_args = parser.parse_known_args()
|
||||
# always enable cameras to record video
|
||||
if args_cli.video:
|
||||
args_cli.enable_cameras = True
|
||||
|
||||
# clear out sys.argv for Hydra
|
||||
sys.argv = [sys.argv[0]] + hydra_args
|
||||
|
||||
# launch omniverse app
|
||||
app_launcher = AppLauncher(args_cli)
|
||||
simulation_app = app_launcher.app
|
||||
|
||||
|
||||
def cleanup_pbar(*args):
|
||||
"""
|
||||
A small helper to stop training and
|
||||
cleanup progress bar properly on ctrl+c
|
||||
"""
|
||||
import gc
|
||||
|
||||
tqdm_objects = [obj for obj in gc.get_objects() if "tqdm" in type(obj).__name__]
|
||||
for tqdm_object in tqdm_objects:
|
||||
if "tqdm_rich" in type(tqdm_object).__name__:
|
||||
tqdm_object.close()
|
||||
raise KeyboardInterrupt
|
||||
|
||||
|
||||
# disable KeyboardInterrupt override
|
||||
signal.signal(signal.SIGINT, cleanup_pbar)
|
||||
|
||||
"""Rest everything follows."""
|
||||
|
||||
import gymnasium as gym
|
||||
import logging
|
||||
import numpy as np
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
from datetime import datetime
|
||||
|
||||
from stable_baselines3 import PPO
|
||||
from stable_baselines3.common.callbacks import CheckpointCallback, LogEveryNTimesteps
|
||||
from stable_baselines3.common.vec_env import VecNormalize
|
||||
|
||||
from isaaclab.envs import (
|
||||
DirectMARLEnv,
|
||||
DirectMARLEnvCfg,
|
||||
DirectRLEnvCfg,
|
||||
ManagerBasedRLEnvCfg,
|
||||
multi_agent_to_single_agent,
|
||||
)
|
||||
from isaaclab.utils.dict import print_dict
|
||||
from isaaclab.utils.io import dump_yaml
|
||||
|
||||
from isaaclab_rl.sb3 import Sb3VecEnvWrapper, process_sb3_cfg
|
||||
|
||||
import isaaclab_tasks # noqa: F401
|
||||
from isaaclab_tasks.utils.hydra import hydra_task_config
|
||||
|
||||
# import logger
|
||||
logger = logging.getLogger(__name__)
|
||||
import mindbot.tasks # noqa: F401
|
||||
|
||||
|
||||
@hydra_task_config(args_cli.task, args_cli.agent)
def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: dict):
    """Train with stable-baselines agent."""
    # randomly sample a seed if seed = -1
    if args_cli.seed == -1:
        args_cli.seed = random.randint(0, 10000)

    # override configurations with non-hydra CLI arguments
    env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
    agent_cfg["seed"] = args_cli.seed if args_cli.seed is not None else agent_cfg["seed"]
    # max iterations for training
    if args_cli.max_iterations is not None:
        agent_cfg["n_timesteps"] = args_cli.max_iterations * agent_cfg["n_steps"] * env_cfg.scene.num_envs
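        # e.g. (illustrative numbers): 1000 iterations x 32 rollout steps (`n_steps`) x 64 envs
        # = 2,048,000 total environment timesteps passed to `agent.learn()`.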

    # set the environment seed
    # note: certain randomizations occur in the environment initialization so we set the seed here
    env_cfg.seed = agent_cfg["seed"]
    env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device

    # directory for logging into
    run_info = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    log_root_path = os.path.abspath(os.path.join("logs", "sb3", args_cli.task))
    print(f"[INFO] Logging experiment in directory: {log_root_path}")
    # The Ray Tune workflow extracts experiment name using the logging line below, hence, do not change it (see PR #2346, comment-2819298849)
    print(f"Exact experiment name requested from command line: {run_info}")
    log_dir = os.path.join(log_root_path, run_info)
    # dump the configuration into log-directory
    dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
    dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg)

    # save command used to run the script
    command = " ".join(sys.orig_argv)
    (Path(log_dir) / "command.txt").write_text(command)

    # post-process agent configuration
    agent_cfg = process_sb3_cfg(agent_cfg, env_cfg.scene.num_envs)
    # read configurations about the agent-training
    policy_arch = agent_cfg.pop("policy")
    n_timesteps = agent_cfg.pop("n_timesteps")

    # set the IO descriptors export flag if requested
    if isinstance(env_cfg, ManagerBasedRLEnvCfg):
        env_cfg.export_io_descriptors = args_cli.export_io_descriptors
    else:
        logger.warning(
            "IO descriptors are only supported for manager based RL environments. No IO descriptors will be exported."
        )

    # set the log directory for the environment (works for all environment types)
    env_cfg.log_dir = log_dir

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)

    # convert to single-agent instance if required by the RL algorithm
    if isinstance(env.unwrapped, DirectMARLEnv):
        env = multi_agent_to_single_agent(env)

    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_dir, "videos", "train"),
            "step_trigger": lambda step: step % args_cli.video_interval == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)

    start_time = time.time()

    # wrap around environment for stable baselines
    env = Sb3VecEnvWrapper(env, fast_variant=not args_cli.keep_all_info)

    norm_keys = {"normalize_input", "normalize_value", "clip_obs"}
    norm_args = {}
    for key in norm_keys:
        if key in agent_cfg:
            norm_args[key] = agent_cfg.pop(key)

    if norm_args and norm_args.get("normalize_input"):
        print(f"Normalizing input, {norm_args=}")
        env = VecNormalize(
            env,
            training=True,
            norm_obs=norm_args["normalize_input"],
            norm_reward=norm_args.get("normalize_value", False),
            clip_obs=norm_args.get("clip_obs", 100.0),
            gamma=agent_cfg["gamma"],
            clip_reward=np.inf,
        )

    # create agent from stable baselines
    agent = PPO(policy_arch, env, verbose=1, tensorboard_log=log_dir, **agent_cfg)
    if args_cli.checkpoint is not None:
        agent = agent.load(args_cli.checkpoint, env, print_system_info=True)

    # callbacks for agent
    checkpoint_callback = CheckpointCallback(save_freq=1000, save_path=log_dir, name_prefix="model", verbose=2)
    callbacks = [checkpoint_callback, LogEveryNTimesteps(n_steps=args_cli.log_interval)]

    # train the agent
    with contextlib.suppress(KeyboardInterrupt):
        agent.learn(
            total_timesteps=n_timesteps,
            callback=callbacks,
            progress_bar=True,
            log_interval=None,
        )
    # save the final model
    agent.save(os.path.join(log_dir, "model"))
    print("Saving to:")
    print(os.path.join(log_dir, "model.zip"))

    if isinstance(env, VecNormalize):
        print("Saving normalization")
        env.save(os.path.join(log_dir, "model_vecnormalize.pkl"))

    print(f"Training time: {round(time.time() - start_time, 2)} seconds")

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
250
scripts/skrl/play.py
Normal file
@@ -0,0 +1,250 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Script to play a checkpoint of an RL agent from skrl.

Visit the skrl documentation (https://skrl.readthedocs.io) to see the examples structured in
a more user-friendly way.
"""
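
# Example usage (illustrative only; the task id and checkpoint path are placeholders):
#   python scripts/skrl/play.py --task <Your-Task-Id> --num_envs 32 --checkpoint /path/to/agent.pt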

"""Launch Isaac Sim Simulator first."""

import argparse
import sys

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent from skrl.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during playback.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument(
    "--agent",
    type=str,
    default=None,
    help=(
        "Name of the RL agent configuration entry point. Defaults to None, in which case the argument "
        "--algorithm is used to determine the default agent configuration entry point."
    ),
)
parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
parser.add_argument(
    "--use_pretrained_checkpoint",
    action="store_true",
    help="Use the pre-trained checkpoint from Nucleus.",
)
parser.add_argument(
    "--ml_framework",
    type=str,
    default="torch",
    choices=["torch", "jax", "jax-numpy"],
    help="The ML framework used for training the skrl agent.",
)
parser.add_argument(
    "--algorithm",
    type=str,
    default="PPO",
    choices=["AMP", "PPO", "IPPO", "MAPPO"],
    help="The RL algorithm used for training the skrl agent.",
)
parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")

# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli, hydra_args = parser.parse_known_args()
# always enable cameras to record video
if args_cli.video:
    args_cli.enable_cameras = True

# clear out sys.argv for Hydra
sys.argv = [sys.argv[0]] + hydra_args
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""
|
||||
|
||||
import gymnasium as gym
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
import torch
|
||||
from packaging import version
|
||||
|
||||
import skrl
|
||||
|
||||
# check for minimum supported skrl version
|
||||
SKRL_VERSION = "1.4.3"
|
||||
if version.parse(skrl.__version__) < version.parse(SKRL_VERSION):
|
||||
skrl.logger.error(
|
||||
f"Unsupported skrl version: {skrl.__version__}. "
|
||||
f"Install supported version using 'pip install skrl>={SKRL_VERSION}'"
|
||||
)
|
||||
exit()
|
||||
|
||||
if args_cli.ml_framework.startswith("torch"):
|
||||
from skrl.utils.runner.torch import Runner
|
||||
elif args_cli.ml_framework.startswith("jax"):
|
||||
from skrl.utils.runner.jax import Runner
|
||||
|
||||
from isaaclab.envs import (
|
||||
DirectMARLEnv,
|
||||
DirectMARLEnvCfg,
|
||||
DirectRLEnvCfg,
|
||||
ManagerBasedRLEnvCfg,
|
||||
multi_agent_to_single_agent,
|
||||
)
|
||||
from isaaclab.utils.dict import print_dict
|
||||
|
||||
from isaaclab_rl.skrl import SkrlVecEnvWrapper
|
||||
from isaaclab_rl.utils.pretrained_checkpoint import get_published_pretrained_checkpoint
|
||||
|
||||
import isaaclab_tasks # noqa: F401
|
||||
from isaaclab_tasks.utils import get_checkpoint_path
|
||||
from isaaclab_tasks.utils.hydra import hydra_task_config
|
||||
|
||||
import mindbot.tasks # noqa: F401
|
||||
|
||||
# config shortcuts
|
||||
if args_cli.agent is None:
|
||||
algorithm = args_cli.algorithm.lower()
|
||||
agent_cfg_entry_point = "skrl_cfg_entry_point" if algorithm in ["ppo"] else f"skrl_{algorithm}_cfg_entry_point"
|
||||
else:
|
||||
agent_cfg_entry_point = args_cli.agent
|
||||
algorithm = agent_cfg_entry_point.split("_cfg")[0].split("skrl_")[-1].lower()
|
||||
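# e.g. (illustrative): "--algorithm IPPO" with no "--agent" resolves to the "skrl_ippo_cfg_entry_point"
# registration kwarg, while the default PPO falls back to "skrl_cfg_entry_point".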


@hydra_task_config(args_cli.task, agent_cfg_entry_point)
def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, experiment_cfg: dict):
    """Play with skrl agent."""
    # grab task name for checkpoint path
    task_name = args_cli.task.split(":")[-1]
    train_task_name = task_name.replace("-Play", "")

    # override configurations with non-hydra CLI arguments
    env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
    env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device

    # configure the ML framework into the global skrl variable
    if args_cli.ml_framework.startswith("jax"):
        skrl.config.jax.backend = "jax" if args_cli.ml_framework == "jax" else "numpy"

    # randomly sample a seed if seed = -1
    if args_cli.seed == -1:
        args_cli.seed = random.randint(0, 10000)

    # set the agent and environment seed from command line
    # note: certain randomizations occur in the environment initialization so we set the seed here
    experiment_cfg["seed"] = args_cli.seed if args_cli.seed is not None else experiment_cfg["seed"]
    env_cfg.seed = experiment_cfg["seed"]

    # specify directory for logging experiments (load checkpoint)
    log_root_path = os.path.join("logs", "skrl", experiment_cfg["agent"]["experiment"]["directory"])
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Loading experiment from directory: {log_root_path}")
    # get checkpoint path
    if args_cli.use_pretrained_checkpoint:
        resume_path = get_published_pretrained_checkpoint("skrl", train_task_name)
        if not resume_path:
            print("[INFO] A pre-trained checkpoint is currently unavailable for this task.")
            return
    elif args_cli.checkpoint:
        resume_path = os.path.abspath(args_cli.checkpoint)
    else:
        resume_path = get_checkpoint_path(
            log_root_path, run_dir=f".*_{algorithm}_{args_cli.ml_framework}", other_dirs=["checkpoints"]
        )
    log_dir = os.path.dirname(os.path.dirname(resume_path))

    # set the log directory for the environment (works for all environment types)
    env_cfg.log_dir = log_dir

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)

    # convert to single-agent instance if required by the RL algorithm
    if isinstance(env.unwrapped, DirectMARLEnv) and algorithm in ["ppo"]:
        env = multi_agent_to_single_agent(env)

    # get environment (step) dt for real-time evaluation
    try:
        dt = env.step_dt
    except AttributeError:
        dt = env.unwrapped.step_dt

    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_dir, "videos", "play"),
            "step_trigger": lambda step: step == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during playback.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)

    # wrap around environment for skrl
    env = SkrlVecEnvWrapper(env, ml_framework=args_cli.ml_framework)  # same as: `wrap_env(env, wrapper="auto")`

    # configure and instantiate the skrl runner
    # https://skrl.readthedocs.io/en/latest/api/utils/runner.html
    experiment_cfg["trainer"]["close_environment_at_exit"] = False
    experiment_cfg["agent"]["experiment"]["write_interval"] = 0  # don't log to TensorBoard
    experiment_cfg["agent"]["experiment"]["checkpoint_interval"] = 0  # don't generate checkpoints
    runner = Runner(env, experiment_cfg)

    print(f"[INFO] Loading model checkpoint from: {resume_path}")
    runner.agent.load(resume_path)
    # set agent to evaluation mode
    runner.agent.set_running_mode("eval")

    # reset environment
    obs, _ = env.reset()
    timestep = 0
    # simulate environment
    while simulation_app.is_running():
        start_time = time.time()

        # run everything in inference mode
        with torch.inference_mode():
            # agent stepping
            outputs = runner.agent.act(obs, timestep=0, timesteps=0)
            # - multi-agent (deterministic) actions
            if hasattr(env, "possible_agents"):
                actions = {a: outputs[-1][a].get("mean_actions", outputs[0][a]) for a in env.possible_agents}
            # - single-agent (deterministic) actions
            else:
                actions = outputs[-1].get("mean_actions", outputs[0])
            # env stepping
            obs, _, _, _, _ = env.step(actions)
        if args_cli.video:
            timestep += 1
            # exit the play loop after recording one video
            if timestep == args_cli.video_length:
                break

        # time delay for real-time evaluation
        sleep_time = dt - (time.time() - start_time)
        if args_cli.real_time and sleep_time > 0:
            time.sleep(sleep_time)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
245
scripts/skrl/train.py
Normal file
@@ -0,0 +1,245 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Script to train RL agent with skrl.

Visit the skrl documentation (https://skrl.readthedocs.io) to see the examples structured in
a more user-friendly way.
"""
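
# Example usage (illustrative only; the task id is a placeholder, pick a task registered by the `mindbot`
# extension and run with the Isaac Lab Python environment):
#   python scripts/skrl/train.py --task <Your-Task-Id> --algorithm PPO --num_envs 64 --headless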

"""Launch Isaac Sim Simulator first."""

import argparse
import sys

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with skrl.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument(
    "--agent",
    type=str,
    default=None,
    help=(
        "Name of the RL agent configuration entry point. Defaults to None, in which case the argument "
        "--algorithm is used to determine the default agent configuration entry point."
    ),
)
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
parser.add_argument(
    "--distributed", action="store_true", default=False, help="Run training with multiple GPUs or nodes."
)
parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint to resume training.")
parser.add_argument("--max_iterations", type=int, default=None, help="RL Policy training iterations.")
parser.add_argument("--export_io_descriptors", action="store_true", default=False, help="Export IO descriptors.")
parser.add_argument(
    "--ml_framework",
    type=str,
    default="torch",
    choices=["torch", "jax", "jax-numpy"],
    help="The ML framework used for training the skrl agent.",
)
parser.add_argument(
    "--algorithm",
    type=str,
    default="PPO",
    choices=["AMP", "PPO", "IPPO", "MAPPO"],
    help="The RL algorithm used for training the skrl agent.",
)
parser.add_argument(
    "--ray-proc-id", "-rid", type=int, default=None, help="Automatically configured by Ray integration, otherwise None."
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli, hydra_args = parser.parse_known_args()
# always enable cameras to record video
if args_cli.video:
    args_cli.enable_cameras = True

# clear out sys.argv for Hydra
sys.argv = [sys.argv[0]] + hydra_args

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""
|
||||
|
||||
import gymnasium as gym
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
from datetime import datetime
|
||||
from packaging import version
|
||||
|
||||
import skrl
|
||||
|
||||
# check for minimum supported skrl version
|
||||
SKRL_VERSION = "1.4.3"
|
||||
if version.parse(skrl.__version__) < version.parse(SKRL_VERSION):
|
||||
skrl.logger.error(
|
||||
f"Unsupported skrl version: {skrl.__version__}. "
|
||||
f"Install supported version using 'pip install skrl>={SKRL_VERSION}'"
|
||||
)
|
||||
exit()
|
||||
|
||||
if args_cli.ml_framework.startswith("torch"):
|
||||
from skrl.utils.runner.torch import Runner
|
||||
elif args_cli.ml_framework.startswith("jax"):
|
||||
from skrl.utils.runner.jax import Runner
|
||||
|
||||
from isaaclab.envs import (
|
||||
DirectMARLEnv,
|
||||
DirectMARLEnvCfg,
|
||||
DirectRLEnvCfg,
|
||||
ManagerBasedRLEnvCfg,
|
||||
multi_agent_to_single_agent,
|
||||
)
|
||||
from isaaclab.utils.assets import retrieve_file_path
|
||||
from isaaclab.utils.dict import print_dict
|
||||
from isaaclab.utils.io import dump_yaml
|
||||
|
||||
from isaaclab_rl.skrl import SkrlVecEnvWrapper
|
||||
|
||||
import isaaclab_tasks # noqa: F401
|
||||
from isaaclab_tasks.utils.hydra import hydra_task_config
|
||||
|
||||
# import logger
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
import mindbot.tasks # noqa: F401
|
||||
|
||||
# config shortcuts
|
||||
if args_cli.agent is None:
|
||||
algorithm = args_cli.algorithm.lower()
|
||||
agent_cfg_entry_point = "skrl_cfg_entry_point" if algorithm in ["ppo"] else f"skrl_{algorithm}_cfg_entry_point"
|
||||
else:
|
||||
agent_cfg_entry_point = args_cli.agent
|
||||
algorithm = agent_cfg_entry_point.split("_cfg")[0].split("skrl_")[-1].lower()
|
||||
|
||||
|
||||
@hydra_task_config(args_cli.task, agent_cfg_entry_point)
def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: dict):
    """Train with skrl agent."""
    # override configurations with non-hydra CLI arguments
    env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
    env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device

    # check for invalid combination of CPU device with distributed training
    if args_cli.distributed and args_cli.device is not None and "cpu" in args_cli.device:
        raise ValueError(
            "Distributed training is not supported when using CPU device. "
            "Please use GPU device (e.g., --device cuda) for distributed training."
        )

    # multi-gpu training config
    if args_cli.distributed:
        env_cfg.sim.device = f"cuda:{app_launcher.local_rank}"
    # max iterations for training
    if args_cli.max_iterations:
        agent_cfg["trainer"]["timesteps"] = args_cli.max_iterations * agent_cfg["agent"]["rollouts"]
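        # e.g. (illustrative numbers): 1000 iterations x 24 rollouts per iteration = 24,000 trainer timesteps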
agent_cfg["trainer"]["close_environment_at_exit"] = False
|
||||
# configure the ML framework into the global skrl variable
|
||||
if args_cli.ml_framework.startswith("jax"):
|
||||
skrl.config.jax.backend = "jax" if args_cli.ml_framework == "jax" else "numpy"
|
||||
|
||||
# randomly sample a seed if seed = -1
|
||||
if args_cli.seed == -1:
|
||||
args_cli.seed = random.randint(0, 10000)
|
||||
|
||||
# set the agent and environment seed from command line
|
||||
# note: certain randomization occur in the environment initialization so we set the seed here
|
||||
agent_cfg["seed"] = args_cli.seed if args_cli.seed is not None else agent_cfg["seed"]
|
||||
env_cfg.seed = agent_cfg["seed"]
|
||||
|
||||
# specify directory for logging experiments
|
||||
log_root_path = os.path.join("logs", "skrl", agent_cfg["agent"]["experiment"]["directory"])
|
||||
log_root_path = os.path.abspath(log_root_path)
|
||||
print(f"[INFO] Logging experiment in directory: {log_root_path}")
|
||||
# specify directory for logging runs: {time-stamp}_{run_name}
|
||||
log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + f"_{algorithm}_{args_cli.ml_framework}"
|
||||
# The Ray Tune workflow extracts experiment name using the logging line below, hence, do not change it (see PR #2346, comment-2819298849)
|
||||
print(f"Exact experiment name requested from command line: {log_dir}")
|
||||
if agent_cfg["agent"]["experiment"]["experiment_name"]:
|
||||
log_dir += f'_{agent_cfg["agent"]["experiment"]["experiment_name"]}'
|
||||
# set directory into agent config
|
||||
agent_cfg["agent"]["experiment"]["directory"] = log_root_path
|
||||
agent_cfg["agent"]["experiment"]["experiment_name"] = log_dir
|
||||
# update log_dir
|
||||
log_dir = os.path.join(log_root_path, log_dir)
|
||||
|
||||
# dump the configuration into log-directory
|
||||
dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
|
||||
dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg)
|
||||
|
||||
# get checkpoint path (to resume training)
|
||||
resume_path = retrieve_file_path(args_cli.checkpoint) if args_cli.checkpoint else None
|
||||
|
||||
# set the IO descriptors export flag if requested
|
||||
if isinstance(env_cfg, ManagerBasedRLEnvCfg):
|
||||
env_cfg.export_io_descriptors = args_cli.export_io_descriptors
|
||||
else:
|
||||
logger.warning(
|
||||
"IO descriptors are only supported for manager based RL environments. No IO descriptors will be exported."
|
||||
)
|
||||
|
||||
# set the log directory for the environment (works for all environment types)
|
||||
env_cfg.log_dir = log_dir
|
||||
|
||||
# create isaac environment
|
||||
env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
|
||||
|
||||
# convert to single-agent instance if required by the RL algorithm
|
||||
if isinstance(env.unwrapped, DirectMARLEnv) and algorithm in ["ppo"]:
|
||||
env = multi_agent_to_single_agent(env)
|
||||
|
||||
# wrap for video recording
|
||||
if args_cli.video:
|
||||
video_kwargs = {
|
||||
"video_folder": os.path.join(log_dir, "videos", "train"),
|
||||
"step_trigger": lambda step: step % args_cli.video_interval == 0,
|
||||
"video_length": args_cli.video_length,
|
||||
"disable_logger": True,
|
||||
}
|
||||
print("[INFO] Recording videos during training.")
|
||||
print_dict(video_kwargs, nesting=4)
|
||||
env = gym.wrappers.RecordVideo(env, **video_kwargs)
|
||||
|
||||
start_time = time.time()
|
||||
|
||||
# wrap around environment for skrl
|
||||
env = SkrlVecEnvWrapper(env, ml_framework=args_cli.ml_framework) # same as: `wrap_env(env, wrapper="auto")`
|
||||
|
||||
# configure and instantiate the skrl runner
|
||||
# https://skrl.readthedocs.io/en/latest/api/utils/runner.html
|
||||
runner = Runner(env, agent_cfg)
|
||||
|
||||
# load checkpoint (if specified)
|
||||
if resume_path:
|
||||
print(f"[INFO] Loading model checkpoint from: {resume_path}")
|
||||
runner.agent.load(resume_path)
|
||||
|
||||
# run training
|
||||
runner.run()
|
||||
|
||||
print(f"Training time: {round(time.time() - start_time, 2)} seconds")
|
||||
|
||||
# close the simulator
|
||||
env.close()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# run the main function
|
||||
main()
|
||||
# close sim app
|
||||
simulation_app.close()
72
scripts/zero_agent.py
Normal file
@@ -0,0 +1,72 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to run an environment with zero action agent."""
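
# Example usage (illustrative only; the task id is a placeholder for an environment registered by the
# `mindbot` extension):
#   python scripts/zero_agent.py --task <Your-Task-Id> --num_envs 16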

"""Launch Isaac Sim Simulator first."""

import argparse

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Zero agent for Isaac Lab environments.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import torch

import isaaclab_tasks  # noqa: F401
from isaaclab_tasks.utils import parse_env_cfg

import mindbot.tasks  # noqa: F401


def main():
    """Zero actions agent with Isaac Lab environment."""
    # parse configuration
    env_cfg = parse_env_cfg(
        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    # create environment
    env = gym.make(args_cli.task, cfg=env_cfg)

    # print info (this is vectorized environment)
    print(f"[INFO]: Gym observation space: {env.observation_space}")
    print(f"[INFO]: Gym action space: {env.action_space}")
    # reset environment
    env.reset()
    # simulate environment
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # compute zero actions
            actions = torch.zeros(env.action_space.shape, device=env.unwrapped.device)
            # apply actions
            env.step(actions)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()