离心机加试管初步配置

This commit is contained in:
2026-02-02 16:39:33 +08:00
parent 196378f2d3
commit 9325184f4d
6 changed files with 431 additions and 271 deletions

View File

@@ -89,59 +89,6 @@ import numpy as np
"""use gpu compute to record video"""
class MultiCameraRecorder:
    """Record per-environment RGB camera frames on the GPU and dump them to MP4 files.

    Frames are kept as GPU tensors until ``save_videos`` is called, trading
    GPU memory for low per-step overhead (no device->host copy during the
    simulation loop).
    """

    def __init__(self, env, camera_names: list[str], env_indices: list[int], output_dir: str, fps: int = 30):
        """Link the named cameras from ``env.unwrapped.scene`` and prepare frame buffers.

        Args:
            env: The (wrapped) simulation environment; cameras are looked up on
                ``env.unwrapped.scene``.
            camera_names: Scene keys of the cameras to record.
            env_indices: Environment indices (rows of the camera batch) to record.
            output_dir: Directory for the output ``.mp4`` files (created if missing).
            fps: Frame rate written into the video files.
        """
        self.env = env
        self.camera_names = camera_names
        self.env_indices = env_indices
        self.output_dir = output_dir
        self.fps = fps
        # frames[camera_name][env_index] -> list of per-step (H, W, C) GPU tensors
        self.frames = {cam_name: {env_idx: [] for env_idx in env_indices} for cam_name in camera_names}
        os.makedirs(self.output_dir, exist_ok=True)
        self.cameras = {}
        for name in camera_names:
            # Only link cameras that actually exist in the scene; unknown names
            # are skipped silently (original behavior).
            if name in self.env.unwrapped.scene.keys():
                self.cameras[name] = self.env.unwrapped.scene[name]
                print(f"[INFO] Camera {name} linked.")

    def record_step(self):
        """Snapshot the current RGB output of every linked camera (GPU-resident clone)."""
        for cam_name, camera_obj in self.cameras.items():
            rgb_data = camera_obj.data.output["rgb"]
            for env_idx in self.env_indices:
                # Fix: guard against env indices beyond the batch size; the
                # original indexed unconditionally and could raise IndexError
                # (the CPU recorder variant already had this guard).
                if env_idx >= rgb_data.shape[0]:
                    continue
                # .clone() keeps the frame on the GPU — watch VRAM usage for
                # long recordings.
                self.frames[cam_name][env_idx].append(rgb_data[env_idx].clone())

    def save_videos(self, filename_suffix: str = ""):
        """Encode all buffered frames to ``<output_dir>/<cam>_env<idx>_<suffix>.mp4``.

        Buffers are cleared after writing so GPU memory is released and a
        subsequent call does not re-encode the same frames.
        """
        print(f"[INFO] Saving videos from GPU to Disk...")
        for cam_name, env_dict in self.frames.items():
            for env_idx, frame_list in env_dict.items():
                if not frame_list:
                    continue
                # Stack into a (T, H, W, C) video tensor.
                video_tensor = torch.stack(frame_list)
                if video_tensor.shape[-1] == 4:  # RGBA -> RGB
                    video_tensor = video_tensor[..., :3]
                # Move to CPU for encoding.
                video_cpu = video_tensor.cpu()
                # Fix: torchvision.io.write_video requires uint8 (T, H, W, C)
                # frames; camera output may be float in [0, 1]. Mirror the
                # conversion the CPU recorder variant performs.
                if video_cpu.dtype != torch.uint8:
                    if video_cpu.max() <= 1.01:
                        video_cpu = (video_cpu * 255).to(torch.uint8)
                    else:
                        video_cpu = video_cpu.to(torch.uint8)
                output_path = os.path.join(self.output_dir, f"{cam_name}_env{env_idx}_{filename_suffix}.mp4")
                torchvision.io.write_video(output_path, video_cpu, fps=self.fps)
                # Release GPU/host memory for this stream immediately.
                del video_tensor
                del video_cpu
                frame_list.clear()
        # Fix: only touch the CUDA allocator when CUDA exists (the original
        # called empty_cache unconditionally, per stream, which crashes on
        # CPU-only builds); once after the loop is sufficient.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
"""use cpu compute to record video"""
# # 2. 修改 MultiCameraRecorder 类
# class MultiCameraRecorder:
# def __init__(self, env, camera_names: list[str], env_indices: list[int], output_dir: str, fps: int = 30):
# self.env = env
@@ -151,57 +98,110 @@ class MultiCameraRecorder:
# self.fps = fps
# self.frames = {cam_name: {env_idx: [] for env_idx in env_indices} for cam_name in camera_names}
# os.makedirs(self.output_dir, exist_ok=True)
# self.cameras = {}
# for name in camera_names:
# try:
# if name in self.env.unwrapped.scene.keys():
# self.cameras[name] = self.env.unwrapped.scene[name]
# print(f"[INFO][MultiCameraRecorder] Found camera: {name}")
# except KeyError:
# print(f"[WARN][MultiCameraRecorder] Camera '{name}' not found!")
# print(f"[INFO] Camera {name} linked.")
# def record_step(self):
# """在每个仿真步调用"""
# """保持在 GPU 上克隆数据"""
# for cam_name, camera_obj in self.cameras.items():
# # [关键修改] 获取数据前先确保数据已同步
# # 这可以防止读取到正在渲染中的内存导致 access violation
# rgb_data = camera_obj.data.output["rgb"]
# # 获取数据前强制同步一次(防止后端丢失)
# rgb_data = camera_obj.data.output["rgb"]
# for env_idx in self.env_indices:
# if env_idx >= rgb_data.shape[0]: continue
# # 转换为 CPU 上的 numpy这种方式通常比 torchvision 的 tensor 堆叠更稳
# frame = rgb_data[env_idx].clone().detach().cpu().numpy()
# self.frames[cam_name][env_idx].append(frame)
# # 使用 .clone() 保持在 GPU但要注意显存占用
# self.frames[cam_name][env_idx].append(rgb_data[env_idx].clone())
# def save_videos(self, filename_suffix=""):
# """循环结束后调用"""
# print(f"[INFO][MultiCameraRecorder] Saving videos...")
# print(f"[INFO] Saving videos from GPU to Disk...")
# for cam_name, env_dict in self.frames.items():
# for env_idx, frame_list in env_dict.items():
# if not frame_list: continue
# print(f" -> Saving {cam_name} (Env {env_idx})...")
# # 转换为 torchvision 格式 (T, C, H, W)
# video_tensor = torch.stack(frame_list)
# if video_tensor.shape[-1] == 4: # RGBA -> RGB
# video_tensor = video_tensor[..., :3]
# # 处理格式并使用 imageio 保存
# processed_frames = []
# for img in frame_list:
# # [0, 1] -> [0, 255]
# if img.dtype != np.uint8:
# if img.max() <= 1.01: img = (img * 255).astype(np.uint8)
# else: img = img.astype(np.uint8)
# # 去掉 Alpha 通道
# if img.shape[-1] == 4: img = img[:, :, :3]
# processed_frames.append(img)
# # 移动到 CPU 并保存
# video_cpu = video_tensor.cpu()
# output_path = os.path.join(self.output_dir, f"{cam_name}_env{env_idx}_{filename_suffix}.mp4")
# # 使用 torchvision 写入 (T, H, W, C)
# torchvision.io.write_video(output_path, video_cpu, fps=self.fps)
# # 【关键】保存后立即释放显存
# del video_tensor
# del video_cpu
# frame_list.clear()
# torch.cuda.empty_cache()
# fname = f"{cam_name}_env{env_idx}_{filename_suffix}.mp4"
# output_path = os.path.join(self.output_dir, fname)
"""use cpu compute to record video"""
# # 2. 修改 MultiCameraRecorder 类
class MultiCameraRecorder:
    """Record per-environment RGB camera frames as CPU numpy arrays and save MP4s via imageio.

    Every step copies frames to host memory — slower per step than a
    GPU-resident buffer, but keeps VRAM usage flat for long recordings.
    """

    def __init__(self, env, camera_names: list[str], env_indices: list[int], output_dir: str, fps: int = 30):
        """Link the named cameras from ``env.unwrapped.scene`` and prepare frame buffers.

        Args:
            env: The (wrapped) simulation environment; cameras are looked up on
                ``env.unwrapped.scene``.
            camera_names: Scene keys of the cameras to record. Missing cameras
                are reported with a warning and skipped.
            env_indices: Environment indices (rows of the camera batch) to record.
            output_dir: Directory for the output ``.mp4`` files (created if missing).
            fps: Frame rate written into the video files.
        """
        self.env = env
        self.camera_names = camera_names
        self.env_indices = env_indices
        self.output_dir = output_dir
        self.fps = fps
        # frames[camera_name][env_index] -> list of per-step (H, W, C) numpy arrays
        self.frames = {cam_name: {env_idx: [] for env_idx in env_indices} for cam_name in camera_names}
        os.makedirs(self.output_dir, exist_ok=True)
        self.cameras = {}
        for name in camera_names:
            try:
                self.cameras[name] = self.env.unwrapped.scene[name]
                print(f"[INFO][MultiCameraRecorder] Found camera: {name}")
            except KeyError:
                print(f"[WARN][MultiCameraRecorder] Camera '{name}' not found!")

    def record_step(self):
        """Call once per simulation step: snapshot every linked camera to host memory.

        The ``.cpu()`` copy forces a device sync, which avoids reading
        render-in-flight memory (the access-violation issue noted in the
        original comments).
        """
        for cam_name, camera_obj in self.cameras.items():
            rgb_data = camera_obj.data.output["rgb"]
            for env_idx in self.env_indices:
                # Skip env indices beyond the batch size.
                if env_idx >= rgb_data.shape[0]:
                    continue
                # Detach/copy to a numpy array; this is typically more robust
                # than stacking GPU tensors for later torchvision encoding.
                frame = rgb_data[env_idx].clone().detach().cpu().numpy()
                self.frames[cam_name][env_idx].append(frame)

    def save_videos(self, filename_suffix: str = ""):
        """Encode all buffered frames to ``<output_dir>/<cam>_env<idx>_<suffix>.mp4``.

        A stream's buffer is cleared only after its file is written
        successfully, so frames are not lost when encoding fails.
        """
        print(f"[INFO][MultiCameraRecorder] Saving videos...")
        for cam_name, env_dict in self.frames.items():
            for env_idx, frame_list in env_dict.items():
                if not frame_list:
                    continue
                print(f" -> Saving {cam_name} (Env {env_idx})...")
                # Normalize frames for the encoder.
                processed_frames = []
                for img in frame_list:
                    # Scale floats in [0, 1] up to [0, 255] and force uint8.
                    if img.dtype != np.uint8:
                        if img.max() <= 1.01:
                            img = (img * 255).astype(np.uint8)
                        else:
                            img = img.astype(np.uint8)
                    # Drop the alpha channel (RGBA -> RGB).
                    if img.shape[-1] == 4:
                        img = img[:, :, :3]
                    processed_frames.append(img)
                fname = f"{cam_name}_env{env_idx}_{filename_suffix}.mp4"
                output_path = os.path.join(self.output_dir, fname)
                try:
                    # Write the video with imageio.
                    imageio.mimsave(output_path, processed_frames, fps=self.fps)
                    print(f" Saved: {output_path}")
                    # Fix: release host memory after a successful save (mirrors
                    # the GPU recorder) so a second save_videos call does not
                    # re-encode identical videos.
                    frame_list.clear()
                except Exception as e:
                    print(f" [ERROR] Failed to save {fname}: {e}")

View File

@@ -52,6 +52,11 @@ def main():
print(f"[INFO]: Gym action space: {env.action_space}")
# reset environment
env.reset()
# 计数器:每 120 步打印一次
step_count = 0
print_interval = 120
# simulate environment
while simulation_app.is_running():
# run everything in inference mode
@@ -60,6 +65,84 @@ def main():
actions = torch.zeros(env.action_space.shape, device=env.unwrapped.device)
# apply actions
env.step(actions)
# ... (前面的代码保持不变)
# 每 120 步打印一次坐标
step_count += 1
if step_count % print_interval == 0:
scene = env.unwrapped.scene
# 1. 获取 centrifuge 的世界坐标
try:
centrifuge = scene["centrifuge"]
centrifuge_pos = centrifuge.data.root_pos_w[0].cpu().numpy()
centrifuge_quat = centrifuge.data.root_quat_w[0].cpu().numpy()
print(f"[Step {step_count}] CENTRIFUGE - Pos: {centrifuge_pos}, Quat: {centrifuge_quat}")
except KeyError:
print(f"[Step {step_count}] CENTRIFUGE - Not found")
# 2. 获取 Reservoir_A 的世界坐标
# 注意:这里的 key ("reservoir_a") 必须与你在 SceneCfg 中定义的名称一致
try:
# 如果你在配置里命名为 "reservoir_a"
reservoir = scene["reservoir_a"]
res_pos = reservoir.data.root_pos_w[0].cpu().numpy()
res_quat = reservoir.data.root_quat_w[0].cpu().numpy()
print(f"[Step {step_count}] RESERVOIR_A - Pos: {res_pos}, Quat: {res_quat}")
except KeyError:
# 如果 Reservoir_A 是 centrifuge 机器人(Articulation)的一个 Link身体部件
# 我们可以从 centrifuge 的 body 数据中获取
try:
centrifuge = scene["centrifuge"]
# 找到名为 'Reservoir_A' 的 link 索引
body_names = centrifuge.body_names
if "Reservoir_A" in body_names:
idx = body_names.index("Reservoir_A")
res_pos = centrifuge.data.body_pos_w[0, idx].cpu().numpy()
res_quat = centrifuge.data.body_quat_w[0, idx].cpu().numpy()
print(f"[Step {step_count}] RESERVOIR_A (Link) - Pos: {res_pos}, Quat: {res_quat}")
else:
print(f"[Step {step_count}] RESERVOIR_A - Not found in scene keys or links")
except Exception:
print(f"[Step {step_count}] RESERVOIR_A - Not found")
# 3. 获取 lid 的世界坐标
try:
lid = scene["lid"]
lid_pos = lid.data.root_pos_w[0].cpu().numpy()
lid_quat = lid.data.root_quat_w[0].cpu().numpy()
print(f"[Step {step_count}] LID - Pos: {lid_pos}, Quat: {lid_quat}")
except KeyError:
print(f"[Step {step_count}] LID - Not found")
print("-" * 80)
# ... (后面的代码保持不变)
# 每 120 步打印一次坐标
# step_count += 1
# if step_count % print_interval == 0:
# scene = env.unwrapped.scene
# # 获取 centrifuge 的世界坐标root position
# try:
# centrifuge = scene["centrifuge"]
# centrifuge_pos = centrifuge.data.root_pos_w[0].cpu().numpy() # 取第一个环境
# centrifuge_quat = centrifuge.data.root_quat_w[0].cpu().numpy()
# print(f"[Step {step_count}] CENTRIFUGE_CFG - World Position: {centrifuge_pos}, Quaternion: {centrifuge_quat}")
# except KeyError:
# print(f"[Step {step_count}] CENTRIFUGE_CFG - Not found in scene")
# # 获取 lid 的世界坐标
# try:
# lid = scene["lid"]
# lid_pos = lid.data.root_pos_w[0].cpu().numpy() # 取第一个环境
# lid_quat = lid.data.root_quat_w[0].cpu().numpy()
# print(f"[Step {step_count}] LID_CFG - World Position: {lid_pos}, Quaternion: {lid_quat}")
# except KeyError:
# print(f"[Step {step_count}] LID_CFG - Not found in scene")
# print("-" * 80)
# close the simulator
env.close()
@@ -70,3 +153,80 @@ if __name__ == "__main__":
main()
# close sim app
simulation_app.close()
# # Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# # All rights reserved.
# #
# # SPDX-License-Identifier: BSD-3-Clause
# """Script to run an environment with zero action agent."""
# """Launch Isaac Sim Simulator first."""
# import argparse
# from isaaclab.app import AppLauncher
# # add argparse arguments
# parser = argparse.ArgumentParser(description="Zero agent for Isaac Lab environments.")
# parser.add_argument(
# "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
# )
# parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
# parser.add_argument("--task", type=str, default=None, help="Name of the task.")
# # append AppLauncher cli args
# AppLauncher.add_app_launcher_args(parser)
# # parse the arguments
# args_cli = parser.parse_args()
# # launch omniverse app
# app_launcher = AppLauncher(args_cli)
# simulation_app = app_launcher.app
# """Rest everything follows."""
# import gymnasium as gym
# import torch
# import isaaclab_tasks # noqa: F401
# from isaaclab_tasks.utils import parse_env_cfg
# import mindbot.tasks # noqa: F401
# def main():
# """Zero actions agent with Isaac Lab environment."""
# # parse configuration
# env_cfg = parse_env_cfg(
# args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
# )
# # create environment
# env = gym.make(args_cli.task, cfg=env_cfg)
# # print info (this is vectorized environment)
# print(f"[INFO]: Gym observation space: {env.observation_space}")
# print(f"[INFO]: Gym action space: {env.action_space}")
# # reset environment
# env.reset()
# # simulate environment
# step_count = 0
# print_interval = 120
# while simulation_app.is_running():
# # run everything in inference mode
# with torch.inference_mode():
# # compute zero actions
# actions = torch.zeros(env.action_space.shape, device=env.unwrapped.device)
# # apply actions
# env.step(actions)
# # close the simulator
# env.close()
# if __name__ == "__main__":
# # run the main function
# main()
# # close sim app
# simulation_app.close()