import torch

from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.datasets.utils import hw_to_dataset_features
from lerobot.policies.factory import make_pre_post_processors
from lerobot.policies.pi0.modeling_pi0 import PI0Policy
from lerobot.policies.utils import build_inference_frame, make_robot_action
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
from lerobot.robots.so100_follower.so100_follower import SO100Follower

MAX_EPISODES = 5
MAX_STEPS_PER_EPISODE = 20

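# These constants only bound this demo loop; the control loop below otherwise runs as fast
# as the hardware allows. In a real deployment you would typically pace it to the camera
# fps (not shown here).
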
device = torch.device("mps")  # or "cuda" or "cpu"
model_id = "lerobot/pi0_base"

model = PI0Policy.from_pretrained(model_id)

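# Note: depending on the pretrained config, the policy weights may not be on `device` yet;
# since PI0Policy is a torch.nn.Module they can be moved explicitly if needed, e.g.
# model = model.to(device)
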
preprocess, postprocess = make_pre_post_processors(
    model.config,
    model_id,
    # This override allows running on MPS; otherwise the device defaults to CUDA (if available)
    preprocessor_overrides={"device_processor": {"device": str(device)}},
)

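# The returned processors wrap the policy I/O: `preprocess` turns an inference frame into
# model-ready inputs (typically normalization, batching and device placement), while
# `postprocess` maps the raw policy output back towards a robot-space action. The exact
# steps depend on the processor pipeline shipped with the checkpoint.
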
# Find the port using lerobot-find-port
follower_port = ...  # something like "/dev/tty.usbmodem58760431631"

# The robot id is used to load the right calibration file
follower_id = ...  # something like "follower_so100"

# Robot and environment configuration
# Camera keys must match the names and resolutions of the cameras used for training!
# You can check the camera keys expected by a model in the info.json file of its model card on the Hub
camera_config = {
    "base_0_rgb": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30),
    "left_wrist_0_rgb": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30),
    "right_wrist_0_rgb": OpenCVCameraConfig(index_or_path=2, width=640, height=480, fps=30),
}

robot_cfg = SO100FollowerConfig(port=follower_port, id=follower_id, cameras=camera_config)
robot = SO100Follower(robot_cfg)
robot.connect()

task = ""  # something like "pick the red block"
robot_type = ""  # something like "so100_follower" for multi-embodiment datasets

# These features are used to match the raw robot keys to the keys expected by the policy
action_features = hw_to_dataset_features(robot.action_features, "action")
obs_features = hw_to_dataset_features(robot.observation_features, "observation")
dataset_features = {**action_features, **obs_features}

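# `dataset_features` now maps dataset-style keys (e.g. "action", "observation.state",
# "observation.images.<camera>") to their feature specs; build_inference_frame and
# make_robot_action use it to rename and pack data between the robot and the policy.
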
for _ in range(MAX_EPISODES):
    for _ in range(MAX_STEPS_PER_EPISODE):
        obs = robot.get_observation()
        obs_frame = build_inference_frame(
            observation=obs, ds_features=dataset_features, device=device, task=task, robot_type=robot_type
        )

        obs = preprocess(obs_frame)

        action = model.select_action(obs)
        action = postprocess(action)

        action = make_robot_action(action, dataset_features)
        robot.send_action(action)

    print("Episode finished! Starting new episode...")
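
# Optional cleanup: release the robot connection when done (mirroring robot.connect()
# above; this assumes the Robot API exposes a matching disconnect() method).
robot.disconnect()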