Remove deprecated scripts & tests

Simon Alibert
2025-05-08 18:08:38 +02:00
parent cb9cac6a1b
commit dd3e305164
7 changed files with 2 additions and 1694 deletions

View File

@@ -19,9 +19,8 @@ import traceback
import pytest
from serial import SerialException
from lerobot import available_cameras, available_motors, available_robots
from lerobot.common.robots.utils import make_robot
from tests.utils import DEVICE, make_camera, make_motors_bus
from lerobot import available_cameras
from tests.utils import DEVICE, make_camera
# Import fixture modules as plugins
pytest_plugins = [
@@ -64,21 +63,11 @@ def _check_component_availability(component_type, available_components, make_com
return False
@pytest.fixture
def is_robot_available(robot_type):
return _check_component_availability(robot_type, available_robots, make_robot)
@pytest.fixture
def is_camera_available(camera_type):
return _check_component_availability(camera_type, available_cameras, make_camera)
@pytest.fixture
def is_motor_available(motor_type):
return _check_component_availability(motor_type, available_motors, make_motors_bus)
@pytest.fixture
def patch_builtins_input(monkeypatch):
def print_text(text=None):

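For context, a minimal sketch of the availability-check pattern this hunk keeps: a helper that tries to build and connect a component, exposed through a fixture. Only the helper's signature and its `return False` line appear above, so the body below is an assumption; the `is_camera_available` fixture matches the definition kept in the file.

```python
# Hypothetical sketch of the availability-check helper, assuming the
# signature shown in the hunk above; the real body lives in tests/conftest.py.
import traceback

import pytest
from serial import SerialException

from lerobot import available_cameras
from tests.utils import make_camera


def _check_component_availability(component_type, available_components, make_component):
    """Return True if the component can be built and connected, else False."""
    if component_type not in available_components:
        raise ValueError(f"Unknown component type: {component_type}")
    try:
        component = make_component(component_type)
        component.connect()
        component.disconnect()
        return True
    except SerialException:
        traceback.print_exc()
        return False


@pytest.fixture
def is_camera_available(camera_type):
    return _check_component_availability(camera_type, available_cameras, make_camera)
```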
View File

@@ -1,443 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for physical robots and their mocked versions.
If the physical robots are not connected to the computer, or are not working,
the tests will be skipped.
Example of running a specific test:
```bash
pytest -sx tests/test_control_robot.py::test_teleoperate
```
Example of running the tests on real robots connected to the computer:
```bash
pytest -sx 'tests/test_control_robot.py::test_teleoperate[koch-False]'
pytest -sx 'tests/test_control_robot.py::test_teleoperate[koch_bimanual-False]'
pytest -sx 'tests/test_control_robot.py::test_teleoperate[aloha-False]'
```
Example of running the tests on a mocked version of the robots:
```bash
pytest -sx 'tests/test_control_robot.py::test_teleoperate[koch-True]'
pytest -sx 'tests/test_control_robot.py::test_teleoperate[koch_bimanual-True]'
pytest -sx 'tests/test_control_robot.py::test_teleoperate[aloha-True]'
```
"""
import multiprocessing
from unittest.mock import patch
import pytest
from lerobot.common.policies.act.configuration_act import ACTConfig
from lerobot.common.policies.factory import make_policy
from lerobot.configs.control import (
CalibrateControlConfig,
RecordControlConfig,
ReplayControlConfig,
TeleoperateControlConfig,
)
from lerobot.configs.policies import PreTrainedConfig
from lerobot.scripts.control_robot import calibrate, record, replay, teleoperate
from tests.robots.test_robots import make_robot
from tests.utils import TEST_ROBOT_TYPES, mock_calibration_dir, require_robot
@pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES)
@require_robot
def test_teleoperate(tmp_path, request, robot_type, mock):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock and robot_type != "aloha":
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
calibration_dir = tmp_path / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
robot = make_robot(**robot_kwargs)
teleoperate(robot, TeleoperateControlConfig(teleop_time_s=1))
teleoperate(robot, TeleoperateControlConfig(fps=30, teleop_time_s=1))
teleoperate(robot, TeleoperateControlConfig(fps=60, teleop_time_s=1))
del robot
@pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES)
@require_robot
def test_calibrate(tmp_path, request, robot_type, mock):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock:
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
calibration_dir = tmp_path / robot_type
robot_kwargs["calibration_dir"] = calibration_dir
robot = make_robot(**robot_kwargs)
calib_cfg = CalibrateControlConfig(arms=robot.available_arms)
calibrate(robot, calib_cfg)
del robot
@pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES)
@require_robot
def test_record_without_cameras(tmp_path, request, robot_type, mock):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
# Avoid using cameras
robot_kwargs["cameras"] = {}
if mock and robot_type != "aloha":
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
calibration_dir = tmp_path / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
repo_id = "lerobot/debug"
root = tmp_path / "data" / repo_id
single_task = "Do something."
robot = make_robot(**robot_kwargs)
rec_cfg = RecordControlConfig(
repo_id=repo_id,
single_task=single_task,
root=root,
fps=30,
warmup_time_s=0.1,
episode_time_s=1,
reset_time_s=0.1,
num_episodes=2,
push_to_hub=False,
video=False,
play_sounds=False,
)
record(robot, rec_cfg)
@pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES)
@require_robot
def test_record_and_replay_and_policy(tmp_path, request, robot_type, mock):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock and robot_type != "aloha":
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
calibration_dir = tmp_path / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
repo_id = "lerobot_test/debug"
root = tmp_path / "data" / repo_id
single_task = "Do something."
robot = make_robot(**robot_kwargs)
rec_cfg = RecordControlConfig(
repo_id=repo_id,
single_task=single_task,
root=root,
fps=1,
warmup_time_s=0.1,
episode_time_s=1,
reset_time_s=0.1,
num_episodes=2,
push_to_hub=False,
# TODO(rcadene, aliberts): test video=True
video=False,
display_data=False,
play_sounds=False,
)
dataset = record(robot, rec_cfg)
assert dataset.meta.total_episodes == 2
assert len(dataset) == 2
replay_cfg = ReplayControlConfig(episode=0, fps=1, root=root, repo_id=repo_id, play_sounds=False)
replay(robot, replay_cfg)
policy_cfg = ACTConfig()
policy = make_policy(policy_cfg, ds_meta=dataset.meta)
out_dir = tmp_path / "logger"
pretrained_policy_path = out_dir / "checkpoints/last/pretrained_model"
policy.save_pretrained(pretrained_policy_path)
# In `examples/9_use_aloha.md`, we advise using `num_image_writer_processes=1`
# during inference to reach a constant fps, so we test this here.
if robot_type == "aloha":
num_image_writer_processes = 1
# `multiprocessing.set_start_method("spawn", force=True)` avoids a hanging issue
# before exiting pytest. However, it outputs the following error in the log:
# Traceback (most recent call last):
# File "<string>", line 1, in <module>
# File "/Users/rcadene/miniconda3/envs/lerobot/lib/python3.10/multiprocessing/spawn.py", line 116, in spawn_main
# exitcode = _main(fd, parent_sentinel)
# File "/Users/rcadene/miniconda3/envs/lerobot/lib/python3.10/multiprocessing/spawn.py", line 126, in _main
# self = reduction.pickle.load(from_parent)
# File "/Users/rcadene/miniconda3/envs/lerobot/lib/python3.10/multiprocessing/synchronize.py", line 110, in __setstate__
# self._semlock = _multiprocessing.SemLock._rebuild(*state)
# FileNotFoundError: [Errno 2] No such file or directory
# TODO(rcadene, aliberts): fix FileNotFoundError in multiprocessing
multiprocessing.set_start_method("spawn", force=True)
else:
num_image_writer_processes = 0
eval_repo_id = "lerobot/eval_debug"
eval_root = tmp_path / "data" / eval_repo_id
rec_eval_cfg = RecordControlConfig(
repo_id=eval_repo_id,
root=eval_root,
single_task=single_task,
fps=1,
warmup_time_s=0.1,
episode_time_s=1,
reset_time_s=0.1,
num_episodes=2,
push_to_hub=False,
video=False,
display_data=False,
play_sounds=False,
num_image_writer_processes=num_image_writer_processes,
)
rec_eval_cfg.policy = PreTrainedConfig.from_pretrained(pretrained_policy_path)
rec_eval_cfg.policy.pretrained_path = pretrained_policy_path
dataset = record(robot, rec_eval_cfg)
assert dataset.num_episodes == 2
assert len(dataset) == 2
del robot
@pytest.mark.parametrize("robot_type, mock", [("koch", True)])
@require_robot
def test_resume_record(tmp_path, request, robot_type, mock):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock and robot_type != "aloha":
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
calibration_dir = tmp_path / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
robot = make_robot(**robot_kwargs)
repo_id = "lerobot/debug"
root = tmp_path / "data" / repo_id
single_task = "Do something."
rec_cfg = RecordControlConfig(
repo_id=repo_id,
root=root,
single_task=single_task,
fps=1,
warmup_time_s=0,
episode_time_s=1,
push_to_hub=False,
video=False,
display_data=False,
play_sounds=False,
num_episodes=1,
)
dataset = record(robot, rec_cfg)
assert len(dataset) == 1, f"`dataset` should contain 1 frame, not {len(dataset)}"
with pytest.raises(FileExistsError):
# Dataset already exists, but resume=False by default
record(robot, rec_cfg)
rec_cfg.resume = True
dataset = record(robot, rec_cfg)
assert len(dataset) == 2, f"`dataset` should contain 2 frames, not {len(dataset)}"
@pytest.mark.parametrize("robot_type, mock", [("koch", True)])
@require_robot
def test_record_with_event_rerecord_episode(tmp_path, request, robot_type, mock):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock and robot_type != "aloha":
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
calibration_dir = tmp_path / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
robot = make_robot(**robot_kwargs)
with patch("lerobot.scripts.control_robot.init_keyboard_listener") as mock_listener:
mock_events = {}
mock_events["exit_early"] = True
mock_events["rerecord_episode"] = True
mock_events["stop_recording"] = False
mock_listener.return_value = (None, mock_events)
repo_id = "lerobot/debug"
root = tmp_path / "data" / repo_id
single_task = "Do something."
rec_cfg = RecordControlConfig(
repo_id=repo_id,
root=root,
single_task=single_task,
fps=1,
warmup_time_s=0,
episode_time_s=1,
num_episodes=1,
push_to_hub=False,
video=False,
display_data=False,
play_sounds=False,
)
dataset = record(robot, rec_cfg)
assert not mock_events["rerecord_episode"], "`rerecord_episode` wasn't properly reset to False"
assert not mock_events["exit_early"], "`exit_early` wasn't properly reset to False"
assert len(dataset) == 1, "`dataset` should contain only 1 frame"
@pytest.mark.parametrize("robot_type, mock", [("koch", True)])
@require_robot
def test_record_with_event_exit_early(tmp_path, request, robot_type, mock):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock:
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
calibration_dir = tmp_path / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
robot = make_robot(**robot_kwargs)
with patch("lerobot.scripts.control_robot.init_keyboard_listener") as mock_listener:
mock_events = {}
mock_events["exit_early"] = True
mock_events["rerecord_episode"] = False
mock_events["stop_recording"] = False
mock_listener.return_value = (None, mock_events)
repo_id = "lerobot/debug"
root = tmp_path / "data" / repo_id
single_task = "Do something."
rec_cfg = RecordControlConfig(
repo_id=repo_id,
root=root,
single_task=single_task,
fps=2,
warmup_time_s=0,
episode_time_s=1,
num_episodes=1,
push_to_hub=False,
video=False,
display_data=False,
play_sounds=False,
)
dataset = record(robot, rec_cfg)
assert not mock_events["exit_early"], "`exit_early` wasn't properly reset to False"
assert len(dataset) == 1, "`dataset` should contain only 1 frame"
@pytest.mark.parametrize(
"robot_type, mock, num_image_writer_processes", [("koch", True, 0), ("koch", True, 1)]
)
@require_robot
def test_record_with_event_stop_recording(tmp_path, request, robot_type, mock, num_image_writer_processes):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock:
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
calibration_dir = tmp_path / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
robot = make_robot(**robot_kwargs)
with patch("lerobot.scripts.control_robot.init_keyboard_listener") as mock_listener:
mock_events = {}
mock_events["exit_early"] = True
mock_events["rerecord_episode"] = False
mock_events["stop_recording"] = True
mock_listener.return_value = (None, mock_events)
repo_id = "lerobot/debug"
root = tmp_path / "data" / repo_id
single_task = "Do something."
rec_cfg = RecordControlConfig(
repo_id=repo_id,
root=root,
single_task=single_task,
fps=1,
warmup_time_s=0,
episode_time_s=1,
reset_time_s=0.1,
num_episodes=2,
push_to_hub=False,
video=False,
display_data=False,
play_sounds=False,
num_image_writer_processes=num_image_writer_processes,
)
dataset = record(robot, rec_cfg)
assert not mock_events["exit_early"], "`exit_early` wasn't properly reset to False"
assert len(dataset) == 1, "`dataset` should contain only 1 frame"

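The three event-driven tests above stub `init_keyboard_listener` and then assert that `record()` resets each flag it consumes. A hypothetical sketch of that consume-and-reset loop (the real logic lives in `lerobot/scripts/control_robot.py`; `record_episode` and its arguments are illustrative only):

```python
# Illustrative only: shows why the tests expect the event flags to be False
# after recording, and why a pre-set "exit_early" yields a single frame.
def record_episode(events: dict, num_frames: int) -> int:
    captured = 0
    for _ in range(num_frames):
        captured += 1  # capture one frame per loop iteration
        if events["exit_early"]:
            events["exit_early"] = False  # consume and reset the flag
            break
    return captured


events = {"exit_early": True, "rerecord_episode": False, "stop_recording": False}
assert record_episode(events, num_frames=10) == 1  # exited after the first frame
assert events["exit_early"] is False  # flag was reset, as the tests assert
```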
View File

@@ -1,144 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for physical robots and their mocked versions.
If the physical robots are not connected to the computer, or are not working,
the tests will be skipped.
Example of running a specific test:
```bash
pytest -sx tests/test_robots.py::test_robot
```
Example of running the tests on real robots connected to the computer:
```bash
pytest -sx 'tests/test_robots.py::test_robot[koch-False]'
pytest -sx 'tests/test_robots.py::test_robot[koch_bimanual-False]'
pytest -sx 'tests/test_robots.py::test_robot[aloha-False]'
```
Example of running the tests on a mocked version of the robots:
```bash
pytest -sx 'tests/test_robots.py::test_robot[koch-True]'
pytest -sx 'tests/test_robots.py::test_robot[koch_bimanual-True]'
pytest -sx 'tests/test_robots.py::test_robot[aloha-True]'
```
"""
import pytest
import torch
from lerobot.common.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
from lerobot.common.robots.utils import make_robot
from tests.utils import TEST_ROBOT_TYPES, mock_calibration_dir, require_robot
@pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES)
@require_robot
def test_robot(tmp_path, request, robot_type, mock):
# TODO(rcadene): measure fps in nightly?
# TODO(rcadene): test logs
# TODO(rcadene): add compatibility with other robots
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if robot_type == "aloha" and mock:
# To simplify the unit test, we do not rerun manual calibration for Aloha when mock=True.
# Instead, we use the files from '.cache/calibration/aloha_default'
pass
else:
if mock:
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
calibration_dir = tmp_path / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
# Test using robot before connecting raises an error
robot = make_robot(**robot_kwargs)
with pytest.raises(DeviceNotConnectedError):
robot.teleop_step()
with pytest.raises(DeviceNotConnectedError):
robot.teleop_step(record_data=True)
with pytest.raises(DeviceNotConnectedError):
robot.capture_observation()
with pytest.raises(DeviceNotConnectedError):
robot.send_action(None)
with pytest.raises(DeviceNotConnectedError):
robot.disconnect()
# Test deleting the object without connecting first
del robot
# Test connecting (triggers manual calibration)
robot = make_robot(**robot_kwargs)
robot.connect()
assert robot.is_connected
# Test connecting twice raises an error
with pytest.raises(DeviceAlreadyConnectedError):
robot.connect()
# TODO(rcadene, aliberts): Test disconnecting with `__del__` instead of `disconnect`
# del robot
robot.disconnect()
# Test teleop can run
robot = make_robot(**robot_kwargs)
robot.connect()
robot.teleop_step()
# Test data recorded during teleop are well formatted
observation, action = robot.teleop_step(record_data=True)
# State
assert "observation.state" in observation
assert isinstance(observation["observation.state"], torch.Tensor)
assert observation["observation.state"].ndim == 1
dim_state = sum(len(robot.follower_arms[name].motors) for name in robot.follower_arms)
assert observation["observation.state"].shape[0] == dim_state
# Cameras
for name in robot.cameras:
assert f"observation.images.{name}" in observation
assert isinstance(observation[f"observation.images.{name}"], torch.Tensor)
assert observation[f"observation.images.{name}"].ndim == 3
# Action
assert "action" in action
assert isinstance(action["action"], torch.Tensor)
assert action["action"].ndim == 1
dim_action = sum(len(robot.follower_arms[name].motors) for name in robot.follower_arms)
assert action["action"].shape[0] == dim_action
# TODO(rcadene): test if observation and action data are returned as expected
# Test that capture_observation can run and that the observations returned are the same (since the arm didn't move)
captured_observation = robot.capture_observation()
assert set(captured_observation.keys()) == set(observation.keys())
for name in captured_observation:
if "image" in name:
# TODO(rcadene): skipping image for now as it's challenging to assess equality between two consecutive frames
continue
torch.testing.assert_close(captured_observation[name], observation[name], rtol=1e-4, atol=1)
assert captured_observation[name].shape == observation[name].shape
# Test send_action can run
robot.send_action(action["action"])
# Test disconnecting
robot.disconnect()
assert not robot.is_connected
for name in robot.follower_arms:
assert not robot.follower_arms[name].is_connected
for name in robot.leader_arms:
assert not robot.leader_arms[name].is_connected
for name in robot.cameras:
assert not robot.cameras[name].is_connected

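The deleted test above encodes a connection contract: every robot method guards on `is_connected`, `connect()` is not idempotent, and `disconnect()` propagates to arms and cameras. A hypothetical minimal sketch of that contract (not the actual robot classes under `lerobot/common/robots/`):

```python
# Hypothetical sketch of the connect/disconnect contract exercised above.
from lerobot.common.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError


class FakeRobot:
    def __init__(self):
        self.is_connected = False

    def connect(self):
        if self.is_connected:
            raise DeviceAlreadyConnectedError("Robot is already connected.")
        self.is_connected = True

    def teleop_step(self, record_data=False):
        if not self.is_connected:
            raise DeviceNotConnectedError("Call `connect()` before `teleop_step()`.")
        return ({}, {}) if record_data else None

    def disconnect(self):
        if not self.is_connected:
            raise DeviceNotConnectedError("Robot is not connected.")
        self.is_connected = False
```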
View File

@@ -13,11 +13,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import platform
from functools import wraps
from pathlib import Path
import pytest
import torch
@@ -189,46 +187,6 @@ def require_package(package_name):
return decorator
def require_robot(func):
"""
Decorator that skips the test if a robot is not available.
The decorated function must have `request`, `robot_type`, and `mock` arguments.
Example of usage:
```python
@pytest.mark.parametrize(
    "robot_type, mock", [("koch", False), ("aloha", False)]
)
@require_robot
def test_require_robot(request, robot_type, mock):
    pass
```
"""
@wraps(func)
def wrapper(*args, **kwargs):
# Access the pytest request context to get the is_robot_available fixture
request = kwargs.get("request")
robot_type = kwargs.get("robot_type")
mock = kwargs.get("mock")
if robot_type is None:
raise ValueError("The 'robot_type' must be an argument of the test function.")
if request is None:
raise ValueError("The 'request' fixture must be an argument of the test function.")
if mock is None:
raise ValueError("The 'mock' variable must be an argument of the test function.")
# Run test with a real robot. Skip test if robot connection fails.
if not mock and not request.getfixturevalue("is_robot_available"):
pytest.skip(f"A {robot_type} robot is not available.")
return func(*args, **kwargs)
return wrapper
def require_camera(func):
@wraps(func)
def wrapper(*args, **kwargs):
@@ -252,55 +210,6 @@ def require_camera(func):
return wrapper
def require_motor(func):
@wraps(func)
def wrapper(*args, **kwargs):
# Access the pytest request context to get the is_motor_available fixture
request = kwargs.get("request")
motor_type = kwargs.get("motor_type")
mock = kwargs.get("mock")
if request is None:
raise ValueError("The 'request' fixture must be an argument of the test function.")
if motor_type is None:
raise ValueError("The 'motor_type' must be an argument of the test function.")
if mock is None:
raise ValueError("The 'mock' variable must be an argument of the test function.")
if not mock and not request.getfixturevalue("is_motor_available"):
pytest.skip(f"A {motor_type} motor is not available.")
return func(*args, **kwargs)
return wrapper
def mock_calibration_dir(calibration_dir):
# TODO(rcadene): remove this hack
# This calibration file was produced with a Moss v1, but it also works with Koch, Koch bimanual, and SO-100
example_calib = {
"homing_offset": [-1416, -845, 2130, 2872, 1950, -2211],
"drive_mode": [0, 0, 1, 1, 1, 0],
"start_pos": [1442, 843, 2166, 2849, 1988, 1835],
"end_pos": [2440, 1869, -1106, -1848, -926, 3235],
"calib_mode": ["DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "LINEAR"],
"motor_names": ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"],
}
Path(str(calibration_dir)).mkdir(parents=True, exist_ok=True)
with open(calibration_dir / "main_follower.json", "w") as f:
json.dump(example_calib, f)
with open(calibration_dir / "main_leader.json", "w") as f:
json.dump(example_calib, f)
with open(calibration_dir / "left_follower.json", "w") as f:
json.dump(example_calib, f)
with open(calibration_dir / "left_leader.json", "w") as f:
json.dump(example_calib, f)
with open(calibration_dir / "right_follower.json", "w") as f:
json.dump(example_calib, f)
with open(calibration_dir / "right_leader.json", "w") as f:
json.dump(example_calib, f)
# TODO(rcadene, aliberts): remove this dark pattern that overrides
def make_camera(camera_type: str, **kwargs) -> Camera:
if camera_type == "opencv":