[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
committed by AdilZouitine
parent 76df8a31b3
commit 38f5fa4523
@@ -52,7 +52,13 @@ def save_dataset_to_safetensors(output_dir, repo_id="lerobot/pusht"):
         save_file(dataset[i + 1], repo_dir / f"frame_{i + 1}.safetensors")

     # save 2 frames at the middle of first episode
-    i = int((dataset.episode_data_index["to"][0].item() - dataset.episode_data_index["from"][0].item()) / 2)
+    i = int(
+        (
+            dataset.episode_data_index["to"][0].item()
+            - dataset.episode_data_index["from"][0].item()
+        )
+        / 2
+    )
     save_file(dataset[i], repo_dir / f"frame_{i}.safetensors")
     save_file(dataset[i + 1], repo_dir / f"frame_{i + 1}.safetensors")

@@ -30,7 +30,9 @@ class config:  # noqa: N801
     def enable_device(self, device_id: str):
         self.device_enabled = device_id

-    def enable_stream(self, stream_type: stream, width=None, height=None, color_format=None, fps=None):
+    def enable_stream(
+        self, stream_type: stream, width=None, height=None, color_format=None, fps=None
+    ):
         self.stream_type = stream_type
         # Overwrite default values when possible
         self.width = 848 if width is None else width
@@ -37,7 +37,10 @@ pytest -sx 'tests/test_cameras.py::test_camera[intelrealsense-True]'
 import numpy as np
 import pytest

-from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
+from lerobot.common.robot_devices.utils import (
+    RobotDeviceAlreadyConnectedError,
+    RobotDeviceNotConnectedError,
+)
 from tests.utils import TEST_CAMERA_TYPES, make_camera, require_camera

 # Maximum absolute difference between two consecutive images recorded by a camera.
@@ -112,7 +115,11 @@ def test_camera(request, camera_type, mock):
     )
     # TODO(rcadene): properly set `rtol`
     np.testing.assert_allclose(
-        color_image, async_color_image, rtol=1e-5, atol=MAX_PIXEL_DIFFERENCE, err_msg=error_msg
+        color_image,
+        async_color_image,
+        rtol=1e-5,
+        atol=MAX_PIXEL_DIFFERENCE,
+        err_msg=error_msg,
     )

     # Test disconnecting
@@ -131,7 +138,11 @@ def test_camera(request, camera_type, mock):
     assert camera.color_mode == "bgr"
     bgr_color_image = camera.read()
     np.testing.assert_allclose(
-        color_image, bgr_color_image[:, :, [2, 1, 0]], rtol=1e-5, atol=MAX_PIXEL_DIFFERENCE, err_msg=error_msg
+        color_image,
+        bgr_color_image[:, :, [2, 1, 0]],
+        rtol=1e-5,
+        atol=MAX_PIXEL_DIFFERENCE,
+        err_msg=error_msg,
     )
     del camera

@@ -166,7 +177,11 @@ def test_camera(request, camera_type, mock):
     rot_color_image = camera.read()

     np.testing.assert_allclose(
-        rot_color_image, manual_rot_img, rtol=1e-5, atol=MAX_PIXEL_DIFFERENCE, err_msg=error_msg
+        rot_color_image,
+        manual_rot_img,
+        rtol=1e-5,
+        atol=MAX_PIXEL_DIFFERENCE,
+        err_msg=error_msg,
     )
     del camera

@@ -200,7 +215,9 @@ def test_save_images_from_cameras(tmp_path, request, camera_type, mock):
     if camera_type == "opencv":
         from lerobot.common.robot_devices.cameras.opencv import save_images_from_cameras
     elif camera_type == "intelrealsense":
-        from lerobot.common.robot_devices.cameras.intelrealsense import save_images_from_cameras
+        from lerobot.common.robot_devices.cameras.intelrealsense import (
+            save_images_from_cameras,
+        )

     # Small `record_time_s` to speedup unit tests
     save_images_from_cameras(tmp_path, record_time_s=0.02, mock=mock)
@@ -91,7 +91,12 @@ def patch_builtins_input(monkeypatch):


 def pytest_addoption(parser):
-    parser.addoption("--seed", action="store", default="42", help="Set random seed for reproducibility")
+    parser.addoption(
+        "--seed",
+        action="store",
+        default="42",
+        help="Set random seed for reproducibility",
+    )


 @pytest.fixture(autouse=True)
@@ -364,10 +364,16 @@ def test_save_each_transform(img_tensor_factory, tmp_path):
     for transform in transforms:
         transform_dir = tmp_path / transform
         assert transform_dir.exists(), f"{transform} directory was not created."
-        assert any(transform_dir.iterdir()), f"No transformed images found in {transform} directory."
+        assert any(
+            transform_dir.iterdir()
+        ), f"No transformed images found in {transform} directory."

         # Check for specific files within each transform directory
-        expected_files = [f"{i}.png" for i in range(1, n_examples + 1)] + ["min.png", "max.png", "mean.png"]
+        expected_files = [f"{i}.png" for i in range(1, n_examples + 1)] + [
+            "min.png",
+            "max.png",
+            "mean.png",
+        ]
         for file_name in expected_files:
             assert (transform_dir / file_name).exists(), (
                 f"{file_name} was not found in {transform} directory."
@@ -187,7 +187,9 @@ def test_save_image_torch(tmp_path, img_tensor_factory):
         writer.wait_until_done()
         assert fpath.exists()
         saved_image = np.array(Image.open(fpath))
-        expected_image = (image_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)
+        expected_image = (image_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(
+            np.uint8
+        )
         assert np.array_equal(expected_image, saved_image)
     finally:
         writer.stop()
@@ -202,7 +204,9 @@ def test_save_image_torch_multiprocessing(tmp_path, img_tensor_factory):
         writer.wait_until_done()
         assert fpath.exists()
         saved_image = np.array(Image.open(fpath))
-        expected_image = (image_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)
+        expected_image = (image_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(
+            np.uint8
+        )
         assert np.array_equal(expected_image, saved_image)
     finally:
         writer.stop()
@@ -292,7 +296,9 @@ def test_wait_until_done(tmp_path, img_array_factory):
     writer = AsyncImageWriter(num_processes=0, num_threads=4)
     try:
         num_images = 100
-        image_arrays = [img_array_factory(height=500, width=500) for _ in range(num_images)]
+        image_arrays = [
+            img_array_factory(height=500, width=500) for _ in range(num_images)
+        ]
         fpaths = [tmp_path / f"frame_{i:06d}.png" for i in range(num_images)]
         for image_array, fpath in zip(image_arrays, fpaths, strict=True):
             fpath.parent.mkdir(parents=True, exist_ok=True)
@@ -44,13 +44,23 @@ def make_new_buffer(
     return buffer, write_dir


-def make_spoof_data_frames(n_episodes: int, n_frames_per_episode: int) -> dict[str, np.ndarray]:
+def make_spoof_data_frames(
+    n_episodes: int, n_frames_per_episode: int
+) -> dict[str, np.ndarray]:
     new_data = {
-        data_key: np.arange(n_frames_per_episode * n_episodes * np.prod(data_shape)).reshape(-1, *data_shape),
+        data_key: np.arange(
+            n_frames_per_episode * n_episodes * np.prod(data_shape)
+        ).reshape(-1, *data_shape),
         OnlineBuffer.INDEX_KEY: np.arange(n_frames_per_episode * n_episodes),
-        OnlineBuffer.EPISODE_INDEX_KEY: np.repeat(np.arange(n_episodes), n_frames_per_episode),
-        OnlineBuffer.FRAME_INDEX_KEY: np.tile(np.arange(n_frames_per_episode), n_episodes),
-        OnlineBuffer.TIMESTAMP_KEY: np.tile(np.arange(n_frames_per_episode) / fps, n_episodes),
+        OnlineBuffer.EPISODE_INDEX_KEY: np.repeat(
+            np.arange(n_episodes), n_frames_per_episode
+        ),
+        OnlineBuffer.FRAME_INDEX_KEY: np.tile(
+            np.arange(n_frames_per_episode), n_episodes
+        ),
+        OnlineBuffer.TIMESTAMP_KEY: np.tile(
+            np.arange(n_frames_per_episode) / fps, n_episodes
+        ),
     }
     return new_data

@@ -219,47 +229,72 @@ def test_compute_sampler_weights_trivial(
     online_dataset_size: int,
     online_sampling_ratio: float,
 ):
-    offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=offline_dataset_size)
+    offline_dataset = lerobot_dataset_factory(
+        tmp_path, total_episodes=1, total_frames=offline_dataset_size
+    )
     online_dataset, _ = make_new_buffer()
     if online_dataset_size > 0:
         online_dataset.add_data(
-            make_spoof_data_frames(n_episodes=2, n_frames_per_episode=online_dataset_size // 2)
+            make_spoof_data_frames(
+                n_episodes=2, n_frames_per_episode=online_dataset_size // 2
+            )
         )

     weights = compute_sampler_weights(
-        offline_dataset, online_dataset=online_dataset, online_sampling_ratio=online_sampling_ratio
+        offline_dataset,
+        online_dataset=online_dataset,
+        online_sampling_ratio=online_sampling_ratio,
     )
     if offline_dataset_size == 0 or online_dataset_size == 0:
         expected_weights = torch.ones(offline_dataset_size + online_dataset_size)
     elif online_sampling_ratio == 0:
-        expected_weights = torch.cat([torch.ones(offline_dataset_size), torch.zeros(online_dataset_size)])
+        expected_weights = torch.cat(
+            [torch.ones(offline_dataset_size), torch.zeros(online_dataset_size)]
+        )
     elif online_sampling_ratio == 1:
-        expected_weights = torch.cat([torch.zeros(offline_dataset_size), torch.ones(online_dataset_size)])
+        expected_weights = torch.cat(
+            [torch.zeros(offline_dataset_size), torch.ones(online_dataset_size)]
+        )
     expected_weights /= expected_weights.sum()
     torch.testing.assert_close(weights, expected_weights)


 def test_compute_sampler_weights_nontrivial_ratio(lerobot_dataset_factory, tmp_path):
     # Arbitrarily set small dataset sizes, making sure to have uneven sizes.
-    offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=4)
+    offline_dataset = lerobot_dataset_factory(
+        tmp_path, total_episodes=1, total_frames=4
+    )
     online_dataset, _ = make_new_buffer()
-    online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2))
+    online_dataset.add_data(
+        make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2)
+    )
     online_sampling_ratio = 0.8
     weights = compute_sampler_weights(
-        offline_dataset, online_dataset=online_dataset, online_sampling_ratio=online_sampling_ratio
+        offline_dataset,
+        online_dataset=online_dataset,
+        online_sampling_ratio=online_sampling_ratio,
     )
     torch.testing.assert_close(
         weights, torch.tensor([0.05, 0.05, 0.05, 0.05, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
     )


-def test_compute_sampler_weights_nontrivial_ratio_and_drop_last_n(lerobot_dataset_factory, tmp_path):
+def test_compute_sampler_weights_nontrivial_ratio_and_drop_last_n(
+    lerobot_dataset_factory, tmp_path
+):
     # Arbitrarily set small dataset sizes, making sure to have uneven sizes.
-    offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=4)
+    offline_dataset = lerobot_dataset_factory(
+        tmp_path, total_episodes=1, total_frames=4
+    )
     online_dataset, _ = make_new_buffer()
-    online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2))
+    online_dataset.add_data(
+        make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2)
+    )
     weights = compute_sampler_weights(
-        offline_dataset, online_dataset=online_dataset, online_sampling_ratio=0.8, online_drop_n_last_frames=1
+        offline_dataset,
+        online_dataset=online_dataset,
+        online_sampling_ratio=0.8,
+        online_drop_n_last_frames=1,
     )
     torch.testing.assert_close(
         weights, torch.tensor([0.05, 0.05, 0.05, 0.05, 0.2, 0.0, 0.2, 0.0, 0.2, 0.0, 0.2, 0.0])
@@ -268,9 +303,13 @@ def test_compute_sampler_weights_nontrivial_ratio_and_drop_last_n(lerobot_datase

 def test_compute_sampler_weights_drop_n_last_frames(lerobot_dataset_factory, tmp_path):
     """Note: test copied from test_sampler."""
-    offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=2)
+    offline_dataset = lerobot_dataset_factory(
+        tmp_path, total_episodes=1, total_frames=2
+    )
     online_dataset, _ = make_new_buffer()
-    online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2))
+    online_dataset.add_data(
+        make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2)
+    )

     weights = compute_sampler_weights(
         offline_dataset,
@@ -15,7 +15,9 @@
 # limitations under the License.
 from datasets import Dataset

-from lerobot.common.datasets.push_dataset_to_hub.utils import calculate_episode_data_index
+from lerobot.common.datasets.push_dataset_to_hub.utils import (
+    calculate_episode_data_index,
+)
 from lerobot.common.datasets.sampler import EpisodeAwareSampler
 from lerobot.common.datasets.utils import (
     hf_transform_to_torch,
tests/fixtures/constants.py (vendored, 30 lines changed)
@@ -20,17 +20,39 @@ DUMMY_MOTOR_FEATURES = {
     "action": {
         "dtype": "float32",
         "shape": (6,),
-        "names": ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"],
+        "names": [
+            "shoulder_pan",
+            "shoulder_lift",
+            "elbow_flex",
+            "wrist_flex",
+            "wrist_roll",
+            "gripper",
+        ],
     },
     "state": {
         "dtype": "float32",
         "shape": (6,),
-        "names": ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"],
+        "names": [
+            "shoulder_pan",
+            "shoulder_lift",
+            "elbow_flex",
+            "wrist_flex",
+            "wrist_roll",
+            "gripper",
+        ],
     },
 }
 DUMMY_CAMERA_FEATURES = {
-    "laptop": {"shape": (480, 640, 3), "names": ["height", "width", "channels"], "info": None},
-    "phone": {"shape": (480, 640, 3), "names": ["height", "width", "channels"], "info": None},
+    "laptop": {
+        "shape": (480, 640, 3),
+        "names": ["height", "width", "channels"],
+        "info": None,
+    },
+    "phone": {
+        "shape": (480, 640, 3),
+        "names": ["height", "width", "channels"],
+        "info": None,
+    },
 }
 DEFAULT_FPS = 30
 DUMMY_VIDEO_INFO = {
tests/fixtures/dataset_factories.py (vendored, 74 lines changed)
@@ -23,7 +23,11 @@ import PIL.Image
 import pytest
 import torch

-from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset, LeRobotDatasetMetadata
+from lerobot.common.datasets.lerobot_dataset import (
+    CODEBASE_VERSION,
+    LeRobotDataset,
+    LeRobotDatasetMetadata,
+)
 from lerobot.common.datasets.utils import (
     DEFAULT_CHUNK_SIZE,
     DEFAULT_FEATURES,
@@ -54,7 +58,9 @@ def get_task_index(task_dicts: dict, task: str) -> int:

 @pytest.fixture(scope="session")
 def img_tensor_factory():
-    def _create_img_tensor(height=100, width=100, channels=3, dtype=torch.float32) -> torch.Tensor:
+    def _create_img_tensor(
+        height=100, width=100, channels=3, dtype=torch.float32
+    ) -> torch.Tensor:
         return torch.rand((channels, height, width), dtype=dtype)

     return _create_img_tensor
@@ -62,10 +68,14 @@ def img_tensor_factory():

 @pytest.fixture(scope="session")
 def img_array_factory():
-    def _create_img_array(height=100, width=100, channels=3, dtype=np.uint8) -> np.ndarray:
+    def _create_img_array(
+        height=100, width=100, channels=3, dtype=np.uint8
+    ) -> np.ndarray:
         if np.issubdtype(dtype, np.unsignedinteger):
             # Int array in [0, 255] range
-            img_array = np.random.randint(0, 256, size=(height, width, channels), dtype=dtype)
+            img_array = np.random.randint(
+                0, 256, size=(height, width, channels), dtype=dtype
+            )
         elif np.issubdtype(dtype, np.floating):
             # Float array in [0, 1] range
             img_array = np.random.rand(height, width, channels).astype(dtype)
@@ -94,10 +104,13 @@ def features_factory():
     ) -> dict:
         if use_videos:
             camera_ft = {
-                key: {"dtype": "video", **ft, **DUMMY_VIDEO_INFO} for key, ft in camera_features.items()
+                key: {"dtype": "video", **ft, **DUMMY_VIDEO_INFO}
+                for key, ft in camera_features.items()
             }
         else:
-            camera_ft = {key: {"dtype": "image", **ft} for key, ft in camera_features.items()}
+            camera_ft = {
+                key: {"dtype": "image", **ft} for key, ft in camera_features.items()
+            }
         return {
             **motor_features,
             **camera_ft,
@@ -215,7 +228,9 @@ def episodes_factory(tasks_factory):
         if total_episodes <= 0 or total_frames <= 0:
             raise ValueError("num_episodes and total_length must be positive integers.")
         if total_frames < total_episodes:
-            raise ValueError("total_length must be greater than or equal to num_episodes.")
+            raise ValueError(
+                "total_length must be greater than or equal to num_episodes."
+            )

         if not tasks:
             min_tasks = 2 if multi_task else 1
@@ -223,10 +238,14 @@ def episodes_factory(tasks_factory):
             tasks = tasks_factory(total_tasks)

         if total_episodes < len(tasks) and not multi_task:
-            raise ValueError("The number of tasks should be less than the number of episodes.")
+            raise ValueError(
+                "The number of tasks should be less than the number of episodes."
+            )

         # Generate random lengths that sum up to total_length
-        lengths = np.random.multinomial(total_frames, [1 / total_episodes] * total_episodes).tolist()
+        lengths = np.random.multinomial(
+            total_frames, [1 / total_episodes] * total_episodes
+        ).tolist()

         tasks_list = [task_dict["task"] for task_dict in tasks.values()]
         num_tasks_available = len(tasks_list)
@@ -234,9 +253,13 @@ def episodes_factory(tasks_factory):
         episodes = {}
         remaining_tasks = tasks_list.copy()
         for ep_idx in range(total_episodes):
-            num_tasks_in_episode = random.randint(1, min(3, num_tasks_available)) if multi_task else 1
+            num_tasks_in_episode = (
+                random.randint(1, min(3, num_tasks_available)) if multi_task else 1
+            )
             tasks_to_sample = remaining_tasks if remaining_tasks else tasks_list
-            episode_tasks = random.sample(tasks_to_sample, min(num_tasks_in_episode, len(tasks_to_sample)))
+            episode_tasks = random.sample(
+                tasks_to_sample, min(num_tasks_in_episode, len(tasks_to_sample))
+            )
             if remaining_tasks:
                 for task in episode_tasks:
                     remaining_tasks.remove(task)
@@ -253,7 +276,9 @@ def episodes_factory(tasks_factory):


 @pytest.fixture(scope="session")
-def hf_dataset_factory(features_factory, tasks_factory, episodes_factory, img_array_factory):
+def hf_dataset_factory(
+    features_factory, tasks_factory, episodes_factory, img_array_factory
+):
     def _create_hf_dataset(
         features: dict | None = None,
         tasks: list[dict] | None = None,
@@ -275,10 +300,15 @@ def hf_dataset_factory(features_factory, tasks_factory, episodes_factory, img_ar
             timestamp_col = np.concatenate((timestamp_col, np.arange(ep_dict["length"]) / fps))
             frame_index_col = np.concatenate((frame_index_col, np.arange(ep_dict["length"], dtype=int)))
             episode_index_col = np.concatenate(
-                (episode_index_col, np.full(ep_dict["length"], ep_dict["episode_index"], dtype=int))
+                (
+                    episode_index_col,
+                    np.full(ep_dict["length"], ep_dict["episode_index"], dtype=int),
+                )
             )
             ep_task_index = get_task_index(tasks, ep_dict["tasks"][0])
-            task_index = np.concatenate((task_index, np.full(ep_dict["length"], ep_task_index, dtype=int)))
+            task_index = np.concatenate(
+                (task_index, np.full(ep_dict["length"], ep_task_index, dtype=int))
+            )

         index_col = np.arange(len(episode_index_col))

@@ -290,7 +320,9 @@ def hf_dataset_factory(features_factory, tasks_factory, episodes_factory, img_ar
                     for _ in range(len(index_col))
                 ]
             elif ft["shape"][0] > 1 and ft["dtype"] != "video":
-                robot_cols[key] = np.random.random((len(index_col), ft["shape"][0])).astype(ft["dtype"])
+                robot_cols[key] = np.random.random(
+                    (len(index_col), ft["shape"][0])
+                ).astype(ft["dtype"])

         hf_features = get_hf_features_from_features(features)
         dataset = datasets.Dataset.from_dict(
@@ -340,7 +372,9 @@ def lerobot_dataset_metadata_factory(
             tasks = tasks_factory(total_tasks=info["total_tasks"])
         if not episodes:
             episodes = episodes_factory(
-                total_episodes=info["total_episodes"], total_frames=info["total_frames"], tasks=tasks
+                total_episodes=info["total_episodes"],
+                total_frames=info["total_frames"],
+                tasks=tasks,
             )

         mock_snapshot_download = mock_snapshot_download_factory(
@@ -392,7 +426,9 @@ def lerobot_dataset_factory(
     ) -> LeRobotDataset:
         if not info:
             info = info_factory(
-                total_episodes=total_episodes, total_frames=total_frames, total_tasks=total_tasks
+                total_episodes=total_episodes,
+                total_frames=total_frames,
+                total_tasks=total_tasks,
             )
         if not stats:
             stats = stats_factory(features=info["features"])
@@ -408,7 +444,9 @@ def lerobot_dataset_factory(
             multi_task=multi_task,
         )
         if not hf_dataset:
-            hf_dataset = hf_dataset_factory(tasks=tasks, episodes=episode_dicts, fps=info["fps"])
+            hf_dataset = hf_dataset_factory(
+                tasks=tasks, episodes=episode_dicts, fps=info["fps"]
+            )

         mock_snapshot_download = mock_snapshot_download_factory(
             info=info,
tests/fixtures/files.py (vendored, 5 lines changed)
@@ -102,7 +102,10 @@ def episode_path(episodes_factory):
 @pytest.fixture(scope="session")
 def single_episode_parquet_path(hf_dataset_factory, info_factory):
     def _create_single_episode_parquet(
-        dir: Path, ep_idx: int = 0, hf_dataset: datasets.Dataset | None = None, info: dict | None = None
+        dir: Path,
+        ep_idx: int = 0,
+        hf_dataset: datasets.Dataset | None = None,
+        info: dict | None = None,
     ) -> Path:
         if not info:
             info = info_factory()
tests/fixtures/hub.py (vendored, 24 lines changed)
@@ -67,15 +67,21 @@ def mock_snapshot_download_factory(
         tasks = tasks_factory(total_tasks=info["total_tasks"])
     if not episodes:
         episodes = episodes_factory(
-            total_episodes=info["total_episodes"], total_frames=info["total_frames"], tasks=tasks
+            total_episodes=info["total_episodes"],
+            total_frames=info["total_frames"],
+            tasks=tasks,
         )
     if not hf_dataset:
-        hf_dataset = hf_dataset_factory(tasks=tasks, episodes=episodes, fps=info["fps"])
+        hf_dataset = hf_dataset_factory(
+            tasks=tasks, episodes=episodes, fps=info["fps"]
+        )

     def _extract_episode_index_from_path(fpath: str) -> int:
         path = Path(fpath)
         if path.suffix == ".parquet" and path.stem.startswith("episode_"):
-            episode_index = int(path.stem[len("episode_") :])  # 'episode_000000' -> 0
+            episode_index = int(
+                path.stem[len("episode_") :]
+            )  # 'episode_000000' -> 0
             return episode_index
         else:
             return None
@@ -100,12 +106,16 @@ def mock_snapshot_download_factory(
         for episode_dict in episodes.values():
             ep_idx = episode_dict["episode_index"]
             ep_chunk = ep_idx // info["chunks_size"]
-            data_path = info["data_path"].format(episode_chunk=ep_chunk, episode_index=ep_idx)
+            data_path = info["data_path"].format(
+                episode_chunk=ep_chunk, episode_index=ep_idx
+            )
             data_files.append(data_path)
         all_files.extend(data_files)

         allowed_files = filter_repo_objects(
-            all_files, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns
+            all_files,
+            allow_patterns=allow_patterns,
+            ignore_patterns=ignore_patterns,
         )

         # Create allowed files
@@ -113,7 +123,9 @@ def mock_snapshot_download_factory(
             if rel_path.startswith("data/"):
                 episode_index = _extract_episode_index_from_path(rel_path)
                 if episode_index is not None:
-                    _ = single_episode_parquet_path(local_dir, episode_index, hf_dataset, info)
+                    _ = single_episode_parquet_path(
+                        local_dir, episode_index, hf_dataset, info
+                    )
             if rel_path == INFO_PATH:
                 _ = info_path(local_dir, info)
             elif rel_path == STATS_PATH:
@@ -80,7 +80,9 @@ class GroupSyncRead:
     def addParam(self, motor_index):  # noqa: N802
         # Initialize motor default values
         if motor_index not in self.packet_handler.data:
-            self.packet_handler.data[motor_index] = get_default_motor_values(motor_index)
+            self.packet_handler.data[motor_index] = get_default_motor_values(
+                motor_index
+            )

     def txRxPacket(self):  # noqa: N802
         return COMM_SUCCESS
@@ -91,7 +91,9 @@ class GroupSyncRead:
     def addParam(self, motor_index):  # noqa: N802
         # Initialize motor default values
         if motor_index not in self.packet_handler.data:
-            self.packet_handler.data[motor_index] = get_default_motor_values(motor_index)
+            self.packet_handler.data[motor_index] = get_default_motor_values(
+                motor_index
+            )

     def txRxPacket(self):  # noqa: N802
         return COMM_SUCCESS
@@ -43,7 +43,10 @@ import time
 import numpy as np
 import pytest

-from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
+from lerobot.common.robot_devices.utils import (
+    RobotDeviceAlreadyConnectedError,
+    RobotDeviceNotConnectedError,
+)
 from lerobot.scripts.find_motors_bus_port import find_port
 from tests.utils import TEST_MOTOR_TYPES, make_motors_bus, require_motor

@@ -76,7 +79,9 @@ def test_configure_motors_all_ids_1(request, motor_type, mock):
     else:
         raise ValueError(motor_type)

-    input("Are you sure you want to re-configure the motors? Press enter to continue...")
+    input(
+        "Are you sure you want to re-configure the motors? Press enter to continue..."
+    )
     # This test expect the configuration was already correct.
     motors_bus = make_motors_bus(motor_type, mock=mock)
     motors_bus.connect()
@@ -25,7 +25,10 @@ from torchmetrics import AUROC, Accuracy, F1Score, Precision, Recall
 from torchvision.datasets import CIFAR10
 from torchvision.transforms import ToTensor

-from lerobot.common.policies.hilserl.classifier.modeling_classifier import Classifier, ClassifierConfig
+from lerobot.common.policies.hilserl.classifier.modeling_classifier import (
+    Classifier,
+    ClassifierConfig,
+)

 BATCH_SIZE = 1000
 LR = 0.1
@@ -43,7 +46,9 @@ def train_evaluate_multiclass_classifier():
     logging.info(
         f"Start multiclass classifier train eval with {DEVICE} device, batch size {BATCH_SIZE}, learning rate {LR}"
     )
-    multiclass_config = ClassifierConfig(model_name="microsoft/resnet-18", device=DEVICE, num_classes=10)
+    multiclass_config = ClassifierConfig(
+        model_name="microsoft/resnet-18", device=DEVICE, num_classes=10
+    )
     multiclass_classifier = Classifier(multiclass_config)

     trainset = CIFAR10(root="data", train=True, download=True, transform=ToTensor())
@@ -114,10 +119,18 @@ def train_evaluate_multiclass_classifier():
     test_probs = torch.stack(test_probs)

     accuracy = Accuracy(task="multiclass", num_classes=multiclass_num_classes)
-    precision = Precision(task="multiclass", average="weighted", num_classes=multiclass_num_classes)
-    recall = Recall(task="multiclass", average="weighted", num_classes=multiclass_num_classes)
-    f1 = F1Score(task="multiclass", average="weighted", num_classes=multiclass_num_classes)
-    auroc = AUROC(task="multiclass", num_classes=multiclass_num_classes, average="weighted")
+    precision = Precision(
+        task="multiclass", average="weighted", num_classes=multiclass_num_classes
+    )
+    recall = Recall(
+        task="multiclass", average="weighted", num_classes=multiclass_num_classes
+    )
+    f1 = F1Score(
+        task="multiclass", average="weighted", num_classes=multiclass_num_classes
+    )
+    auroc = AUROC(
+        task="multiclass", num_classes=multiclass_num_classes, average="weighted"
+    )

     # Calculate metrics
     acc = accuracy(test_predictions, test_labels)
@@ -146,18 +159,28 @@ def train_evaluate_binary_classifier():
             new_label = float(1.0) if label == target_class else float(0.0)
             new_targets.append(new_label)

-        dataset.targets = new_targets  # Replace the original labels with the binary ones
+        dataset.targets = (
+            new_targets  # Replace the original labels with the binary ones
+        )
         return dataset

-    binary_train_dataset = CIFAR10(root="data", train=True, download=True, transform=ToTensor())
-    binary_test_dataset = CIFAR10(root="data", train=False, download=True, transform=ToTensor())
+    binary_train_dataset = CIFAR10(
+        root="data", train=True, download=True, transform=ToTensor()
+    )
+    binary_test_dataset = CIFAR10(
+        root="data", train=False, download=True, transform=ToTensor()
+    )

     # Apply one-vs-rest labeling
     binary_train_dataset = one_vs_rest(binary_train_dataset, target_binary_class)
     binary_test_dataset = one_vs_rest(binary_test_dataset, target_binary_class)

-    binary_trainloader = DataLoader(binary_train_dataset, batch_size=BATCH_SIZE, shuffle=True)
-    binary_testloader = DataLoader(binary_test_dataset, batch_size=BATCH_SIZE, shuffle=False)
+    binary_trainloader = DataLoader(
+        binary_train_dataset, batch_size=BATCH_SIZE, shuffle=True
+    )
+    binary_testloader = DataLoader(
+        binary_test_dataset, batch_size=BATCH_SIZE, shuffle=False
+    )

     binary_epoch = 1

@@ -9,7 +9,9 @@ from tests.utils import require_package

 def test_classifier_output():
     output = ClassifierOutput(
-        logits=torch.tensor([1, 2, 3]), probabilities=torch.tensor([0.1, 0.2, 0.3]), hidden_states=None
+        logits=torch.tensor([1, 2, 3]),
+        probabilities=torch.tensor([0.1, 0.2, 0.3]),
+        hidden_states=None,
     )

     assert (
@@ -20,7 +22,9 @@ def test_classifier_output():

 @require_package("transformers")
 def test_binary_classifier_with_default_params():
-    from lerobot.common.policies.hilserl.classifier.modeling_classifier import Classifier
+    from lerobot.common.policies.hilserl.classifier.modeling_classifier import (
+        Classifier,
+    )

     config = ClassifierConfig()
     classifier = Classifier(config)
@@ -41,7 +45,9 @@ def test_binary_classifier_with_default_params():

 @require_package("transformers")
 def test_multiclass_classifier():
-    from lerobot.common.policies.hilserl.classifier.modeling_classifier import Classifier
+    from lerobot.common.policies.hilserl.classifier.modeling_classifier import (
+        Classifier,
+    )

     num_classes = 5
     config = ClassifierConfig(num_classes=num_classes)
@@ -63,7 +69,9 @@ def test_multiclass_classifier():

 @require_package("transformers")
 def test_default_device():
-    from lerobot.common.policies.hilserl.classifier.modeling_classifier import Classifier
+    from lerobot.common.policies.hilserl.classifier.modeling_classifier import (
+        Classifier,
+    )

     config = ClassifierConfig()
     assert config.device == "cpu"
@@ -75,7 +83,9 @@ def test_default_device():

 @require_package("transformers")
 def test_explicit_device_setup():
-    from lerobot.common.policies.hilserl.classifier.modeling_classifier import Classifier
+    from lerobot.common.policies.hilserl.classifier.modeling_classifier import (
+        Classifier,
+    )

     config = ClassifierConfig(device="meta")
     assert config.device == "meta"
@@ -172,7 +172,9 @@ def test_policy(ds_repo_id, env_name, env_kwargs, policy_name, policy_kwargs):
     # Test updating the policy (and test that it does not mutate the batch)
     batch_ = deepcopy(batch)
     policy.forward(batch)
-    assert set(batch) == set(batch_), "Batch keys are not the same after a forward pass."
+    assert set(batch) == set(
+        batch_
+    ), "Batch keys are not the same after a forward pass."
     assert all(
         torch.equal(batch[k], batch_[k]) if isinstance(batch[k], torch.Tensor) else batch[k] == batch_[k]
         for k in batch
@@ -186,7 +188,9 @@ def test_policy(ds_repo_id, env_name, env_kwargs, policy_name, policy_kwargs):
     observation = preprocess_observation(observation)

     # send observation to device/gpu
-    observation = {key: observation[key].to(DEVICE, non_blocking=True) for key in observation}
+    observation = {
+        key: observation[key].to(DEVICE, non_blocking=True) for key in observation
+    }

     # get the next action for the environment (also check that the observation batch is not modified)
     observation_ = deepcopy(observation)
@@ -452,7 +456,9 @@ def test_act_temporal_ensembler():
     batch_size = batch_seq.shape[0]
     # Exponential weighting (normalized). Unsqueeze once to match the position of the `episode_length`
     # dimension of `batch_seq`.
-    weights = torch.exp(-temporal_ensemble_coeff * torch.arange(chunk_size)).unsqueeze(-1)
+    weights = torch.exp(-temporal_ensemble_coeff * torch.arange(chunk_size)).unsqueeze(
+        -1
+    )

     # Simulate stepping through a rollout and computing a batch of actions with model on each step.
     for i in range(episode_length):
@@ -475,7 +481,8 @@ def test_act_temporal_ensembler():
         episode_step_indices = torch.arange(i + 1)[-len(chunk_indices) :]
         seq_slice = batch_seq[:, episode_step_indices, chunk_indices]
         offline_avg = (
-            einops.reduce(seq_slice * weights[: i + 1], "b s 1 -> b 1", "sum") / weights[: i + 1].sum()
+            einops.reduce(seq_slice * weights[: i + 1], "b s 1 -> b 1", "sum")
+            / weights[: i + 1].sum()
         )
         # Sanity check. The average should be between the extrema.
         assert torch.all(einops.reduce(seq_slice, "b s 1 -> b 1", "min") <= offline_avg)
@@ -335,8 +335,12 @@ def test_record_with_event_rerecord_episode(tmp_path, request, robot_type, mock)
     )
     dataset = record(robot, rec_cfg)

-    assert not mock_events["rerecord_episode"], "`rerecord_episode` wasn't properly reset to False"
-    assert not mock_events["exit_early"], "`exit_early` wasn't properly reset to False"
+    assert not mock_events[
+        "rerecord_episode"
+    ], "`rerecord_episode` wasn't properly reset to False"
+    assert not mock_events[
+        "exit_early"
+    ], "`exit_early` wasn't properly reset to False"
     assert len(dataset) == 1, "`dataset` should contain only 1 frame"


@@ -391,7 +395,8 @@ def test_record_with_event_exit_early(tmp_path, request, robot_type, mock):


 @pytest.mark.parametrize(
-    "robot_type, mock, num_image_writer_processes", [("koch", True, 0), ("koch", True, 1)]
+    "robot_type, mock, num_image_writer_processes",
+    [("koch", True, 0), ("koch", True, 1)],
 )
 @require_robot
 def test_record_with_event_stop_recording(tmp_path, request, robot_type, mock, num_image_writer_processes):
@@ -105,7 +105,9 @@ def test_robot(tmp_path, request, robot_type, mock):
     assert "observation.state" in observation
     assert isinstance(observation["observation.state"], torch.Tensor)
     assert observation["observation.state"].ndim == 1
-    dim_state = sum(len(robot.follower_arms[name].motors) for name in robot.follower_arms)
+    dim_state = sum(
+        len(robot.follower_arms[name].motors) for name in robot.follower_arms
+    )
     assert observation["observation.state"].shape[0] == dim_state
     # Cameras
     for name in robot.cameras:
@@ -116,7 +118,9 @@ def test_robot(tmp_path, request, robot_type, mock):
     assert "action" in action
     assert isinstance(action["action"], torch.Tensor)
     assert action["action"].ndim == 1
-    dim_action = sum(len(robot.follower_arms[name].motors) for name in robot.follower_arms)
+    dim_action = sum(
+        len(robot.follower_arms[name].motors) for name in robot.follower_arms
+    )
     assert action["action"].shape[0] == dim_action
     # TODO(rcadene): test if observation and action data are returned as expected

@@ -9,7 +9,9 @@ from hydra import compose, initialize_config_dir
 from torch import nn
 from torch.utils.data import Dataset

-from lerobot.common.policies.hilserl.classifier.configuration_classifier import ClassifierConfig
+from lerobot.common.policies.hilserl.classifier.configuration_classifier import (
+    ClassifierConfig,
+)
 from lerobot.common.policies.hilserl.classifier.modeling_classifier import Classifier
 from lerobot.scripts.train_hilserl_classifier import (
     create_balanced_sampler,
@@ -34,7 +36,9 @@ class MockDataset(Dataset):

 def make_dummy_model():
     model_config = ClassifierConfig(
-        num_classes=2, model_name="hf-tiny-model-private/tiny-random-ResNetModel", num_cameras=1
+        num_classes=2,
+        model_name="hf-tiny-model-private/tiny-random-ResNetModel",
+        num_cameras=1,
     )
     model = Classifier(config=model_config)
     return model
@@ -65,7 +69,9 @@ def test_create_balanced_sampler():
     labels = [item["label"] for item in data]
     class_counts = torch.tensor([labels.count(0), labels.count(1)], dtype=torch.float32)
     class_weights = 1.0 / class_counts
-    expected_weights = torch.tensor([class_weights[label] for label in labels], dtype=torch.float32)
+    expected_weights = torch.tensor(
+        [class_weights[label] for label in labels], dtype=torch.float32
+    )

     # Test that the weights are correct
     assert torch.allclose(weights, expected_weights)
@@ -149,7 +155,9 @@ def test_validate():

 def test_train_epoch_multiple_cameras():
     model_config = ClassifierConfig(
-        num_classes=2, model_name="hf-tiny-model-private/tiny-random-ResNetModel", num_cameras=2
+        num_classes=2,
+        model_name="hf-tiny-model-private/tiny-random-ResNetModel",
+        num_cameras=2,
     )
     model = Classifier(config=model_config)

@@ -216,10 +224,16 @@ def test_resume_function(
 ):
     # Initialize Hydra
     test_file_dir = os.path.dirname(os.path.abspath(__file__))
-    config_dir = os.path.abspath(os.path.join(test_file_dir, "..", "lerobot", "configs", "policy"))
-    assert os.path.exists(config_dir), f"Config directory does not exist at {config_dir}"
+    config_dir = os.path.abspath(
+        os.path.join(test_file_dir, "..", "lerobot", "configs", "policy")
+    )
+    assert os.path.exists(
+        config_dir
+    ), f"Config directory does not exist at {config_dir}"

-    with initialize_config_dir(config_dir=config_dir, job_name="test_app", version_base="1.2"):
+    with initialize_config_dir(
+        config_dir=config_dir, job_name="test_app", version_base="1.2"
+    ):
         cfg = compose(
             config_name="hilserl_classifier",
             overrides=[
@@ -244,7 +258,9 @@ def test_resume_function(
     mock_init_hydra_config.return_value = cfg

     # Mock dataset
-    dataset = MockDataset([{"image": torch.rand(3, 224, 224), "label": i % 2} for i in range(10)])
+    dataset = MockDataset(
+        [{"image": torch.rand(3, 224, 224), "label": i % 2} for i in range(10)]
+    )
     mock_dataset.return_value = dataset

     # Mock checkpoint handling

@@ -47,7 +47,9 @@ for motor_type in available_motors:
 OPENCV_CAMERA_INDEX = int(os.environ.get("LEROBOT_TEST_OPENCV_CAMERA_INDEX", 0))
 INTELREALSENSE_SERIAL_NUMBER = int(os.environ.get("LEROBOT_TEST_INTELREALSENSE_SERIAL_NUMBER", 128422271614))

-DYNAMIXEL_PORT = os.environ.get("LEROBOT_TEST_DYNAMIXEL_PORT", "/dev/tty.usbmodem575E0032081")
+DYNAMIXEL_PORT = os.environ.get(
+    "LEROBOT_TEST_DYNAMIXEL_PORT", "/dev/tty.usbmodem575E0032081"
+)
 DYNAMIXEL_MOTORS = {
     "shoulder_pan": [1, "xl430-w250"],
     "shoulder_lift": [2, "xl430-w250"],
@@ -57,7 +59,9 @@ DYNAMIXEL_MOTORS = {
     "gripper": [6, "xl330-m288"],
 }

-FEETECH_PORT = os.environ.get("LEROBOT_TEST_FEETECH_PORT", "/dev/tty.usbmodem585A0080971")
+FEETECH_PORT = os.environ.get(
+    "LEROBOT_TEST_FEETECH_PORT", "/dev/tty.usbmodem585A0080971"
+)
 FEETECH_MOTORS = {
     "shoulder_pan": [1, "sts3215"],
     "shoulder_lift": [2, "sts3215"],
@@ -156,9 +160,13 @@ def require_package_arg(func):
         if "required_packages" in arg_names:
             # Get the index of 'required_packages' and retrieve the value from args
             index = arg_names.index("required_packages")
-            required_packages = args[index] if len(args) > index else kwargs.get("required_packages")
+            required_packages = (
+                args[index] if len(args) > index else kwargs.get("required_packages")
+            )
         else:
-            raise ValueError("Function does not have 'required_packages' as an argument.")
+            raise ValueError(
+                "Function does not have 'required_packages' as an argument."
+            )

         if required_packages is None:
             return func(*args, **kwargs)
@@ -215,11 +223,17 @@ def require_robot(func):
         mock = kwargs.get("mock")

         if robot_type is None:
-            raise ValueError("The 'robot_type' must be an argument of the test function.")
+            raise ValueError(
+                "The 'robot_type' must be an argument of the test function."
+            )
         if request is None:
-            raise ValueError("The 'request' fixture must be an argument of the test function.")
+            raise ValueError(
+                "The 'request' fixture must be an argument of the test function."
+            )
         if mock is None:
-            raise ValueError("The 'mock' variable must be an argument of the test function.")
+            raise ValueError(
+                "The 'mock' variable must be an argument of the test function."
+            )

         # Run test with a real robot. Skip test if robot connection fails.
         if not mock and not request.getfixturevalue("is_robot_available"):
@@ -239,11 +253,17 @@ def require_camera(func):
         mock = kwargs.get("mock")

         if request is None:
-            raise ValueError("The 'request' fixture must be an argument of the test function.")
+            raise ValueError(
+                "The 'request' fixture must be an argument of the test function."
+            )
         if camera_type is None:
-            raise ValueError("The 'camera_type' must be an argument of the test function.")
+            raise ValueError(
+                "The 'camera_type' must be an argument of the test function."
+            )
         if mock is None:
-            raise ValueError("The 'mock' variable must be an argument of the test function.")
+            raise ValueError(
+                "The 'mock' variable must be an argument of the test function."
+            )

         if not mock and not request.getfixturevalue("is_camera_available"):
             pytest.skip(f"A {camera_type} camera is not available.")
@@ -262,11 +282,17 @@ def require_motor(func):
         mock = kwargs.get("mock")

         if request is None:
-            raise ValueError("The 'request' fixture must be an argument of the test function.")
+            raise ValueError(
+                "The 'request' fixture must be an argument of the test function."
+            )
         if motor_type is None:
-            raise ValueError("The 'motor_type' must be an argument of the test function.")
+            raise ValueError(
+                "The 'motor_type' must be an argument of the test function."
+            )
         if mock is None:
-            raise ValueError("The 'mock' variable must be an argument of the test function.")
+            raise ValueError(
+                "The 'mock' variable must be an argument of the test function."
+            )

         if not mock and not request.getfixturevalue("is_motor_available"):
             pytest.skip(f"A {motor_type} motor is not available.")
@@ -285,7 +311,14 @@ def mock_calibration_dir(calibration_dir):
         "start_pos": [1442, 843, 2166, 2849, 1988, 1835],
         "end_pos": [2440, 1869, -1106, -1848, -926, 3235],
         "calib_mode": ["DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "LINEAR"],
-        "motor_names": ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"],
+        "motor_names": [
+            "shoulder_pan",
+            "shoulder_lift",
+            "elbow_flex",
+            "wrist_flex",
+            "wrist_roll",
+            "gripper",
+        ],
     }
     Path(str(calibration_dir)).mkdir(parents=True, exist_ok=True)
     with open(calibration_dir / "main_follower.json", "w") as f: