[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
Authored by pre-commit-ci[bot] on 2025-03-24 13:41:27 +00:00; committed by Michel Aractingi
parent 2abbd60a0d
commit 0ea27704f6
123 changed files with 1161 additions and 3425 deletions
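
The hunks below are mechanical re-wraps produced by the repository's formatting hooks. As a hedged illustration (the exact hook list lives in the repo's .pre-commit-config.yaml and is not reproduced here), the same kind of auto-fix can be regenerated locally by running all configured hooks over the tree, which is what pre-commit.ci does for a PR:

# Illustrative only: re-run the repository's configured hooks locally.
# Assumes `pre-commit` is installed and a .pre-commit-config.yaml exists at the repo root.
import subprocess

result = subprocess.run(["pre-commit", "run", "--all-files"], check=False)
# pre-commit exits non-zero when hooks fail or modify files.
print("hooks modified files" if result.returncode != 0 else "tree already clean")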


@@ -52,13 +52,7 @@ def save_dataset_to_safetensors(output_dir, repo_id="lerobot/pusht"):
     save_file(dataset[i + 1], repo_dir / f"frame_{i + 1}.safetensors")

     # save 2 frames at the middle of first episode
-    i = int(
-        (
-            dataset.episode_data_index["to"][0].item()
-            - dataset.episode_data_index["from"][0].item()
-        )
-        / 2
-    )
+    i = int((dataset.episode_data_index["to"][0].item() - dataset.episode_data_index["from"][0].item()) / 2)
     save_file(dataset[i], repo_dir / f"frame_{i}.safetensors")
     save_file(dataset[i + 1], repo_dir / f"frame_{i + 1}.safetensors")
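
For context on the collapsed expression, a small self-contained sketch of how the two middle frames of the first episode are selected (the episode boundaries below are made up for illustration; in the real script `dataset.episode_data_index` comes from the loaded dataset):

# Hypothetical episode boundaries: episode 0 spans frames [0, 120), episode 1 spans [120, 250).
import torch

episode_data_index = {"from": torch.tensor([0, 120]), "to": torch.tensor([120, 250])}

# Midpoint offset of the first episode; with from=0 this is simply half its length.
i = int((episode_data_index["to"][0].item() - episode_data_index["from"][0].item()) / 2)
assert (i, i + 1) == (60, 61)  # the two frames saved as safetensors artifacts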


@@ -51,9 +51,7 @@ def get_policy_stats(ds_repo_id: str, policy_name: str, policy_kwargs: dict):
batch = next(iter(dataloader))
loss, output_dict = policy.forward(batch)
if output_dict is not None:
output_dict = {
k: v for k, v in output_dict.items() if isinstance(v, torch.Tensor)
}
output_dict = {k: v for k, v in output_dict.items() if isinstance(v, torch.Tensor)}
output_dict["loss"] = loss
else:
output_dict = {"loss": loss}
@@ -71,9 +69,7 @@ def get_policy_stats(ds_repo_id: str, policy_name: str, policy_kwargs: dict):
param_stats = {}
for key, param in policy.named_parameters():
param_stats[f"{key}_mean"] = param.mean()
param_stats[f"{key}_std"] = (
param.std() if param.numel() > 1 else torch.tensor(float(0.0))
)
param_stats[f"{key}_std"] = param.std() if param.numel() > 1 else torch.tensor(float(0.0))
optimizer.zero_grad()
policy.reset()
@@ -100,15 +96,11 @@ def get_policy_stats(ds_repo_id: str, policy_name: str, policy_kwargs: dict):
else:
actions_queue = train_cfg.policy.n_action_repeats
actions = {
str(i): policy.select_action(obs).contiguous() for i in range(actions_queue)
}
actions = {str(i): policy.select_action(obs).contiguous() for i in range(actions_queue)}
return output_dict, grad_stats, param_stats, actions
def save_policy_to_safetensors(
output_dir: Path, ds_repo_id: str, policy_name: str, policy_kwargs: dict
):
def save_policy_to_safetensors(output_dir: Path, ds_repo_id: str, policy_name: str, policy_kwargs: dict):
if output_dir.exists():
print(f"Overwrite existing safetensors in '{output_dir}':")
print(f" - Validate with: `git add {output_dir}`")
@@ -116,9 +108,7 @@ def save_policy_to_safetensors(
shutil.rmtree(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
output_dict, grad_stats, param_stats, actions = get_policy_stats(
ds_repo_id, policy_name, policy_kwargs
)
output_dict, grad_stats, param_stats, actions = get_policy_stats(ds_repo_id, policy_name, policy_kwargs)
save_file(output_dict, output_dir / "output_dict.safetensors")
save_file(grad_stats, output_dir / "grad_stats.safetensors")
save_file(param_stats, output_dir / "param_stats.safetensors")
@@ -151,7 +141,5 @@ if __name__ == "__main__":
raise RuntimeError("No policies were provided!")
for ds_repo_id, policy, policy_kwargs, file_name_extra in artifacts_cfg:
ds_name = ds_repo_id.split("/")[-1]
output_dir = (
Path("tests/artifacts/policies") / f"{ds_name}_{policy}_{file_name_extra}"
)
output_dir = Path("tests/artifacts/policies") / f"{ds_name}_{policy}_{file_name_extra}"
save_policy_to_safetensors(output_dir, ds_repo_id, policy, policy_kwargs)


@@ -30,9 +30,7 @@ class config: # noqa: N801
def enable_device(self, device_id: str):
self.device_enabled = device_id
def enable_stream(
self, stream_type: stream, width=None, height=None, color_format=None, fps=None
):
def enable_stream(self, stream_type: stream, width=None, height=None, color_format=None, fps=None):
self.stream_type = stream_type
# Overwrite default values when possible
self.width = 848 if width is None else width


@@ -9,9 +9,7 @@ from lerobot.common.envs.configs import EnvConfig
from lerobot.configs.parser import PluginLoadError, load_plugin, parse_plugin_args, wrap
def create_plugin_code(
*, base_class: str = "EnvConfig", plugin_name: str = "test_env"
) -> str:
def create_plugin_code(*, base_class: str = "EnvConfig", plugin_name: str = "test_env") -> str:
"""Creates a dummy plugin module that implements its own EnvConfig subclass."""
return f"""
from dataclasses import dataclass


@@ -31,11 +31,7 @@ from lerobot.common.datasets.compute_stats import (
def mock_load_image_as_numpy(path, dtype, channel_first):
return (
np.ones((3, 32, 32), dtype=dtype)
if channel_first
else np.ones((32, 32, 3), dtype=dtype)
)
return np.ones((3, 32, 32), dtype=dtype) if channel_first else np.ones((32, 32, 3), dtype=dtype)
@pytest.fixture
@@ -81,20 +77,9 @@ def test_sample_images(mock_load):
def test_get_feature_stats_images():
data = np.random.rand(100, 3, 32, 32)
stats = get_feature_stats(data, axis=(0, 2, 3), keepdims=True)
assert (
"min" in stats
and "max" in stats
and "mean" in stats
and "std" in stats
and "count" in stats
)
assert "min" in stats and "max" in stats and "mean" in stats and "std" in stats and "count" in stats
np.testing.assert_equal(stats["count"], np.array([100]))
assert (
stats["min"].shape
== stats["max"].shape
== stats["mean"].shape
== stats["std"].shape
)
assert stats["min"].shape == stats["max"].shape == stats["mean"].shape == stats["std"].shape
def test_get_feature_stats_axis_0_keepdims(sample_array):
@@ -315,47 +300,31 @@ def test_aggregate_stats():
for ep_stats in all_stats:
for fkey, stats in ep_stats.items():
for k in stats:
stats[k] = np.array(
stats[k], dtype=np.int64 if k == "count" else np.float32
)
stats[k] = np.array(stats[k], dtype=np.int64 if k == "count" else np.float32)
if fkey == "observation.image" and k != "count":
stats[k] = stats[k].reshape(
3, 1, 1
) # for normalization on image channels
stats[k] = stats[k].reshape(3, 1, 1) # for normalization on image channels
else:
stats[k] = stats[k].reshape(1)
# cast to numpy
for fkey, stats in expected_agg_stats.items():
for k in stats:
stats[k] = np.array(
stats[k], dtype=np.int64 if k == "count" else np.float32
)
stats[k] = np.array(stats[k], dtype=np.int64 if k == "count" else np.float32)
if fkey == "observation.image" and k != "count":
stats[k] = stats[k].reshape(
3, 1, 1
) # for normalization on image channels
stats[k] = stats[k].reshape(3, 1, 1) # for normalization on image channels
else:
stats[k] = stats[k].reshape(1)
results = aggregate_stats(all_stats)
for fkey in expected_agg_stats:
np.testing.assert_allclose(
results[fkey]["min"], expected_agg_stats[fkey]["min"]
)
np.testing.assert_allclose(
results[fkey]["max"], expected_agg_stats[fkey]["max"]
)
np.testing.assert_allclose(
results[fkey]["mean"], expected_agg_stats[fkey]["mean"]
)
np.testing.assert_allclose(results[fkey]["min"], expected_agg_stats[fkey]["min"])
np.testing.assert_allclose(results[fkey]["max"], expected_agg_stats[fkey]["max"])
np.testing.assert_allclose(results[fkey]["mean"], expected_agg_stats[fkey]["mean"])
np.testing.assert_allclose(
results[fkey]["std"],
expected_agg_stats[fkey]["std"],
atol=1e-04,
rtol=1e-04,
)
np.testing.assert_allclose(
results[fkey]["count"], expected_agg_stats[fkey]["count"]
)
np.testing.assert_allclose(results[fkey]["count"], expected_agg_stats[fkey]["count"])


@@ -72,9 +72,7 @@ def test_same_attributes_defined(tmp_path, lerobot_dataset_factory):
# Instantiate both ways
robot = make_robot("koch", mock=True)
root_create = tmp_path / "create"
dataset_create = LeRobotDataset.create(
repo_id=DUMMY_REPO_ID, fps=30, robot=robot, root=root_create
)
dataset_create = LeRobotDataset.create(repo_id=DUMMY_REPO_ID, fps=30, robot=robot, root=root_create)
root_init = tmp_path / "init"
dataset_init = lerobot_dataset_factory(root=root_init)
@@ -129,9 +127,7 @@ def test_add_frame_extra_feature(tmp_path, empty_lerobot_dataset_factory):
ValueError,
match="Feature mismatch in `frame` dictionary:\nExtra features: {'extra'}\n",
):
dataset.add_frame(
{"state": torch.randn(1), "task": "Dummy task", "extra": "dummy_extra"}
)
dataset.add_frame({"state": torch.randn(1), "task": "Dummy task", "extra": "dummy_extra"})
def test_add_frame_wrong_type(tmp_path, empty_lerobot_dataset_factory):
@@ -141,9 +137,7 @@ def test_add_frame_wrong_type(tmp_path, empty_lerobot_dataset_factory):
ValueError,
match="The feature 'state' of dtype 'float16' is not of the expected dtype 'float32'.\n",
):
dataset.add_frame(
{"state": torch.randn(1, dtype=torch.float16), "task": "Dummy task"}
)
dataset.add_frame({"state": torch.randn(1, dtype=torch.float16), "task": "Dummy task"})
def test_add_frame_wrong_shape(tmp_path, empty_lerobot_dataset_factory):
@@ -151,9 +145,7 @@ def test_add_frame_wrong_shape(tmp_path, empty_lerobot_dataset_factory):
dataset = empty_lerobot_dataset_factory(root=tmp_path / "test", features=features)
with pytest.raises(
ValueError,
match=re.escape(
"The feature 'state' of shape '(1,)' does not have the expected shape '(2,)'.\n"
),
match=re.escape("The feature 'state' of shape '(1,)' does not have the expected shape '(2,)'.\n"),
):
dataset.add_frame({"state": torch.randn(1), "task": "Dummy task"})
@@ -175,9 +167,7 @@ def test_add_frame_wrong_shape_torch_ndim_0(tmp_path, empty_lerobot_dataset_fact
dataset = empty_lerobot_dataset_factory(root=tmp_path / "test", features=features)
with pytest.raises(
ValueError,
match=re.escape(
"The feature 'state' of shape '()' does not have the expected shape '(1,)'.\n"
),
match=re.escape("The feature 'state' of shape '()' does not have the expected shape '(1,)'.\n"),
):
dataset.add_frame({"state": torch.tensor(1.0), "task": "Dummy task"})
@@ -471,9 +461,7 @@ def test_flatten_unflatten_dict():
d = unflatten_dict(flatten_dict(d))
# test equality between nested dicts
assert json.dumps(original_d, sort_keys=True) == json.dumps(d, sort_keys=True), (
f"{original_d} != {d}"
)
assert json.dumps(original_d, sort_keys=True) == json.dumps(d, sort_keys=True), f"{original_d} != {d}"
@pytest.mark.parametrize(
@@ -527,13 +515,7 @@ def test_backward_compatibility(repo_id):
     load_and_compare(i + 1)

     # test 2 frames at the middle of first episode
-    i = int(
-        (
-            dataset.episode_data_index["to"][0].item()
-            - dataset.episode_data_index["from"][0].item()
-        )
-        / 2
-    )
+    i = int((dataset.episode_data_index["to"][0].item() - dataset.episode_data_index["from"][0].item()) / 2)
     load_and_compare(i)
     load_and_compare(i + 1)


@@ -71,12 +71,8 @@ def unsynced_timestamps_factory(synced_timestamps_factory):
def _create_unsynced_timestamps(
fps: int = 30, tolerance_s: float = 1e-4
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
timestamps, episode_indices, episode_data_index = synced_timestamps_factory(
fps=fps
)
timestamps[30] += (
tolerance_s * 1.1
) # Modify a single timestamp just outside tolerance
timestamps, episode_indices, episode_data_index = synced_timestamps_factory(fps=fps)
timestamps[30] += tolerance_s * 1.1 # Modify a single timestamp just outside tolerance
return timestamps, episode_indices, episode_data_index
return _create_unsynced_timestamps
@@ -87,12 +83,8 @@ def slightly_off_timestamps_factory(synced_timestamps_factory):
def _create_slightly_off_timestamps(
fps: int = 30, tolerance_s: float = 1e-4
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
timestamps, episode_indices, episode_data_index = synced_timestamps_factory(
fps=fps
)
timestamps[30] += (
tolerance_s * 0.9
) # Modify a single timestamp just inside tolerance
timestamps, episode_indices, episode_data_index = synced_timestamps_factory(fps=fps)
timestamps[30] += tolerance_s * 0.9 # Modify a single timestamp just inside tolerance
return timestamps, episode_indices, episode_data_index
return _create_slightly_off_timestamps
@@ -105,9 +97,7 @@ def valid_delta_timestamps_factory():
keys: list = DUMMY_MOTOR_FEATURES,
min_max_range: tuple[int, int] = (-10, 10),
) -> dict:
delta_timestamps = {
key: [i * (1 / fps) for i in range(*min_max_range)] for key in keys
}
delta_timestamps = {key: [i * (1 / fps) for i in range(*min_max_range)] for key in keys}
return delta_timestamps
return _create_valid_delta_timestamps
@@ -144,9 +134,7 @@ def slightly_off_delta_timestamps_factory(valid_delta_timestamps_factory):
@pytest.fixture(scope="module")
def delta_indices_factory():
def _delta_indices(
keys: list = DUMMY_MOTOR_FEATURES, min_max_range: tuple[int, int] = (-10, 10)
) -> dict:
def _delta_indices(keys: list = DUMMY_MOTOR_FEATURES, min_max_range: tuple[int, int] = (-10, 10)) -> dict:
return {key: list(range(*min_max_range)) for key in keys}
return _delta_indices
@@ -198,9 +186,7 @@ def test_check_timestamps_sync_unsynced_no_exception(unsynced_timestamps_factory
def test_check_timestamps_sync_slightly_off(slightly_off_timestamps_factory):
fps = 30
tolerance_s = 1e-4
timestamps, ep_idx, ep_data_index = slightly_off_timestamps_factory(
fps, tolerance_s
)
timestamps, ep_idx, ep_data_index = slightly_off_timestamps_factory(fps, tolerance_s)
result = check_timestamps_sync(
timestamps=timestamps,
episode_indices=ep_idx,
@@ -241,9 +227,7 @@ def test_check_delta_timestamps_valid(valid_delta_timestamps_factory):
def test_check_delta_timestamps_slightly_off(slightly_off_delta_timestamps_factory):
fps = 30
tolerance_s = 1e-4
slightly_off_delta_timestamps = slightly_off_delta_timestamps_factory(
fps, tolerance_s
)
slightly_off_delta_timestamps = slightly_off_delta_timestamps_factory(fps, tolerance_s)
result = check_delta_timestamps(
delta_timestamps=slightly_off_delta_timestamps,
fps=fps,


@@ -82,11 +82,7 @@ def test_get_image_transforms_brightness(img_tensor_factory, min_max):
img_tensor = img_tensor_factory()
tf_cfg = ImageTransformsConfig(
enable=True,
tfs={
"brightness": ImageTransformConfig(
type="ColorJitter", kwargs={"brightness": min_max}
)
},
tfs={"brightness": ImageTransformConfig(type="ColorJitter", kwargs={"brightness": min_max})},
)
tf_actual = ImageTransforms(tf_cfg)
tf_expected = v2.ColorJitter(brightness=min_max)
@@ -98,11 +94,7 @@ def test_get_image_transforms_contrast(img_tensor_factory, min_max):
img_tensor = img_tensor_factory()
tf_cfg = ImageTransformsConfig(
enable=True,
tfs={
"contrast": ImageTransformConfig(
type="ColorJitter", kwargs={"contrast": min_max}
)
},
tfs={"contrast": ImageTransformConfig(type="ColorJitter", kwargs={"contrast": min_max})},
)
tf_actual = ImageTransforms(tf_cfg)
tf_expected = v2.ColorJitter(contrast=min_max)
@@ -114,11 +106,7 @@ def test_get_image_transforms_saturation(img_tensor_factory, min_max):
img_tensor = img_tensor_factory()
tf_cfg = ImageTransformsConfig(
enable=True,
tfs={
"saturation": ImageTransformConfig(
type="ColorJitter", kwargs={"saturation": min_max}
)
},
tfs={"saturation": ImageTransformConfig(type="ColorJitter", kwargs={"saturation": min_max})},
)
tf_actual = ImageTransforms(tf_cfg)
tf_expected = v2.ColorJitter(saturation=min_max)
@@ -142,11 +130,7 @@ def test_get_image_transforms_sharpness(img_tensor_factory, min_max):
img_tensor = img_tensor_factory()
tf_cfg = ImageTransformsConfig(
enable=True,
tfs={
"sharpness": ImageTransformConfig(
type="SharpnessJitter", kwargs={"sharpness": min_max}
)
},
tfs={"sharpness": ImageTransformConfig(type="SharpnessJitter", kwargs={"sharpness": min_max})},
)
tf_actual = ImageTransforms(tf_cfg)
tf_expected = SharpnessJitter(sharpness=min_max)
@@ -362,9 +346,7 @@ def test_save_all_transforms(img_tensor_factory, tmp_path):
# Check if the combined transforms directory exists and contains the right files
combined_transforms_dir = tmp_path / "all"
assert combined_transforms_dir.exists(), (
"Combined transforms directory was not created."
)
assert combined_transforms_dir.exists(), "Combined transforms directory was not created."
assert any(combined_transforms_dir.iterdir()), (
"No transformed images found in combined transforms directory."
)
@@ -386,9 +368,7 @@ def test_save_each_transform(img_tensor_factory, tmp_path):
for transform in transforms:
transform_dir = tmp_path / transform
assert transform_dir.exists(), f"{transform} directory was not created."
assert any(transform_dir.iterdir()), (
f"No transformed images found in {transform} directory."
)
assert any(transform_dir.iterdir()), f"No transformed images found in {transform} directory."
# Check for specific files within each transform directory
expected_files = [f"{i}.png" for i in range(1, n_examples + 1)] + [


@@ -187,9 +187,7 @@ def test_save_image_torch(tmp_path, img_tensor_factory):
writer.wait_until_done()
assert fpath.exists()
saved_image = np.array(Image.open(fpath))
expected_image = (image_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(
np.uint8
)
expected_image = (image_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)
assert np.array_equal(expected_image, saved_image)
finally:
writer.stop()
@@ -204,9 +202,7 @@ def test_save_image_torch_multiprocessing(tmp_path, img_tensor_factory):
writer.wait_until_done()
assert fpath.exists()
saved_image = np.array(Image.open(fpath))
expected_image = (image_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(
np.uint8
)
expected_image = (image_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)
assert np.array_equal(expected_image, saved_image)
finally:
writer.stop()
@@ -296,9 +292,7 @@ def test_wait_until_done(tmp_path, img_array_factory):
writer = AsyncImageWriter(num_processes=0, num_threads=4)
try:
num_images = 100
image_arrays = [
img_array_factory(height=500, width=500) for _ in range(num_images)
]
image_arrays = [img_array_factory(height=500, width=500) for _ in range(num_images)]
fpaths = [tmp_path / f"frame_{i:06d}.png" for i in range(num_images)]
for image_array, fpath in zip(image_arrays, fpaths, strict=True):
fpath.parent.mkdir(parents=True, exist_ok=True)


@@ -44,23 +44,13 @@ def make_new_buffer(
     return buffer, write_dir


-def make_spoof_data_frames(
-    n_episodes: int, n_frames_per_episode: int
-) -> dict[str, np.ndarray]:
+def make_spoof_data_frames(n_episodes: int, n_frames_per_episode: int) -> dict[str, np.ndarray]:
     new_data = {
-        data_key: np.arange(
-            n_frames_per_episode * n_episodes * np.prod(data_shape)
-        ).reshape(-1, *data_shape),
+        data_key: np.arange(n_frames_per_episode * n_episodes * np.prod(data_shape)).reshape(-1, *data_shape),
         OnlineBuffer.INDEX_KEY: np.arange(n_frames_per_episode * n_episodes),
-        OnlineBuffer.EPISODE_INDEX_KEY: np.repeat(
-            np.arange(n_episodes), n_frames_per_episode
-        ),
-        OnlineBuffer.FRAME_INDEX_KEY: np.tile(
-            np.arange(n_frames_per_episode), n_episodes
-        ),
-        OnlineBuffer.TIMESTAMP_KEY: np.tile(
-            np.arange(n_frames_per_episode) / fps, n_episodes
-        ),
+        OnlineBuffer.EPISODE_INDEX_KEY: np.repeat(np.arange(n_episodes), n_frames_per_episode),
+        OnlineBuffer.FRAME_INDEX_KEY: np.tile(np.arange(n_frames_per_episode), n_episodes),
+        OnlineBuffer.TIMESTAMP_KEY: np.tile(np.arange(n_frames_per_episode) / fps, n_episodes),
     }
     return new_data
@@ -176,9 +166,7 @@ def test_delta_timestamps_within_tolerance():
buffer.tolerance_s = 0.04
item = buffer[2]
data, is_pad = item["index"], item[f"index{OnlineBuffer.IS_PAD_POSTFIX}"]
torch.testing.assert_close(
data, torch.tensor([0, 2, 3]), msg="Data does not match expected values"
)
torch.testing.assert_close(data, torch.tensor([0, 2, 3]), msg="Data does not match expected values")
assert not is_pad.any(), "Unexpected padding detected"
@@ -214,9 +202,7 @@ def test_delta_timestamps_outside_tolerance_outside_episode_range():
buffer.tolerance_s = 0.04
item = buffer[2]
data, is_pad = item["index"], item["index_is_pad"]
assert torch.equal(data, torch.tensor([0, 0, 2, 4, 4])), (
"Data does not match expected values"
)
assert torch.equal(data, torch.tensor([0, 0, 2, 4, 4])), "Data does not match expected values"
assert torch.equal(is_pad, torch.tensor([True, False, False, True, True])), (
"Padding does not match expected values"
)
@@ -233,15 +219,11 @@ def test_compute_sampler_weights_trivial(
online_dataset_size: int,
online_sampling_ratio: float,
):
offline_dataset = lerobot_dataset_factory(
tmp_path, total_episodes=1, total_frames=offline_dataset_size
)
offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=offline_dataset_size)
online_dataset, _ = make_new_buffer()
if online_dataset_size > 0:
online_dataset.add_data(
make_spoof_data_frames(
n_episodes=2, n_frames_per_episode=online_dataset_size // 2
)
make_spoof_data_frames(n_episodes=2, n_frames_per_episode=online_dataset_size // 2)
)
weights = compute_sampler_weights(
@@ -252,26 +234,18 @@ def test_compute_sampler_weights_trivial(
if offline_dataset_size == 0 or online_dataset_size == 0:
expected_weights = torch.ones(offline_dataset_size + online_dataset_size)
elif online_sampling_ratio == 0:
expected_weights = torch.cat(
[torch.ones(offline_dataset_size), torch.zeros(online_dataset_size)]
)
expected_weights = torch.cat([torch.ones(offline_dataset_size), torch.zeros(online_dataset_size)])
elif online_sampling_ratio == 1:
expected_weights = torch.cat(
[torch.zeros(offline_dataset_size), torch.ones(online_dataset_size)]
)
expected_weights = torch.cat([torch.zeros(offline_dataset_size), torch.ones(online_dataset_size)])
expected_weights /= expected_weights.sum()
torch.testing.assert_close(weights, expected_weights)
def test_compute_sampler_weights_nontrivial_ratio(lerobot_dataset_factory, tmp_path):
# Arbitrarily set small dataset sizes, making sure to have uneven sizes.
offline_dataset = lerobot_dataset_factory(
tmp_path, total_episodes=1, total_frames=4
)
offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=4)
online_dataset, _ = make_new_buffer()
online_dataset.add_data(
make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2)
)
online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2))
online_sampling_ratio = 0.8
weights = compute_sampler_weights(
offline_dataset,
@@ -284,17 +258,11 @@ def test_compute_sampler_weights_nontrivial_ratio(lerobot_dataset_factory, tmp_p
)
def test_compute_sampler_weights_nontrivial_ratio_and_drop_last_n(
lerobot_dataset_factory, tmp_path
):
def test_compute_sampler_weights_nontrivial_ratio_and_drop_last_n(lerobot_dataset_factory, tmp_path):
# Arbitrarily set small dataset sizes, making sure to have uneven sizes.
offline_dataset = lerobot_dataset_factory(
tmp_path, total_episodes=1, total_frames=4
)
offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=4)
online_dataset, _ = make_new_buffer()
online_dataset.add_data(
make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2)
)
online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2))
weights = compute_sampler_weights(
offline_dataset,
online_dataset=online_dataset,
@@ -309,13 +277,9 @@ def test_compute_sampler_weights_nontrivial_ratio_and_drop_last_n(
def test_compute_sampler_weights_drop_n_last_frames(lerobot_dataset_factory, tmp_path):
"""Note: test copied from test_sampler."""
offline_dataset = lerobot_dataset_factory(
tmp_path, total_episodes=1, total_frames=2
)
offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=2)
online_dataset, _ = make_new_buffer()
online_dataset.add_data(
make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2)
)
online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2))
weights = compute_sampler_weights(
offline_dataset,
@@ -324,6 +288,4 @@ def test_compute_sampler_weights_drop_n_last_frames(lerobot_dataset_factory, tmp
online_sampling_ratio=0.5,
online_drop_n_last_frames=1,
)
torch.testing.assert_close(
weights, torch.tensor([0.5, 0, 0.125, 0, 0.125, 0, 0.125, 0, 0.125, 0])
)
torch.testing.assert_close(weights, torch.tensor([0.5, 0, 0.125, 0, 0.125, 0, 0.125, 0, 0.125, 0]))


@@ -58,9 +58,7 @@ def get_task_index(task_dicts: dict, task: str) -> int:
@pytest.fixture(scope="session")
def img_tensor_factory():
def _create_img_tensor(
height=100, width=100, channels=3, dtype=torch.float32
) -> torch.Tensor:
def _create_img_tensor(height=100, width=100, channels=3, dtype=torch.float32) -> torch.Tensor:
return torch.rand((channels, height, width), dtype=dtype)
return _create_img_tensor
@@ -68,14 +66,10 @@ def img_tensor_factory():
@pytest.fixture(scope="session")
def img_array_factory():
def _create_img_array(
height=100, width=100, channels=3, dtype=np.uint8
) -> np.ndarray:
def _create_img_array(height=100, width=100, channels=3, dtype=np.uint8) -> np.ndarray:
if np.issubdtype(dtype, np.unsignedinteger):
# Int array in [0, 255] range
img_array = np.random.randint(
0, 256, size=(height, width, channels), dtype=dtype
)
img_array = np.random.randint(0, 256, size=(height, width, channels), dtype=dtype)
elif np.issubdtype(dtype, np.floating):
# Float array in [0, 1] range
img_array = np.random.rand(height, width, channels).astype(dtype)
@@ -104,13 +98,10 @@ def features_factory():
) -> dict:
if use_videos:
camera_ft = {
key: {"dtype": "video", **ft, **DUMMY_VIDEO_INFO}
for key, ft in camera_features.items()
key: {"dtype": "video", **ft, **DUMMY_VIDEO_INFO} for key, ft in camera_features.items()
}
else:
camera_ft = {
key: {"dtype": "image", **ft} for key, ft in camera_features.items()
}
camera_ft = {key: {"dtype": "image", **ft} for key, ft in camera_features.items()}
return {
**motor_features,
**camera_ft,
@@ -231,9 +222,7 @@ def episodes_factory(tasks_factory):
if total_episodes <= 0 or total_frames <= 0:
raise ValueError("num_episodes and total_length must be positive integers.")
if total_frames < total_episodes:
raise ValueError(
"total_length must be greater than or equal to num_episodes."
)
raise ValueError("total_length must be greater than or equal to num_episodes.")
if not tasks:
min_tasks = 2 if multi_task else 1
@@ -241,14 +230,10 @@ def episodes_factory(tasks_factory):
tasks = tasks_factory(total_tasks)
if total_episodes < len(tasks) and not multi_task:
raise ValueError(
"The number of tasks should be less than the number of episodes."
)
raise ValueError("The number of tasks should be less than the number of episodes.")
# Generate random lengths that sum up to total_length
lengths = np.random.multinomial(
total_frames, [1 / total_episodes] * total_episodes
).tolist()
lengths = np.random.multinomial(total_frames, [1 / total_episodes] * total_episodes).tolist()
tasks_list = [task_dict["task"] for task_dict in tasks.values()]
num_tasks_available = len(tasks_list)
@@ -256,13 +241,9 @@ def episodes_factory(tasks_factory):
episodes = {}
remaining_tasks = tasks_list.copy()
for ep_idx in range(total_episodes):
num_tasks_in_episode = (
random.randint(1, min(3, num_tasks_available)) if multi_task else 1
)
num_tasks_in_episode = random.randint(1, min(3, num_tasks_available)) if multi_task else 1
tasks_to_sample = remaining_tasks if remaining_tasks else tasks_list
episode_tasks = random.sample(
tasks_to_sample, min(num_tasks_in_episode, len(tasks_to_sample))
)
episode_tasks = random.sample(tasks_to_sample, min(num_tasks_in_episode, len(tasks_to_sample)))
if remaining_tasks:
for task in episode_tasks:
remaining_tasks.remove(task)
@@ -279,9 +260,7 @@ def episodes_factory(tasks_factory):
@pytest.fixture(scope="session")
def hf_dataset_factory(
features_factory, tasks_factory, episodes_factory, img_array_factory
):
def hf_dataset_factory(features_factory, tasks_factory, episodes_factory, img_array_factory):
def _create_hf_dataset(
features: dict | None = None,
tasks: list[dict] | None = None,
@@ -300,12 +279,8 @@ def hf_dataset_factory(
episode_index_col = np.array([], dtype=np.int64)
task_index = np.array([], dtype=np.int64)
for ep_dict in episodes.values():
timestamp_col = np.concatenate(
(timestamp_col, np.arange(ep_dict["length"]) / fps)
)
frame_index_col = np.concatenate(
(frame_index_col, np.arange(ep_dict["length"], dtype=int))
)
timestamp_col = np.concatenate((timestamp_col, np.arange(ep_dict["length"]) / fps))
frame_index_col = np.concatenate((frame_index_col, np.arange(ep_dict["length"], dtype=int)))
episode_index_col = np.concatenate(
(
episode_index_col,
@@ -313,9 +288,7 @@ def hf_dataset_factory(
)
)
ep_task_index = get_task_index(tasks, ep_dict["tasks"][0])
task_index = np.concatenate(
(task_index, np.full(ep_dict["length"], ep_task_index, dtype=int))
)
task_index = np.concatenate((task_index, np.full(ep_dict["length"], ep_task_index, dtype=int)))
index_col = np.arange(len(episode_index_col))
@@ -327,9 +300,7 @@ def hf_dataset_factory(
for _ in range(len(index_col))
]
elif ft["shape"][0] > 1 and ft["dtype"] != "video":
robot_cols[key] = np.random.random(
(len(index_col), ft["shape"][0])
).astype(ft["dtype"])
robot_cols[key] = np.random.random((len(index_col), ft["shape"][0])).astype(ft["dtype"])
hf_features = get_hf_features_from_features(features)
dataset = datasets.Dataset.from_dict(
@@ -392,9 +363,7 @@ def lerobot_dataset_metadata_factory(
episodes=episodes,
)
with (
patch(
"lerobot.common.datasets.lerobot_dataset.get_safe_version"
) as mock_get_safe_version_patch,
patch("lerobot.common.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version_patch,
patch(
"lerobot.common.datasets.lerobot_dataset.snapshot_download"
) as mock_snapshot_download_patch,
@@ -442,9 +411,7 @@ def lerobot_dataset_factory(
if not stats:
stats = stats_factory(features=info["features"])
if not episodes_stats:
episodes_stats = episodes_stats_factory(
features=info["features"], total_episodes=total_episodes
)
episodes_stats = episodes_stats_factory(features=info["features"], total_episodes=total_episodes)
if not tasks:
tasks = tasks_factory(total_tasks=info["total_tasks"])
if not episode_dicts:
@@ -455,9 +422,7 @@ def lerobot_dataset_factory(
multi_task=multi_task,
)
if not hf_dataset:
hf_dataset = hf_dataset_factory(
tasks=tasks, episodes=episode_dicts, fps=info["fps"]
)
hf_dataset = hf_dataset_factory(tasks=tasks, episodes=episode_dicts, fps=info["fps"])
mock_snapshot_download = mock_snapshot_download_factory(
info=info,
@@ -477,12 +442,8 @@ def lerobot_dataset_factory(
episodes=episode_dicts,
)
with (
patch(
"lerobot.common.datasets.lerobot_dataset.LeRobotDatasetMetadata"
) as mock_metadata_patch,
patch(
"lerobot.common.datasets.lerobot_dataset.get_safe_version"
) as mock_get_safe_version_patch,
patch("lerobot.common.datasets.lerobot_dataset.LeRobotDatasetMetadata") as mock_metadata_patch,
patch("lerobot.common.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version_patch,
patch(
"lerobot.common.datasets.lerobot_dataset.snapshot_download"
) as mock_snapshot_download_patch,


@@ -59,9 +59,7 @@ def stats_path(stats_factory):
@pytest.fixture(scope="session")
def episodes_stats_path(episodes_stats_factory):
def _create_episodes_stats_jsonl_file(
dir: Path, episodes_stats: list[dict] | None = None
) -> Path:
def _create_episodes_stats_jsonl_file(dir: Path, episodes_stats: list[dict] | None = None) -> Path:
if not episodes_stats:
episodes_stats = episodes_stats_factory()
fpath = dir / EPISODES_STATS_PATH

tests/fixtures/hub.py (vendored), 16 changed lines

@@ -72,16 +72,12 @@ def mock_snapshot_download_factory(
tasks=tasks,
)
if not hf_dataset:
hf_dataset = hf_dataset_factory(
tasks=tasks, episodes=episodes, fps=info["fps"]
)
hf_dataset = hf_dataset_factory(tasks=tasks, episodes=episodes, fps=info["fps"])
def _extract_episode_index_from_path(fpath: str) -> int:
path = Path(fpath)
if path.suffix == ".parquet" and path.stem.startswith("episode_"):
episode_index = int(
path.stem[len("episode_") :]
) # 'episode_000000' -> 0
episode_index = int(path.stem[len("episode_") :]) # 'episode_000000' -> 0
return episode_index
else:
return None
@@ -112,9 +108,7 @@ def mock_snapshot_download_factory(
for episode_dict in episodes.values():
ep_idx = episode_dict["episode_index"]
ep_chunk = ep_idx // info["chunks_size"]
data_path = info["data_path"].format(
episode_chunk=ep_chunk, episode_index=ep_idx
)
data_path = info["data_path"].format(episode_chunk=ep_chunk, episode_index=ep_idx)
data_files.append(data_path)
all_files.extend(data_files)
@@ -129,9 +123,7 @@ def mock_snapshot_download_factory(
if rel_path.startswith("data/"):
episode_index = _extract_episode_index_from_path(rel_path)
if episode_index is not None:
_ = single_episode_parquet_path(
local_dir, episode_index, hf_dataset, info
)
_ = single_episode_parquet_path(local_dir, episode_index, hf_dataset, info)
if rel_path == INFO_PATH:
_ = info_path(local_dir, info)
elif rel_path == STATS_PATH:


@@ -35,7 +35,5 @@ def optimizer(model_params):
@pytest.fixture
def scheduler(optimizer):
config = VQBeTSchedulerConfig(
num_warmup_steps=10, num_vqvae_training_steps=20, num_cycles=0.5
)
config = VQBeTSchedulerConfig(num_warmup_steps=10, num_vqvae_training_steps=20, num_cycles=0.5)
return config.build(optimizer, num_training_steps=100)


@@ -80,9 +80,7 @@ class GroupSyncRead:
def addParam(self, motor_index): # noqa: N802
# Initialize motor default values
if motor_index not in self.packet_handler.data:
self.packet_handler.data[motor_index] = get_default_motor_values(
motor_index
)
self.packet_handler.data[motor_index] = get_default_motor_values(motor_index)
def txRxPacket(self): # noqa: N802
return COMM_SUCCESS


@@ -91,9 +91,7 @@ class GroupSyncRead:
def addParam(self, motor_index): # noqa: N802
# Initialize motor default values
if motor_index not in self.packet_handler.data:
self.packet_handler.data[motor_index] = get_default_motor_values(
motor_index
)
self.packet_handler.data[motor_index] = get_default_motor_values(motor_index)
def txRxPacket(self): # noqa: N802
return COMM_SUCCESS


@@ -79,9 +79,7 @@ def test_configure_motors_all_ids_1(request, motor_type, mock):
else:
raise ValueError(motor_type)
input(
"Are you sure you want to re-configure the motors? Press enter to continue..."
)
input("Are you sure you want to re-configure the motors? Press enter to continue...")
# This test expect the configuration was already correct.
motors_bus = make_motors_bus(motor_type, mock=mock)
motors_bus.connect()


@@ -43,9 +43,7 @@ def test_diffuser_scheduler(optimizer):
def test_vqbet_scheduler(optimizer):
config = VQBeTSchedulerConfig(
num_warmup_steps=10, num_vqvae_training_steps=20, num_cycles=0.5
)
config = VQBeTSchedulerConfig(num_warmup_steps=10, num_vqvae_training_steps=20, num_cycles=0.5)
scheduler = config.build(optimizer, num_training_steps=100)
assert isinstance(scheduler, LambdaLR)


@@ -46,9 +46,7 @@ def train_evaluate_multiclass_classifier():
logging.info(
f"Start multiclass classifier train eval with {DEVICE} device, batch size {BATCH_SIZE}, learning rate {LR}"
)
multiclass_config = ClassifierConfig(
model_name="microsoft/resnet-18", device=DEVICE, num_classes=10
)
multiclass_config = ClassifierConfig(model_name="microsoft/resnet-18", device=DEVICE, num_classes=10)
multiclass_classifier = Classifier(multiclass_config)
trainset = CIFAR10(root="data", train=True, download=True, transform=ToTensor())
@@ -119,18 +117,10 @@ def train_evaluate_multiclass_classifier():
test_probs = torch.stack(test_probs)
accuracy = Accuracy(task="multiclass", num_classes=multiclass_num_classes)
precision = Precision(
task="multiclass", average="weighted", num_classes=multiclass_num_classes
)
recall = Recall(
task="multiclass", average="weighted", num_classes=multiclass_num_classes
)
f1 = F1Score(
task="multiclass", average="weighted", num_classes=multiclass_num_classes
)
auroc = AUROC(
task="multiclass", num_classes=multiclass_num_classes, average="weighted"
)
precision = Precision(task="multiclass", average="weighted", num_classes=multiclass_num_classes)
recall = Recall(task="multiclass", average="weighted", num_classes=multiclass_num_classes)
f1 = F1Score(task="multiclass", average="weighted", num_classes=multiclass_num_classes)
auroc = AUROC(task="multiclass", num_classes=multiclass_num_classes, average="weighted")
# Calculate metrics
acc = accuracy(test_predictions, test_labels)
@@ -159,28 +149,18 @@ def train_evaluate_binary_classifier():
new_label = float(1.0) if label == target_class else float(0.0)
new_targets.append(new_label)
dataset.targets = (
new_targets # Replace the original labels with the binary ones
)
dataset.targets = new_targets # Replace the original labels with the binary ones
return dataset
binary_train_dataset = CIFAR10(
root="data", train=True, download=True, transform=ToTensor()
)
binary_test_dataset = CIFAR10(
root="data", train=False, download=True, transform=ToTensor()
)
binary_train_dataset = CIFAR10(root="data", train=True, download=True, transform=ToTensor())
binary_test_dataset = CIFAR10(root="data", train=False, download=True, transform=ToTensor())
# Apply one-vs-rest labeling
binary_train_dataset = one_vs_rest(binary_train_dataset, target_binary_class)
binary_test_dataset = one_vs_rest(binary_test_dataset, target_binary_class)
binary_trainloader = DataLoader(
binary_train_dataset, batch_size=BATCH_SIZE, shuffle=True
)
binary_testloader = DataLoader(
binary_test_dataset, batch_size=BATCH_SIZE, shuffle=False
)
binary_trainloader = DataLoader(binary_train_dataset, batch_size=BATCH_SIZE, shuffle=True)
binary_testloader = DataLoader(binary_test_dataset, batch_size=BATCH_SIZE, shuffle=False)
binary_epoch = 1


@@ -196,13 +196,9 @@ def test_policy(ds_repo_id, env_name, env_kwargs, policy_name, policy_kwargs):
# Test updating the policy (and test that it does not mutate the batch)
batch_ = deepcopy(batch)
policy.forward(batch)
assert set(batch) == set(batch_), (
"Batch keys are not the same after a forward pass."
)
assert set(batch) == set(batch_), "Batch keys are not the same after a forward pass."
assert all(
torch.equal(batch[k], batch_[k])
if isinstance(batch[k], torch.Tensor)
else batch[k] == batch_[k]
torch.equal(batch[k], batch_[k]) if isinstance(batch[k], torch.Tensor) else batch[k] == batch_[k]
for k in batch
), "Batch values are not the same after a forward pass."
@@ -214,9 +210,7 @@ def test_policy(ds_repo_id, env_name, env_kwargs, policy_name, policy_kwargs):
observation = preprocess_observation(observation)
# send observation to device/gpu
observation = {
key: observation[key].to(DEVICE, non_blocking=True) for key in observation
}
observation = {key: observation[key].to(DEVICE, non_blocking=True) for key in observation}
# get the next action for the environment (also check that the observation batch is not modified)
observation_ = deepcopy(observation)
@@ -241,12 +235,8 @@ def test_act_backbone_lr():
cfg = TrainPipelineConfig(
# TODO(rcadene, aliberts): remove dataset download
dataset=DatasetConfig(
repo_id="lerobot/aloha_sim_insertion_scripted", episodes=[0]
),
policy=make_policy_config(
"act", optimizer_lr=0.01, optimizer_lr_backbone=0.001
),
dataset=DatasetConfig(repo_id="lerobot/aloha_sim_insertion_scripted", episodes=[0]),
policy=make_policy_config("act", optimizer_lr=0.01, optimizer_lr_backbone=0.001),
)
cfg.validate() # Needed for auto-setting some parameters
@@ -269,9 +259,7 @@ def test_policy_defaults(dummy_dataset_metadata, policy_name: str):
policy_cls = get_policy_class(policy_name)
policy_cfg = make_policy_config(policy_name)
features = dataset_to_policy_features(dummy_dataset_metadata.features)
policy_cfg.output_features = {
key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION
}
policy_cfg.output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION}
policy_cfg.input_features = {
key: ft for key, ft in features.items() if key not in policy_cfg.output_features
}
@@ -283,9 +271,7 @@ def test_save_and_load_pretrained(dummy_dataset_metadata, tmp_path, policy_name:
policy_cls = get_policy_class(policy_name)
policy_cfg = make_policy_config(policy_name)
features = dataset_to_policy_features(dummy_dataset_metadata.features)
policy_cfg.output_features = {
key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION
}
policy_cfg.output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION}
policy_cfg.input_features = {
key: ft for key, ft in features.items() if key not in policy_cfg.output_features
}
@@ -294,9 +280,7 @@ def test_save_and_load_pretrained(dummy_dataset_metadata, tmp_path, policy_name:
save_dir = tmp_path / f"test_save_and_load_pretrained_{policy_cls.__name__}"
policy.save_pretrained(save_dir)
loaded_policy = policy_cls.from_pretrained(save_dir, config=policy_cfg)
torch.testing.assert_close(
list(policy.parameters()), list(loaded_policy.parameters()), rtol=0, atol=0
)
torch.testing.assert_close(list(policy.parameters()), list(loaded_policy.parameters()), rtol=0, atol=0)
@pytest.mark.parametrize("insert_temporal_dim", [False, True])
@@ -436,9 +420,7 @@ def test_normalize(insert_temporal_dim):
# pass if it's run on another platform due to floating point errors
@require_x86_64_kernel
@require_cpu
def test_backward_compatibility(
ds_repo_id: str, policy_name: str, policy_kwargs: dict, file_name_extra: str
):
def test_backward_compatibility(ds_repo_id: str, policy_name: str, policy_kwargs: dict, file_name_extra: str):
"""
NOTE: If this test does not pass, and you have intentionally changed something in the policy:
1. Inspect the differences in policy outputs and make sure you can account for them. Your PR should
@@ -452,17 +434,13 @@ def test_backward_compatibility(
6. Remember to stage and commit the resulting changes to `tests/artifacts`.
"""
ds_name = ds_repo_id.split("/")[-1]
artifact_dir = (
Path("tests/artifacts/policies") / f"{ds_name}_{policy_name}_{file_name_extra}"
)
artifact_dir = Path("tests/artifacts/policies") / f"{ds_name}_{policy_name}_{file_name_extra}"
saved_output_dict = load_file(artifact_dir / "output_dict.safetensors")
saved_grad_stats = load_file(artifact_dir / "grad_stats.safetensors")
saved_param_stats = load_file(artifact_dir / "param_stats.safetensors")
saved_actions = load_file(artifact_dir / "actions.safetensors")
output_dict, grad_stats, param_stats, actions = get_policy_stats(
ds_repo_id, policy_name, policy_kwargs
)
output_dict, grad_stats, param_stats, actions = get_policy_stats(ds_repo_id, policy_name, policy_kwargs)
for key in saved_output_dict:
torch.testing.assert_close(output_dict[key], saved_output_dict[key])
@@ -471,12 +449,8 @@ def test_backward_compatibility(
for key in saved_param_stats:
torch.testing.assert_close(param_stats[key], saved_param_stats[key])
for key in saved_actions:
rtol, atol = (
(2e-3, 5e-6) if policy_name == "diffusion" else (None, None)
) # HACK
torch.testing.assert_close(
actions[key], saved_actions[key], rtol=rtol, atol=atol
)
rtol, atol = (2e-3, 5e-6) if policy_name == "diffusion" else (None, None) # HACK
torch.testing.assert_close(actions[key], saved_actions[key], rtol=rtol, atol=atol)
def test_act_temporal_ensembler():
@@ -502,9 +476,7 @@ def test_act_temporal_ensembler():
batch_size = batch_seq.shape[0]
# Exponential weighting (normalized). Unsqueeze once to match the position of the `episode_length`
# dimension of `batch_seq`.
weights = torch.exp(-temporal_ensemble_coeff * torch.arange(chunk_size)).unsqueeze(
-1
)
weights = torch.exp(-temporal_ensemble_coeff * torch.arange(chunk_size)).unsqueeze(-1)
# Simulate stepping through a rollout and computing a batch of actions with model on each step.
for i in range(episode_length):
@@ -527,8 +499,7 @@ def test_act_temporal_ensembler():
         episode_step_indices = torch.arange(i + 1)[-len(chunk_indices) :]
         seq_slice = batch_seq[:, episode_step_indices, chunk_indices]
         offline_avg = (
-            einops.reduce(seq_slice * weights[: i + 1], "b s 1 -> b 1", "sum")
-            / weights[: i + 1].sum()
+            einops.reduce(seq_slice * weights[: i + 1], "b s 1 -> b 1", "sum") / weights[: i + 1].sum()
         )
         # Sanity check. The average should be between the extrema.
         assert torch.all(einops.reduce(seq_slice, "b s 1 -> b 1", "min") <= offline_avg)
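
The test above compares ACT-style temporal ensembling against an offline exponentially-weighted mean. A minimal standalone sketch of that equivalence (made-up values; only the exponential weighting scheme is taken from the test, the rest is illustrative):

# Running (online) exponentially-weighted average of per-step predictions,
# compared against the closed-form weighted mean the test computes with einops.
import torch

coeff = 0.01                      # plays the role of temporal_ensemble_coeff
preds = torch.randn(6)            # one scalar prediction per rollout step (made up)
weights = torch.exp(-coeff * torch.arange(len(preds)))

offline_avg = (preds * weights).sum() / weights.sum()

avg, cum_w = preds[0], weights[0]
for k in range(1, len(preds)):
    cum_w = cum_w + weights[k]
    avg = avg + (preds[k] - avg) * weights[k] / cum_w  # incremental weighted-mean update

torch.testing.assert_close(avg, offline_avg)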


@@ -179,9 +179,7 @@ def test_record_and_replay_and_policy(tmp_path, request, robot_type, mock):
assert dataset.meta.total_episodes == 2
assert len(dataset) == 2
replay_cfg = ReplayControlConfig(
episode=0, fps=1, root=root, repo_id=repo_id, play_sounds=False
)
replay_cfg = ReplayControlConfig(episode=0, fps=1, root=root, repo_id=repo_id, play_sounds=False)
replay(robot, replay_cfg)
policy_cfg = ACTConfig()
@@ -336,12 +334,8 @@ def test_record_with_event_rerecord_episode(tmp_path, request, robot_type, mock)
)
dataset = record(robot, rec_cfg)
assert not mock_events["rerecord_episode"], (
"`rerecord_episode` wasn't properly reset to False"
)
assert not mock_events["exit_early"], (
"`exit_early` wasn't properly reset to False"
)
assert not mock_events["rerecord_episode"], "`rerecord_episode` wasn't properly reset to False"
assert not mock_events["exit_early"], "`exit_early` wasn't properly reset to False"
assert len(dataset) == 1, "`dataset` should contain only 1 frame"
@@ -391,9 +385,7 @@ def test_record_with_event_exit_early(tmp_path, request, robot_type, mock):
dataset = record(robot, rec_cfg)
assert not mock_events["exit_early"], (
"`exit_early` wasn't properly reset to False"
)
assert not mock_events["exit_early"], "`exit_early` wasn't properly reset to False"
assert len(dataset) == 1, "`dataset` should contain only 1 frame"
@@ -402,9 +394,7 @@ def test_record_with_event_exit_early(tmp_path, request, robot_type, mock):
[("koch", True, 0), ("koch", True, 1)],
)
@require_robot
def test_record_with_event_stop_recording(
tmp_path, request, robot_type, mock, num_image_writer_processes
):
def test_record_with_event_stop_recording(tmp_path, request, robot_type, mock, num_image_writer_processes):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock:
@@ -450,7 +440,5 @@ def test_record_with_event_stop_recording(
dataset = record(robot, rec_cfg)
assert not mock_events["exit_early"], (
"`exit_early` wasn't properly reset to False"
)
assert not mock_events["exit_early"], "`exit_early` wasn't properly reset to False"
assert len(dataset) == 1, "`dataset` should contain only 1 frame"


@@ -108,9 +108,7 @@ def test_robot(tmp_path, request, robot_type, mock):
assert "observation.state" in observation
assert isinstance(observation["observation.state"], torch.Tensor)
assert observation["observation.state"].ndim == 1
dim_state = sum(
len(robot.follower_arms[name].motors) for name in robot.follower_arms
)
dim_state = sum(len(robot.follower_arms[name].motors) for name in robot.follower_arms)
assert observation["observation.state"].shape[0] == dim_state
# Cameras
for name in robot.cameras:
@@ -121,9 +119,7 @@ def test_robot(tmp_path, request, robot_type, mock):
assert "action" in action
assert isinstance(action["action"], torch.Tensor)
assert action["action"].ndim == 1
dim_action = sum(
len(robot.follower_arms[name].motors) for name in robot.follower_arms
)
dim_action = sum(len(robot.follower_arms[name].motors) for name in robot.follower_arms)
assert action["action"].shape[0] == dim_action
# TODO(rcadene): test if observation and action data are returned as expected
@@ -134,9 +130,7 @@ def test_robot(tmp_path, request, robot_type, mock):
if "image" in name:
# TODO(rcadene): skipping image for now as it's challenging to assess equality between two consecutive frames
continue
torch.testing.assert_close(
captured_observation[name], observation[name], rtol=1e-4, atol=1
)
torch.testing.assert_close(captured_observation[name], observation[name], rtol=1e-4, atol=1)
assert captured_observation[name].shape == observation[name].shape
# Test send_action can run


@@ -69,9 +69,7 @@ def test_create_balanced_sampler():
labels = [item["label"] for item in data]
class_counts = torch.tensor([labels.count(0), labels.count(1)], dtype=torch.float32)
class_weights = 1.0 / class_counts
expected_weights = torch.tensor(
[class_weights[label] for label in labels], dtype=torch.float32
)
expected_weights = torch.tensor([class_weights[label] for label in labels], dtype=torch.float32)
# Test that the weights are correct
assert torch.allclose(weights, expected_weights)
@@ -224,16 +222,10 @@ def test_resume_function(
):
# Initialize Hydra
test_file_dir = os.path.dirname(os.path.abspath(__file__))
config_dir = os.path.abspath(
os.path.join(test_file_dir, "..", "lerobot", "configs", "policy")
)
assert os.path.exists(config_dir), (
f"Config directory does not exist at {config_dir}"
)
config_dir = os.path.abspath(os.path.join(test_file_dir, "..", "lerobot", "configs", "policy"))
assert os.path.exists(config_dir), f"Config directory does not exist at {config_dir}"
with initialize_config_dir(
config_dir=config_dir, job_name="test_app", version_base="1.2"
):
with initialize_config_dir(config_dir=config_dir, job_name="test_app", version_base="1.2"):
cfg = compose(
config_name="hilserl_classifier",
overrides=[
@@ -258,9 +250,7 @@ def test_resume_function(
mock_init_hydra_config.return_value = cfg
# Mock dataset
dataset = MockDataset(
[{"image": torch.rand(3, 224, 224), "label": i % 2} for i in range(10)]
)
dataset = MockDataset([{"image": torch.rand(3, 224, 224), "label": i % 2} for i in range(10)])
mock_dataset.return_value = dataset
# Mock checkpoint handling


@@ -31,11 +31,7 @@ from lerobot.common.robot_devices.motors.utils import (
)
from lerobot.common.utils.import_utils import is_package_available
DEVICE = (
os.environ.get("LEROBOT_TEST_DEVICE", "cuda")
if torch.cuda.is_available()
else "cpu"
)
DEVICE = os.environ.get("LEROBOT_TEST_DEVICE", "cuda") if torch.cuda.is_available() else "cpu"
TEST_ROBOT_TYPES = []
for robot_type in available_robots:
@@ -51,13 +47,9 @@ for motor_type in available_motors:
# Camera indices used for connecting physical cameras
OPENCV_CAMERA_INDEX = int(os.environ.get("LEROBOT_TEST_OPENCV_CAMERA_INDEX", 0))
INTELREALSENSE_SERIAL_NUMBER = int(
os.environ.get("LEROBOT_TEST_INTELREALSENSE_SERIAL_NUMBER", 128422271614)
)
INTELREALSENSE_SERIAL_NUMBER = int(os.environ.get("LEROBOT_TEST_INTELREALSENSE_SERIAL_NUMBER", 128422271614))
DYNAMIXEL_PORT = os.environ.get(
"LEROBOT_TEST_DYNAMIXEL_PORT", "/dev/tty.usbmodem575E0032081"
)
DYNAMIXEL_PORT = os.environ.get("LEROBOT_TEST_DYNAMIXEL_PORT", "/dev/tty.usbmodem575E0032081")
DYNAMIXEL_MOTORS = {
"shoulder_pan": [1, "xl430-w250"],
"shoulder_lift": [2, "xl430-w250"],
@@ -67,9 +59,7 @@ DYNAMIXEL_MOTORS = {
"gripper": [6, "xl330-m288"],
}
FEETECH_PORT = os.environ.get(
"LEROBOT_TEST_FEETECH_PORT", "/dev/tty.usbmodem585A0080971"
)
FEETECH_PORT = os.environ.get("LEROBOT_TEST_FEETECH_PORT", "/dev/tty.usbmodem585A0080971")
FEETECH_MOTORS = {
"shoulder_pan": [1, "sts3215"],
"shoulder_lift": [2, "sts3215"],
@@ -168,13 +158,9 @@ def require_package_arg(func):
if "required_packages" in arg_names:
# Get the index of 'required_packages' and retrieve the value from args
index = arg_names.index("required_packages")
required_packages = (
args[index] if len(args) > index else kwargs.get("required_packages")
)
required_packages = args[index] if len(args) > index else kwargs.get("required_packages")
else:
raise ValueError(
"Function does not have 'required_packages' as an argument."
)
raise ValueError("Function does not have 'required_packages' as an argument.")
if required_packages is None:
return func(*args, **kwargs)
@@ -231,17 +217,11 @@ def require_robot(func):
mock = kwargs.get("mock")
if robot_type is None:
raise ValueError(
"The 'robot_type' must be an argument of the test function."
)
raise ValueError("The 'robot_type' must be an argument of the test function.")
if request is None:
raise ValueError(
"The 'request' fixture must be an argument of the test function."
)
raise ValueError("The 'request' fixture must be an argument of the test function.")
if mock is None:
raise ValueError(
"The 'mock' variable must be an argument of the test function."
)
raise ValueError("The 'mock' variable must be an argument of the test function.")
# Run test with a real robot. Skip test if robot connection fails.
if not mock and not request.getfixturevalue("is_robot_available"):
@@ -261,17 +241,11 @@ def require_camera(func):
mock = kwargs.get("mock")
if request is None:
raise ValueError(
"The 'request' fixture must be an argument of the test function."
)
raise ValueError("The 'request' fixture must be an argument of the test function.")
if camera_type is None:
raise ValueError(
"The 'camera_type' must be an argument of the test function."
)
raise ValueError("The 'camera_type' must be an argument of the test function.")
if mock is None:
raise ValueError(
"The 'mock' variable must be an argument of the test function."
)
raise ValueError("The 'mock' variable must be an argument of the test function.")
if not mock and not request.getfixturevalue("is_camera_available"):
pytest.skip(f"A {camera_type} camera is not available.")
@@ -290,17 +264,11 @@ def require_motor(func):
mock = kwargs.get("mock")
if request is None:
raise ValueError(
"The 'request' fixture must be an argument of the test function."
)
raise ValueError("The 'request' fixture must be an argument of the test function.")
if motor_type is None:
raise ValueError(
"The 'motor_type' must be an argument of the test function."
)
raise ValueError("The 'motor_type' must be an argument of the test function.")
if mock is None:
raise ValueError(
"The 'mock' variable must be an argument of the test function."
)
raise ValueError("The 'mock' variable must be an argument of the test function.")
if not mock and not request.getfixturevalue("is_motor_available"):
pytest.skip(f"A {motor_type} motor is not available.")


@@ -91,9 +91,7 @@ def test_metrics_tracker_step(mock_metrics):
def test_metrics_tracker_getattr(mock_metrics):
tracker = MetricsTracker(
batch_size=32, num_frames=1000, num_episodes=50, metrics=mock_metrics
)
tracker = MetricsTracker(batch_size=32, num_frames=1000, num_episodes=50, metrics=mock_metrics)
assert tracker.loss == mock_metrics["loss"]
assert tracker.accuracy == mock_metrics["accuracy"]
with pytest.raises(AttributeError):
@@ -101,17 +99,13 @@ def test_metrics_tracker_getattr(mock_metrics):
def test_metrics_tracker_setattr(mock_metrics):
tracker = MetricsTracker(
batch_size=32, num_frames=1000, num_episodes=50, metrics=mock_metrics
)
tracker = MetricsTracker(batch_size=32, num_frames=1000, num_episodes=50, metrics=mock_metrics)
tracker.loss = 2.0
assert tracker.loss.val == 2.0
def test_metrics_tracker_str(mock_metrics):
tracker = MetricsTracker(
batch_size=32, num_frames=1000, num_episodes=50, metrics=mock_metrics
)
tracker = MetricsTracker(batch_size=32, num_frames=1000, num_episodes=50, metrics=mock_metrics)
tracker.loss.update(3.456, 1)
tracker.accuracy.update(0.876, 1)
output = str(tracker)
@@ -120,9 +114,7 @@ def test_metrics_tracker_str(mock_metrics):
def test_metrics_tracker_to_dict(mock_metrics):
tracker = MetricsTracker(
batch_size=32, num_frames=1000, num_episodes=50, metrics=mock_metrics
)
tracker = MetricsTracker(batch_size=32, num_frames=1000, num_episodes=50, metrics=mock_metrics)
tracker.loss.update(5, 2)
metrics_dict = tracker.to_dict()
assert isinstance(metrics_dict, dict)
@@ -131,9 +123,7 @@ def test_metrics_tracker_to_dict(mock_metrics):
def test_metrics_tracker_reset_averages(mock_metrics):
tracker = MetricsTracker(
batch_size=32, num_frames=1000, num_episodes=50, metrics=mock_metrics
)
tracker = MetricsTracker(batch_size=32, num_frames=1000, num_episodes=50, metrics=mock_metrics)
tracker.loss.update(10, 3)
tracker.accuracy.update(0.95, 5)
tracker.reset_averages()


@@ -118,9 +118,5 @@ def test_seeded_context(fixed_seed):
seeded_val2 = (random.random(), np.random.rand(), torch.rand(1).item())
assert seeded_val1 == seeded_val2
assert all(
a != b for a, b in zip(val1, seeded_val1, strict=True)
) # changed inside the context
assert all(
a != b for a, b in zip(val2, seeded_val2, strict=True)
) # changed again after exiting
assert all(a != b for a, b in zip(val1, seeded_val1, strict=True)) # changed inside the context
assert all(a != b for a, b in zip(val2, seeded_val2, strict=True)) # changed again after exiting


@@ -91,9 +91,7 @@ def test_save_training_state(tmp_path, optimizer, scheduler):
def test_save_load_training_state(tmp_path, optimizer, scheduler):
save_training_state(tmp_path, 10, optimizer, scheduler)
loaded_step, loaded_optimizer, loaded_scheduler = load_training_state(
tmp_path, optimizer, scheduler
)
loaded_step, loaded_optimizer, loaded_scheduler = load_training_state(tmp_path, optimizer, scheduler)
assert loaded_step == 10
assert loaded_optimizer is optimizer
assert loaded_scheduler is scheduler