Merge remote-tracking branch 'upstream/main' into unify_policy_api

Alexander Soare committed 2024-04-17 08:08:57 +01:00
53 changed files with 3184 additions and 1124 deletions


@@ -0,0 +1,55 @@
{
    "citation": "",
    "description": "",
    "features": {
        "observation.images.top": {
            "_type": "Image"
        },
        "observation.state": {
            "feature": {
                "dtype": "float32",
                "_type": "Value"
            },
            "length": 14,
            "_type": "Sequence"
        },
        "action": {
            "feature": {
                "dtype": "float32",
                "_type": "Value"
            },
            "length": 14,
            "_type": "Sequence"
        },
        "episode_id": {
            "dtype": "int64",
            "_type": "Value"
        },
        "frame_id": {
            "dtype": "int64",
            "_type": "Value"
        },
        "timestamp": {
            "dtype": "float32",
            "_type": "Value"
        },
        "next.done": {
            "dtype": "bool",
            "_type": "Value"
        },
        "episode_data_index_from": {
            "dtype": "int64",
            "_type": "Value"
        },
        "episode_data_index_to": {
            "dtype": "int64",
            "_type": "Value"
        },
        "index": {
            "dtype": "int64",
            "_type": "Value"
        }
    },
    "homepage": "",
    "license": ""
}
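For reference, each of these new dataset_info.json files is the feature schema that the Hugging Face datasets library serializes for a test fixture. A roughly equivalent schema declared in code would look like the sketch below (illustrative only, not code from this commit; it mirrors the 14-dimensional fixture above):

from datasets import Features, Image, Sequence, Value

# Illustrative sketch of the schema serialized in the dataset_info.json above.
features = Features(
    {
        "observation.images.top": Image(),
        "observation.state": Sequence(feature=Value("float32"), length=14),
        "action": Sequence(feature=Value("float32"), length=14),
        "episode_id": Value("int64"),
        "frame_id": Value("int64"),
        "timestamp": Value("float32"),
        "next.done": Value("bool"),
        "episode_data_index_from": Value("int64"),
        "episode_data_index_to": Value("int64"),
        "index": Value("int64"),
    }
)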


@@ -0,0 +1,13 @@
{
    "_data_files": [
        {
            "filename": "data-00000-of-00001.arrow"
        }
    ],
    "_fingerprint": "d79cf82ffc86f110",
    "_format_columns": null,
    "_format_kwargs": {},
    "_format_type": "torch",
    "_output_all_columns": false,
    "_split": null
}
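The state.json beside it is the bookkeeping file that datasets.Dataset.save_to_disk writes next to the Arrow shard; the "_format_type": "torch" entry records that the dataset was saved with a torch output format. Assuming a fixture directory containing dataset_info.json, state.json and the .arrow file (the path below is hypothetical, since file paths are not shown in this diff), it can be read back like this:

from datasets import load_from_disk

# Hypothetical fixture path; the actual test data paths are not visible in this diff.
fixture_dir = "tests/data/some_fixture/train"
dataset = load_from_disk(fixture_dir)   # restores the saved format recorded in state.json
dataset = dataset.with_format("torch")  # explicit, matches "_format_type": "torch"
print(dataset.features)                 # should mirror dataset_info.json
print(dataset[0]["observation.state"])  # a float32 tensor of length 14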


@@ -0,0 +1,55 @@
{
    "citation": "",
    "description": "",
    "features": {
        "observation.images.top": {
            "_type": "Image"
        },
        "observation.state": {
            "feature": {
                "dtype": "float32",
                "_type": "Value"
            },
            "length": 14,
            "_type": "Sequence"
        },
        "action": {
            "feature": {
                "dtype": "float32",
                "_type": "Value"
            },
            "length": 14,
            "_type": "Sequence"
        },
        "episode_id": {
            "dtype": "int64",
            "_type": "Value"
        },
        "frame_id": {
            "dtype": "int64",
            "_type": "Value"
        },
        "timestamp": {
            "dtype": "float32",
            "_type": "Value"
        },
        "next.done": {
            "dtype": "bool",
            "_type": "Value"
        },
        "episode_data_index_from": {
            "dtype": "int64",
            "_type": "Value"
        },
        "episode_data_index_to": {
            "dtype": "int64",
            "_type": "Value"
        },
        "index": {
            "dtype": "int64",
            "_type": "Value"
        }
    },
    "homepage": "",
    "license": ""
}


@@ -0,0 +1,13 @@
{
    "_data_files": [
        {
            "filename": "data-00000-of-00001.arrow"
        }
    ],
    "_fingerprint": "d8e4a817b5449498",
    "_format_columns": null,
    "_format_kwargs": {},
    "_format_type": "torch",
    "_output_all_columns": false,
    "_split": null
}


@@ -0,0 +1,55 @@
{
    "citation": "",
    "description": "",
    "features": {
        "observation.images.top": {
            "_type": "Image"
        },
        "observation.state": {
            "feature": {
                "dtype": "float32",
                "_type": "Value"
            },
            "length": 14,
            "_type": "Sequence"
        },
        "action": {
            "feature": {
                "dtype": "float32",
                "_type": "Value"
            },
            "length": 14,
            "_type": "Sequence"
        },
        "episode_id": {
            "dtype": "int64",
            "_type": "Value"
        },
        "frame_id": {
            "dtype": "int64",
            "_type": "Value"
        },
        "timestamp": {
            "dtype": "float32",
            "_type": "Value"
        },
        "next.done": {
            "dtype": "bool",
            "_type": "Value"
        },
        "episode_data_index_from": {
            "dtype": "int64",
            "_type": "Value"
        },
        "episode_data_index_to": {
            "dtype": "int64",
            "_type": "Value"
        },
        "index": {
            "dtype": "int64",
            "_type": "Value"
        }
    },
    "homepage": "",
    "license": ""
}


@@ -0,0 +1,13 @@
{
    "_data_files": [
        {
            "filename": "data-00000-of-00001.arrow"
        }
    ],
    "_fingerprint": "f03482befa767127",
    "_format_columns": null,
    "_format_kwargs": {},
    "_format_type": "torch",
    "_output_all_columns": false,
    "_split": null
}


@@ -0,0 +1,55 @@
{
    "citation": "",
    "description": "",
    "features": {
        "observation.images.top": {
            "_type": "Image"
        },
        "observation.state": {
            "feature": {
                "dtype": "float32",
                "_type": "Value"
            },
            "length": 14,
            "_type": "Sequence"
        },
        "action": {
            "feature": {
                "dtype": "float32",
                "_type": "Value"
            },
            "length": 14,
            "_type": "Sequence"
        },
        "episode_id": {
            "dtype": "int64",
            "_type": "Value"
        },
        "frame_id": {
            "dtype": "int64",
            "_type": "Value"
        },
        "timestamp": {
            "dtype": "float32",
            "_type": "Value"
        },
        "next.done": {
            "dtype": "bool",
            "_type": "Value"
        },
        "episode_data_index_from": {
            "dtype": "int64",
            "_type": "Value"
        },
        "episode_data_index_to": {
            "dtype": "int64",
            "_type": "Value"
        },
        "index": {
            "dtype": "int64",
            "_type": "Value"
        }
    },
    "homepage": "",
    "license": ""
}


@@ -0,0 +1,13 @@
{
    "_data_files": [
        {
            "filename": "data-00000-of-00001.arrow"
        }
    ],
    "_fingerprint": "93e03c6320c7d56e",
    "_format_columns": null,
    "_format_kwargs": {},
    "_format_type": "torch",
    "_output_all_columns": false,
    "_split": null
}

Binary file not shown.

Binary file not shown.


@@ -0,0 +1,63 @@
{
    "citation": "",
    "description": "",
    "features": {
        "observation.image": {
            "_type": "Image"
        },
        "observation.state": {
            "feature": {
                "dtype": "float32",
                "_type": "Value"
            },
            "length": 2,
            "_type": "Sequence"
        },
        "action": {
            "feature": {
                "dtype": "float32",
                "_type": "Value"
            },
            "length": 2,
            "_type": "Sequence"
        },
        "episode_id": {
            "dtype": "int64",
            "_type": "Value"
        },
        "frame_id": {
            "dtype": "int64",
            "_type": "Value"
        },
        "timestamp": {
            "dtype": "float32",
            "_type": "Value"
        },
        "next.reward": {
            "dtype": "float32",
            "_type": "Value"
        },
        "next.done": {
            "dtype": "bool",
            "_type": "Value"
        },
        "next.success": {
            "dtype": "bool",
            "_type": "Value"
        },
        "episode_data_index_from": {
            "dtype": "int64",
            "_type": "Value"
        },
        "episode_data_index_to": {
            "dtype": "int64",
            "_type": "Value"
        },
        "index": {
            "dtype": "int64",
            "_type": "Value"
        }
    },
    "homepage": "",
    "license": ""
}


@@ -0,0 +1,13 @@
{
    "_data_files": [
        {
            "filename": "data-00000-of-00001.arrow"
        }
    ],
    "_fingerprint": "21bb9a76ed78a475",
    "_format_columns": null,
    "_format_kwargs": {},
    "_format_type": "torch",
    "_output_all_columns": false,
    "_split": null
}


@@ -0,0 +1,59 @@
{
    "citation": "",
    "description": "",
    "features": {
        "observation.image": {
            "_type": "Image"
        },
        "observation.state": {
            "feature": {
                "dtype": "float32",
                "_type": "Value"
            },
            "length": 4,
            "_type": "Sequence"
        },
        "action": {
            "feature": {
                "dtype": "float32",
                "_type": "Value"
            },
            "length": 4,
            "_type": "Sequence"
        },
        "episode_id": {
            "dtype": "int64",
            "_type": "Value"
        },
        "frame_id": {
            "dtype": "int64",
            "_type": "Value"
        },
        "timestamp": {
            "dtype": "float32",
            "_type": "Value"
        },
        "next.reward": {
            "dtype": "float32",
            "_type": "Value"
        },
        "next.done": {
            "dtype": "bool",
            "_type": "Value"
        },
        "episode_data_index_from": {
            "dtype": "int64",
            "_type": "Value"
        },
        "episode_data_index_to": {
            "dtype": "int64",
            "_type": "Value"
        },
        "index": {
            "dtype": "int64",
            "_type": "Value"
        }
    },
    "homepage": "",
    "license": ""
}


@@ -0,0 +1,13 @@
{
    "_data_files": [
        {
            "filename": "data-00000-of-00001.arrow"
        }
    ],
    "_fingerprint": "a95cbec45e3bb9d6",
    "_format_columns": null,
    "_format_kwargs": {},
    "_format_type": "torch",
    "_output_all_columns": false,
    "_split": null
}


@@ -4,13 +4,12 @@ import einops
 import pytest
 import torch
-from lerobot.common.datasets.utils import compute_stats, get_stats_einops_patterns, load_data_with_delta_timestamps
-from lerobot.common.datasets.xarm import XarmDataset
+from lerobot.common.datasets.utils import compute_stats, get_stats_einops_patterns, load_previous_and_future_frames
 from lerobot.common.transforms import Prod
 from lerobot.common.utils import init_hydra_config
 import logging
 from lerobot.common.datasets.factory import make_dataset
+from datasets import Dataset
 from .utils import DEVICE, DEFAULT_CONFIG_PATH
@@ -38,7 +37,7 @@ def test_factory(env_name, dataset_id, policy_name):
     keys_ndim_required = [
         ("action", 1, True),
-        ("episode", 0, True),
+        ("episode_id", 0, True),
         ("frame_id", 0, True),
         ("timestamp", 0, True),
         # TODO(rcadene): should we rename it agent_pos?
@@ -94,6 +93,8 @@ def test_compute_stats():
     We compare with taking a straight min, mean, max, std of all the data in one pass (which we can do
     because we are working with a small dataset).
     """
+    from lerobot.common.datasets.xarm import XarmDataset
+    DATA_DIR = Path(os.environ["DATA_DIR"]) if "DATA_DIR" in os.environ else None
     # get transform to convert images from uint8 [0,255] to float32 [0,1]
@@ -114,7 +115,13 @@ def test_compute_stats():
     stats_patterns = get_stats_einops_patterns(dataset)
     # get all frames from the dataset in the same dtype and range as during compute_stats
-    data_dict = transform(dataset.data_dict)
+    dataloader = torch.utils.data.DataLoader(
+        dataset,
+        num_workers=8,
+        batch_size=len(dataset),
+        shuffle=False,
+    )
+    data_dict = next(iter(dataloader))
     # compute stats based on all frames from the dataset without any batching
     expected_stats = {}
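For context on this hunk: instead of reading dataset.data_dict directly, the test now materializes every frame in a single batch by setting batch_size=len(dataset) on a DataLoader, then reduces each key with the einops patterns returned by get_stats_einops_patterns. The patterns themselves are not shown in this diff; the snippet below is only a hedged guess at what such a per-channel image reduction could look like:

import einops
import torch

# Assumed layout: a batch of float32 images in [0, 1] shaped (batch, channel, height, width).
batch_images = torch.rand(16, 3, 96, 96)

# One plausible "b c h w -> c 1 1" pattern for per-channel stats; the patterns actually
# produced by get_stats_einops_patterns may differ.
mean = einops.reduce(batch_images, "b c h w -> c 1 1", "mean")
max_ = einops.reduce(batch_images, "b c h w -> c 1 1", "max")
min_ = einops.reduce(batch_images, "b c h w -> c 1 1", "min")
std = (einops.reduce(batch_images**2, "b c h w -> c 1 1", "mean") - mean**2).sqrt()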
@@ -145,47 +152,50 @@ def test_compute_stats():
     # assert torch.allclose(loaded_stats[k]["max"], expected_stats[k]["max"])
-def test_load_data_with_delta_timestamps_within_tolerance():
-    data_dict = {
-        "timestamp": torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5]),
-        "index": torch.tensor([0, 1, 2, 3, 4]),
-    }
-    data_ids_per_episode = {0: torch.tensor([0, 1, 2, 3, 4])}
+def test_load_previous_and_future_frames_within_tolerance():
+    data_dict = Dataset.from_dict({
+        "timestamp": [0.1, 0.2, 0.3, 0.4, 0.5],
+        "index": [0, 1, 2, 3, 4],
+        "episode_data_index_from": [0, 0, 0, 0, 0],
+        "episode_data_index_to": [5, 5, 5, 5, 5],
+    })
+    data_dict = data_dict.with_format("torch")
+    item = data_dict[2]
     delta_timestamps = {"index": [-0.2, 0, 0.139]}
-    key = "index"
-    current_ts = 0.3
-    episode = 0
     tol = 0.04
-    data, is_pad = load_data_with_delta_timestamps(data_dict, data_ids_per_episode, delta_timestamps, key, current_ts, episode, tol)
-    assert not is_pad.any(), "Unexpected padding detected"
+    item = load_previous_and_future_frames(item, data_dict, delta_timestamps, tol)
+    data, is_pad = item["index"], item["index_is_pad"]
     assert torch.equal(data, torch.tensor([0, 2, 3])), "Data does not match expected values"
+    assert not is_pad.any(), "Unexpected padding detected"
-def test_load_data_with_delta_timestamps_outside_tolerance_inside_episode_range():
-    data_dict = {
-        "timestamp": torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5]),
-        "index": torch.tensor([0, 1, 2, 3, 4]),
-    }
-    data_ids_per_episode = {0: torch.tensor([0, 1, 2, 3, 4])}
+def test_load_previous_and_future_frames_outside_tolerance_inside_episode_range():
+    data_dict = Dataset.from_dict({
+        "timestamp": [0.1, 0.2, 0.3, 0.4, 0.5],
+        "index": [0, 1, 2, 3, 4],
+        "episode_data_index_from": [0, 0, 0, 0, 0],
+        "episode_data_index_to": [5, 5, 5, 5, 5],
+    })
+    data_dict = data_dict.with_format("torch")
+    item = data_dict[2]
     delta_timestamps = {"index": [-0.2, 0, 0.141]}
-    key = "index"
-    current_ts = 0.3
-    episode = 0
     tol = 0.04
     with pytest.raises(AssertionError):
-        load_data_with_delta_timestamps(data_dict, data_ids_per_episode, delta_timestamps, key, current_ts, episode, tol)
+        load_previous_and_future_frames(item, data_dict, delta_timestamps, tol)
-def test_load_data_with_delta_timestamps_outside_tolerance_outside_episode_range():
-    data_dict = {
-        "timestamp": torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5]),
-        "index": torch.tensor([0, 1, 2, 3, 4]),
-    }
-    data_ids_per_episode = {0: torch.tensor([0, 1, 2, 3, 4])}
+def test_load_previous_and_future_frames_outside_tolerance_outside_episode_range():
+    data_dict = Dataset.from_dict({
+        "timestamp": [0.1, 0.2, 0.3, 0.4, 0.5],
+        "index": [0, 1, 2, 3, 4],
+        "episode_data_index_from": [0, 0, 0, 0, 0],
+        "episode_data_index_to": [5, 5, 5, 5, 5],
+    })
+    data_dict = data_dict.with_format("torch")
+    item = data_dict[2]
     delta_timestamps = {"index": [-0.3, -0.24, 0, 0.26, 0.3]}
-    key = "index"
-    current_ts = 0.3
-    episode = 0
     tol = 0.04
-    data, is_pad = load_data_with_delta_timestamps(data_dict, data_ids_per_episode, delta_timestamps, key, current_ts, episode, tol)
-    assert torch.equal(is_pad, torch.tensor([True, False, False, True, True])), "Padding does not match expected values"
+    item = load_previous_and_future_frames(item, data_dict, delta_timestamps, tol)
+    data, is_pad = item["index"], item["index_is_pad"]
     assert torch.equal(data, torch.tensor([0, 0, 2, 4, 4])), "Data does not match expected values"
+    assert torch.equal(is_pad, torch.tensor([True, False, False, True, True])), "Padding does not match expected values"
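Taken together, the rewritten tests pin down the contract of the new load_previous_and_future_frames helper: for each key in delta_timestamps it gathers, within the item's episode, the frame nearest to current timestamp + delta; queries farther than tol from any frame are flagged in the corresponding "<key>_is_pad" entry; and such padding is only tolerated when the query falls outside the episode's time range, otherwise an AssertionError signals unsynchronized timestamps. A minimal sketch consistent with these assertions (an inference from the tests, not the actual implementation in lerobot.common.datasets.utils):

import torch

def load_previous_and_future_frames_sketch(item, hf_dataset, delta_timestamps, tol):
    # Hypothetical re-implementation inferred from the tests above; the real helper
    # in lerobot.common.datasets.utils may differ in details.
    ep_from = item["episode_data_index_from"].item()
    ep_to = item["episode_data_index_to"].item()
    ep_timestamps = hf_dataset["timestamp"][ep_from:ep_to]
    current_ts = item["timestamp"]
    for key, deltas in delta_timestamps.items():
        query_ts = current_ts + torch.tensor(deltas)
        # distance from every query timestamp to every frame of the episode
        dist = torch.abs(query_ts[:, None] - ep_timestamps[None, :])
        min_dist, argmin = dist.min(dim=1)
        is_pad = min_dist > tol
        # padding is only tolerated when the query lands outside the episode's time
        # range; otherwise the timestamps are out of sync and we fail loudly
        assert (
            (query_ts[is_pad] < ep_timestamps[0]) | (query_ts[is_pad] > ep_timestamps[-1])
        ).all(), "query timestamp inside episode range but no frame within tolerance"
        item[key] = hf_dataset[key][ep_from:ep_to][argmin]
        item[f"{key}_is_pad"] = is_pad
    return item

Against the first fixture above (current timestamp 0.3, deltas [-0.2, 0, 0.139], tol 0.04) this yields data [0, 2, 3] with no padding, matching the expected values in the test.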