Compare commits

..

3 Commits

Author SHA1 Message Date
Remi Cadene
43f61136b0 Add aloha_hdf5.py 2025-01-28 11:30:23 +01:00
Simon Alibert
4def6d6ac2 Fix cluster image (#653) 2025-01-24 11:25:22 +01:00
Jochen Görtler
d8560b8d5f Bump rerun-sdk dependency to 0.21.0 (#618)
Co-authored-by: Simon Alibert <75076266+aliberts@users.noreply.github.com>
2025-01-20 09:50:11 +01:00
7 changed files with 239 additions and 300 deletions

View File

@@ -13,7 +13,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
sed gawk grep curl wget zip unzip \
tcpdump sysstat screen tmux \
libglib2.0-0 libgl1-mesa-glx libegl1-mesa \
speech-dispatcher \
speech-dispatcher portaudio19-dev \
python${PYTHON_VERSION} python${PYTHON_VERSION}-venv \
&& apt-get clean && rm -rf /var/lib/apt/lists/*
@@ -58,7 +58,7 @@ RUN (type -p wget >/dev/null || (apt update && apt-get install wget -y)) \
RUN ln -s /usr/bin/python3 /usr/bin/python
# Install poetry
RUN curl -sSL https://install.python-poetry.org | python -
RUN curl -sSL https://install.python-poetry.org | python - --version 1.8.5
ENV PATH="/root/.local/bin:$PATH"
RUN echo 'if [ "$HOME" != "/root" ]; then ln -sf /root/.local/bin/poetry $HOME/.local/bin/poetry; fi' >> /root/.bashrc
RUN poetry config virtualenvs.create false

View File

@@ -0,0 +1,213 @@
import shutil
from pathlib import Path
import h5py
import numpy as np
import torch
import tqdm
from lerobot.common.datasets.lerobot_dataset import LEROBOT_HOME, LeRobotDataset
from lerobot.common.datasets.push_dataset_to_hub._download_raw import download_raw
def create_empty_dataset(dataset_name, robot_type, mode="video", has_velocity=False, has_effort=False):
    """Create an empty LeRobotDataset with the Aloha feature schema.

    Args:
        dataset_name: used to build the hub repo id as ``cadene/{dataset_name}_v2``.
        robot_type: robot identifier, e.g. "aloha" or "mobile_aloha".
        mode: dtype used for the camera features ("video" or "image").
        has_velocity: if True, add an "observation.velocity" feature.
        has_effort: if True, add an "observation.effort" feature.

    Returns:
        The newly created (empty) LeRobotDataset.
    """
    motors = [
        # TODO(rcadene): verify
        "right_waist",
        "right_shoulder",
        "right_elbow",
        "right_forearm_roll",
        "right_wrist_angle",
        "right_wrist_rotate",
        "right_gripper",
        "left_waist",
        "left_shoulder",
        "left_elbow",
        "left_forearm_roll",
        "left_wrist_angle",
        "left_wrist_rotate",
        "left_gripper",
    ]
    cameras = [
        "cam_high",
        "cam_low",
        "cam_left_wrist",
        "cam_right_wrist",
    ]
    features = {
        "observation.state": {
            "dtype": "float32",
            "shape": (len(motors),),
            "names": [
                motors,
            ],
        },
        "action": {
            "dtype": "float32",
            "shape": (len(motors),),
            "names": [
                motors,
            ],
        },
    }
    if has_velocity:
        features["observation.velocity"] = {
            "dtype": "float32",
            "shape": (len(motors),),
            "names": [
                motors,
            ],
        }
    # Fix: this block was previously gated on `has_velocity`, so effort data
    # was silently dropped whenever velocities were absent (and effort features
    # were wrongly declared when velocities were present without effort).
    if has_effort:
        features["observation.effort"] = {
            "dtype": "float32",
            "shape": (len(motors),),
            "names": [
                motors,
            ],
        }
    for cam in cameras:
        features[f"observation.images.{cam}"] = {
            "dtype": mode,
            "shape": (3, 480, 640),
            "names": [
                "channels",
                "height",
                "width",
            ],
        }
    # NOTE(review): the repo id is hard-coded to the "cadene/" namespace with a
    # "_v2" suffix — confirm this is the intended naming scheme for callers.
    dataset = LeRobotDataset.create(
        repo_id=f"cadene/{dataset_name}_v2",
        fps=50,
        robot_type=robot_type,
        features=features,
    )
    return dataset
def get_cameras(hdf5_files):
    """Return the RGB camera names recorded in the raw episodes.

    Only the first file is inspected; all episodes are assumed to share the
    same camera layout.
    """
    with h5py.File(hdf5_files[0], "r") as ep:
        # ignore depth channel, not currently handled
        # TODO(rcadene): add depth
        return [name for name in ep["/observations/images"] if "depth" not in name]
def has_velocity(hdf5_files):
    """Check whether the raw recordings include joint velocities (qvel)."""
    first_episode = hdf5_files[0]
    with h5py.File(first_episode, "r") as ep:
        return "/observations/qvel" in ep
def has_effort(hdf5_files):
    """Check whether the raw recordings include joint efforts."""
    first_episode = hdf5_files[0]
    with h5py.File(first_episode, "r") as ep:
        return "/observations/effort" in ep
def load_raw_images_per_camera(ep, cameras):
    """Load every frame of each requested camera into RAM.

    Frames may be stored raw (a 4-dim array) or compressed (per-frame encoded
    byte buffers); both layouts come out as numpy arrays, keyed by camera name.
    """
    imgs_per_cam = {}
    for camera in cameras:
        frames = ep[f"/observations/images/{camera}"]
        if frames.ndim == 4:
            # Uncompressed: read the whole (num_frames, H, W, C) array at once.
            imgs_per_cam[camera] = frames[:]
        else:
            import cv2

            # Compressed: decode each frame's byte buffer, then stack.
            decoded = [cv2.imdecode(buf, 1) for buf in frames]
            imgs_per_cam[camera] = np.array(decoded)
    return imgs_per_cam
def load_raw_episode_data(ep_path):
    """Read one Aloha hdf5 episode into tensors and per-camera image arrays.

    Args:
        ep_path: path to a single ``episode_*.hdf5`` file.

    Returns:
        Tuple ``(imgs_per_cam, state, action, velocity, effort)`` where
        ``velocity`` and ``effort`` are None if absent from the recording.
    """
    with h5py.File(ep_path, "r") as ep:
        state = torch.from_numpy(ep["/observations/qpos"][:])
        action = torch.from_numpy(ep["/action"][:])
        velocity = None
        if "/observations/qvel" in ep:
            velocity = torch.from_numpy(ep["/observations/qvel"][:])
        effort = None
        if "/observations/effort" in ep:
            effort = torch.from_numpy(ep["/observations/effort"][:])
        # Fix: the original called load_raw_images_per_camera(ep) without the
        # required `cameras` argument, raising TypeError. Derive the RGB
        # camera names from the open file (depth streams are not handled yet).
        cameras = [key for key in ep["/observations/images"] if "depth" not in key]
        imgs_per_cam = load_raw_images_per_camera(ep, cameras)
    return imgs_per_cam, state, action, velocity, effort
def populate_dataset(dataset, hdf5_files, task, episodes=None):
    """Append the selected hdf5 episodes to `dataset`, one frame at a time.

    Args:
        dataset: target LeRobotDataset (mutated in place and returned).
        hdf5_files: sorted list of raw episode file paths.
        task: task label attached to every saved episode.
        episodes: indices into `hdf5_files`; defaults to all episodes.
    """
    if episodes is None:
        episodes = range(len(hdf5_files))
    for ep_idx in tqdm.tqdm(episodes):
        imgs_per_cam, state, action, velocity, effort = load_raw_episode_data(hdf5_files[ep_idx])
        num_frames = state.shape[0]
        for frame_idx in range(num_frames):
            frame = {
                "observation.state": state[frame_idx],
                "action": action[frame_idx],
            }
            for camera, img_array in imgs_per_cam.items():
                frame[f"observation.images.{camera}"] = img_array[frame_idx]
            # Optional streams: only present when the raw recording had them.
            if velocity is not None:
                frame["observation.velocity"] = velocity[frame_idx]
            if effort is not None:
                frame["observation.effort"] = effort[frame_idx]
            dataset.add_frame(frame)
        dataset.save_episode(task=task)
    return dataset
def port_aloha(raw_dir, raw_repo_id, repo_id, episodes: list[int] | None = None, push_to_hub=True):
    """Convert a raw Aloha hdf5 dataset into the LeRobotDataset format.

    Args:
        raw_dir: local directory holding (or receiving) the raw hdf5 files.
        raw_repo_id: hub repo to download the raw data from if `raw_dir` is missing.
        repo_id: output dataset identifier ("user/name").
        episodes: subset of episode indices to convert; None converts all.
        push_to_hub: upload the converted dataset when True.
    """
    # Start from a clean slate if a previous local conversion exists.
    output_dir = LEROBOT_HOME / repo_id
    if output_dir.exists():
        shutil.rmtree(output_dir)

    raw_dir = Path(raw_dir)
    if not raw_dir.exists():
        download_raw(raw_dir, repo_id=raw_repo_id)

    hdf5_files = sorted(raw_dir.glob("episode_*.hdf5"))
    dataset_name = repo_id.split("/")[1]
    # NOTE(review): the full repo_id is forwarded as create_empty_dataset's
    # dataset_name argument, which itself prefixes "cadene/" and suffixes
    # "_v2" — confirm the intended naming scheme.
    dataset = create_empty_dataset(
        repo_id,
        robot_type="mobile_aloha" if "mobile" in dataset_name else "aloha",
        has_effort=has_effort(hdf5_files),
        has_velocity=has_velocity(hdf5_files),
    )
    dataset = populate_dataset(
        dataset,
        hdf5_files,
        task="DEBUG",
        episodes=episodes,
    )
    dataset.consolidate()
    if push_to_hub:
        dataset.push_to_hub()
if __name__ == "__main__":
    # Example conversion: port the first two episodes of a simulated insertion
    # task, keeping the result local (no hub upload).
    raw_repo_id = "lerobot-raw/aloha_sim_insertion_human_raw"
    repo_id = "cadene/aloha_sim_insertion_human_v2"
    port_aloha(f"data/{raw_repo_id}", raw_repo_id, repo_id, episodes=[0, 1], push_to_hub=False)

View File

@@ -1,282 +0,0 @@
import argparse
import shutil
from pathlib import Path
import tensorflow as tf
import tensorflow_datasets as tfds
import torch
import tqdm
from lerobot.common.datasets.lerobot_dataset import LEROBOT_HOME, LeRobotDataset
def tf_to_torch(data):
    """Convert a TensorFlow tensor to a torch tensor, going through numpy."""
    as_numpy = data.numpy()
    return torch.from_numpy(as_numpy)
def tf_img_convert(img):
    """Convert a TF image to a torch uint8 tensor.

    The input is either an encoded byte string (tf.string) or an already
    decoded uint8 tensor.

    Raises:
        ValueError: if the tensor is neither tf.string nor tf.uint8.
    """
    if img.dtype == tf.string:
        # Encoded bytes (e.g. JPEG/PNG): decode to a single static uint8 image.
        img = tf.io.decode_image(img, expand_animations=False, dtype=tf.uint8)
    elif img.dtype != tf.uint8:
        raise ValueError(f"Unsupported image dtype: found with dtype {img.dtype}")
    return torch.from_numpy(img.numpy())
def get_type(dtype):
    """Map a TensorFlow dtype to the LeRobot feature dtype string.

    Raises:
        ValueError: for dtypes with no known mapping. (The original silently
        fell off the end and returned None, which surfaced later as a
        confusing failure in feature building.)
    """
    if dtype == tf.uint8:
        return "uint8"
    elif dtype == tf.float32:
        return "float32"
    elif dtype == tf.float64:
        return "float64"
    elif dtype == tf.bool:
        return "bool"
    elif dtype == tf.string:
        return "str"
    raise ValueError(f"Unsupported dtype: {dtype}")
def _broadcast_metadata_rlds(i: tf.Tensor, traj: dict) -> dict:
    """
    In the RLDS format, each trajectory has some top-level metadata that is explicitly separated out, and a "steps"
    entry. This function moves the "steps" entry to the top level, broadcasting any metadata to the length of the
    trajectory. This function also adds the extra metadata fields `_len`, `_traj_index`, and `_frame_index`.
    NOTE: adapted from DLimp library https://github.com/kvablack/dlimp/
    """
    steps = traj.pop("steps")
    # Trajectory length taken from the leading axis of any flattened steps entry.
    traj_len = tf.shape(tf.nest.flatten(steps)[0])[0]
    # broadcast metadata to the length of the trajectory
    metadata = tf.nest.map_structure(lambda x: tf.repeat(x, traj_len), traj)
    # put steps back in
    assert "traj_metadata" not in steps
    traj = {**steps, "traj_metadata": metadata}
    # Guard against key collisions before injecting the bookkeeping fields.
    assert "_len" not in traj
    assert "_traj_index" not in traj
    assert "_frame_index" not in traj
    traj["_len"] = tf.repeat(traj_len, traj_len)
    traj["_traj_index"] = tf.repeat(i, traj_len)
    traj["_frame_index"] = tf.range(traj_len)
    return traj
def load_raw_dataset(path: Path):
    """Open an RLDS/openx dataset directory and flatten its trajectories.

    Returns a (dataset, dataset_info) pair where every element of `dataset`
    is one trajectory with broadcast metadata (see _broadcast_metadata_rlds).
    """
    builder = tfds.builder_from_directory(str(path))
    dataset = builder.as_dataset(
        split="all",
        decoders={"steps": tfds.decode.SkipDecoding()},
    )
    dataset_info = builder.info
    print("dataset_info: ", dataset_info)

    dataset = dataset.take(len(dataset))
    # "flatten" the dataset as such we can apply trajectory level map() easily
    # each [obs][key] has a shape of (frame_size, ...)
    dataset = dataset.enumerate().map(_broadcast_metadata_rlds)
    return dataset, dataset_info
def build_features_and_dataset_keys(dataset_info):
    """Derive LeRobot feature specs and key groupings from RLDS dataset info.

    Returns:
        (features, image_keys, state_keys, other_keys): `features` maps
        LeRobot feature names to {"dtype", "shape", "name"} dicts; the three
        key lists record which raw RLDS keys feed images, vector state, and
        the remaining tensor entries respectively.
    """
    features = {}
    image_keys = []
    state_keys = []
    other_keys = []
    for key, data_info in dataset_info.features["steps"].items():
        if "observation" in key:
            # check whether the key is for an image or a vector observation
            # only add rgb images, discard depth
            for k, info in data_info.items():
                if len(info.shape) == 3 and info.dtype == tf.uint8:
                    image_keys.append(k)
                    dtype = "video"
                    shape = info.shape
                    # TODO (michel_aractingi) add info[key].doc for feature description
                    features["observation.image." + k] = {"dtype": dtype, "shape": shape, "name": None}
                else:
                    state_keys.append(k)
                    dtype = get_type(info.dtype)
                    shape = info.shape
                    # TODO (michel_aractingi) add info[key].doc for feature description
                    features["observation.state." + k] = {"dtype": dtype, "shape": shape, "name": None}
        else:
            if type(data_info) is tfds.features.Tensor:
                # TODO extend features to take language instructions
                if "language_instruction" in key:
                    continue
                other_keys.append(key)
                dtype = get_type(data_info.dtype)
                shape = data_info.shape
                # Scalars are stored as length-1 vectors in the feature spec.
                if len(shape) == 0:
                    shape = (1,)
                # Remap RLDS bookkeeping keys onto LeRobot's "next.*" convention.
                if key == "is_last":
                    features["next.done"] = {"dtype": dtype, "shape": shape, "name": None}
                elif key == "reward":
                    features["next.reward"] = {"dtype": dtype, "shape": shape, "name": None}
                else:
                    features[key] = {"dtype": dtype, "shape": shape, "name": None}
            # elif type(data_info) is tfds.features.FeaturesDict: TODO add dictionary based variables
    return features, image_keys, state_keys, other_keys
def to_lerobotdataset_with_save_episode(raw_dir: Path, repo_id: str, push_to_hub: bool = True, fps=30):
    """Port an RLDS/openx dataset to LeRobotDataset, one whole episode at a time.

    Builds full per-episode tensors and hands them to save_episode() in a
    single call (contrast with to_lerobotdataset_with_add_frame, which feeds
    frames individually).
    """
    # Remove any previous local copy so the conversion starts clean.
    if (LEROBOT_HOME / repo_id).exists():
        shutil.rmtree(LEROBOT_HOME / repo_id)
    dataset, dataset_info = load_raw_dataset(path=raw_dir)
    # Build features
    features, image_keys, state_keys, other_keys = build_features_and_dataset_keys(dataset_info)
    lerobot_dataset = LeRobotDataset.create(
        repo_id=repo_id,
        fps=fps,
        features=features,
        use_videos=True,
        image_writer_threads=4,
    )
    it = iter(dataset)
    lang_instruction = None
    # The iterator it loops over each EPISODE in dataset (not frame-by-frame)
    # len(dataset) is the number of trajectories/episodes in a dataset
    for ep_idx in tqdm.tqdm(range(len(dataset))):
        episode = next(it)
        episode_data = {}
        num_frames = episode["action"].shape[0]
        lang_instruction = episode["language_instruction"].numpy()[0].decode("utf-8")
        for key in state_keys:
            episode_data["observation.state." + key] = tf_to_torch(episode["observation"][key])
        for key in image_keys:
            decoded_images = [tf_img_convert(img) for img in episode["observation"][key]]
            episode_data["observation.image." + key] = decoded_images
        for key in other_keys:
            if "language_instruction" in key:
                # Some openx dataset have multiple language commands
                episode_data[key] = episode[key].numpy()[0].decode("utf-8")
            else:
                if key == "is_last":
                    episode_data["next.done"] = tf_to_torch(episode[key])
                elif key == "reward":
                    episode_data["next.reward"] = tf_to_torch(episode[key])
                else:
                    episode_data[key] = tf_to_torch(episode[key])
        # Bookkeeping columns expected by save_episode's episode_data path.
        episode_data["size"] = num_frames
        episode_data["episode_index"] = ep_idx  # torch.tensor([ep_idx] * num_frames)
        episode_data["frame_index"] = torch.arange(0, num_frames, 1)
        episode_data["timestamp"] = torch.arange(0, num_frames, 1) / fps
        episode_data["task_index"] = 0  # TODO calculate task index correctly
        episode_data["index"] = 0  # TODO figure out what index is for in DEFAULT_FEATURES
        lerobot_dataset.save_episode(task=lang_instruction, episode_data=episode_data)
    lerobot_dataset.consolidate()
    if push_to_hub:
        lerobot_dataset.push_to_hub()
def to_lerobotdataset_with_add_frame(raw_dir: Path, repo_id: str, push_to_hub: bool = True, fps=30):
    """Port an RLDS/openx dataset to LeRobotDataset, feeding frames one by one.

    Same conversion as to_lerobotdataset_with_save_episode, but episode data
    is sliced per frame and pushed through add_frame() before save_episode().
    """
    # Remove any previous local copy so the conversion starts clean.
    if (LEROBOT_HOME / repo_id).exists():
        shutil.rmtree(LEROBOT_HOME / repo_id)
    dataset, dataset_info = load_raw_dataset(path=raw_dir)
    # Build features, get keys
    features, image_keys, state_keys, other_keys = build_features_and_dataset_keys(dataset_info)
    lerobot_dataset = LeRobotDataset.create(
        repo_id=repo_id,
        fps=fps,
        features=features,
        use_videos=True,
        image_writer_threads=4,
    )
    it = iter(dataset)
    lang_instruction = None
    # The iterator it loops over each EPISODE in dataset (not frame-by-frame)
    # len(dataset) is the number of trajectories/episodes in a dataset
    for _ep_idx in tqdm.tqdm(range(len(dataset))):
        episode = next(it)
        episode_data = {}
        num_frames = episode["action"].shape[0]
        lang_instruction = episode["language_instruction"].numpy()[0].decode("utf-8")
        for key in state_keys:
            episode_data["observation.state." + key] = tf_to_torch(episode["observation"][key])
        for key in image_keys:
            decoded_images = [tf_img_convert(img) for img in episode["observation"][key]]
            episode_data["observation.image." + key] = decoded_images
        for key in other_keys:
            if "language_instruction" in key:
                # Some openx dataset have multiple language commands
                # like droid has 1-3 language instructions for some trajectories
                episode_data[key] = episode[key].numpy()[0].decode("utf-8")
            else:
                if key == "is_last":
                    episode_data["next.done"] = tf_to_torch(episode[key])
                elif key == "reward":
                    episode_data["next.reward"] = tf_to_torch(episode[key])
                else:
                    episode_data[key] = tf_to_torch(episode[key])
        # Slice the per-episode tensors into single-frame dicts for add_frame.
        for i in range(num_frames):
            frame = {}
            for key in episode_data:
                if "language_instruction" in key:
                    # The instruction is a single string shared by all frames.
                    frame[key] = episode_data[key]
                else:
                    frame[key] = episode_data[key][i]
            lerobot_dataset.add_frame(frame)
        lerobot_dataset.save_episode(task=lang_instruction)
    lerobot_dataset.consolidate()
    if push_to_hub:
        lerobot_dataset.push_to_hub()
if __name__ == "__main__":
    # CLI entry point: convert a local rlds/openx dataset directory into a
    # LeRobotDataset, optionally pushing the result to the HuggingFace Hub.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo-id",
        type=str,
        required=True,
        help="Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).",
    )
    parser.add_argument(
        "--raw-dir",
        type=str,
        required=True,
        help="Path to the directory of the raw dataset in rlds/openx format.",
    )
    parser.add_argument(
        "--push-to-hub",
        type=int,
        default=0,
        help="Binary value to indicate whether you want to push the dataset to the HuggingFace Hub.",
    )
    parser.add_argument(
        "--fps",
        type=int,
        default=30,
        help="frames per second, can be found the openx spreadsheet for openx datasets."
        "https://docs.google.com/spreadsheets/d/1rPBD77tk60AEIGZrGSODwyyzs5FgCU9Uz3h-3_t2A9g/edit?gid=0#gid=0",
    )
    args = parser.parse_args()
    # Frame-by-frame ingestion is used as the conversion path here.
    to_lerobotdataset_with_add_frame(args.raw_dir, args.repo_id, args.push_to_hub, args.fps)

View File

@@ -26,6 +26,7 @@ def get_stats_einops_patterns(dataset, num_workers=0):
Note: We assume the images are in channel first format
"""
dataloader = torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,

View File

@@ -744,8 +744,9 @@ class LeRobotDataset(torch.utils.data.Dataset):
you can do it later with dataset.consolidate(). This is to give more flexibility on when to spend
time for video encoding.
"""
if not episode_data:
episode_buffer = self.episode_buffer
episode_buffer = episode_data if episode_data else self.episode_buffer
episode_length = episode_buffer.pop("size")
episode_index = episode_buffer["episode_index"]
if episode_index != self.meta.total_episodes:
@@ -761,7 +762,7 @@ class LeRobotDataset(torch.utils.data.Dataset):
)
task_index = self.meta.get_task_index(task)
if not set(episode_buffer.keys()) == set(self.features):
raise ValueError()
@@ -774,7 +775,7 @@ class LeRobotDataset(torch.utils.data.Dataset):
episode_buffer[key] = np.full((episode_length,), episode_index)
elif key == "task_index":
episode_buffer[key] = np.full((episode_length,), task_index)
elif ft["dtype"] in ["image", "video"] or "language_instruction" in key:
elif ft["dtype"] in ["image", "video"]:
continue
elif len(ft["shape"]) == 1 and ft["shape"][0] == 1:
episode_buffer[key] = np.array(episode_buffer[key], dtype=ft["dtype"])
@@ -895,7 +896,7 @@ class LeRobotDataset(torch.utils.data.Dataset):
if run_compute_stats:
self.stop_image_writer()
# TODO(aliberts): refactor stats in save_episodes
self.meta.stats = compute_stats(self, num_workers=0)
self.meta.stats = compute_stats(self)
serialized_stats = serialize_dict(self.meta.stats)
write_json(serialized_stats, self.root / STATS_PATH)
self.consolidated = True

28
poetry.lock generated
View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
[[package]]
name = "absl-py"
@@ -1294,6 +1294,10 @@ files = [
{file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:78656d3ae1282a142a5fed410ec3a6f725fdf8d9f9192ed673e336ea3b083e12"},
{file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:681e22c8ecb3b48d11cb9019f8a32d4ae1e353e20d4ce3a0f0eedd0ccbd95e5f"},
{file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4598572bab6f726ec41fabb43bf0f7e3cf8082ea0f6f8f4e57845a6c919f31b3"},
{file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:157fc1fed50946646f09df75c6d52198735a5973e53d252199bbb1c65e1594d2"},
{file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_28_armv7l.whl", hash = "sha256:7ae2724c181be10692c24fb8d9ce2a99a9afc57237332c3658e2ea6f4f33c091"},
{file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_28_i686.whl", hash = "sha256:3d324835f292edd81b962f8c0df44f7f47c0a6f8fe6f7d081951aeb1f5ba57d2"},
{file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:474c087b5e584293685a7d4837165b2ead96dc74fb435ae50d5fa0ac168a0de0"},
{file = "dora_rs-0.3.6-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:297350f05f5f87a0bf647a1e5b4446728e5f800788c6bb28b462bcd167f1de7f"},
{file = "dora_rs-0.3.6-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:b1870a8e30f0ac298d17fd546224348d13a648bcfa0cbc51dba7e5136c1af928"},
{file = "dora_rs-0.3.6-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:182a189212d41be0c960fd3299bf6731af2e771f8858cfb1be7ebcc17d60a254"},
@@ -4924,6 +4928,8 @@ files = [
{file = "PyAudio-0.2.14-cp311-cp311-win_amd64.whl", hash = "sha256:bbeb01d36a2f472ae5ee5e1451cacc42112986abe622f735bb870a5db77cf903"},
{file = "PyAudio-0.2.14-cp312-cp312-win32.whl", hash = "sha256:5fce4bcdd2e0e8c063d835dbe2860dac46437506af509353c7f8114d4bacbd5b"},
{file = "PyAudio-0.2.14-cp312-cp312-win_amd64.whl", hash = "sha256:12f2f1ba04e06ff95d80700a78967897a489c05e093e3bffa05a84ed9c0a7fa3"},
{file = "PyAudio-0.2.14-cp313-cp313-win32.whl", hash = "sha256:95328285b4dab57ea8c52a4a996cb52be6d629353315be5bfda403d15932a497"},
{file = "PyAudio-0.2.14-cp313-cp313-win_amd64.whl", hash = "sha256:692d8c1446f52ed2662120bcd9ddcb5aa2b71f38bda31e58b19fb4672fffba69"},
{file = "PyAudio-0.2.14-cp38-cp38-win32.whl", hash = "sha256:858caf35b05c26d8fc62f1efa2e8f53d5fa1a01164842bd622f70ddc41f55000"},
{file = "PyAudio-0.2.14-cp38-cp38-win_amd64.whl", hash = "sha256:2dac0d6d675fe7e181ba88f2de88d321059b69abd52e3f4934a8878e03a7a074"},
{file = "PyAudio-0.2.14-cp39-cp39-win32.whl", hash = "sha256:f745109634a7c19fa4d6b8b7d6967c3123d988c9ade0cd35d4295ee1acdb53e9"},
@@ -5890,27 +5896,27 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "rerun-sdk"
version = "0.18.2"
version = "0.21.0"
description = "The Rerun Logging SDK"
optional = false
python-versions = "<3.13,>=3.8"
python-versions = ">=3.8"
files = [
{file = "rerun_sdk-0.18.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bc4e73275f428e4e9feb8e85f88db7a9fd18b997b1570de62f949a926978f1b2"},
{file = "rerun_sdk-0.18.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:efbba40a59710ae83607cb0dc140398a35979c2d2acf5190c9def2ac4697f6a8"},
{file = "rerun_sdk-0.18.2-cp38-abi3-manylinux_2_31_aarch64.whl", hash = "sha256:2a5e3b618b6d1bfde09bd5614a898995f3c318cc69d8f6d569924a2cd41536ce"},
{file = "rerun_sdk-0.18.2-cp38-abi3-manylinux_2_31_x86_64.whl", hash = "sha256:8fdfc4c51ef2e75cb68d39e56f0d7c196eff250cb9a0260c07d5e2d6736e31b0"},
{file = "rerun_sdk-0.18.2-cp38-abi3-win_amd64.whl", hash = "sha256:c929ade91d3be301b26671b25e70fb529524ced915523d266641c6fc667a1eb5"},
{file = "rerun_sdk-0.21.0-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:1e454ceea31c70ae9ec1bb26eaa82828661b7657ab4d2261ca0b94006d6a1975"},
{file = "rerun_sdk-0.21.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:84ecb77b0b5bac71b53e849801ff073de89fcd2f1e0ca0da62fb18fcbeceadf0"},
{file = "rerun_sdk-0.21.0-cp38-abi3-manylinux_2_31_aarch64.whl", hash = "sha256:919d921165c3238490dbe5bf00a062c68fdd2c54dc14aac6a1914c82edb5d9c8"},
{file = "rerun_sdk-0.21.0-cp38-abi3-manylinux_2_31_x86_64.whl", hash = "sha256:897649aadcab7014b78096f93c84c61c00a227b80adaf0dec279924b5aab53d8"},
{file = "rerun_sdk-0.21.0-cp38-abi3-win_amd64.whl", hash = "sha256:2060bdb536a198f0f04789ba5ba771e66587e7851d668b3dfab257a5efa16819"},
]
[package.dependencies]
attrs = ">=23.1.0"
numpy = ">=1.23,<2"
numpy = ">=1.23"
pillow = ">=8.0.0"
pyarrow = ">=14.0.2"
typing-extensions = ">=4.5"
[package.extras]
notebook = ["rerun-notebook (==0.18.2)"]
notebook = ["rerun-notebook (==0.21.0)"]
tests = ["pytest (==7.1.2)"]
[[package]]
@@ -7569,4 +7575,4 @@ xarm = ["gym-xarm"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.13"
content-hash = "41344f0eb2d06d9a378abcd10df8205aa3926ff0a08ac5ab1a0b1bcae7440fd8"
content-hash = "ee60d9251f6a6253d0c371707a72a500a6053d7925c6898e6663d9320ad11503"

View File

@@ -57,7 +57,7 @@ pytest-cov = {version = ">=5.0.0", optional = true}
datasets = ">=2.19.0"
imagecodecs = { version = ">=2024.1.1", optional = true }
pyav = ">=12.0.5"
rerun-sdk = ">=0.15.1"
rerun-sdk = ">=0.21.0"
deepdiff = ">=7.0.1"
flask = ">=3.0.3"
pandas = {version = ">=2.2.2", optional = true}