Update pre-commit-config.yaml + pyproject.toml + cap rerun & transformers dependency versions (#1520)

* chore: update .gitignore

* chore: update pre-commit

* chore(deps): update pyproject

* fix(ci): multiple fixes

* chore: pre-commit apply

* chore: address review comments

* Update pyproject.toml

Co-authored-by: Ben Zhang <5977478+ben-z@users.noreply.github.com>
Signed-off-by: Steven Palma <imstevenpmwork@ieee.org>

* chore(deps): add todo

---------

Signed-off-by: Steven Palma <imstevenpmwork@ieee.org>
Co-authored-by: Ben Zhang <5977478+ben-z@users.noreply.github.com>
Authored by Steven Palma on 2025-07-17 14:30:20 +02:00, committed by GitHub
parent 0938a1d816 · commit 378e1f0338
78 changed files with 1450 additions and 636 deletions

View File

@@ -15,7 +15,7 @@
# limitations under the License.
import abc
from typing import Any, Dict, List
from typing import Any
import numpy as np
@@ -69,7 +69,7 @@ class Camera(abc.ABC):
@staticmethod
@abc.abstractmethod
def find_cameras() -> List[Dict[str, Any]]:
def find_cameras() -> list[dict[str, Any]]:
"""Detects available cameras connected to the system.
Returns:
List[Dict[str, Any]]: A list of dictionaries,
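
The same modernization recurs throughout the hunks below: `typing.List`/`Dict`/`Optional`/`Union` annotations are rewritten as built-in generics (PEP 585) and `X | None` unions (PEP 604), which need no extra `typing` imports. A minimal before/after sketch, using a hypothetical `DummyCamera` rather than any class from the repo:

```python
# Minimal sketch of the annotation style this commit adopts (assumed to be
# driven by the updated pre-commit/ruff rules); DummyCamera is hypothetical.
from typing import Any


class DummyCamera:
    @staticmethod
    def find_cameras() -> list[dict[str, Any]]:
        # previously: List[Dict[str, Any]]
        return [{"type": "Dummy", "id": 0}]

    @staticmethod
    def pick_camera(index: int | None = None) -> dict[str, Any] | None:
        # previously: Optional[int] and Optional[Dict[str, Any]]
        cameras = DummyCamera.find_cameras()
        return cameras[index or 0] if cameras else None
```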

View File

@@ -23,7 +23,7 @@ import platform
import time
from pathlib import Path
from threading import Event, Lock, Thread
from typing import Any, Dict, List
from typing import Any
# Fix MSMF hardware transform compatibility for Windows before importing cv2
if platform.system() == "Windows" and "OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS" not in os.environ:
@@ -245,7 +245,7 @@ class OpenCVCamera(Camera):
)
@staticmethod
def find_cameras() -> List[Dict[str, Any]]:
def find_cameras() -> list[dict[str, Any]]:
"""
Detects available OpenCV cameras connected to the system.

View File

@@ -19,7 +19,7 @@ Provides the RealSenseCamera class for capturing frames from Intel RealSense cam
import logging
import time
from threading import Event, Lock, Thread
from typing import Any, Dict, List
from typing import Any
import cv2
import numpy as np
@@ -194,7 +194,7 @@ class RealSenseCamera(Camera):
logger.info(f"{self} connected.")
@staticmethod
def find_cameras() -> List[Dict[str, Any]]:
def find_cameras() -> list[dict[str, Any]]:
"""
Detects available Intel RealSense cameras connected to the system.

View File

@@ -28,12 +28,12 @@ class RealSenseCameraConfig(CameraConfig):
Example configurations for Intel RealSense D405:
```python
# Basic configurations
RealSenseCameraConfig("0123456789", 30, 1280, 720) # 1280x720 @ 30FPS
RealSenseCameraConfig("0123456789", 60, 640, 480) # 640x480 @ 60FPS
RealSenseCameraConfig("0123456789", 30, 1280, 720) # 1280x720 @ 30FPS
RealSenseCameraConfig("0123456789", 60, 640, 480) # 640x480 @ 60FPS
# Advanced configurations
RealSenseCameraConfig("0123456789", 30, 640, 480, use_depth=True) # With depth sensing
RealSenseCameraConfig("0123456789", 30, 640, 480, rotation=Cv2Rotation.ROTATE_90) # With 90° rotation
RealSenseCameraConfig("0123456789", 30, 640, 480, rotation=Cv2Rotation.ROTATE_90) # With 90° rotation
```
Attributes:

View File

@@ -16,9 +16,9 @@ import inspect
import pkgutil
import sys
from argparse import ArgumentError
from collections.abc import Sequence
from functools import wraps
from pathlib import Path
from typing import Sequence
import draccus
@@ -76,9 +76,8 @@ def parse_plugin_args(plugin_arg_suffix: str, args: Sequence[str]) -> dict:
- Values are the corresponding argument values
Example:
>>> args = ['--env.discover_packages_path=my_package',
... '--other_arg=value']
>>> parse_plugin_args('discover_packages_path', args)
>>> args = ["--env.discover_packages_path=my_package", "--other_arg=value"]
>>> parse_plugin_args("discover_packages_path", args)
{'env.discover_packages_path': 'my_package'}
"""
plugin_args = {}
@@ -111,7 +110,7 @@ def load_plugin(plugin_path: str) -> None:
PluginLoadError: If the plugin cannot be loaded due to import errors or if the package path is invalid.
Examples:
>>> load_plugin("external_plugin.core") # Loads plugin from external package
>>> load_plugin("external_plugin.core") # Loads plugin from external package
Notes:
- The plugin package should handle its own registration during import

View File

@@ -12,13 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import builtins
import json
import logging
import os
import tempfile
from dataclasses import dataclass, field
from pathlib import Path
from typing import Type, TypeVar
from typing import TypeVar
import draccus
from huggingface_hub import hf_hub_download
@@ -31,7 +32,6 @@ from lerobot.optim.schedulers import LRSchedulerConfig
from lerobot.utils.hub import HubMixin
from lerobot.utils.utils import auto_select_torch_device, is_amp_available, is_torch_device_available
# Generic variable that is either PreTrainedConfig or a subclass thereof
T = TypeVar("T", bound="PreTrainedConfig")
@@ -148,7 +148,7 @@ class PreTrainedConfig(draccus.ChoiceRegistry, HubMixin, abc.ABC):
@classmethod
def from_pretrained(
cls: Type[T],
cls: builtins.type[T],
pretrained_name_or_path: str | Path,
*,
force_download: bool = False,
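
Note that the `cls: Type[T]` parameters become `cls: builtins.type[T]` rather than the bare `type[T]`. The diff does not state why; one plausible reason is that these config classes expose their own `type` name (as draccus choice registries do), which would shadow the builtin when the annotation is evaluated. A hypothetical illustration of that hazard, not the repo's code:

```python
# Hypothetical shadowing example: if the class defines a `type` property
# before the classmethod, a bare `type[T]` annotation evaluated eagerly in
# the class body would subscript the property and fail; builtins.type[T]
# side-steps the collision. Config and its members are made up for this sketch.
import builtins
from typing import TypeVar

T = TypeVar("T", bound="Config")


class Config:
    @property
    def type(self) -> str:  # shadows the builtin name inside the class body
        return self.__class__.__name__.lower()

    @classmethod
    def from_pretrained(cls: builtins.type[T], name_or_path: str) -> T:
        # `cls: type[T]` here would resolve `type` to the property above.
        return cls()
```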

View File

@@ -11,11 +11,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
import datetime as dt
import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Type
import draccus
from huggingface_hub import hf_hub_download
@@ -135,7 +135,7 @@ class TrainPipelineConfig(HubMixin):
@classmethod
def from_pretrained(
cls: Type["TrainPipelineConfig"],
cls: builtins.type["TrainPipelineConfig"],
pretrained_name_or_path: str | Path,
*,
force_download: bool = False,

View File

@@ -1,7 +1,8 @@
---
# For reference on dataset card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1
# Doc / guide: https://huggingface.co/docs/hub/datasets-cards
{{ card_data }}
# prettier-ignore
{{card_data}}
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).

View File

@@ -16,8 +16,8 @@
import contextlib
import logging
import shutil
from collections.abc import Callable
from pathlib import Path
from typing import Callable
import datasets
import numpy as np

View File

@@ -16,7 +16,6 @@
import inspect
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Dict
import datasets
import numpy
@@ -77,7 +76,7 @@ def check_repo_id(repo_id: str) -> None:
# TODO(aliberts): remove
def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> Dict[str, torch.Tensor]:
def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> dict[str, torch.Tensor]:
"""
Calculate episode data index for the provided HuggingFace Dataset. Relies on episode_index column of hf_dataset.
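
Since the docstring only states that the function relies on the `episode_index` column, a hypothetical usage sketch may help; the import path and the exact return layout are assumptions, not taken from this diff:

```python
# Assumed usage of calculate_episode_data_index: a tiny dataset with an
# episode_index column; the module path and the {"from": ..., "to": ...}
# per-episode frame-boundary layout of the result are assumptions.
import datasets

from lerobot.datasets.utils import calculate_episode_data_index  # path assumed

hf_dataset = datasets.Dataset.from_dict(
    {
        "episode_index": [0, 0, 0, 1, 1, 2],  # frames grouped per episode
        "frame_index": [0, 1, 2, 0, 1, 0],
    }
)

episode_data_index = calculate_episode_data_index(hf_dataset)
# Assumed result: {"from": tensor([0, 3, 5]), "to": tensor([3, 5, 6])}
```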

View File

@@ -13,7 +13,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterator, Union
from collections.abc import Iterator
import torch
@@ -22,7 +22,7 @@ class EpisodeAwareSampler:
def __init__(
self,
episode_data_index: dict,
episode_indices_to_use: Union[list, None] = None,
episode_indices_to_use: list | None = None,
drop_n_first_frames: int = 0,
drop_n_last_frames: int = 0,
shuffle: bool = False,

View File

@@ -14,13 +14,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from collections.abc import Callable, Sequence
from dataclasses import dataclass, field
from typing import Any, Callable, Sequence
from typing import Any
import torch
from torchvision.transforms import v2
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2 import functional as F # noqa: N812
from torchvision.transforms.v2 import (
Transform,
functional as F, # noqa: N812
)
class RandomSubsetApply(Transform):

View File

@@ -14,7 +14,7 @@
import abc
from dataclasses import dataclass, field
from typing import Any, Optional
from typing import Any
import draccus
@@ -179,10 +179,10 @@ class EnvTransformConfig:
add_joint_velocity_to_observation: bool = False
add_current_to_observation: bool = False
add_ee_pose_to_observation: bool = False
crop_params_dict: Optional[dict[str, tuple[int, int, int, int]]] = None
resize_size: Optional[tuple[int, int]] = None
crop_params_dict: dict[str, tuple[int, int, int, int]] | None = None
resize_size: tuple[int, int] | None = None
control_time_s: float = 20.0
fixed_reset_joint_positions: Optional[Any] = None
fixed_reset_joint_positions: Any | None = None
reset_time_s: float = 5.0
use_gripper: bool = True
gripper_quantization_threshold: float | None = 0.8
@@ -195,21 +195,21 @@ class EnvTransformConfig:
class HILSerlRobotEnvConfig(EnvConfig):
"""Configuration for the HILSerlRobotEnv environment."""
robot: Optional[RobotConfig] = None
teleop: Optional[TeleoperatorConfig] = None
wrapper: Optional[EnvTransformConfig] = None
robot: RobotConfig | None = None
teleop: TeleoperatorConfig | None = None
wrapper: EnvTransformConfig | None = None
fps: int = 10
name: str = "real_robot"
mode: str = None # Either "record", "replay", None
repo_id: Optional[str] = None
dataset_root: Optional[str] = None
repo_id: str | None = None
dataset_root: str | None = None
task: str = ""
num_episodes: int = 10 # only for record mode
episode: int = 0
device: str = "cuda"
push_to_hub: bool = True
pretrained_policy_name_or_path: Optional[str] = None
reward_classifier_pretrained_path: Optional[str] = None
pretrained_policy_name_or_path: str | None = None
reward_classifier_pretrained_path: str | None = None
# For the reward classifier, to record more positive examples after a success
number_of_steps_after_success: int = 0
@@ -248,18 +248,18 @@ class HILEnvConfig(EnvConfig):
}
)
################# args from hilserlrobotenv
reward_classifier_pretrained_path: Optional[str] = None
robot_config: Optional[RobotConfig] = None
teleop_config: Optional[TeleoperatorConfig] = None
wrapper: Optional[EnvTransformConfig] = None
reward_classifier_pretrained_path: str | None = None
robot_config: RobotConfig | None = None
teleop_config: TeleoperatorConfig | None = None
wrapper: EnvTransformConfig | None = None
mode: str = None # Either "record", "replay", None
repo_id: Optional[str] = None
dataset_root: Optional[str] = None
repo_id: str | None = None
dataset_root: str | None = None
num_episodes: int = 10 # only for record mode
episode: int = 0
device: str = "cuda"
push_to_hub: bool = True
pretrained_policy_name_or_path: Optional[str] = None
pretrained_policy_name_or_path: str | None = None
# For the reward classifier, to record more positive examples after a success
number_of_steps_after_success: int = 0
############################

View File

@@ -32,7 +32,7 @@ import concurrent.futures
import logging
import time
from pathlib import Path
from typing import Any, Dict, List
from typing import Any
import numpy as np
from PIL import Image
@@ -46,14 +46,14 @@ from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraCon
logger = logging.getLogger(__name__)
def find_all_opencv_cameras() -> List[Dict[str, Any]]:
def find_all_opencv_cameras() -> list[dict[str, Any]]:
"""
Finds all available OpenCV cameras plugged into the system.
Returns:
A list of all available OpenCV cameras with their metadata.
"""
all_opencv_cameras_info: List[Dict[str, Any]] = []
all_opencv_cameras_info: list[dict[str, Any]] = []
logger.info("Searching for OpenCV cameras...")
try:
opencv_cameras = OpenCVCamera.find_cameras()
@@ -66,14 +66,14 @@ def find_all_opencv_cameras() -> List[Dict[str, Any]]:
return all_opencv_cameras_info
def find_all_realsense_cameras() -> List[Dict[str, Any]]:
def find_all_realsense_cameras() -> list[dict[str, Any]]:
"""
Finds all available RealSense cameras plugged into the system.
Returns:
A list of all available RealSense cameras with their metadata.
"""
all_realsense_cameras_info: List[Dict[str, Any]] = []
all_realsense_cameras_info: list[dict[str, Any]] = []
logger.info("Searching for RealSense cameras...")
try:
realsense_cameras = RealSenseCamera.find_cameras()
@@ -88,7 +88,7 @@ def find_all_realsense_cameras() -> List[Dict[str, Any]]:
return all_realsense_cameras_info
def find_and_print_cameras(camera_type_filter: str | None = None) -> List[Dict[str, Any]]:
def find_and_print_cameras(camera_type_filter: str | None = None) -> list[dict[str, Any]]:
"""
Finds available cameras based on an optional filter and prints their information.
@@ -99,7 +99,7 @@ def find_and_print_cameras(camera_type_filter: str | None = None) -> List[Dict[s
Returns:
A list of all available cameras matching the filter, with their metadata.
"""
all_cameras_info: List[Dict[str, Any]] = []
all_cameras_info: list[dict[str, Any]] = []
if camera_type_filter:
camera_type_filter = camera_type_filter.lower()
@@ -153,7 +153,7 @@ def save_image(
logger.error(f"Failed to save image for camera {camera_identifier} (type {camera_type}): {e}")
def create_camera_instance(cam_meta: Dict[str, Any]) -> Dict[str, Any] | None:
def create_camera_instance(cam_meta: dict[str, Any]) -> dict[str, Any] | None:
"""Create and connect to a camera instance based on metadata."""
cam_type = cam_meta.get("type")
cam_id = cam_meta.get("id")
@@ -190,7 +190,7 @@ def create_camera_instance(cam_meta: Dict[str, Any]) -> Dict[str, Any] | None:
def process_camera_image(
cam_dict: Dict[str, Any], output_dir: Path, current_time: float
cam_dict: dict[str, Any], output_dir: Path, current_time: float
) -> concurrent.futures.Future | None:
"""Capture and process an image from a single camera."""
cam = cam_dict["instance"]
@@ -216,7 +216,7 @@ def process_camera_image(
return None
def cleanup_cameras(cameras_to_use: List[Dict[str, Any]]):
def cleanup_cameras(cameras_to_use: list[dict[str, Any]]):
"""Disconnect all cameras."""
logger.info(f"Disconnecting {len(cameras_to_use)} cameras...")
for cam_dict in cameras_to_use:

View File

@@ -224,7 +224,7 @@ class MotorsBus(abc.ABC):
```bash
python -m lerobot.find_port.py
>>> Finding all available ports for the MotorsBus.
>>> ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
>>> ["/dev/tty.usbmodem575E0032081", "/dev/tty.usbmodem575E0031751"]
>>> Remove the usb cable from your MotorsBus and press Enter when done.
>>> The port of this MotorsBus is /dev/tty.usbmodem575E0031751.
>>> Reconnect the usb cable.

View File

@@ -21,8 +21,8 @@ The majority of changes here involve removing unused code, unifying naming, and
import math
from collections import deque
from collections.abc import Callable
from itertools import chain
from typing import Callable
import einops
import numpy as np
@@ -216,7 +216,7 @@ class ACTTemporalEnsembler:
continue
avg *= exp_weights[:i].sum()
avg += item * exp_weights[i]
avg /= exp_weights[:i+1].sum()
avg /= exp_weights[: i + 1].sum()
print("online", avg)
```
"""

View File

@@ -22,7 +22,7 @@ TODO(alexander-soare):
import math
from collections import deque
from typing import Callable
from collections.abc import Callable
import einops
import numpy as np

View File

@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Union
import torch
import torch.version
@@ -228,12 +227,12 @@ class PaliGemmaWithExpertModel(PreTrainedModel):
# TODO: break down this huge forward into modules or functions
def forward(
self,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None,
inputs_embeds: List[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
fill_kv_cache: Optional[bool] = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: list[torch.FloatTensor] | Cache | None = None,
inputs_embeds: list[torch.FloatTensor] = None,
use_cache: bool | None = None,
fill_kv_cache: bool | None = None,
):
models = [self.paligemma.language_model, self.gemma_expert.model]

View File

@@ -12,20 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import builtins
import logging
import os
from importlib.resources import files
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Type, TypeVar
from typing import TypeVar
import packaging
import safetensors
from huggingface_hub import HfApi, ModelCard, ModelCardData, hf_hub_download
from huggingface_hub.constants import SAFETENSORS_SINGLE_FILE
from huggingface_hub.errors import HfHubHTTPError
from safetensors.torch import load_model as load_model_as_safetensor
from safetensors.torch import save_model as save_model_as_safetensor
from safetensors.torch import load_model as load_model_as_safetensor, save_model as save_model_as_safetensor
from torch import Tensor, nn
from lerobot.configs.policies import PreTrainedConfig
@@ -67,7 +67,7 @@ class PreTrainedPolicy(nn.Module, HubMixin, abc.ABC):
@classmethod
def from_pretrained(
cls: Type[T],
cls: builtins.type[T],
pretrained_name_or_path: str | Path,
*,
config: PreTrainedConfig | None = None,
@@ -223,7 +223,7 @@ class PreTrainedPolicy(nn.Module, HubMixin, abc.ABC):
logging.info(f"Model pushed to {commit_info.repo_url.url}")
def generate_model_card(
self, dataset_repo_id: str, model_type: str, license: str | None, tags: List[str] | None
self, dataset_repo_id: str, model_type: str, license: str | None, tags: list[str] | None
) -> ModelCard:
base_model = "lerobot/smolvla_base" if model_type == "smolvla" else None # Set a base model

View File

@@ -16,8 +16,9 @@
# limitations under the License.
import math
from collections.abc import Callable
from dataclasses import asdict
from typing import Callable, Literal
from typing import Literal
import einops
import numpy as np

View File

@@ -13,7 +13,6 @@
# limitations under the License.
import copy
from typing import List, Optional
import torch
from torch import nn
@@ -403,12 +402,12 @@ class SmolVLMWithExpertModel(nn.Module):
def forward(
self,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: List[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
fill_kv_cache: Optional[bool] = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: list[torch.FloatTensor] | None = None,
inputs_embeds: list[torch.FloatTensor] = None,
use_cache: bool | None = None,
fill_kv_cache: bool | None = None,
):
models = [self.get_vlm_model().text_model, self.lm_expert]
model_layers = self.get_model_layers(models)

View File

@@ -24,9 +24,9 @@ The comments in this code may sometimes refer to these references:
# ruff: noqa: N806
from collections import deque
from collections.abc import Callable
from copy import deepcopy
from functools import partial
from typing import Callable
import einops
import numpy as np

View File

@@ -18,7 +18,7 @@
import warnings
from collections import deque
from typing import Callable, List
from collections.abc import Callable
import einops
import numpy as np
@@ -901,7 +901,7 @@ class MLP(torch.nn.Sequential):
def __init__(
self,
in_channels: int,
hidden_channels: List[int],
hidden_channels: list[int],
):
layers = []
in_dim = in_channels

View File

@@ -17,10 +17,10 @@
# limitations under the License.
import math
from collections.abc import Callable
from functools import partial
from math import ceil
from random import randrange
from typing import Callable
import torch
import torch.distributed as distributed
@@ -198,7 +198,7 @@ class GPT(nn.Module):
# report number of parameters
n_params = sum(p.numel() for p in self.parameters())
print("number of parameters: {:.2f}M".format(n_params / 1e6))
print(f"number of parameters: {n_params / 1e6:.2f}M")
def forward(self, input, targets=None):
device = input.device
@@ -255,7 +255,7 @@ class GPT(nn.Module):
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, _p in m.named_parameters():
fpn = "{}.{}".format(mn, pn) if mn else pn # full param name
fpn = f"{mn}.{pn}" if mn else pn # full param name
if pn.endswith("bias"):
# all biases will not be decayed
no_decay.add(fpn)
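
The hunk above belongs to the usual minGPT-style weight-decay grouping; a condensed, hypothetical sketch of the full pattern (module whitelist/blacklist, full parameter names, and the resulting AdamW parameter groups), with assumed hyperparameters:

```python
# Condensed sketch of the decay/no_decay grouping the hunk is part of; the
# model, weight_decay and lr values are assumed for illustration.
import torch
from torch import nn

model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8), nn.Linear(8, 2))

decay, no_decay = set(), set()
whitelist_weight_modules = (nn.Linear,)
blacklist_weight_modules = (nn.LayerNorm, nn.Embedding)
for mn, m in model.named_modules():
    for pn, _p in m.named_parameters(recurse=False):
        fpn = f"{mn}.{pn}" if mn else pn  # full param name, as in the hunk
        if pn.endswith("bias"):
            no_decay.add(fpn)  # all biases will not be decayed
        elif pn.endswith("weight") and isinstance(m, whitelist_weight_modules):
            decay.add(fpn)
        elif pn.endswith("weight") and isinstance(m, blacklist_weight_modules):
            no_decay.add(fpn)

params = dict(model.named_parameters())
optimizer = torch.optim.AdamW(
    [
        {"params": [params[n] for n in sorted(decay)], "weight_decay": 0.1},
        {"params": [params[n] for n in sorted(no_decay)], "weight_decay": 0.0},
    ],
    lr=1e-4,
)
```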

View File

@@ -62,7 +62,6 @@ import time
from dataclasses import asdict, dataclass
from pathlib import Path
from pprint import pformat
from typing import List
from lerobot.cameras import ( # noqa: F401
CameraConfig, # noqa: F401
@@ -190,7 +189,7 @@ def record_loop(
events: dict,
fps: int,
dataset: LeRobotDataset | None = None,
teleop: Teleoperator | List[Teleoperator] | None = None,
teleop: Teleoperator | list[Teleoperator] | None = None,
policy: PreTrainedPolicy | None = None,
control_time_s: int | None = None,
single_task: str | None = None,

View File

@@ -9,6 +9,7 @@
Follow the [installation instructions](https://github.com/huggingface/lerobot#installation) to install LeRobot.
Install LeRobot with HopeJR dependencies:
```bash
pip install -e ".[hopejr]"
```
@@ -40,35 +41,39 @@ python -m lerobot.calibrate \
When running the calibration script, a calibration GUI will pop up. Finger joints are named as follows:
**Thumb**:
- **CMC**: base joint connecting thumb to hand
- **MCP**: knuckle joint
- **PIP**: first finger joint
- **DIP** : fingertip joint
**Index, Middle, Ring, and Pinky fingers**:
- **Radial flexor**: Moves base of finger towards the thumb
- **Ulnar flexor**: Moves base of finger towards the pinky
- **PIP/DIP**: Flexes the distal and proximal phalanx of the finger
Each one of these will need to be calibrated individually via the GUI.
Note that ulnar and radial flexors should have ranges of the same size (but with different offsets) in order to get symmetric movement.
Note that ulnar and radial flexors should have ranges of the same size (but with different offsets) in order to get symmetric movement.
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/calibration_gui_1.png"
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/calibration_gui_1.png"
alt="Setting boundaries in the hand calibration GUI"
title="Setting boundaries in the hand calibration GUI"
width="100%">
</img>
width="100%"
></img>
</p>
Use the calibration interface to set the range boundaries for each joint as shown above.
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/calibration_gui_2.png"
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/calibration_gui_2.png"
alt="Saving calibration values"
title="Saving calibration values"
width="100%">
</img>
width="100%"
></img>
</p>
Once you have set the appropriate boundaries for all joints, click "Save" to save the calibration values to the motors.
@@ -122,16 +127,18 @@ python -m lerobot.calibrate \
```
This will open a calibration GUI where you can set the range limits for each motor. The arm motions are organized as follows:
- **Shoulder**: pitch, yaw, and roll
- **Elbow**: flex
- **Wrist**: pitch, yaw, and roll
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/calibration_gui_2.png"
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/calibration_gui_2.png"
alt="Setting boundaries in the arm calibration GUI"
title="Setting boundaries in the arm calibration GUI"
width="100%">
</img>
width="100%"
></img>
</p>
Use the calibration interface to set the range boundaries for each joint. Move each joint through its full range of motion and adjust the minimum and maximum values accordingly. Once you have set the appropriate boundaries for all joints, save the calibration.
@@ -169,6 +176,7 @@ Calibration saved to /Users/your_username/.cache/huggingface/lerobot/calibration
Due to global variable conflicts in the Feetech middleware, teleoperation for arm and hand must run in separate shell sessions:
### Hand
```bash
python -m lerobot.teleoperate \
--robot.type=hope_jr_hand \
@@ -184,6 +192,7 @@ python -m lerobot.teleoperate \
```
### Arm
```bash
python -m lerobot.teleoperate \
--robot.type=hope_jr_arm \

View File

@@ -10,15 +10,16 @@ For a visual walkthrough of the assembly process, you can refer to [this video t
> [!WARNING]
> Since the production of this video, we simplified the configuration phase. Because of this, two things differ from the instructions in that video:
>
> - Don't plug in all the motor cables right away and wait to be instructed to do so in [Configure the motors](#configure-the-motors).
> - Don't screw the controller board (PCB) into the base right away; wait until you are instructed to do so in [Configure the motors](#configure-the-motors).
## Install LeRobot 🤗
To install LeRobot follow, our [Installation Guide](./installation)
In addition to these instructions, you need to install the Dynamixel SDK:
```bash
pip install -e ".[dynamixel]"
```
@@ -28,6 +29,7 @@ pip install -e ".[dynamixel]"
### 1. Find the USB ports associated with each arm
To find the port for each bus servo adapter, run this script:
```bash
python -m lerobot.find_port
```
@@ -54,6 +56,7 @@ Where the found port is: `/dev/tty.usbmodem575E0032081` corresponding to your le
<hfoption id="Linux">
On Linux, you might need to give access to the USB ports by running:
```bash
sudo chmod 666 /dev/ttyACM0
sudo chmod 666 /dev/ttyACM1
@@ -99,9 +102,11 @@ python -m lerobot.setup_motors \
--robot.type=koch_follower \
--robot.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step
```
</hfoption>
<hfoption id="API example">
<!-- prettier-ignore-start -->
```python
from lerobot.robots.koch_follower import KochFollower, KochFollowerConfig
@@ -112,10 +117,13 @@ config = KochFollowerConfig(
follower = KochFollower(config)
follower.setup_motors()
```
<!-- prettier-ignore-end -->
</hfoption>
</hfoptions>
You should see the following instruction.
```
Connect the controller board to the 'gripper' motor only and press enter.
```
@@ -125,22 +133,26 @@ As instructed, plug the gripper's motor. Make sure it's the only motor connected
<details>
<summary>Troubleshooting</summary>
If you get an error at that point, check your cables and make sure they are plugged in properly:
<ul>
<li>Power supply</li>
<li>USB cable between your computer and the controller board</li>
<li>The 3-pin cable from the controller board to the motor</li>
</ul>
If you get an error at that point, check your cables and make sure they are plugged in properly:
<ul>
<li>Power supply</li>
<li>USB cable between your computer and the controller board</li>
<li>The 3-pin cable from the controller board to the motor</li>
</ul>
If you are using a Waveshare controller board, make sure that the two jumpers are set on the `B` channel (USB).
If you are using a Waveshare controller board, make sure that the two jumpers are set on the `B` channel (USB).
</details>
You should then see the following message:
```
'gripper' motor id set to 6
```
Followed by the next instruction:
```
Connect the controller board to the 'wrist_roll' motor only and press enter.
```
@@ -155,6 +167,7 @@ Repeat the operation for each motor as instructed.
When you are done, the script will simply finish, at which point the motors are ready to be used. You can now plug the 3-pin cable from each motor to the next one, and the cable from the first motor (the 'shoulder pan' with id=1) to the controller board, which can now be attached to the base of the arm.
#### Leader
Do the same steps for the leader arm but modify the command or script accordingly.
<hfoptions id="setup_motors">
@@ -165,9 +178,11 @@ python -m lerobot.setup_motors \
--teleop.type=koch_leader \
--teleop.port=/dev/tty.usbmodem575E0031751 \ # <- paste here the port found at previous step
```
</hfoption>
<hfoption id="API example">
<!-- prettier-ignore-start -->
```python
from lerobot.teleoperators.koch_leader import KochLeader, KochLeaderConfig
@@ -178,6 +193,8 @@ config = KochLeaderConfig(
leader = KochLeader(config)
leader.setup_motors()
```
<!-- prettier-ignore-end -->
</hfoption>
</hfoptions>
@@ -199,9 +216,11 @@ python -m lerobot.calibrate \
--robot.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--robot.id=my_awesome_follower_arm # <- Give the robot a unique name
```
</hfoption>
<hfoption id="API example">
<!-- prettier-ignore-start -->
```python
from lerobot.robots.koch_follower import KochFollowerConfig, KochFollower
@@ -215,6 +234,8 @@ follower.connect(calibrate=False)
follower.calibrate()
follower.disconnect()
```
<!-- prettier-ignore-end -->
</hfoption>
</hfoptions>
@@ -233,9 +254,11 @@ python -m lerobot.calibrate \
--teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--teleop.id=my_awesome_leader_arm # <- Give the robot a unique name
```
</hfoption>
<hfoption id="API example">
<!-- prettier-ignore-start -->
```python
from lerobot.teleoperators.koch_leader import KochLeaderConfig, KochLeader
@@ -249,10 +272,12 @@ leader.connect(calibrate=False)
leader.calibrate()
leader.disconnect()
```
<!-- prettier-ignore-end -->
</hfoption>
</hfoptions>
Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following this tutorial: [Getting started with real-world robots](./getting_started_real_world_robot)
> [!TIP]
> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).

View File

@@ -8,31 +8,43 @@ Follow this [README](https://github.com/SIGRobotics-UIUC/LeKiwi). It contains th
It also includes advice if it's your first time printing or if you don't own a 3D printer.
### Wired version
If you have the **wired** LeKiwi version, you can skip the installation of the Raspberry Pi and setting up SSH. You can also run all commands directly on your PC for both the LeKiwi scripts and the leader arm scripts for teleoperating.
## Install software on Pi
Now we have to set up the remote PC that will run on the LeKiwi Robot. This is normally a Raspberry Pi, but can be any PC that can run on 5V and has enough usb ports (2 or more) for the cameras and motor control board.
### Install OS
For setting up the Raspberry Pi and its SD-card see: [Setup PI](https://www.raspberrypi.com/documentation/computers/getting-started.html). Here is explained how to download the [Imager](https://www.raspberrypi.com/software/) to install Raspberry Pi OS or Ubuntu.
### Setup SSH
After setting up your Pi, you should enable and set up [SSH](https://www.raspberrypi.com/news/coding-on-raspberry-pi-remotely-with-visual-studio-code/) (Secure Shell Protocol) so you can log in to the Pi from your laptop without requiring a screen, keyboard, and mouse on the Pi. A great tutorial on how to do this can be found [here](https://www.raspberrypi.com/documentation/computers/remote-access.html#ssh). Logging into your Pi can be done in your Command Prompt (cmd) or, if you use VSCode you can use [this](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-ssh) extension.
### Install LeRobot on Pi 🤗
On your Raspberry Pi install LeRobot using our [Installation Guide](./installation)
In addition to these instructions, you need to install the Feetech sdk on your Pi:
In addition to these instructions, you need to install the Feetech SDK & ZeroMQ on your Pi:
```bash
pip install -e ".[feetech]"
pip install -e ".[lekiwi]"
```
## Install LeRobot locally
If you already have installed LeRobot on your laptop/pc you can skip this step; otherwise, please follow along as we do the same steps we did on the Pi.
Follow our [Installation Guide](./installation)
In addition to these instructions, you need to install the Feetech SDK & ZeroMQ on your laptop/pc:
```bash
pip install -e ".[lekiwi]"
```
Great :hugs:! You are now done installing LeRobot, and we can begin assembling the SO100/SO101 arms and the mobile base :robot:.
Every time you now want to use LeRobot, you can go to the `~/lerobot` folder where we installed LeRobot and run one of the commands.
@@ -46,6 +58,7 @@ First, we will assemble the two SO100/SO101 arms. One to attach to the mobile ba
### Find the USB ports associated with motor board
To find the port for each bus servo adapter, run this script:
```bash
python -m lerobot.find_port
```
@@ -72,6 +85,7 @@ Where the found port is: `/dev/tty.usbmodem575E0032081` corresponding to your bo
<hfoption id="Linux">
On Linux, you might need to give access to the USB ports by running:
```bash
sudo chmod 666 /dev/ttyACM0
sudo chmod 666 /dev/ttyACM1
@@ -96,6 +110,7 @@ Where the found port is: `/dev/ttyACM0` corresponding to your board.
</hfoptions>
### Configure motors
The instructions for configuring the motors can be found in the SO101 [docs](./so101#configure-the-motors). Besides the ids for the arm motors, we also need to set the motor ids for the mobile base. These need to be in a specific order to work. Below an image of the motor ids and motor mounting positions for the mobile base. Note that we only use one Motor Control board on LeKiwi. This means the motor ids for the wheels are 7, 8 and 9.
You can run this command to setup motors for LeKiwi. It will first setup the motors for arm (id 6..1) and then setup motors for wheels (9,8,7)
@@ -113,27 +128,36 @@ python -m lerobot.setup_motors \
If you are having trouble connecting to the Mobile SO100, follow these steps to diagnose and resolve the issue.
#### 1. Verify IP Address Configuration
Make sure that the correct IP for the Pi is used in the commands or in your code. To check the Raspberry Pi's IP address, run (on the Pi command line):
```bash
hostname -I
```
#### 2. Check if Pi is reachable from laptop/pc
Try pinging the Raspberry Pi from your laptop:
```bash
ping <your_pi_ip_address>
```
If the ping fails:
- Ensure the Pi is powered on and connected to the same network.
- Check if SSH is enabled on the Pi.
#### 3. Try SSH connection
If you can't SSH into the Pi, it might not be properly connected. Use:
```bash
ssh <your_pi_user_name>@<your_pi_ip_address>
```
If you get a connection error:
- Ensure SSH is enabled on the Pi by running:
```bash
sudo raspi-config
@@ -158,10 +182,13 @@ python -m lerobot.calibrate \
We unified the calibration method for most robots, thus, the calibration steps for this SO100 arm are the same as the steps for the Koch and SO101. First, we have to move the robot to the position where each joint is in the middle of its range, then we press `Enter`. Secondly, we move all joints through their full range of motion. A video of this same process for the SO101 as reference can be found [here](https://huggingface.co/docs/lerobot/en/so101#calibration-video).
### Wired version
If you have the **wired** LeKiwi version, please run all commands on your laptop.
### Calibrate leader arm
Then calibrate the leader arm (which is attached to the laptop/pc) by running the following command or API example on your laptop:
<hfoptions id="calibrate_leader">
<hfoption id="Command">
@@ -171,9 +198,11 @@ python -m lerobot.calibrate \
--teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--teleop.id=my_awesome_leader_arm # <- Give the robot a unique name
```
</hfoption>
<hfoption id="API example">
<!-- prettier-ignore-start -->
```python
from lerobot.teleoperators.so100_leader import SO100LeaderConfig, SO100Leader
@@ -187,6 +216,8 @@ leader.connect(calibrate=False)
leader.calibrate()
leader.disconnect()
```
<!-- prettier-ignore-end -->
</hfoption>
</hfoptions>
@@ -196,6 +227,7 @@ leader.disconnect()
> If you're using a Mac, you might need to give Terminal permission to access your keyboard for teleoperation. Go to System Preferences > Security & Privacy > Input Monitoring and check the box for Terminal.
To teleoperate, SSH into your Raspberry Pi, and run `conda activate lerobot` and this command:
```bash
python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi
```
@@ -206,7 +238,7 @@ Then on your laptop, also run `conda activate lerobot` and run the API example,
python examples/lekiwi/teleoperate.py
```
You should see on your laptop something like this: ```[INFO] Connected to remote robot at tcp://172.17.133.91:5555 and video stream at tcp://172.17.133.91:5556.``` Now you can move the leader arm and use the keyboard (w,a,s,d) to drive forward, left, backwards, right. And use (z,x) to turn left or turn right. You can use (r,f) to increase and decrease the speed of the mobile robot. There are three speed modes, see the table below:
You should see on your laptop something like this: `[INFO] Connected to remote robot at tcp://172.17.133.91:5555 and video stream at tcp://172.17.133.91:5556.` Now you can move the leader arm and use the keyboard (w,a,s,d) to drive forward, left, backwards, right. And use (z,x) to turn left or turn right. You can use (r,f) to increase and decrease the speed of the mobile robot. There are three speed modes, see the table below:
| Speed Mode | Linear Speed (m/s) | Rotation Speed (deg/s) |
| ---------- | ------------------ | ---------------------- |
@@ -214,7 +246,6 @@ You should see on your laptop something like this: ```[INFO] Connected to remote
| Medium | 0.25 | 60 |
| Slow | 0.1 | 30 |
| Key | Action |
| --- | -------------- |
| W | Move forward |
@@ -227,9 +258,10 @@ You should see on your laptop something like this: ```[INFO] Connected to remote
| F | Decrease speed |
> [!TIP]
> If you use a different keyboard, you can change the keys for each command in the [`LeKiwiConfig`](../src/lerobot/robot_devices/robots/configs.py).
> If you use a different keyboard, you can change the keys for each command in the [`LeKiwiConfig`](../src/lerobot/robot_devices/robots/configs.py).
### Wired version
If you have the **wired** LeKiwi version, please run all commands on your laptop.
## Record a dataset
@@ -239,26 +271,32 @@ Once you're familiar with teleoperation, you can record your first dataset.
We use the Hugging Face hub features for uploading your dataset. If you haven't previously used the Hub, make sure you can login via the cli using a write-access token, this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens).
Add your token to the CLI by running this command:
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```
Then store your Hugging Face repository name in a variable:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```
Now you can record a dataset. To record episodes and upload your dataset to the hub, execute this API example tailored for LeKiwi. Make sure to first adapt the `remote_ip`, `repo_id`, `port` and `task` in the script. If you would like to run the script for longer you can increase `NB_CYCLES_CLIENT_CONNECTION`.
```bash
python examples/lekiwi/record.py
```
#### Dataset upload
Locally, your dataset is stored in this folder: `~/.cache/huggingface/lerobot/{repo-id}`. At the end of data recording, your dataset will be uploaded on your Hugging Face page (e.g. https://huggingface.co/datasets/cadene/so101_test) that you can obtain by running:
```bash
echo https://huggingface.co/datasets/${HF_USER}/so101_test
```
Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).
You can look for other LeRobot datasets on the hub by searching for `LeRobot` [tags](https://huggingface.co/datasets?other=LeRobot).
@@ -274,14 +312,13 @@ Avoid adding too much variation too quickly, as it may hinder your results.
If you want to dive deeper into this important topic, you can check out the [blog post](https://huggingface.co/blog/lerobot-datasets#what-makes-a-good-dataset) we wrote on what makes a good dataset.
#### Troubleshooting:
- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).
- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).
## Replay an episode
To replay an episode run the API example below, make sure to change `remote_ip`, `port`, LeRobotDatasetId and episode index.
```bash
python examples/lekiwi/replay.py
```
@@ -297,4 +334,4 @@ python examples/lekiwi/evaluate.py
```
> [!TIP]
> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).

View File

@@ -18,11 +18,10 @@ import base64
import json
import logging
from functools import cached_property
from typing import Any, Dict, Optional, Tuple
from typing import Any
import cv2
import numpy as np
import zmq
from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
@@ -35,6 +34,9 @@ class LeKiwiClient(Robot):
name = "lekiwi_client"
def __init__(self, config: LeKiwiClientConfig):
import zmq
self._zmq = zmq
super().__init__(config)
self.config = config
self.id = config.id
@@ -117,6 +119,7 @@ class LeKiwiClient(Robot):
"LeKiwi Daemon is already connected. Do not run `robot.connect()` twice."
)
zmq = self._zmq
self.zmq_context = zmq.Context()
self.zmq_cmd_socket = self.zmq_context.socket(zmq.PUSH)
zmq_cmd_locator = f"tcp://{self.remote_ip}:{self.port_zmq_cmd}"
@@ -139,8 +142,9 @@ class LeKiwiClient(Robot):
def calibrate(self) -> None:
pass
def _poll_and_get_latest_message(self) -> Optional[str]:
def _poll_and_get_latest_message(self) -> str | None:
"""Polls the ZMQ socket for a limited time and returns the latest message string."""
zmq = self._zmq
poller = zmq.Poller()
poller.register(self.zmq_observation_socket, zmq.POLLIN)
@@ -167,7 +171,7 @@ class LeKiwiClient(Robot):
return last_msg
def _parse_observation_json(self, obs_string: str) -> Optional[Dict[str, Any]]:
def _parse_observation_json(self, obs_string: str) -> dict[str, Any] | None:
"""Parses the JSON observation string."""
try:
return json.loads(obs_string)
@@ -175,7 +179,7 @@ class LeKiwiClient(Robot):
logging.error(f"Error decoding JSON observation: {e}")
return None
def _decode_image_from_b64(self, image_b64: str) -> Optional[np.ndarray]:
def _decode_image_from_b64(self, image_b64: str) -> np.ndarray | None:
"""Decodes a base64 encoded image string to an OpenCV image."""
if not image_b64:
return None
@@ -191,18 +195,18 @@ class LeKiwiClient(Robot):
return None
def _remote_state_from_obs(
self, observation: Dict[str, Any]
) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]:
self, observation: dict[str, Any]
) -> tuple[dict[str, np.ndarray], dict[str, Any]]:
"""Extracts frames, and state from the parsed observation."""
flat_state = {key: observation.get(key, 0.0) for key in self._state_order}
state_vec = np.array([flat_state[key] for key in self._state_order], dtype=np.float32)
obs_dict: Dict[str, Any] = {**flat_state, "observation.state": state_vec}
obs_dict: dict[str, Any] = {**flat_state, "observation.state": state_vec}
# Decode images
current_frames: Dict[str, np.ndarray] = {}
current_frames: dict[str, np.ndarray] = {}
for cam_name, image_b64 in observation.items():
if cam_name not in self._cameras_ft:
continue
@@ -212,7 +216,7 @@ class LeKiwiClient(Robot):
return current_frames, obs_dict
def _get_data(self) -> Tuple[Dict[str, np.ndarray], Dict[str, Any], Dict[str, Any]]:
def _get_data(self) -> tuple[dict[str, np.ndarray], dict[str, Any], dict[str, Any]]:
"""
Polls the video socket for the latest observation data.

View File

@@ -13,8 +13,9 @@
# limitations under the License.
import abc
import builtins
from pathlib import Path
from typing import Any, Type
from typing import Any
import draccus
@@ -39,7 +40,7 @@ class Robot(abc.ABC):
"""
# Set these in ALL subclasses
config_class: Type[RobotConfig]
config_class: builtins.type[RobotConfig]
name: str
def __init__(self, config: RobotConfig):

View File

@@ -11,6 +11,7 @@ Follow this [README](https://github.com/TheRobotStudio/SO-ARM100/blob/main/SO100
To install LeRobot, follow our [Installation Guide](./installation)
In addition to these instructions, you need to install the Feetech SDK:
```bash
pip install -e ".[feetech]"
```
@@ -23,6 +24,7 @@ Unlike the SO-101, the motor connectors are not easily accessible once the arm i
### 1. Find the USB ports associated with each arm
To find the port for each bus servo adapter, run this script:
```bash
python -m lerobot.find_port
```
@@ -49,6 +51,7 @@ Where the found port is: `/dev/tty.usbmodem575E0032081` corresponding to your le
<hfoption id="Linux">
On Linux, you might need to give access to the USB ports by running:
```bash
sudo chmod 666 /dev/ttyACM0
sudo chmod 666 /dev/ttyACM1
@@ -94,9 +97,11 @@ python -m lerobot.setup_motors \
--robot.type=so100_follower \
--robot.port=/dev/tty.usbmodem585A0076841 # <- paste here the port found at previous step
```
</hfoption>
<hfoption id="API example">
<!-- prettier-ignore-start -->
```python
from lerobot.robots.so100_follower import SO100Follower, SO100FollowerConfig
@@ -107,10 +112,13 @@ config = SO100FollowerConfig(
follower = SO100Follower(config)
follower.setup_motors()
```
<!-- prettier-ignore-end -->
</hfoption>
</hfoptions>
You should see the following instruction
```
Connect the controller board to the 'gripper' motor only and press enter.
```
@@ -120,22 +128,26 @@ As instructed, plug the gripper's motor. Make sure it's the only motor connected
<details>
<summary>Troubleshooting</summary>
If you get an error at that point, check your cables and make sure they are plugged in properly:
<ul>
<li>Power supply</li>
<li>USB cable between your computer and the controller board</li>
<li>The 3-pin cable from the controller board to the motor</li>
</ul>
If you get an error at that point, check your cables and make sure they are plugged in properly:
<ul>
<li>Power supply</li>
<li>USB cable between your computer and the controller board</li>
<li>The 3-pin cable from the controller board to the motor</li>
</ul>
If you are using a Waveshare controller board, make sure that the two jumpers are set on the `B` channel (USB).
</details>
You should then see the following message:
```
'gripper' motor id set to 6
```
Followed by the next instruction:
```
Connect the controller board to the 'wrist_roll' motor only and press enter.
```
@@ -150,6 +162,7 @@ Repeat the operation for each motor as instructed.
When you are done, the script will simply finish, at which point the motors are ready to be used. You can now plug the 3-pin cable from each motor to the next one, and the cable from the first motor (the 'shoulder pan' with id=1) to the controller board, which can now be attached to the base of the arm.
#### Leader
Do the same steps for the leader arm.
<hfoptions id="setup_motors">
@@ -162,6 +175,7 @@ python -m lerobot.setup_motors \
</hfoption>
<hfoption id="API example">
<!-- prettier-ignore-start -->
```python
from lerobot.teleoperators.so100_leader import SO100Leader, SO100LeaderConfig
@@ -172,6 +186,8 @@ config = SO100LeaderConfig(
leader = SO100Leader(config)
leader.setup_motors()
```
<!-- prettier-ignore-end -->
</hfoption>
</hfoptions>
@@ -184,7 +200,10 @@ leader.setup_motors()
<div class="video-container">
<video controls width="600">
<source src="https://github.com/user-attachments/assets/0c95b88c-5b85-413d-ba19-aee2f864f2a7" type="video/mp4" />
<source
src="https://github.com/user-attachments/assets/0c95b88c-5b85-413d-ba19-aee2f864f2a7"
type="video/mp4"
/>
</video>
</div>
@@ -193,6 +212,7 @@ leader.setup_motors()
Follow the video for removing gears. You need to remove the gear for the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm.
### Clean Parts
Remove all support material from the 3D-printed parts. The easiest way to do this is using a small screwdriver to get underneath the support material.
### Additional Guidance
@@ -202,7 +222,10 @@ Remove all support material from the 3D-printed parts. The easiest way to do thi
<div class="video-container">
<video controls width="600">
<source src="https://github.com/user-attachments/assets/488a39de-0189-4461-9de3-05b015f90cca" type="video/mp4" />
<source
src="https://github.com/user-attachments/assets/488a39de-0189-4461-9de3-05b015f90cca"
type="video/mp4"
/>
</video>
</div>
@@ -216,75 +239,117 @@ This video provides visual guidance for assembling the arms, but it doesn't spec
### First Motor
**Step 2: Insert Wires**
- Insert two wires into the first motor.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_1.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_1.webp"
style="height:300px;"
/>
**Step 3: Install in Base**
- Place the first motor into the base.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_2.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_2.webp"
style="height:300px;"
/>
**Step 4: Secure Motor**
- Fasten the motor with 4 screws. Two from the bottom and two from top.
**Step 5: Attach Motor Holder**
- Slide over the first motor holder and fasten it using two screws (one on each side).
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_4.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_4.webp"
style="height:300px;"
/>
**Step 6: Attach Motor Horns**
- Install both motor horns, securing the top horn with a screw. Try not to move the motor position when attaching the motor horn, especially for the leader arms, where we removed the gears.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_5.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_5.webp"
style="height:300px;"
/>
<details>
<summary><strong>Video adding motor horn</strong></summary>
<summary>
<strong>Video adding motor horn</strong>
</summary>
<video src="https://github.com/user-attachments/assets/ef3391a4-ad05-4100-b2bd-1699bf86c969"></video>
</details>
**Step 7: Attach Shoulder Part**
- Route one wire to the back of the robot and the other to the left or towards you (see photo).
- Attach the shoulder part.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_6.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_6.webp"
style="height:300px;"
/>
**Step 8: Secure Shoulder**
- Tighten the shoulder part with 4 screws on top and 4 on the bottom
*(access bottom holes by turning the shoulder).*
_(access bottom holes by turning the shoulder)._
---
### Second Motor Assembly
**Step 9: Install Motor 2**
- Slide the second motor in from the top and link the wire from motor 1 to motor 2.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_8.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_8.webp"
style="height:300px;"
/>
**Step 10: Attach Shoulder Holder**
- Add the shoulder motor holder.
- Ensure the wire from motor 1 to motor 2 goes behind the holder while the other wire is routed upward (see photo).
- This part can be tight to assemble, you can use a workbench like the image or a similar setup to push the part around the motor.
<div style="display: flex;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_9.webp" style="height:250px;"/>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_10.webp" style="height:250px;"/>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_12.webp" style="height:250px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_9.webp"
style="height:250px;"
/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_10.webp"
style="height:250px;"
/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_12.webp"
style="height:250px;"
/>
</div>
**Step 11: Secure Motor 2**
- Fasten the second motor with 4 screws.
**Step 12: Attach Motor Horn**
- Attach both motor horns to motor 2, again use the horn screw.
**Step 13: Attach Base**
- Install the base attachment using 2 screws.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_11.webp" style="height:300px;">
**Step 14: Attach Upper Arm**
- Attach the upper arm with 4 screws on each side.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_13.webp" style="height:300px;">
@@ -294,89 +359,144 @@ This video provides visual guidance for assembling the arms, but it doesn't spec
### Third Motor Assembly
**Step 15: Install Motor 3**
- Route the motor cable from motor 2 through the cable holder to motor 3, then secure motor 3 with 4 screws.
**Step 16: Attach Motor Horn**
- Attach both motor horns to motor 3 and secure one again with a horn screw.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_14.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_14.webp"
style="height:300px;"
/>
**Step 17: Attach Forearm**
- Connect the forearm to motor 3 using 4 screws on each side.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_15.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_15.webp"
style="height:300px;"
/>
---
### Fourth Motor Assembly
**Step 18: Install Motor 4**
- Slide in motor 4, attach the cable from motor 3, and secure the cable in its holder with a screw.
<div style="display: flex;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_16.webp" style="height:300px;"/>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_19.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_16.webp"
style="height:300px;"
/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_19.webp"
style="height:300px;"
/>
</div>
**Step 19: Attach Motor Holder 4**
- Install the fourth motor holder (a tight fit). Ensure one wire is routed upward and the wire from motor 3 is routed downward (see photo).
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_17.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_17.webp"
style="height:300px;"
/>
**Step 20: Secure Motor 4 & Attach Horn**
- Fasten motor 4 with 4 screws and attach its motor horns, use for one a horn screw.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_18.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_18.webp"
style="height:300px;"
/>
---
### Wrist Assembly
**Step 21: Install Motor 5**
- Insert motor 5 into the wrist holder and secure it with 2 front screws.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_20.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_20.webp"
style="height:300px;"
/>
**Step 22: Attach Wrist**
- Connect the wire from motor 4 to motor 5, and insert the other wire for the gripper at this point as well.
- Secure the wrist to motor 4 using 4 screws on both sides.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_22.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_22.webp"
style="height:300px;"
/>
**Step 23: Attach Wrist Horn**
- Install only one motor horn on the wrist motor and secure it with a horn screw.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_23.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_23.webp"
style="height:300px;"
/>
---
### Follower Configuration
**Step 24: Attach Gripper**
- Attach the gripper to motor 5.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_24.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_24.webp"
style="height:300px;"
/>
**Step 25: Install Gripper Motor**
- Insert the gripper motor, connect the motor wire from motor 5 to motor 6, and secure it with 3 screws on each side.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_25.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_25.webp"
style="height:300px;"
/>
**Step 26: Attach Gripper Horn & Claw**
- Attach the motor horns and again use a horn screw.
- Install the gripper claw and secure it with 4 screws on both sides.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_26.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_26.webp"
style="height:300px;"
/>
**Step 27: Mount Controller**
- Attach the motor controller to the back of the robot.
<div style="display: flex;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_27.webp" style="height:300px;"/>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_28.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_27.webp"
style="height:300px;"
/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_28.webp"
style="height:300px;"
/>
</div>
*Assembly complete, proceed to Leader arm assembly.*
_Assembly complete, proceed to Leader arm assembly._
---
@@ -385,31 +505,54 @@ This video provides visual guidance for assembling the arms, but it doesn't spec
For the leader configuration, perform **Steps 1–23**. Make sure that you have removed the motor gears from the motors.
**Step 24: Attach Leader Holder**
- Mount the leader holder onto the wrist and secure it with a screw.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_29.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_29.webp"
style="height:300px;"
/>
**Step 25: Attach Handle**
- Attach the handle to motor 5 using 4 screws.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_30.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_30.webp"
style="height:300px;"
/>
**Step 26: Install Gripper Motor**
- Insert the gripper motor, secure it with 3 screws on each side, attach a motor horn using a horn screw, and connect the motor wire.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_31.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_31.webp"
style="height:300px;"
/>
**Step 27: Attach Trigger**
- Attach the follower trigger with 4 screws.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_32.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_32.webp"
style="height:300px;"
/>
**Step 28: Mount Controller**
- Attach the motor controller to the back of the robot.
<div style="display: flex;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_27.webp" style="height:300px;"/>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_28.webp" style="height:300px;"/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_27.webp"
style="height:300px;"
/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/so100_assembly_28.webp"
style="height:300px;"
/>
</div>
## Calibrate
@@ -430,9 +573,11 @@ python -m lerobot.calibrate \
--robot.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--robot.id=my_awesome_follower_arm # <- Give the robot a unique name
```
</hfoption>
<hfoption id="API example">
<!-- prettier-ignore-start -->
```python
from lerobot.robots.so100_follower import SO100FollowerConfig, SO100Follower
@@ -446,6 +591,8 @@ follower.connect(calibrate=False)
follower.calibrate()
follower.disconnect()
```
<!-- prettier-ignore-end -->
</hfoption>
</hfoptions>
@@ -464,9 +611,11 @@ python -m lerobot.calibrate \
--teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--teleop.id=my_awesome_leader_arm # <- Give the robot a unique name
```
</hfoption>
<hfoption id="API example">
<!-- prettier-ignore-start -->
```python
from lerobot.teleoperators.so100_leader import SO100LeaderConfig, SO100Leader
@@ -480,10 +629,12 @@ leader.connect(calibrate=False)
leader.calibrate()
leader.disconnect()
```
<!-- prettier-ignore-end -->
</hfoption>
</hfoptions>
Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following this tutorial: [Getting started with real-world robots](./getting_started_real_world_robot)
> [!TIP]
> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).

View File

@@ -12,6 +12,7 @@ And advise if it's your first time printing or if you don't own a 3D printer.
To install LeRobot, follow our [Installation Guide](./installation)
In addition to these instructions, you need to install the Feetech SDK:
```bash
pip install -e ".[feetech]"
```
@@ -20,16 +21,17 @@ pip install -e ".[feetech]"
The follower arm uses 6x STS3215 motors with 1/345 gearing. The leader, however, uses three differently geared motors to make sure it can both sustain its own weight and be moved without requiring much force. Which motor is needed for which joint is shown in the table below.
| Leader-Arm Axis | Motor | Gear Ratio |
|-----------------|:-------:|:----------:|
| Base / Shoulder Pan | 1 | 1 / 191 |
| Shoulder Lift | 2 | 1 / 345 |
| Elbow Flex | 3 | 1 / 191 |
| Wrist Flex | 4 | 1 / 147 |
| Wrist Roll | 5 | 1 / 147 |
| Gripper | 6 | 1 / 147 |
| Leader-Arm Axis | Motor | Gear Ratio |
| ------------------- | :---: | :--------: |
| Base / Shoulder Pan | 1 | 1 / 191 |
| Shoulder Lift | 2 | 1 / 345 |
| Elbow Flex | 3 | 1 / 191 |
| Wrist Flex | 4 | 1 / 147 |
| Wrist Roll | 5 | 1 / 147 |
| Gripper | 6 | 1 / 147 |
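For quick reference while labeling motors, the table above can also be written as a small mapping. This is purely illustrative; the snake_case joint keys are an assumption and may not match the exact names used in the LeRobot configs:

```python
# Illustrative only: leader-arm joint -> (motor number, gear ratio), taken from the table above.
# The joint key names are assumptions; check the LeRobot configs for the authoritative names.
LEADER_ARM_GEARS = {
    "shoulder_pan": (1, "1/191"),
    "shoulder_lift": (2, "1/345"),
    "elbow_flex": (3, "1/191"),
    "wrist_flex": (4, "1/147"),
    "wrist_roll": (5, "1/147"),
    "gripper": (6, "1/147"),
}
```

Keeping the differently geared motors sorted before assembly makes it easy to check each joint against this table.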
### Clean Parts
Remove all support material from the 3D-printed parts. The easiest way to do this is to use a small screwdriver to get underneath the support material.
### Joint 1
@@ -44,7 +46,10 @@ Remove all support material from the 3D-printed parts. The easiest way to do thi
<div class="video-container">
<video controls width="600">
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint1_v2.mp4" type="video/mp4" />
<source
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint1_v2.mp4"
type="video/mp4"
/>
</video>
</div>
@@ -57,7 +62,10 @@ Remove all support material from the 3D-printed parts. The easiest way to do thi
<div class="video-container">
<video controls width="600">
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint2_v2.mp4" type="video/mp4" />
<source
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint2_v2.mp4"
type="video/mp4"
/>
</video>
</div>
@@ -69,7 +77,10 @@ Remove all support material from the 3D-printed parts. The easiest way to do thi
<div class="video-container">
<video controls width="600">
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint3_v2.mp4" type="video/mp4" />
<source
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint3_v2.mp4"
type="video/mp4"
/>
</video>
</div>
@@ -81,7 +92,10 @@ Remove all support material from the 3D-printed parts. The easiest way to do thi
<div class="video-container">
<video controls width="600">
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint4_v2.mp4" type="video/mp4" />
<source
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint4_v2.mp4"
type="video/mp4"
/>
</video>
</div>
@@ -93,7 +107,10 @@ Remove all support material from the 3D-printed parts. The easiest way to do thi
<div class="video-container">
<video controls width="600">
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint5_v2.mp4" type="video/mp4" />
<source
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint5_v2.mp4"
type="video/mp4"
/>
</video>
</div>
@@ -109,7 +126,10 @@ Remove all support material from the 3D-printed parts. The easiest way to do thi
<div class="video-container">
<video controls width="600">
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Gripper_v2.mp4" type="video/mp4" />
<source
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Gripper_v2.mp4"
type="video/mp4"
/>
</video>
</div>
@@ -123,7 +143,10 @@ Remove all support material from the 3D-printed parts. The easiest way to do thi
<div class="video-container">
<video controls width="600">
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Leader_v2.mp4" type="video/mp4" />
<source
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Leader_v2.mp4"
type="video/mp4"
/>
</video>
</div>
@@ -135,6 +158,7 @@ Remove all support material from the 3D-printed parts. The easiest way to do thi
### 1. Find the USB ports associated with each arm
To find the port for each bus servo adapter, run this script:
```bash
python -m lerobot.find_port
```
@@ -161,6 +185,7 @@ Where the found port is: `/dev/tty.usbmodem575E0032081` corresponding to your le
<hfoption id="Linux">
On Linux, you might need to give access to the USB ports by running:
```bash
sudo chmod 666 /dev/ttyACM0
sudo chmod 666 /dev/ttyACM1
@@ -198,7 +223,10 @@ The video below shows the sequence of steps for setting the motor ids.
<div class="video-container">
<video controls width="600">
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/setup_motors_so101_2.mp4" type="video/mp4" />
<source
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/setup_motors_so101_2.mp4"
type="video/mp4"
/>
</video>
</div>
@@ -214,9 +242,11 @@ python -m lerobot.setup_motors \
--robot.type=so101_follower \
--robot.port=/dev/tty.usbmodem585A0076841 # <- paste here the port found at previous step
```
</hfoption>
<hfoption id="API example">
<!-- prettier-ignore-start -->
```python
from lerobot.robots.so101_follower import SO101Follower, SO101FollowerConfig
@@ -227,10 +257,13 @@ config = SO101FollowerConfig(
follower = SO101Follower(config)
follower.setup_motors()
```
<!-- prettier-ignore-end -->
</hfoption>
</hfoptions>
You should see the following instruction:
```bash
Connect the controller board to the 'gripper' motor only and press enter.
```
@@ -240,22 +273,26 @@ As instructed, plug the gripper's motor. Make sure it's the only motor connected
<details>
<summary>Troubleshooting</summary>
If you get an error at that point, check your cables and make sure they are plugged in properly:
<ul>
<li>Power supply</li>
<li>USB cable between your computer and the controller board</li>
<li>The 3-pin cable from the controller board to the motor</li>
</ul>
If you get an error at that point, check your cables and make sure they are plugged in properly:
<ul>
<li>Power supply</li>
<li>USB cable between your computer and the controller board</li>
<li>The 3-pin cable from the controller board to the motor</li>
</ul>
If you are using a Waveshare controller board, make sure that the two jumpers are set on the `B` channel (USB).
If you are using a Waveshare controller board, make sure that the two jumpers are set on the `B` channel (USB).
</details>
You should then see the following message:
```bash
'gripper' motor id set to 6
```
Followed by the next instruction:
```bash
Connect the controller board to the 'wrist_roll' motor only and press enter.
```
@@ -270,6 +307,7 @@ Repeat the operation for each motor as instructed.
When you are done, the script will simply finish, at which point the motors are ready to be used. You can now plug the 3-pin cable from each motor to the next one, and the cable from the first motor (the 'shoulder pan' with id=1) to the controller board, which can now be attached to the base of the arm.
#### Leader
Do the same steps for the leader arm.
<hfoptions id="setup_motors">
@@ -280,9 +318,11 @@ python -m lerobot.setup_motors \
--teleop.type=so101_leader \
--teleop.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step
```
</hfoption>
<hfoption id="API example">
<!-- prettier-ignore-start -->
```python
from lerobot.teleoperators.so101_leader import SO101Leader, SO101LeaderConfig
@@ -293,6 +333,8 @@ config = SO101LeaderConfig(
leader = SO101Leader(config)
leader.setup_motors()
```
<!-- prettier-ignore-end -->
</hfoption>
</hfoptions>
@@ -314,9 +356,11 @@ python -m lerobot.calibrate \
--robot.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--robot.id=my_awesome_follower_arm # <- Give the robot a unique name
```
</hfoption>
<hfoption id="API example">
<!-- prettier-ignore-start -->
```python
from lerobot.robots.so101_follower import SO101FollowerConfig, SO101Follower
@@ -330,6 +374,8 @@ follower.connect(calibrate=False)
follower.calibrate()
follower.disconnect()
```
<!-- prettier-ignore-end -->
</hfoption>
</hfoptions>
@@ -339,7 +385,10 @@ The video below shows how to perform the calibration. First you need to move the
<div class="video-container">
<video controls width="600">
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/calibrate_so101_2.mp4" type="video/mp4" />
<source
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/calibrate_so101_2.mp4"
type="video/mp4"
/>
</video>
</div>
@@ -356,9 +405,11 @@ python -m lerobot.calibrate \
--teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--teleop.id=my_awesome_leader_arm # <- Give the robot a unique name
```
</hfoption>
<hfoption id="API example">
<!-- prettier-ignore-start -->
```python
from lerobot.teleoperators.so101_leader import SO101LeaderConfig, SO101Leader
@@ -372,10 +423,12 @@ leader.connect(calibrate=False)
leader.calibrate()
leader.disconnect()
```
<!-- prettier-ignore-end -->
</hfoption>
</hfoptions>
Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following this tutorial: [Getting started with real-world robots](./getting_started_real_world_robot)
> [!TIP]
> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).

View File

@@ -5,16 +5,17 @@ This tutorial explains how to use [Stretch 3](https://hello-robot.com/stretch-3-
Familiarize yourself with Stretch by following its [tutorials](https://docs.hello-robot.com/0.3/getting_started/hello_robot/) (recommended).
To use LeRobot on Stretch, 3 options are available:
- [tethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup)
- [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup)
- ssh directly into Stretch (you will first need to install and configure openssh-server on Stretch using one of the two setups above)
## Install LeRobot
On Stretch's CLI, follow these steps:
1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
```bash
mkdir -p ~/miniconda3
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
@@ -24,6 +25,7 @@ rm ~/miniconda3/miniconda.sh
```
2. Comment out these lines in `~/.profile` (they can mess up the paths used by conda, and `~/.local/bin` should already be in your PATH)
```
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/.local/bin" ] ; then
@@ -34,21 +36,25 @@ fi
3. Restart shell or `source ~/.bashrc`
4. Create and activate a fresh conda environment for lerobot
```bash
conda create -y -n lerobot python=3.10 && conda activate lerobot
```
5. Clone LeRobot:
```bash
git clone https://github.com/huggingface/lerobot.git ~/lerobot
```
6. When using `miniconda`, install `ffmpeg` in your environment:
```bash
conda install ffmpeg -c conda-forge
```
7. Install LeRobot with stretch dependencies:
```bash
cd ~/lerobot && pip install -e ".[stretch]"
```
@@ -56,6 +62,7 @@ cd ~/lerobot && pip install -e ".[stretch]"
> **Note:** If you get this message, you can ignore it: `ERROR: pip's dependency resolver does not currently take into account all the packages that are installed.`
8. Run a [system check](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#system-check) to make sure your robot is ready:
```bash
stretch_system_check.py
```
@@ -63,6 +70,7 @@ stretch_system_check.py
> **Note:** You may need to free the "robot process" after booting Stretch by running `stretch_free_robot_process.py`. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#turning-off-gamepad-teleoperation).
You should get something like this:
```bash
For use with S T R E T C H (R) from Hello Robot Inc.
---------------------------------------------------------------------
@@ -89,11 +97,13 @@ Serial Number = stretch-se3-3054
**Calibrate (Optional)**
Before operating Stretch, you need to [home](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#homing) it first. Be mindful about giving Stretch some space as this procedure will move the robot's arm and gripper. Now run this command:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=stretch \
--control.type=calibrate
```
This is equivalent to running `stretch_robot_home.py`
> **Note:** If you run any of the LeRobot scripts below and Stretch is not properly homed, it will automatically home/calibrate first.
@@ -104,28 +114,33 @@ Before trying teleoperation, you need to activate the gamepad controller by pres
Now try out teleoperation (see above documentation to learn about the gamepad controls):
> **NOTE:** To visualize the data, enable `--control.display_data=true`. This streams the data using `rerun`.
```bash
python lerobot/scripts/control_robot.py \
--robot.type=stretch \
--control.type=teleoperate
```
This is essentially the same as running `stretch_gamepad_teleop.py`
**Record a dataset**
Once you're familiar with the gamepad controls and after a bit of practice, you can try to record your first dataset with Stretch.
If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```
Store your Hugging Face repository name in a variable to run these commands:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```
Record one episode:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=stretch \
@@ -145,6 +160,7 @@ python lerobot/scripts/control_robot.py \
**Replay an episode**
Now try to replay this episode (make sure the robot's initial position is the same):
```bash
python lerobot/scripts/control_robot.py \
--robot.type=stretch \

View File

@@ -4,12 +4,12 @@ This tutorial explains how to use [Aloha and Aloha 2 stationary](https://www.tro
Follow the [documentation from Trossen Robotics](https://docs.trossenrobotics.com/aloha_docs/2.0/getting_started/stationary/hardware_setup.html) for setting up the hardware and plugging the 4 arms and 4 cameras to your computer.
## Install LeRobot
On your computer:
1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
```bash
mkdir -p ~/miniconda3
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
@@ -21,29 +21,34 @@ rm ~/miniconda3/miniconda.sh
2. Restart shell or `source ~/.bashrc`
3. Create and activate a fresh conda environment for lerobot
```bash
conda create -y -n lerobot python=3.10 && conda activate lerobot
```
4. Clone LeRobot:
```bash
git clone https://github.com/huggingface/lerobot.git ~/lerobot
```
5. When using `miniconda`, install `ffmpeg` in your environment:
```bash
conda install ffmpeg -c conda-forge
```
6. Install LeRobot with dependencies for the Aloha motors (dynamixel) and cameras (intelrealsense):
```bash
cd ~/lerobot && pip install -e ".[dynamixel, intelrealsense]"
```
## Teleoperate
**/!\ FOR SAFETY, READ THIS /!\**
\*\*/!\ FOR SAFETY, READ THIS /!\*\*
Teleoperation consists of manually operating the leader arms to move the follower arms. Importantly:
1. Make sure your leader arms are in the same position as the follower arms, so that the follower arms don't move too fast to match the leader arms,
2. Our code assumes that your robot has been assembled following Trossen Robotics instructions. This allows us to skip calibration, as we use the pre-defined calibration files in `.cache/calibration/aloha_default`. If you replace a motor, make sure you follow the exact instructions from Trossen Robotics.
@@ -59,6 +64,7 @@ python lerobot/scripts/control_robot.py \
```
By adding `--robot.max_relative_target=5`, we override the default value for `max_relative_target` defined in [`AlohaRobotConfig`](lerobot/robot_devices/robots/configs.py). It is expected to be `5` to limit the magnitude of the movement for more safety, but the teleoperation won't be smooth. When you feel confident, you can disable this limit by adding `--robot.max_relative_target=null` to the command line:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=aloha \
@@ -71,17 +77,20 @@ python lerobot/scripts/control_robot.py \
Once you're familiar with teleoperation, you can record your first dataset with Aloha.
If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```
Store your Hugging Face repository name in a variable to run these commands:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```
Record 2 episodes and upload your dataset to the hub:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=aloha \
@@ -101,11 +110,13 @@ python lerobot/scripts/control_robot.py \
## Visualize a dataset
If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id given by:
```bash
echo ${HF_USER}/aloha_test
```
If you didn't upload your dataset (i.e. you used `--control.push_to_hub=false`), you can also visualize it locally with:
```bash
python -m lerobot.scripts.visualize_dataset_html \
--repo-id ${HF_USER}/aloha_test
@@ -113,10 +124,11 @@ python -m lerobot.scripts.visualize_dataset_html \
## Replay an episode
**/!\ FOR SAFETY, READ THIS /!\**
\*\*/!\ FOR SAFETY, READ THIS /!\*\*
Replay consists of automatically replaying the sequence of actions (i.e. goal positions for your motors) recorded in a given dataset episode. Make sure the current initial position of your robot is similar to the one in your episode, so that your follower arms don't move too fast when going to the first goal positions. For safety, you might want to add `--robot.max_relative_target=5` to your command line as explained above.
Now try to replay the first episode on your robot:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=aloha \
@@ -130,6 +142,7 @@ python lerobot/scripts/control_robot.py \
## Train a policy
To train a policy to control your robot, use the [`python -m lerobot.scripts.train`](../src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
python -m lerobot.scripts.train \
--dataset.repo_id=${HF_USER}/aloha_test \
@@ -141,10 +154,11 @@ python -m lerobot.scripts.train \
```
Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/aloha_test`.
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../src/lerobot/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
3. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
For more information on the `train` script, see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md)
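If you are unsure which device string to pass to `policy.device`, a quick check with standard PyTorch calls (a minimal sketch, not a LeRobot utility) is:

```python
import torch

# Pick a sensible value for --policy.device based on what this machine has.
if torch.cuda.is_available():
    device = "cuda"  # Nvidia GPU
elif torch.backends.mps.is_available():
    device = "mps"  # Apple silicon
else:
    device = "cpu"
print(f"Use --policy.device={device}")
```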
@@ -153,6 +167,7 @@ Training should take several hours. You will find checkpoints in `outputs/train/
## Evaluate your policy
You can use the `record` function from [`lerobot/scripts/control_robot.py`](../src/lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=aloha \
@@ -171,7 +186,8 @@ python lerobot/scripts/control_robot.py \
```
As you can see, it's almost the same command as previously used to record your training dataset. A few things changed:
1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. `outputs/train/eval_act_aloha_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_aloha_test`).
1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. `outputs/train/eval_act_aloha_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_aloha_test`).
2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_aloha_test`).
3. We use `--control.num_image_writer_processes=1` instead of the default value (`0`). On our computer, using a dedicated process to write images from the 4 cameras to disk makes it possible to reach a constant 30 fps during inference. Feel free to explore different values for `--control.num_image_writer_processes`.

View File

@@ -50,12 +50,12 @@ import json
import logging
import threading
import time
from collections.abc import Callable
from contextlib import nullcontext
from copy import deepcopy
from dataclasses import asdict
from pathlib import Path
from pprint import pformat
from typing import Callable
import einops
import gymnasium as gym

View File

@@ -18,7 +18,6 @@ import argparse
import json
from copy import deepcopy
from pathlib import Path
from typing import Dict, Tuple
import cv2
import torch
@@ -162,10 +161,10 @@ def get_image_from_lerobot_dataset(dataset: LeRobotDataset):
def convert_lerobot_dataset_to_cropper_lerobot_dataset(
original_dataset: LeRobotDataset,
crop_params_dict: Dict[str, Tuple[int, int, int, int]],
crop_params_dict: dict[str, tuple[int, int, int, int]],
new_repo_id: str,
new_dataset_root: str,
resize_size: Tuple[int, int] = (128, 128),
resize_size: tuple[int, int] = (128, 128),
push_to_hub: bool = False,
task: str = "",
) -> LeRobotDataset:
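Inside this script, a call to the converter might look like the sketch below. The camera key, paths, and the interpretation of the crop tuple are assumptions made for illustration; only the parameter names come from the signature above.

```python
# Hypothetical usage; `dataset` is assumed to be an already-loaded LeRobotDataset,
# and the (0, 0, 480, 640) crop tuple is an illustrative placeholder.
cropped = convert_lerobot_dataset_to_cropper_lerobot_dataset(
    original_dataset=dataset,
    crop_params_dict={"observation.images.front": (0, 0, 480, 640)},
    new_repo_id="user/my_dataset_cropped",
    new_dataset_root="data/my_dataset_cropped",
    resize_size=(128, 128),
    push_to_hub=False,
)
```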

View File

@@ -39,8 +39,9 @@ Example:
import logging
import time
from collections import deque
from collections.abc import Sequence
from threading import Lock
from typing import Annotated, Any, Sequence
from typing import Annotated, Any
import gymnasium as gym
import numpy as np

View File

@@ -87,12 +87,10 @@ from lerobot.utils.process import ProcessSignalHandler
from lerobot.utils.random_utils import set_seed
from lerobot.utils.train_utils import (
get_step_checkpoint_dir,
load_training_state as utils_load_training_state,
save_checkpoint,
update_last_checkpoint,
)
from lerobot.utils.train_utils import (
load_training_state as utils_load_training_state,
)
from lerobot.utils.transition import move_state_dict_to_device, move_transition_to_device
from lerobot.utils.utils import (
format_big_number,

View File

@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from dataclasses import dataclass, field
from typing import Callable
import torch

View File

@@ -36,10 +36,11 @@ import logging
import pickle # nosec
import threading
import time
from collections.abc import Callable
from dataclasses import asdict
from pprint import pformat
from queue import Queue
from typing import Any, Callable, Optional
from typing import Any
import draccus
import grpc
@@ -231,7 +232,7 @@ class RobotClient:
def _aggregate_action_queues(
self,
incoming_actions: list[TimedAction],
aggregate_fn: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
aggregate_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] | None = None,
):
"""Finds the same timestep actions in the queue and aggregates them using the aggregate_fn"""
if aggregate_fn is None:
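To make the aggregation idea above concrete, here is a minimal standalone sketch. It treats each action as a plain `(timestep, tensor)` pair instead of the real `TimedAction` objects, so it illustrates the grouping logic only, not the `RobotClient` implementation.

```python
import torch


def aggregate_same_timestep(
    actions: list[tuple[int, torch.Tensor]],
    aggregate_fn=None,
) -> dict[int, torch.Tensor]:
    """Group actions by timestep and fold each group with aggregate_fn (illustrative sketch)."""
    if aggregate_fn is None:
        # Illustrative default: keep the most recently received action for a timestep.
        def aggregate_fn(old, new):
            return new

    merged: dict[int, torch.Tensor] = {}
    for timestep, action in actions:
        merged[timestep] = action if timestep not in merged else aggregate_fn(merged[timestep], action)
    return merged
```

Averaging overlapping action chunks would be a natural alternative, e.g. `aggregate_fn=lambda old, new: (old + new) / 2`.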

View File

@@ -65,8 +65,8 @@ import argparse
import gc
import logging
import time
from collections.abc import Iterator
from pathlib import Path
from typing import Iterator
import numpy as np
import rerun as rr

View File

@@ -18,7 +18,7 @@ import logging
import threading
from collections import deque
from pprint import pformat
from typing import Deque, Dict, Optional
from typing import Deque
import serial
@@ -60,7 +60,7 @@ class HomunculusArm(Teleoperator):
self.n: int = n
self.alpha: float = 2 / (n + 1)
# one deque *per joint* so we can inspect raw history if needed
self._buffers: Dict[str, Deque[int]] = {
self._buffers: dict[str, Deque[int]] = {
joint: deque(maxlen=n)
for joint in (
"shoulder_pitch",
@@ -73,7 +73,7 @@ class HomunculusArm(Teleoperator):
)
}
# running EMA value per joint lazily initialised on first read
self._ema: Dict[str, Optional[float]] = dict.fromkeys(self._buffers)
self._ema: dict[str, float | None] = dict.fromkeys(self._buffers)
self._state: dict[str, float] | None = None
self.new_state_event = threading.Event()
@@ -217,9 +217,9 @@ class HomunculusArm(Teleoperator):
return normalized_values
def _apply_ema(self, raw: Dict[str, int]) -> Dict[str, float]:
def _apply_ema(self, raw: dict[str, int]) -> dict[str, float]:
"""Update buffers & running EMA values; return smoothed dict."""
smoothed: Dict[str, float] = {}
smoothed: dict[str, float] = {}
for joint, value in raw.items():
# maintain raw history
self._buffers[joint].append(value)
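For context on the smoothing used here, `alpha = 2 / (n + 1)` is the usual exponential-moving-average convention. A minimal standalone sketch of that update (not the actual `HomunculusArm` code, which also keeps per-joint deques of raw readings):

```python
class SimpleEMA:
    """Minimal exponential moving average, lazily initialised on the first sample."""

    def __init__(self, n: int):
        self.alpha = 2 / (n + 1)  # same convention as above
        self.value: float | None = None

    def update(self, raw: float) -> float:
        # The first sample seeds the EMA; later samples are blended in.
        if self.value is None:
            self.value = float(raw)
        else:
            self.value = self.alpha * raw + (1 - self.alpha) * self.value
        return self.value
```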

View File

@@ -18,7 +18,7 @@ import logging
import threading
from collections import deque
from pprint import pformat
from typing import Deque, Dict, Optional
from typing import Deque
import serial
@@ -97,9 +97,9 @@ class HomunculusGlove(Teleoperator):
self.n: int = n
self.alpha: float = 2 / (n + 1)
# one deque *per joint* so we can inspect raw history if needed
self._buffers: Dict[str, Deque[int]] = {joint: deque(maxlen=n) for joint in self.joints}
self._buffers: dict[str, Deque[int]] = {joint: deque(maxlen=n) for joint in self.joints}
# running EMA value per joint lazily initialised on first read
self._ema: Dict[str, Optional[float]] = dict.fromkeys(self._buffers)
self._ema: dict[str, float | None] = dict.fromkeys(self._buffers)
self._state: dict[str, float] | None = None
self.new_state_event = threading.Event()
@@ -248,9 +248,9 @@ class HomunculusGlove(Teleoperator):
return normalized_values
def _apply_ema(self, raw: Dict[str, int]) -> Dict[str, int]:
def _apply_ema(self, raw: dict[str, int]) -> dict[str, int]:
"""Update buffers & running EMA values; return smoothed dict as integers."""
smoothed: Dict[str, int] = {}
smoothed: dict[str, int] = {}
for joint, value in raw.items():
# maintain raw history
self._buffers[joint].append(value)

View File

@@ -13,8 +13,9 @@
# limitations under the License.
import abc
import builtins
from pathlib import Path
from typing import Any, Type
from typing import Any
import draccus
@@ -37,7 +38,7 @@ class Teleoperator(abc.ABC):
"""
# Set these in ALL subclasses
config_class: Type[TeleoperatorConfig]
config_class: builtins.type[TeleoperatorConfig]
name: str
def __init__(self, config: TeleoperatorConfig):

View File

@@ -1,7 +1,8 @@
---
# For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1
# Doc / guide: https://huggingface.co/docs/hub/model-cards
{{ card_data }}
# prettier-ignore
{{card_data}}
---
# Model Card for {{ model_name | default("Model ID", true) }}
@@ -53,7 +54,7 @@ python -m lerobot.scripts.train \
--wandb.enable=true
```
*Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`.*
_Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._
### Evaluate the policy/run inference
@@ -71,4 +72,4 @@ Prefix the dataset repo with **eval\_** and supply `--policy.path` pointing to a
## Model Details
* **License:** {{ license | default("\[More Information Needed]", true) }}
- **License:** {{ license | default("\[More Information Needed]", true) }}

View File

@@ -46,11 +46,13 @@ class TimeBenchmark(ContextDecorator):
benchmark = TimeBenchmark()
def context_manager_example():
with benchmark:
time.sleep(0.01)
print(f"Block took {benchmark.result_ms:.2f} milliseconds")
threads = []
for _ in range(3):
t1 = threading.Thread(target=context_manager_example)

View File

@@ -15,8 +15,9 @@
# limitations under the License.
import functools
from collections.abc import Callable, Sequence
from contextlib import suppress
from typing import Callable, Sequence, TypedDict
from typing import TypedDict
import torch
import torch.nn.functional as F # noqa: N812

View File

@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Type, TypeVar
from typing import Any, TypeVar
from huggingface_hub import HfApi
from huggingface_hub.utils import validate_hf_hub_args
@@ -85,7 +86,7 @@ class HubMixin:
@classmethod
@validate_hf_hub_args
def from_pretrained(
cls: Type[T],
cls: builtins.type[T],
pretrained_name_or_path: str | Path,
*,
force_download: bool = False,

View File

@@ -14,9 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections.abc import Generator
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Generator
from typing import Any
import numpy as np
import torch

View File

@@ -185,10 +185,10 @@ def print_cuda_memory_usage():
gc.collect()
# Also clear the cache if you want to fully release the memory
torch.cuda.empty_cache()
print("Current GPU Memory Allocated: {:.2f} MB".format(torch.cuda.memory_allocated(0) / 1024**2))
print("Maximum GPU Memory Allocated: {:.2f} MB".format(torch.cuda.max_memory_allocated(0) / 1024**2))
print("Current GPU Memory Reserved: {:.2f} MB".format(torch.cuda.memory_reserved(0) / 1024**2))
print("Maximum GPU Memory Reserved: {:.2f} MB".format(torch.cuda.max_memory_reserved(0) / 1024**2))
print(f"Current GPU Memory Allocated: {torch.cuda.memory_allocated(0) / 1024**2:.2f} MB")
print(f"Maximum GPU Memory Allocated: {torch.cuda.max_memory_allocated(0) / 1024**2:.2f} MB")
print(f"Current GPU Memory Reserved: {torch.cuda.memory_reserved(0) / 1024**2:.2f} MB")
print(f"Maximum GPU Memory Reserved: {torch.cuda.max_memory_reserved(0) / 1024**2:.2f} MB")
def capture_timestamp_utc():