# Compare commits

Comparing `feat/add_r…` (base) with `user/mshuk…` (head) — 1 commit.

| Author | SHA1 | Date |
|---|---|---|
|  | 86e8c1d997 |  |
**`.dockerignore`**

````diff
@@ -1,17 +1,3 @@
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
 # Misc
 .git
 tmp
@@ -73,7 +59,7 @@ pip-log.txt
 pip-delete-this-directory.txt

 # Unit test / coverage reports
-!tests/artifacts
+!tests/data
 htmlcov/
 .tox/
 .nox/
````
**`.gitattributes`** (vendored) — 14 lines changed

````diff
@@ -1,17 +1,3 @@
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-# … (rest of the same 14-line Apache-2.0 license header, shown in full above)
 *.memmap filter=lfs diff=lfs merge=lfs -text
 *.stl filter=lfs diff=lfs merge=lfs -text
 *.safetensors filter=lfs diff=lfs merge=lfs -text
````
**`.github/ISSUE_TEMPLATE/bug-report.yml`** (vendored) — 14 lines changed

````diff
@@ -1,17 +1,3 @@
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-# … (same 14-line license header removed)
 name: "\U0001F41B Bug Report"
 description: Submit a bug report to help us improve LeRobot
 body:
````
**`.github/workflows/build-docker-images.yml`** (vendored) — 14 lines changed

````diff
@@ -1,17 +1,3 @@
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-# … (same 14-line license header removed)
 # Inspired by
 # https://github.com/huggingface/peft/blob/main/.github/workflows/build_docker_images.yml
 name: Builds
````
**`.github/workflows/nightly-tests.yml`** (vendored) — 14 lines changed

````diff
@@ -1,17 +1,3 @@
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-# … (same 14-line license header removed)
 # Inspired by
 # https://github.com/huggingface/peft/blob/main/.github/workflows/nightly.yml
 name: Nightly
````
**`.github/workflows/quality.yml`** (vendored) — 32 lines changed

````diff
@@ -1,17 +1,3 @@
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-# … (same 14-line license header removed)
 name: Quality

 on:
@@ -46,27 +32,13 @@ jobs:
         id: get-ruff-version
         run: |
           RUFF_VERSION=$(awk '/repo: https:\/\/github.com\/astral-sh\/ruff-pre-commit/{flag=1;next}/rev:/{if(flag){print $2;exit}}' .pre-commit-config.yaml)
-          echo "ruff_version=${RUFF_VERSION}" >> $GITHUB_OUTPUT
+          echo "RUFF_VERSION=${RUFF_VERSION}" >> $GITHUB_ENV

       - name: Install Ruff
-        env:
-          RUFF_VERSION: ${{ steps.get-ruff-version.outputs.ruff_version }}
-        run: python -m pip install "ruff==${RUFF_VERSION}"
+        run: python -m pip install "ruff==${{ env.RUFF_VERSION }}"

       - name: Ruff check
         run: ruff check --output-format=github

       - name: Ruff format
         run: ruff format --diff

   typos:
     name: Typos
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repository
         uses: actions/checkout@v4
         with:
           persist-credentials: false

       - name: typos-action
         uses: crate-ci/typos@v1.29.10
````
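The `get-ruff-version` step scrapes the Ruff revision pinned in `.pre-commit-config.yaml` with an awk one-liner, so CI and pre-commit can't drift apart. A minimal Python sketch of the same lookup, assuming PyYAML is available and the config keeps the standard `repos:` layout:

```python
# Sketch: read the Ruff rev pinned in .pre-commit-config.yaml
# (the same lookup the awk one-liner in quality.yml performs).
import yaml  # pip install pyyaml

def ruff_rev(path: str = ".pre-commit-config.yaml") -> str:
    with open(path) as f:
        config = yaml.safe_load(f)
    for repo in config.get("repos", []):
        if repo.get("repo") == "https://github.com/astral-sh/ruff-pre-commit":
            return repo["rev"]  # e.g. "v0.9.10" on one branch, "v0.9.6" on the other
    raise LookupError("ruff-pre-commit entry not found")

print(ruff_rev())
```

Either way the version of Ruff that runs in CI stays exactly whatever the pre-commit config pins.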
**`.github/workflows/test-docker-build.yml`** (vendored) — 16 lines changed

````diff
@@ -1,17 +1,3 @@
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-# … (same 14-line license header removed)
 # Inspired by
 # https://github.com/huggingface/peft/blob/main/.github/workflows/test-docker-build.yml
 name: Test Dockerfiles
@@ -57,7 +43,7 @@ jobs:
     needs: get_changed_files
     runs-on:
       group: aws-general-8-plus
-    if: needs.get_changed_files.outputs.matrix != ''
+    if: ${{ needs.get_changed_files.outputs.matrix }} != ''
     strategy:
       fail-fast: false
       matrix:
````
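The `if:` change here is more than cosmetic. In the bare-expression form the whole comparison is one expression, which is false when the matrix output is empty. In the `${{ … }} != ''` form, GitHub Actions interpolates the expression first and the trailing ` != ''` survives as literal text, leaving a non-empty string that is treated as true — so the job runs even with an empty matrix. A rough Python analogy of the two evaluation orders (an illustration, not GitHub's actual evaluator):

```python
# Sketch of the two `if:` evaluation orders.
matrix_output = ""  # pretend the upstream job produced no matrix

# Bare-expression form: the comparison itself is evaluated -> False when empty.
bare_form = matrix_output != ""

# Interpolated form: ${{ ... }} is substituted first, leaving the literal
# text " != ''" appended -- a non-empty string, which is truthy.
interpolated_form = bool(f"{matrix_output} != ''")

print(bare_form, interpolated_form)  # False True
```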
**`.github/workflows/test.yml`** (vendored) — 14 lines changed

````diff
@@ -1,17 +1,3 @@
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-# … (same 14-line license header removed)
 name: Tests

 on:
````
**`.github/workflows/trufflehog.yml`** (vendored) — 14 lines changed

````diff
@@ -1,17 +1,3 @@
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-# … (same 14-line license header removed)
 on:
   push:
````
**`.gitignore`** (vendored) — 16 lines changed

````diff
@@ -1,17 +1,3 @@
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-# … (same 14-line license header removed)
 # Logging
 logs
 tmp
@@ -78,7 +64,7 @@ pip-log.txt
 pip-delete-this-directory.txt

 # Unit test / coverage reports
-!tests/artifacts
+!tests/data
 htmlcov/
 .tox/
 .nox/
````
**`.pre-commit-config.yaml`**

````diff
@@ -1,29 +1,7 @@
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-# … (same 14-line license header removed)
-exclude: "tests/artifacts/.*\\.safetensors$"
+exclude: ^(tests/data)
 default_language_version:
   python: python3.10
 repos:
-  ##### Meta #####
-  - repo: meta
-    hooks:
-      - id: check-useless-excludes
-      - id: check-hooks-apply
-
-  ##### Style / Misc. #####
   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v5.0.0
     hooks:
@@ -35,40 +13,21 @@ repos:
       - id: check-toml
       - id: end-of-file-fixer
       - id: trailing-whitespace

   - repo: https://github.com/crate-ci/typos
     rev: v1.30.2
     hooks:
       - id: typos
         args: [--force-exclude]

   - repo: https://github.com/asottile/pyupgrade
     rev: v3.19.1
     hooks:
       - id: pyupgrade

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.9.10
+    rev: v0.9.6
     hooks:
       - id: ruff
         args: [--fix]
       - id: ruff-format

   ##### Security #####
   - repo: https://github.com/gitleaks/gitleaks
-    rev: v8.24.0
+    rev: v8.23.3
     hooks:
       - id: gitleaks

   - repo: https://github.com/woodruffw/zizmor-pre-commit
-    rev: v1.4.1
+    rev: v1.3.1
     hooks:
       - id: zizmor

   - repo: https://github.com/PyCQA/bandit
     rev: 1.8.3
     hooks:
       - id: bandit
         args: ["-c", "pyproject.toml"]
         additional_dependencies: ["bandit[toml]"]
````
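The two `exclude:` patterns behave quite differently: `^(tests/data)` skips the whole `tests/data` tree by prefix, while `tests/artifacts/.*\.safetensors$` skips only `.safetensors` files under `tests/artifacts` (pre-commit matches excludes with `re.search` semantics). A quick sketch checking both against sample paths (the paths are illustrative):

```python
# Sketch: compare the old and new pre-commit exclude patterns on sample paths.
import re

broad = re.compile(r"^(tests/data)")                      # prefix match: whole directory
narrow = re.compile(r"tests/artifacts/.*\.safetensors$")  # only safetensors artifacts

for path in [
    "tests/data/episode_0.json",
    "tests/artifacts/policy.safetensors",
    "tests/artifacts/notes.md",
]:
    print(path, bool(broad.search(path)), bool(narrow.search(path)))
# tests/data/episode_0.json True False
# tests/artifacts/policy.safetensors False True
# tests/artifacts/notes.md False False
```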
**`CONTRIBUTING.md`**

````diff
@@ -228,7 +228,7 @@ Follow these steps to start contributing:
    git commit
    ```

-   Note, if you already committed some changes that have a wrong formatting, you can use:
+   Note, if you already commited some changes that have a wrong formatting, you can use:
    ```bash
    pre-commit run --all-files
    ```
@@ -291,7 +291,7 @@ sudo apt-get install git-lfs
 git lfs install
 ```

-Pull artifacts if they're not in [tests/artifacts](tests/artifacts)
+Pull artifacts if they're not in [tests/data](tests/data)
 ```bash
 git lfs pull
 ```
````
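`git lfs pull` is only needed when the LFS-tracked fixtures aren't materialized yet. A small sketch of that guard, assuming the newer `tests/artifacts` layout with `.safetensors` fixtures:

```python
# Sketch: fetch LFS objects only if the safetensors fixtures are missing.
import pathlib
import subprocess

artifacts = pathlib.Path("tests/artifacts")
if not any(artifacts.rglob("*.safetensors")):
    subprocess.run(["git", "lfs", "pull"], check=True)
```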
**`Makefile`** — 32 lines changed

````diff
@@ -1,17 +1,3 @@
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-# … (same 14-line license header removed)
 .PHONY: tests

 PYTHON_PATH := $(shell which python)
@@ -47,7 +33,6 @@ test-act-ete-train:
 		--policy.dim_model=64 \
 		--policy.n_action_steps=20 \
 		--policy.chunk_size=20 \
-		--policy.device=$(DEVICE) \
 		--env.type=aloha \
 		--env.episode_length=5 \
 		--dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
@@ -62,6 +47,7 @@ test-act-ete-train:
 		--save_checkpoint=true \
 		--log_freq=1 \
 		--wandb.enable=false \
+		--device=$(DEVICE) \
 		--output_dir=tests/outputs/act/

 test-act-ete-train-resume:
@@ -72,11 +58,11 @@ test-act-ete-train-resume:
 test-act-ete-eval:
 	python lerobot/scripts/eval.py \
 		--policy.path=tests/outputs/act/checkpoints/000004/pretrained_model \
-		--policy.device=$(DEVICE) \
 		--env.type=aloha \
 		--env.episode_length=5 \
 		--eval.n_episodes=1 \
-		--eval.batch_size=1
+		--eval.batch_size=1 \
+		--device=$(DEVICE)

 test-diffusion-ete-train:
 	python lerobot/scripts/train.py \
@@ -84,7 +70,6 @@ test-diffusion-ete-train:
 		--policy.down_dims='[64,128,256]' \
 		--policy.diffusion_step_embed_dim=32 \
 		--policy.num_inference_steps=10 \
-		--policy.device=$(DEVICE) \
 		--env.type=pusht \
 		--env.episode_length=5 \
 		--dataset.repo_id=lerobot/pusht \
@@ -99,21 +84,21 @@ test-diffusion-ete-train:
 		--save_freq=2 \
 		--log_freq=1 \
 		--wandb.enable=false \
+		--device=$(DEVICE) \
 		--output_dir=tests/outputs/diffusion/

 test-diffusion-ete-eval:
 	python lerobot/scripts/eval.py \
 		--policy.path=tests/outputs/diffusion/checkpoints/000002/pretrained_model \
-		--policy.device=$(DEVICE) \
 		--env.type=pusht \
 		--env.episode_length=5 \
 		--eval.n_episodes=1 \
-		--eval.batch_size=1
+		--eval.batch_size=1 \
+		--device=$(DEVICE)

 test-tdmpc-ete-train:
 	python lerobot/scripts/train.py \
 		--policy.type=tdmpc \
-		--policy.device=$(DEVICE) \
 		--env.type=xarm \
 		--env.task=XarmLift-v0 \
 		--env.episode_length=5 \
@@ -129,14 +114,15 @@ test-tdmpc-ete-train:
 		--save_freq=2 \
 		--log_freq=1 \
 		--wandb.enable=false \
+		--device=$(DEVICE) \
 		--output_dir=tests/outputs/tdmpc/

 test-tdmpc-ete-eval:
 	python lerobot/scripts/eval.py \
 		--policy.path=tests/outputs/tdmpc/checkpoints/000002/pretrained_model \
-		--policy.device=$(DEVICE) \
 		--env.type=xarm \
 		--env.episode_length=5 \
 		--env.task=XarmLift-v0 \
 		--eval.n_episodes=1 \
-		--eval.batch_size=1
+		--eval.batch_size=1 \
+		--device=$(DEVICE)
````
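The pattern repeated across these Makefile targets is a CLI migration: one branch passes the device inside the policy namespace (`--policy.device=$(DEVICE)`, grouped with the other `--policy.*` flags), the other appends a top-level `--device=$(DEVICE)` after `--eval.batch_size`. A sketch of the `test-act-ete-eval` target as a direct invocation with the namespaced flags, values copied from the target above:

```python
# Sketch: the test-act-ete-eval target as a Python invocation,
# using the --policy.device flag style.
import subprocess

device = "cpu"  # stands in for the Makefile's $(DEVICE)
subprocess.run(
    [
        "python", "lerobot/scripts/eval.py",
        "--policy.path=tests/outputs/act/checkpoints/000004/pretrained_model",
        f"--policy.device={device}",
        "--env.type=aloha",
        "--env.episode_length=5",
        "--eval.n_episodes=1",
        "--eval.batch_size=1",
    ],
    check=True,
)
```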
**`README.md`** — 28 lines changed

````diff
@@ -23,24 +23,15 @@
 </div>

 <h2 align="center">
-    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">
-        Build Your Own SO-100 Robot!</a></p>
+    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">New robot in town: SO-100</a></p>
 </h2>

 <div align="center">
-    <img src="media/so100/leader_follower.webp?raw=true" alt="SO-100 leader and follower arms" title="SO-100 leader and follower arms" width="50%">
-
-    <p><strong>Meet the SO-100 – Just $110 per arm!</strong></p>
-    <p>Train it in minutes with a few simple moves on your laptop.</p>
-    <p>Then sit back and watch your creation act autonomously! 🤯</p>
-
-    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">
-        Get the full SO-100 tutorial here.</a></p>
-
-    <p>Want to take it to the next level? Make your SO-100 mobile by building LeKiwi!</p>
-    <p>Check out the <a href="https://github.com/huggingface/lerobot/blob/main/examples/11_use_lekiwi.md">LeKiwi tutorial</a> and bring your robot to life on wheels.</p>
-
-    <img src="media/lekiwi/kiwi.webp?raw=true" alt="LeKiwi mobile robot" title="LeKiwi mobile robot" width="50%">
+    <img src="media/so100/leader_follower.webp?raw=true" alt="SO-100 leader and follower arms" title="SO-100 leader and follower arms" width="50%">
+    <p>We just added a new tutorial on how to build a more affordable robot, at the price of $110 per arm!</p>
+    <p>Teach it new skills by showing it a few moves with just a laptop.</p>
+    <p>Then watch your homemade robot act autonomously 🤯</p>
+    <p>Follow the link to the <a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">full tutorial for SO-100</a>.</p>
 </div>

 <br/>
@@ -232,8 +223,8 @@ python lerobot/scripts/eval.py \
     --env.type=pusht \
     --eval.batch_size=10 \
     --eval.n_episodes=10 \
-    --policy.use_amp=false \
-    --policy.device=cuda
+    --use_amp=false \
+    --device=cuda
 ```

 Note: After training your own policy, you can re-evaluate the checkpoints with:
@@ -384,6 +375,3 @@ Additionally, if you are using any of the particular policy architecture, pretra
     year={2024}
 }
 ```
-
-## Star History
-
-[](https://star-history.com/#huggingface/lerobot&Timeline)
````
**Video benchmark README**

````diff
@@ -114,7 +114,7 @@ We tried to measure the most impactful parameters for both encoding and decoding

 Additional encoding parameters exist that are not included in this benchmark. In particular:
 - `-preset` which allows for selecting encoding presets. This represents a collection of options that will provide a certain encoding speed to compression ratio. By leaving this parameter unspecified, it is considered to be `medium` for libx264 and libx265 and `8` for libsvtav1.
-- `-tune` which allows to optimize the encoding for certain aspects (e.g. film quality, fast decoding, etc.).
+- `-tune` which allows to optimize the encoding for certains aspects (e.g. film quality, fast decoding, etc.).

 See the documentation mentioned above for more detailed info on these settings and for a more comprehensive list of other parameters.
````
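Because an unspecified `-preset` silently falls back to `medium` (libx264/libx265) or `8` (libsvtav1), pinning it explicitly keeps benchmark runs reproducible. A sketch of an explicit invocation through ffmpeg's CLI; the input pattern and output name are placeholders:

```python
# Sketch: encode with -preset and -tune pinned explicitly via ffmpeg's CLI.
import subprocess

subprocess.run(
    [
        "ffmpeg", "-y",
        "-i", "frames_%06d.png",  # placeholder input pattern
        "-c:v", "libx264",
        "-preset", "medium",      # make the implicit default explicit
        "-tune", "fastdecode",    # optimize for decoding speed
        "output.mp4",             # placeholder output
    ],
    check=True,
)
```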
**Dockerfile (CPU build)**

````diff
@@ -1,29 +1,33 @@
 # Configure image
 ARG PYTHON_VERSION=3.10

 FROM python:${PYTHON_VERSION}-slim

 # Configure environment variables
 ARG PYTHON_VERSION
-ENV DEBIAN_FRONTEND=noninteractive
-ENV MUJOCO_GL="egl"
-ENV PATH="/opt/venv/bin:$PATH"
+ARG DEBIAN_FRONTEND=noninteractive

-# Install dependencies and set up Python in a single layer
+# Install apt dependencies
 RUN apt-get update && apt-get install -y --no-install-recommends \
-    build-essential cmake git \
+    build-essential cmake git git-lfs \
     libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
     speech-dispatcher libgeos-dev \
-    && ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python \
-    && python -m venv /opt/venv \
-    && apt-get clean && rm -rf /var/lib/apt/lists/* \
-    && echo "source /opt/venv/bin/activate" >> /root/.bashrc
+    && apt-get clean && rm -rf /var/lib/apt/lists/*

-# Clone repository and install LeRobot in a single layer
-COPY . /lerobot
+# Create virtual environment
+RUN ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python
+RUN python -m venv /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
+RUN echo "source /opt/venv/bin/activate" >> /root/.bashrc

+# Install LeRobot
+RUN git lfs install
+RUN git clone https://github.com/huggingface/lerobot.git /lerobot
 WORKDIR /lerobot
-RUN /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
-    && /opt/venv/bin/pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]" \
-    --extra-index-url https://download.pytorch.org/whl/cpu
+RUN pip install --upgrade --no-cache-dir pip
+RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]" \
+    --extra-index-url https://download.pytorch.org/whl/cpu

+# Set EGL as the rendering backend for MuJoCo
+ENV MUJOCO_GL="egl"

 # Execute in bash shell rather than python
 CMD ["/bin/bash"]
````
**Dockerfile (GPU build)**

````diff
@@ -8,7 +8,7 @@ ENV PATH="/opt/venv/bin:$PATH"

 # Install dependencies and set up Python in a single layer
 RUN apt-get update && apt-get install -y --no-install-recommends \
-    build-essential cmake git \
+    build-essential cmake git git-lfs \
     libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
     speech-dispatcher libgeos-dev \
     python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
@@ -18,7 +18,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     && echo "source /opt/venv/bin/activate" >> /root/.bashrc

 # Clone repository and install LeRobot in a single layer
-COPY . /lerobot
 WORKDIR /lerobot
-RUN /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
+RUN git lfs install \
+    && git clone https://github.com/huggingface/lerobot.git . \
+    && /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
     && /opt/venv/bin/pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]"
````
**`examples/10_use_so100.md`**

````diff
@@ -4,8 +4,8 @@

 - [A. Source the parts](#a-source-the-parts)
 - [B. Install LeRobot](#b-install-lerobot)
-- [C. Configure the Motors](#c-configure-the-motors)
-- [D. Step-by-Step Assembly Instructions](#d-step-by-step-assembly-instructions)
+- [C. Configure the motors](#c-configure-the-motors)
+- [D. Assemble the arms](#d-assemble-the-arms)
 - [E. Calibrate](#e-calibrate)
 - [F. Teleoperate](#f-teleoperate)
 - [G. Record a dataset](#g-record-a-dataset)
@@ -70,7 +70,6 @@ conda install -y -c conda-forge "opencv>=4.10.0"
 ```
 Great :hugs:! You are now done installing LeRobot and we can begin assembling the SO100 arms :robot:.
-Every time you now want to use LeRobot you can go to the `~/lerobot` folder where we installed LeRobot and run one of the commands.

 ## C. Configure the motors

 > [!NOTE]
@@ -99,22 +98,22 @@ Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem5
 ```
 Finding all available ports for the MotorBus.
 ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
-Remove the usb cable from your MotorsBus and press Enter when done.
+Remove the usb cable from your DynamixelMotorsBus and press Enter when done.

 [...Disconnect leader arm and press Enter...]

-The port of this MotorsBus is /dev/tty.usbmodem575E0031751
+The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
 Reconnect the usb cable.
 ```
 Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
 ```
 Finding all available ports for the MotorBus.
 ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
-Remove the usb cable from your MotorsBus and press Enter when done.
+Remove the usb cable from your DynamixelMotorsBus and press Enter when done.

 [...Disconnect follower arm and press Enter...]

-The port of this MotorsBus is /dev/tty.usbmodem575E0032081
+The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
 Reconnect the usb cable.
 ```
````
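The port-identification flow above amounts to diffing the visible serial devices before and after unplugging the board. A minimal sketch of that idea with `pyserial`; this illustrates the mechanism, it is not the tutorial's own script:

```python
# Sketch: find a motor-bus port by diffing serial ports across an unplug.
from serial.tools import list_ports  # pip install pyserial

before = {p.device for p in list_ports.comports()}
input("Remove the USB cable from your motors bus and press Enter when done.")
after = {p.device for p in list_ports.comports()}

missing = before - after
print(f"The port of this bus is {missing.pop()}" if missing else "No port change detected.")
```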
````diff
@@ -222,13 +221,19 @@ Redo the process for all your motors until ID 6. Do the same for the 6 motors of

 Follow the video for removing gears. You need to remove the gear for the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm.

-## D. Step-by-Step Assembly Instructions
+#### c. Add motor horn to all 12 motors

-**Step 1: Clean Parts**
-- Remove all support material from the 3D-printed parts.
+---
+<details>
+<summary><strong>Video adding motor horn</strong></summary>

-### Additional Guidance
+<video src="https://github.com/user-attachments/assets/ef3391a4-ad05-4100-b2bd-1699bf86c969"></video>

+</details>

+Follow the video for adding the motor horn. For SO-100, you need to align the holes on the motor horn to the motor spline to be approximately 1:30, 4:30, 7:30 and 10:30.
+Try to avoid rotating the motor while doing so to keep position 2048 set during configuration. It is especially tricky for the leader motors as it is more sensible without the gears, but it's ok if it's a bit rotated.

+## D. Assemble the arms

 <details>
 <summary><strong>Video assembling arms</strong></summary>
@@ -237,211 +242,7 @@ Follow the video for removing gears. You need to remove the gear for the motors

 </details>

-**Note:**
-This video provides visual guidance for assembling the arms, but it doesn't specify when or how to do the wiring. Inserting the cables beforehand is much easier than doing it afterward. The first arm may take a bit more than 1 hour to assemble, but once you get used to it, you can assemble the second arm in under 1 hour.

----

-### First Motor

-**Step 2: Insert Wires**
-- Insert two wires into the first motor.

-<img src="../media/tutorial/img1.jpg" style="height:300px;">

-**Step 3: Install in Base**
-- Place the first motor into the base.

-<img src="../media/tutorial/img2.jpg" style="height:300px;">

-**Step 4: Secure Motor**
-- Fasten the motor with 4 screws. Two from the bottom and two from top.

-**Step 5: Attach Motor Holder**
-- Slide over the first motor holder and fasten it using two screws (one on each side).

-<img src="../media/tutorial/img4.jpg" style="height:300px;">

-**Step 6: Attach Motor Horns**
-- Install both motor horns, securing the top horn with a screw. Try not to move the motor position when attaching the motor horn, especially for the leader arms, where we removed the gears.

-<img src="../media/tutorial/img5.jpg" style="height:300px;">
-<details>
-<summary><strong>Video adding motor horn</strong></summary>
-<video src="https://github.com/user-attachments/assets/ef3391a4-ad05-4100-b2bd-1699bf86c969"></video>
-</details>

-**Step 7: Attach Shoulder Part**
-- Route one wire to the back of the robot and the other to the left or in photo towards you (see photo).
-- Attach the shoulder part.

-<img src="../media/tutorial/img6.jpg" style="height:300px;">

-**Step 8: Secure Shoulder**
-- Tighten the shoulder part with 4 screws on top and 4 on the bottom
-*(access bottom holes by turning the shoulder).*

----

-### Second Motor Assembly

-**Step 9: Install Motor 2**
-- Slide the second motor in from the top and link the wire from motor 1 to motor 2.

-<img src="../media/tutorial/img8.jpg" style="height:300px;">

-**Step 10: Attach Shoulder Holder**
-- Add the shoulder motor holder.
-- Ensure the wire from motor 1 to motor 2 goes behind the holder while the other wire is routed upward (see photo).
-- This part can be tight to assemble, you can use a workbench like the image or a similar setup to push the part around the motor.

-<div style="display: flex;">
-  <img src="../media/tutorial/img9.jpg" style="height:250px;">
-  <img src="../media/tutorial/img10.jpg" style="height:250px;">
-  <img src="../media/tutorial/img12.jpg" style="height:250px;">
-</div>

-**Step 11: Secure Motor 2**
-- Fasten the second motor with 4 screws.

-**Step 12: Attach Motor Horn**
-- Attach both motor horns to motor 2, again use the horn screw.

-**Step 13: Attach Base**
-- Install the base attachment using 2 screws.

-<img src="../media/tutorial/img11.jpg" style="height:300px;">

-**Step 14: Attach Upper Arm**
-- Attach the upper arm with 4 screws on each side.

-<img src="../media/tutorial/img13.jpg" style="height:300px;">

----

-### Third Motor Assembly

-**Step 15: Install Motor 3**
-- Route the motor cable from motor 2 through the cable holder to motor 3, then secure motor 3 with 4 screws.

-**Step 16: Attach Motor Horn**
-- Attach both motor horns to motor 3 and secure one again with a horn screw.

-<img src="../media/tutorial/img14.jpg" style="height:300px;">

-**Step 17: Attach Forearm**
-- Connect the forearm to motor 3 using 4 screws on each side.

-<img src="../media/tutorial/img15.jpg" style="height:300px;">

----

-### Fourth Motor Assembly

-**Step 18: Install Motor 4**
-- Slide in motor 4, attach the cable from motor 3, and secure the cable in its holder with a screw.

-<div style="display: flex;">
-  <img src="../media/tutorial/img16.jpg" style="height:300px;">
-  <img src="../media/tutorial/img19.jpg" style="height:300px;">
-</div>

-**Step 19: Attach Motor Holder 4**
-- Install the fourth motor holder (a tight fit). Ensure one wire is routed upward and the wire from motor 3 is routed downward (see photo).

-<img src="../media/tutorial/img17.jpg" style="height:300px;">

-**Step 20: Secure Motor 4 & Attach Horn**
-- Fasten motor 4 with 4 screws and attach its motor horns, use for one a horn screw.

-<img src="../media/tutorial/img18.jpg" style="height:300px;">

----

-### Wrist Assembly

-**Step 21: Install Motor 5**
-- Insert motor 5 into the wrist holder and secure it with 2 front screws.

-<img src="../media/tutorial/img20.jpg" style="height:300px;">

-**Step 22: Attach Wrist**
-- Connect the wire from motor 4 to motor 5. And already insert the other wire for the gripper.
-- Secure the wrist to motor 4 using 4 screws on both sides.

-<img src="../media/tutorial/img22.jpg" style="height:300px;">

-**Step 23: Attach Wrist Horn**
-- Install only one motor horn on the wrist motor and secure it with a horn screw.

-<img src="../media/tutorial/img23.jpg" style="height:300px;">

----

-### Follower Configuration

-**Step 24: Attach Gripper**
-- Attach the gripper to motor 5.

-<img src="../media/tutorial/img24.jpg" style="height:300px;">

-**Step 25: Install Gripper Motor**
-- Insert the gripper motor, connect the motor wire from motor 5 to motor 6, and secure it with 3 screws on each side.

-<img src="../media/tutorial/img25.jpg" style="height:300px;">

-**Step 26: Attach Gripper Horn & Claw**
-- Attach the motor horns and again use a horn screw.
-- Install the gripper claw and secure it with 4 screws on both sides.

-<img src="../media/tutorial/img26.jpg" style="height:300px;">

-**Step 27: Mount Controller**
-- Attach the motor controller on the back.

-<div style="display: flex;">
-  <img src="../media/tutorial/img27.jpg" style="height:300px;">
-  <img src="../media/tutorial/img28.jpg" style="height:300px;">
-</div>

-*Assembly complete – proceed to Leader arm assembly.*

----

-### Leader Configuration

-For the leader configuration, perform **Steps 1–23**. Make sure that you removed the motor gears from the motors.

-**Step 24: Attach Leader Holder**
-- Mount the leader holder onto the wrist and secure it with a screw.

-<img src="../media/tutorial/img29.jpg" style="height:300px;">

-**Step 25: Attach Handle**
-- Attach the handle to motor 5 using 4 screws.

-<img src="../media/tutorial/img30.jpg" style="height:300px;">

-**Step 26: Install Gripper Motor**
-- Insert the gripper motor, secure it with 3 screws on each side, attach a motor horn using a horn screw, and connect the motor wire.

-<img src="../media/tutorial/img31.jpg" style="height:300px;">

-**Step 27: Attach Trigger**
-- Attach the follower trigger with 4 screws.

-<img src="../media/tutorial/img32.jpg" style="height:300px;">

-**Step 28: Mount Controller**
-- Attach the motor controller on the back.

-<div style="display: flex;">
-  <img src="../media/tutorial/img27.jpg" style="height:300px;">
-  <img src="../media/tutorial/img28.jpg" style="height:300px;">
-</div>

-*Assembly complete – proceed to calibration.*

+Follow the video for assembling the arms. It is important to insert the cables into the motor that is being assembled before you assemble the motor into the arm! Inserting the cables beforehand is much easier than doing this afterward. The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can do it under 1 hour for the second arm.

 ## E. Calibrate
````
````diff
@@ -454,8 +255,8 @@ Next, you'll need to calibrate your SO-100 robot to ensure that the leader and f

 You will need to move the follower arm to these positions sequentially:

 | 1. Zero position | 2. Rotated position | 3. Rest position |
-| ------------------ | ------------------- | ------------------ |
+|---|---|---|
 | <img src="../media/so100/follower_zero.webp?raw=true" alt="SO-100 follower arm zero position" title="SO-100 follower arm zero position" style="width:100%;"> | <img src="../media/so100/follower_rotated.webp?raw=true" alt="SO-100 follower arm rotated position" title="SO-100 follower arm rotated position" style="width:100%;"> | <img src="../media/so100/follower_rest.webp?raw=true" alt="SO-100 follower arm rest position" title="SO-100 follower arm rest position" style="width:100%;"> |

 Make sure both arms are connected and run this script to launch manual calibration:
@@ -470,8 +271,8 @@ python lerobot/scripts/control_robot.py \
 #### b. Manual calibration of leader arm
 Follow step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:

 | 1. Zero position | 2. Rotated position | 3. Rest position |
-| ------------------ | ------------------- | ------------------ |
+|---|---|---|
 | <img src="../media/so100/leader_zero.webp?raw=true" alt="SO-100 leader arm zero position" title="SO-100 leader arm zero position" style="width:100%;"> | <img src="../media/so100/leader_rotated.webp?raw=true" alt="SO-100 leader arm rotated position" title="SO-100 leader arm rotated position" style="width:100%;"> | <img src="../media/so100/leader_rest.webp?raw=true" alt="SO-100 leader arm rest position" title="SO-100 leader arm rest position" style="width:100%;"> |

 Run this script to launch manual calibration:
@@ -571,14 +372,14 @@ python lerobot/scripts/train.py \
   --policy.type=act \
   --output_dir=outputs/train/act_so100_test \
   --job_name=act_so100_test \
-  --policy.device=cuda \
+  --device=cuda \
   --wandb.enable=true
 ```

 Let's explain it:
 1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so100_test`.
 2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor sates, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
-4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
+4. We provided `device=cuda` since we are training on a Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
 5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.

 Training should take several hours. You will find checkpoints in `outputs/train/act_so100_test/checkpoints`.
````
**`examples/11_use_lekiwi.md`**

````diff
@@ -23,9 +23,6 @@ Follow this [README](https://github.com/SIGRobotics-UIUC/LeKiwi). It contains th

 Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.

-### Wired version
-If you have the **wired** LeKiwi version you can skip the installation of the Raspberry Pi and setting up SSH. You can also run all commands directly on your PC for both the LeKiwi scripts and the leader arm scripts for teleoperating.

 ## B. Install software on Pi
 Now we have to setup the remote PC that will run on the LeKiwi Robot. This is normally a Raspberry Pi, but can be any PC that can run on 5V and has enough usb ports (2 or more) for the cameras and motor control board.
@@ -188,7 +185,7 @@ sudo chmod 666 /dev/ttyACM1

 #### d. Update config file

-IMPORTANTLY: Now that you have your ports of leader and follower arm and ip address of the mobile-so100, update the **ip** in Network configuration, **port** in leader_arms and **port** in lekiwi. In the [`LeKiwiRobotConfig`](../lerobot/common/robot_devices/robots/configs.py) file. Where you will find something like:
+IMPORTANTLY: Now that you have your ports of leader and follower arm and ip adress of the mobile-so100, update the **ip** in Network configuration, **port** in leader_arms and **port** in lekiwi. In the [`LeKiwiRobotConfig`](../lerobot/common/robot_devices/robots/configs.py) file. Where you will find something like:
 ```python
 @RobotConfig.register_subclass("lekiwi")
 @dataclass
````
````diff
@@ -249,110 +246,6 @@ class LeKiwiRobotConfig(RobotConfig):
         }
     )

-    teleop_keys: dict[str, str] = field(
-        default_factory=lambda: {
-            # Movement
-            "forward": "w",
-            "backward": "s",
-            "left": "a",
-            "right": "d",
-            "rotate_left": "z",
-            "rotate_right": "x",
-            # Speed control
-            "speed_up": "r",
-            "speed_down": "f",
-            # quit teleop
-            "quit": "q",
-        }
-    )

     mock: bool = False
 ```

-## Wired version

-For the wired LeKiwi version your configured IP address should refer to your own laptop (127.0.0.1), because leader arm and LeKiwi are in this case connected to own laptop. Below and example configuration for this wired setup:
-```python
-@RobotConfig.register_subclass("lekiwi")
-@dataclass
-class LeKiwiRobotConfig(RobotConfig):
-    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
-    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
-    # the number of motors in your follower arms.
-    max_relative_target: int | None = None

-    # Network Configuration
-    ip: str = "127.0.0.1"
-    port: int = 5555
-    video_port: int = 5556

-    cameras: dict[str, CameraConfig] = field(
-        default_factory=lambda: {
-            "front": OpenCVCameraConfig(
-                camera_index=0, fps=30, width=640, height=480, rotation=90
-            ),
-            "wrist": OpenCVCameraConfig(
-                camera_index=1, fps=30, width=640, height=480, rotation=180
-            ),
-        }
-    )

-    calibration_dir: str = ".cache/calibration/lekiwi"

-    leader_arms: dict[str, MotorsBusConfig] = field(
-        default_factory=lambda: {
-            "main": FeetechMotorsBusConfig(
-                port="/dev/tty.usbmodem585A0077581",
-                motors={
-                    # name: (index, model)
-                    "shoulder_pan": [1, "sts3215"],
-                    "shoulder_lift": [2, "sts3215"],
-                    "elbow_flex": [3, "sts3215"],
-                    "wrist_flex": [4, "sts3215"],
-                    "wrist_roll": [5, "sts3215"],
-                    "gripper": [6, "sts3215"],
-                },
-            ),
-        }
-    )

-    follower_arms: dict[str, MotorsBusConfig] = field(
-        default_factory=lambda: {
-            "main": FeetechMotorsBusConfig(
-                port="/dev/tty.usbmodem58760431061",
-                motors={
-                    # name: (index, model)
-                    "shoulder_pan": [1, "sts3215"],
-                    "shoulder_lift": [2, "sts3215"],
-                    "elbow_flex": [3, "sts3215"],
-                    "wrist_flex": [4, "sts3215"],
-                    "wrist_roll": [5, "sts3215"],
-                    "gripper": [6, "sts3215"],
-                    "left_wheel": (7, "sts3215"),
-                    "back_wheel": (8, "sts3215"),
-                    "right_wheel": (9, "sts3215"),
-                },
-            ),
-        }
-    )

-    teleop_keys: dict[str, str] = field(
-        default_factory=lambda: {
-            # Movement
-            "forward": "w",
-            "backward": "s",
-            "left": "a",
-            "right": "d",
-            "rotate_left": "z",
-            "rotate_right": "x",
-            # Speed control
-            "speed_up": "r",
-            "speed_down": "f",
-            # quit teleop
-            "quit": "q",
-        }
-    )

-    mock: bool = False
-```
````
````diff
@@ -366,8 +259,8 @@ Now we have to calibrate the leader arm and the follower arm. The wheel motors d

 You will need to move the follower arm to these positions sequentially:

 | 1. Zero position | 2. Rotated position | 3. Rest position |
-| ------------------ | ------------------- | ------------------ |
+|---|---|---|
 | <img src="../media/lekiwi/mobile_calib_zero.webp?raw=true" alt="SO-100 follower arm zero position" title="SO-100 follower arm zero position" style="width:100%;"> | <img src="../media/lekiwi/mobile_calib_rotated.webp?raw=true" alt="SO-100 follower arm rotated position" title="SO-100 follower arm rotated position" style="width:100%;"> | <img src="../media/lekiwi/mobile_calib_rest.webp?raw=true" alt="SO-100 follower arm rest position" title="SO-100 follower arm rest position" style="width:100%;"> |

 Make sure the arm is connected to the Raspberry Pi and run this script (on the Raspberry Pi) to launch manual calibration:
@@ -379,14 +272,11 @@ python lerobot/scripts/control_robot.py \
     --control.arms='["main_follower"]'
 ```

-### Wired version
-If you have the **wired** LeKiwi version please run all commands including this calibration command on your laptop.

 ### Calibrate leader arm
 Then to calibrate the leader arm (which is attached to the laptop/pc). You will need to move the leader arm to these positions sequentially:

 | 1. Zero position | 2. Rotated position | 3. Rest position |
-| ------------------ | ------------------- | ------------------ |
+|---|---|---|
 | <img src="../media/so100/leader_zero.webp?raw=true" alt="SO-100 leader arm zero position" title="SO-100 leader arm zero position" style="width:100%;"> | <img src="../media/so100/leader_rotated.webp?raw=true" alt="SO-100 leader arm rotated position" title="SO-100 leader arm rotated position" style="width:100%;"> | <img src="../media/so100/leader_rest.webp?raw=true" alt="SO-100 leader arm rest position" title="SO-100 leader arm rest position" style="width:100%;"> |

 Run this script (on your laptop/pc) to launch manual calibration:
````
````diff
@@ -416,28 +306,25 @@ python lerobot/scripts/control_robot.py \

 You should see on your laptop something like this: ```[INFO] Connected to remote robot at tcp://172.17.133.91:5555 and video stream at tcp://172.17.133.91:5556.``` Now you can move the leader arm and use the keyboard (w,a,s,d) to drive forward, left, backwards, right. And use (z,x) to turn left or turn right. You can use (r,f) to increase and decrease the speed of the mobile robot. There are three speed modes, see the table below:
 | Speed Mode | Linear Speed (m/s) | Rotation Speed (deg/s) |
-| ---------- | ------------------ | ---------------------- |
-| Fast       | 0.4                | 90                     |
-| Medium     | 0.25               | 60                     |
-| Slow       | 0.1                | 30                     |
+|------------|-------------------|-----------------------|
+| Fast       | 0.4               | 90                    |
+| Medium     | 0.25              | 60                    |
+| Slow       | 0.1               | 30                    |

 | Key | Action |
-| --- | -------------- |
-| W   | Move forward   |
-| A   | Move left      |
-| S   | Move backward  |
-| D   | Move right     |
-| Z   | Turn left      |
-| X   | Turn right     |
-| R   | Increase speed |
-| F   | Decrease speed |
+|------|--------------------------------|
+| W    | Move forward                   |
+| A    | Move left                      |
+| S    | Move backward                  |
+| D    | Move right                     |
+| Z    | Turn left                      |
+| X    | Turn right                     |
+| R    | Increase speed                 |
+| F    | Decrease speed                 |

 > [!TIP]
-> If you use a different keyboard you can change the keys for each command in the [`LeKiwiRobotConfig`](../lerobot/common/robot_devices/robots/configs.py).
+> If you use a different keyboard you can change the keys for each commmand in the [`LeKiwiRobotConfig`](../lerobot/common/robot_devices/robots/configs.py).

-### Wired version
-If you have the **wired** LeKiwi version please run all commands including both these teleoperation commands on your laptop.

 ## Troubleshoot communication
````
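The `teleop_keys` mapping in `LeKiwiRobotConfig` and the key table above are two views of the same thing: a dictionary from action names to keys that a teleop loop inverts and dispatches on. A toy sketch of that dispatch; `base_cmd` is a stand-in for the real motor command, not LeRobot's API:

```python
# Sketch: driving a dispatch loop from a teleop_keys-style mapping.
teleop_keys = {
    "forward": "w", "backward": "s", "left": "a", "right": "d",
    "rotate_left": "z", "rotate_right": "x",
    "speed_up": "r", "speed_down": "f", "quit": "q",
}
key_to_action = {key: action for action, key in teleop_keys.items()}

def base_cmd(action: str) -> None:
    print(f"-> {action}")  # placeholder for the real robot command

for pressed in ["w", "w", "r", "x", "q"]:  # simulated key presses
    action = key_to_action.get(pressed)
    if action == "quit":
        break
    if action is not None:
        base_cmd(action)
```

Swapping keyboards then only means editing the dictionary, which is exactly what the TIP above suggests.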
````diff
@@ -477,13 +364,6 @@ Make sure the configuration file on both your laptop/pc and the Raspberry Pi is

 # G. Record a dataset
 Once you're familiar with teleoperation, you can record your first dataset with LeKiwi.

-To start the program on LeKiwi, SSH into your Raspberry Pi, and run `conda activate lerobot` and this script:
-```bash
-python lerobot/scripts/control_robot.py \
-  --robot.type=lekiwi \
-  --control.type=remote_robot
-```

 If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
 ```bash
 huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
 ```
@@ -494,7 +374,8 @@ Store your Hugging Face repository name in a variable to run these commands:
 HF_USER=$(huggingface-cli whoami | head -n 1)
 echo $HF_USER
 ```
-On your laptop then run this command to record 2 episodes and upload your dataset to the hub:
+
+Record 2 episodes and upload your dataset to the hub:
 ```bash
 python lerobot/scripts/control_robot.py \
   --robot.type=lekiwi \
````
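The `HF_USER` variable above is scraped from `huggingface-cli whoami`; the same lookup exists in Python via `huggingface_hub`, assuming you are already logged in:

```python
# Sketch: resolve your Hub username in Python instead of parsing CLI output.
from huggingface_hub import whoami  # pip install huggingface_hub

hf_user = whoami()["name"]       # requires a prior `huggingface-cli login`
print(f"{hf_user}/lekiwi_test")  # the dataset repo id used in the command above
```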
````diff
@@ -512,9 +393,6 @@ python lerobot/scripts/control_robot.py \

 Note: You can resume recording by adding `--control.resume=true`.

-### Wired version
-If you have the **wired** LeKiwi version please run all commands including both these record dataset commands on your laptop.

 # H. Visualize a dataset

 If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
@@ -549,14 +427,14 @@ python lerobot/scripts/train.py \
   --policy.type=act \
   --output_dir=outputs/train/act_lekiwi_test \
   --job_name=act_lekiwi_test \
-  --policy.device=cuda \
+  --device=cuda \
   --wandb.enable=true
 ```

 Let's explain it:
 1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/lekiwi_test`.
 2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor sates, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
-4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
+4. We provided `device=cuda` since we are training on a Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
 5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.

 Training should take several hours. You will find checkpoints in `outputs/train/act_lekiwi_test/checkpoints`.
````
@@ -2,7 +2,7 @@ This tutorial explains how to use [Moss v1](https://github.com/jess-moss/moss-ro
|
||||
|
||||
## Source the parts
|
||||
|
||||
Follow this [README](https://github.com/jess-moss/moss-robot-arms). It contains the bill of materials with link to source the parts, as well as the instructions to 3D print the parts and advice if it's your first time printing or if you don't own a 3D printer already.
|
||||
Follow this [README](https://github.com/jess-moss/moss-robot-arms). It contains the bill of materials, with link to source the parts, as well as the instructions to 3D print the parts, and advices if it's your first time printing or if you don't own a 3D printer already.
|
||||
|
||||
**Important**: Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.
|
||||
|
||||
@@ -176,8 +176,8 @@ Next, you'll need to calibrate your Moss v1 robot to ensure that the leader and
|
||||
|
||||
You will need to move the follower arm to these positions sequentially:
|
||||
|
||||
| 1. Zero position | 2. Rotated position | 3. Rest position |
|
||||
| ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 1. Zero position | 2. Rotated position | 3. Rest position |
|
||||
|---|---|---|
|
||||
| <img src="../media/moss/follower_zero.webp?raw=true" alt="Moss v1 follower arm zero position" title="Moss v1 follower arm zero position" style="width:100%;"> | <img src="../media/moss/follower_rotated.webp?raw=true" alt="Moss v1 follower arm rotated position" title="Moss v1 follower arm rotated position" style="width:100%;"> | <img src="../media/moss/follower_rest.webp?raw=true" alt="Moss v1 follower arm rest position" title="Moss v1 follower arm rest position" style="width:100%;"> |
|
||||
|
||||
Make sure both arms are connected and run this script to launch manual calibration:
|
||||
@@ -192,8 +192,8 @@ python lerobot/scripts/control_robot.py \

**Manual calibration of leader arm**
Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:

| 1. Zero position | 2. Rotated position | 3. Rest position |
| --- | --- | --- |
| 1. Zero position | 2. Rotated position | 3. Rest position |
|---|---|---|
| <img src="../media/moss/leader_zero.webp?raw=true" alt="Moss v1 leader arm zero position" title="Moss v1 leader arm zero position" style="width:100%;"> | <img src="../media/moss/leader_rotated.webp?raw=true" alt="Moss v1 leader arm rotated position" title="Moss v1 leader arm rotated position" style="width:100%;"> | <img src="../media/moss/leader_rest.webp?raw=true" alt="Moss v1 leader arm rest position" title="Moss v1 leader arm rest position" style="width:100%;"> |

Run this script to launch manual calibration:
@@ -293,14 +293,14 @@ python lerobot/scripts/train.py \
  --policy.type=act \
  --output_dir=outputs/train/act_moss_test \
  --job_name=act_moss_test \
  --policy.device=cuda \
  --device=cuda \
  --wandb.enable=true
```

Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/moss_test`.
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
4. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
4. We provided `device=cuda` since we are training on an Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.

Training should take several hours. You will find checkpoints in `outputs/train/act_moss_test/checkpoints`.
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script demonstrates the use of `LeRobotDataset` class for handling and processing robotic datasets from Hugging Face.
It illustrates how to load datasets, manipulate them, and apply transformations suitable for machine learning tasks in PyTorch.
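As a quick orientation for this example, here is a minimal sketch of the loading pattern the script builds on. It assumes the public `lerobot/pusht` dataset and the `lerobot.common.datasets.lerobot_dataset` import path used elsewhere in this diff; the metadata attributes printed are illustrative and may differ by codebase version.

```python
# Hedged sketch: load a LeRobotDataset and feed it to a PyTorch DataLoader.
import torch

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

dataset = LeRobotDataset("lerobot/pusht")  # downloads to the local HF cache on first use
print(dataset.num_episodes, dataset.fps)

# LeRobotDataset is a regular torch Dataset, so it plugs straight into a DataLoader.
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
batch = next(iter(dataloader))
print(batch["observation.state"].shape)  # (batch_size, state_dim)
```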
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local
training outputs directory. In the latter case, you might want to run examples/3_train_policy.py first.
@@ -44,7 +30,7 @@ pretrained_policy_path = "lerobot/diffusion_pusht"
# OR a path to a local outputs/train folder.
# pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")

policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
policy = DiffusionPolicy.from_pretrained(pretrained_policy_path, map_location=device)

# Initialize evaluation environment to render two observation types:
# an image of the scene and state/position of the agent. The environment
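The `map_location` argument added in the hunk above mirrors `torch.load` semantics, so a checkpoint saved on GPU can be evaluated on a CPU-only machine. A hedged sketch of that device-aware loading, where the import path is an assumption based on this codebase's layout:

```python
# Sketch: pick a device first, then load the pretrained policy onto it.
import torch

from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
policy = DiffusionPolicy.from_pretrained("lerobot/diffusion_pusht", map_location=device)
policy.to(device)
policy.eval()  # disable training-only behavior (e.g. dropout) for evaluation rollouts
```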
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This script demonstrates how to train Diffusion Policy on the PushT environment.

Once you have trained a model with this script, you can try to evaluate it on
@@ -99,7 +85,7 @@ def main():
    done = False
    while not done:
        for batch in dataloader:
            batch = {k: (v.to(device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
            batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()}
            loss, _ = policy.forward(batch)
            loss.backward()
            optimizer.step()
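For context, here is a sketch of the surrounding loop in `main()`, with the change above in place. It assumes `policy`, `dataloader`, `device`, and `optimizer` were created earlier in the script; the `step`/`training_steps` counter names are assumptions for illustration. Note the `isinstance` guard: only tensors are moved, so non-tensor batch entries (e.g. task strings) stay on the host.

```python
# Hedged sketch of the full inner training loop around the hunk above.
training_steps = 5000  # assumed hyperparameter name
step = 0
done = False
while not done:
    for batch in dataloader:
        # Move only tensors to the training device; leave other values as-is.
        batch = {k: (v.to(device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
        loss, _ = policy.forward(batch)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()  # reset gradients so successive steps don't accumulate
        step += 1
        if step >= training_steps:
            done = True
            break
```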
@@ -1,5 +1,5 @@
This tutorial will explain the training script, how to use it, and particularly how to configure everything needed for the training run.
> **Note:** The following assumes you're running these commands on a machine equipped with a cuda GPU. If you don't have one (or if you're using a Mac), you can add `--policy.device=cpu` (`--policy.device=mps` respectively). However, be advised that the code executes much slower on cpu.
> **Note:** The following assumes you're running these commands on a machine equipped with a cuda GPU. If you don't have one (or if you're using a Mac), you can add `--device=cpu` (`--device=mps` respectively). However, be advised that the code executes much slower on cpu.


## The training script
@@ -386,19 +386,19 @@ When you connect your robot for the first time, the [`ManipulatorRobot`](../lero

Here are the positions you'll move the follower arm to:

| 1. Zero position | 2. Rotated position | 3. Rest position |
| --- | --- | --- |
| 1. Zero position | 2. Rotated position | 3. Rest position |
|---|---|---|
| <img src="../media/koch/follower_zero.webp?raw=true" alt="Koch v1.1 follower arm zero position" title="Koch v1.1 follower arm zero position" style="width:100%;"> | <img src="../media/koch/follower_rotated.webp?raw=true" alt="Koch v1.1 follower arm rotated position" title="Koch v1.1 follower arm rotated position" style="width:100%;"> | <img src="../media/koch/follower_rest.webp?raw=true" alt="Koch v1.1 follower arm rest position" title="Koch v1.1 follower arm rest position" style="width:100%;"> |

And here are the corresponding positions for the leader arm:

| 1. Zero position | 2. Rotated position | 3. Rest position |
| --- | --- | --- |
| 1. Zero position | 2. Rotated position | 3. Rest position |
|---|---|---|
| <img src="../media/koch/leader_zero.webp?raw=true" alt="Koch v1.1 leader arm zero position" title="Koch v1.1 leader arm zero position" style="width:100%;"> | <img src="../media/koch/leader_rotated.webp?raw=true" alt="Koch v1.1 leader arm rotated position" title="Koch v1.1 leader arm rotated position" style="width:100%;"> | <img src="../media/koch/leader_rest.webp?raw=true" alt="Koch v1.1 leader arm rest position" title="Koch v1.1 leader arm rest position" style="width:100%;"> |

You can watch a [video tutorial of the calibration procedure](https://youtu.be/8drnU9uRY24) for more details.

During calibration, we count the number of full 360-degree rotations your motors have made since they were first used. That's why we ask you to move to this arbitrary "zero" position. We don't actually "set" the zero position, so you don't need to be accurate. After calculating these "offsets" to shift the motor values around 0, we need to assess the rotation direction of each motor, which might differ. That's why we ask you to rotate all motors to roughly 90 degrees, to measure if the values changed negatively or positively.
During calibration, we count the number of full 360-degree rotations your motors have made since they were first used. That's why we ask you to move to this arbitrary "zero" position. We don't actually "set" the zero position, so you don't need to be accurate. After calculating these "offsets" to shift the motor values around 0, we need to assess the rotation direction of each motor, which might differ. That's why we ask you to rotate all motors to roughly 90 degrees, to mesure if the values changed negatively or positively.

Finally, the rest position ensures that the follower and leader arms are roughly aligned after calibration, preventing sudden movements that could damage the motors when starting teleoperation.
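To make the two quantities in that paragraph concrete, here is a toy illustration (not LeRobot's actual implementation) of how an offset and a rotation direction could be derived for a hypothetical 4096-ticks-per-turn encoder; all numbers are made up.

```python
# Toy sketch of "offset" and "drive mode" derivation during calibration.
TICKS_PER_TURN = 4096  # hypothetical encoder resolution

raw_zero = 5210            # raw reading in the arbitrary "zero" pose
homing_offset = -raw_zero  # shifts subsequent readings to be centered on 0

raw_rotated = 6234  # raw reading after rotating the joint roughly 90 degrees
delta = raw_rotated + homing_offset
drive_mode = 1 if delta < 0 else 0  # 1 means this motor counts "backwards"
print(homing_offset, drive_mode)
```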
@@ -663,7 +663,7 @@ camera.disconnect()

**Instantiate your robot with cameras**

Additionally, you can set up your robot to work with your cameras.
Additionaly, you can set up your robot to work with your cameras.

Modify the following Python code with the appropriate camera names and configurations:
```python
@@ -825,8 +825,8 @@ It contains:
- `dtRlead: 5.06 (197.5hz)` which is the delta time of reading the present position of the leader arm.
- `dtWfoll: 0.25 (3963.7hz)` which is the delta time of writing the goal position on the follower arm; writing is asynchronous so it takes less time than reading.
- `dtRfoll: 6.22 (160.7hz)` which is the delta time of reading the present position on the follower arm.
- `dtRlaptop:32.57 (30.7hz)` which is the delta time of capturing an image from the laptop camera in the thread running asynchronously.
- `dtRphone:33.84 (29.5hz)` which is the delta time of capturing an image from the phone camera in the thread running asynchronously.
- `dtRlaptop:32.57 (30.7hz)` which is the delta time of capturing an image from the laptop camera in the thread running asynchrously.
- `dtRphone:33.84 (29.5hz)` which is the delta time of capturing an image from the phone camera in the thread running asynchrously.
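Each `dt` entry above is simply the wall-clock time one operation took, with its reciprocal reported as a frequency. A minimal sketch of how such a delta time can be measured, where the `time.sleep` stands in for one read/write/capture call:

```python
# Sketch: measure one operation's delta time and report it in ms and Hz.
import time

t0 = time.perf_counter()
time.sleep(0.005)  # stand-in for e.g. reading the leader arm's present position
dt_s = time.perf_counter() - t0
print(f"dt: {dt_s * 1000:.2f}ms ({1 / dt_s:.1f}hz)")
```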
Troubleshooting:
- On Linux, if you encounter a hanging issue when using cameras, uninstall opencv and re-install it with conda:
@@ -846,7 +846,7 @@ At the end of data recording, your dataset will be uploaded on your Hugging Face
echo https://huggingface.co/datasets/${HF_USER}/koch_test
```

### b. Advice for recording dataset
### b. Advices for recording dataset

Once you're comfortable with data recording, it's time to create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings.
@@ -898,14 +898,14 @@ python lerobot/scripts/train.py \
  --policy.type=act \
  --output_dir=outputs/train/act_koch_test \
  --job_name=act_koch_test \
  --policy.device=cuda \
  --device=cuda \
  --wandb.enable=true
```

Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/koch_test`.
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
4. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
4. We provided `device=cuda` since we are training on an Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.

For more information on the `train` script see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md)
@@ -98,7 +98,7 @@ python lerobot/scripts/control_robot.py \
```
This is equivalent to running `stretch_robot_home.py`

> **Note:** If you run any of the LeRobot scripts below and Stretch is not properly homed, it will automatically home/calibrate first.
> **Note:** If you run any of the LeRobot scripts below and Stretch is not poperly homed, it will automatically home/calibrate first.

**Teleoperate**
Before trying teleoperation, you need to activate the gamepad controller by pressing the middle button. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/hello_robot/#gamepad-teleoperation).
@@ -135,14 +135,14 @@ python lerobot/scripts/train.py \
  --policy.type=act \
  --output_dir=outputs/train/act_aloha_test \
  --job_name=act_aloha_test \
  --policy.device=cuda \
  --device=cuda \
  --wandb.enable=true
```

Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/aloha_test`.
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
4. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
4. We provided `device=cuda` since we are training on an Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.

For more information on the `train` script see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md)
@@ -172,10 +172,10 @@ python lerobot/scripts/control_robot.py \
As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/eval_act_aloha_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_aloha_test`).
2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_aloha_test`).
3. We use `--control.num_image_writer_processes=1` instead of the default value (`0`). On our computer, using a dedicated process to write images from the 4 cameras on disk allows us to reach constant 30 fps during inference. Feel free to explore different values for `--control.num_image_writer_processes`.
3. We use `--control.num_image_writer_processes=1` instead of the default value (`0`). On our computer, using a dedicated process to write images from the 4 cameras on disk allows us to reach constent 30 fps during inference. Feel free to explore different values for `--control.num_image_writer_processes`.
## More

Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth explanation.
Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth explaination.

If you have any questions or need help, please reach out on Discord in the channel `#aloha-arm`.
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script demonstrates how to use torchvision's image transformation with LeRobotDataset for data
augmentation purposes. The transformations are passed to the dataset as an argument upon creation, and
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This script demonstrates how to slice a dataset and calculate the loss on a subset of the data.

This technique can be useful for debugging and testing purposes, as well as identifying whether a policy
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import shutil
from pathlib import Path
@@ -1,4 +0,0 @@
from .camera import Camera
from .configs import CameraConfig

__all__ = ["Camera", "CameraConfig"]
@@ -1,25 +0,0 @@
import abc

import numpy as np


class Camera(abc.ABC):
    @abc.abstractmethod
    def connect(self):
        pass

    @abc.abstractmethod
    def read(self, temporary_color: str | None = None) -> np.ndarray:
        pass

    @abc.abstractmethod
    def async_read(self) -> np.ndarray:
        pass

    @abc.abstractmethod
    def disconnect(self):
        pass

    def __del__(self):
        if getattr(self, "is_connected", False):
            self.disconnect()
@@ -1,11 +0,0 @@
import abc
from dataclasses import dataclass

import draccus


@dataclass
class CameraConfig(draccus.ChoiceRegistry, abc.ABC):
    @property
    def type(self) -> str:
        return self.get_choice_name(self.__class__)
@@ -1,4 +0,0 @@
from .camera_realsense import RealSenseCamera
from .configuration_realsense import RealSenseCameraConfig

__all__ = ["RealSenseCamera", "RealSenseCameraConfig"]
@@ -1,4 +0,0 @@
from .camera_opencv import OpenCVCamera
from .configuration_opencv import OpenCVCameraConfig

__all__ = ["OpenCVCamera", "OpenCVCameraConfig"]
@@ -1,38 +0,0 @@
from dataclasses import dataclass

from ..configs import CameraConfig


@CameraConfig.register_subclass("opencv")
@dataclass
class OpenCVCameraConfig(CameraConfig):
    """
    Example of tested options for Intel Real Sense D405:

    ```python
    OpenCVCameraConfig(0, 30, 640, 480)
    OpenCVCameraConfig(0, 60, 640, 480)
    OpenCVCameraConfig(0, 90, 640, 480)
    OpenCVCameraConfig(0, 30, 1280, 720)
    ```
    """

    camera_index: int
    fps: int | None = None
    width: int | None = None
    height: int | None = None
    color_mode: str = "rgb"
    channels: int | None = None
    rotation: int | None = None
    mock: bool = False

    def __post_init__(self):
        if self.color_mode not in ["rgb", "bgr"]:
            raise ValueError(
                f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided."
            )

        self.channels = 3

        if self.rotation not in [-90, None, 90, 180]:
            raise ValueError(f"`rotation` must be in [-90, None, 90, 180] (got {self.rotation})")
@@ -1,21 +0,0 @@
from .camera import Camera
from .configs import CameraConfig


def make_cameras_from_configs(camera_configs: dict[str, CameraConfig]) -> dict[str, Camera]:
    cameras = {}

    for key, cfg in camera_configs.items():
        if cfg.type == "opencv":
            from .opencv import OpenCVCamera

            cameras[key] = OpenCVCamera(cfg)

        elif cfg.type == "intelrealsense":
            from .intel.camera_realsense import RealSenseCamera

            cameras[key] = RealSenseCamera(cfg)
        else:
            raise ValueError(f"The camera type '{cfg.type}' is not valid.")

    return cameras
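A small usage sketch for this factory, assuming the `OpenCVCameraConfig` shown earlier in this diff is importable from the same package; the camera names and device indices are illustrative.

```python
# Sketch: build two OpenCV cameras from configs and grab one frame from each.
configs = {
    "laptop": OpenCVCameraConfig(camera_index=0, fps=30, width=640, height=480),
    "phone": OpenCVCameraConfig(camera_index=1, fps=30, width=640, height=480),
}
cameras = make_cameras_from_configs(configs)
for name, camera in cameras.items():
    camera.connect()
    frame = camera.async_read()  # np.ndarray frame from the background read thread
    print(name, frame.shape)
    camera.disconnect()
```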
@@ -1,31 +1,15 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# keys
import os
from pathlib import Path

from huggingface_hub.constants import HF_HOME

OBS_ENV_STATE = "observation.environment_state"
OBS_STATE = "observation.state"
OBS_ENV = "observation.environment_state"
OBS_ROBOT = "observation.state"
OBS_IMAGE = "observation.image"
OBS_IMAGES = "observation.images"
ACTION = "action"

ROBOTS = "robots"
TELEOPERATORS = "teleoperators"

# files & directories
CHECKPOINTS_DIR = "checkpoints"
LAST_CHECKPOINT_LINK = "last"
@@ -37,16 +21,12 @@ OPTIMIZER_STATE = "optimizer_state.safetensors"
OPTIMIZER_PARAM_GROUPS = "optimizer_param_groups.json"
SCHEDULER_STATE = "scheduler_state.json"

# cache dir
default_cache_path = Path(HF_HOME) / "lerobot"
HF_LEROBOT_HOME = Path(os.getenv("HF_LEROBOT_HOME", default_cache_path)).expanduser()

if "LEROBOT_HOME" in os.environ:
    raise ValueError(
        f"You have a 'LEROBOT_HOME' environment variable set to '{os.getenv('LEROBOT_HOME')}'.\n"
        "'LEROBOT_HOME' is deprecated, please use 'HF_LEROBOT_HOME' instead."
    )

# cache dir
default_cache_path = Path(HF_HOME) / "lerobot"
HF_LEROBOT_HOME = Path(os.getenv("HF_LEROBOT_HOME", default_cache_path)).expanduser()

# calibration dir
default_calibration_path = HF_LEROBOT_HOME / ".calibration"
HF_LEROBOT_CALIBRATION = Path(os.getenv("HF_LEROBOT_CALIBRATION", default_calibration_path)).expanduser()
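A quick sketch of the precedence these constants implement: the environment variable wins, otherwise the default under the HF cache is used. The default path shown is the typical `HF_HOME` location and is an assumption for illustration.

```python
# Sketch: HF_LEROBOT_HOME resolution, env var first, cache default second.
import os
from pathlib import Path

os.environ["HF_LEROBOT_HOME"] = "/tmp/lerobot_cache"
default_cache_path = Path.home() / ".cache/huggingface/lerobot"  # assumed HF_HOME default
resolved = Path(os.getenv("HF_LEROBOT_HOME", default_cache_path)).expanduser()
print(resolved)  # /tmp/lerobot_cache
```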
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import packaging.version

V2_MESSAGE = """

@@ -92,7 +92,7 @@ def compute_episode_stats(episode_data: dict[str, list[str] | np.ndarray], featu
        axes_to_reduce = (0, 2, 3)  # keep channel dim
        keepdims = True
    else:
        ep_ft_array = data  # data is already a np.ndarray
        ep_ft_array = data  # data is alreay a np.ndarray
        axes_to_reduce = 0  # compute stats over the first axis
        keepdims = data.ndim == 1  # keep as np.array
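The two reduction branches above are easy to see with plain NumPy: image features reduce over frames and spatial dims while keeping the channel dimension, and 1-D features reduce over frames only. The shapes below are illustrative.

```python
# Sketch: per-feature statistics with the two axis choices shown above.
import numpy as np

frames = np.random.rand(100, 3, 96, 96)  # (n_frames, channels, height, width)
img_mean = frames.mean(axis=(0, 2, 3), keepdims=True)  # shape (1, 3, 1, 1)

states = np.random.rand(100, 6)  # (n_frames, state_dim)
state_mean = states.mean(axis=0)  # shape (6,)
print(img_mean.shape, state_mean.shape)
```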
@@ -13,7 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import shutil
from pathlib import Path
@@ -28,7 +27,6 @@ import torch.utils
from datasets import concatenate_datasets, load_dataset
from huggingface_hub import HfApi, snapshot_download
from huggingface_hub.constants import REPOCARD_NAME
from huggingface_hub.errors import RevisionNotFoundError

from lerobot.common.constants import HF_LEROBOT_HOME
from lerobot.common.datasets.compute_stats import aggregate_stats, compute_episode_stats
@@ -71,7 +69,7 @@ from lerobot.common.datasets.video_utils import (
    encode_video_frames,
    get_video_info,
)
from lerobot.common.robots.utils import Robot
from lerobot.common.robot_devices.robots.utils import Robot

CODEBASE_VERSION = "v2.1"

@@ -228,7 +226,7 @@ class LeRobotDatasetMetadata:

    def add_task(self, task: str):
        """
        Given a task in natural language, add it to the dictionary of tasks.
        Given a task in natural language, add it to the dictionnary of tasks.
        """
        if task in self.task_to_task_index:
            raise ValueError(f"The task '{task}' already exists and can't be added twice.")
@@ -391,7 +389,7 @@ class LeRobotDataset(torch.utils.data.Dataset):
        - info contains various information about the dataset like shapes, keys, fps etc.
        - stats stores the dataset statistics of the different modalities for normalization
        - tasks contains the prompts for each task of the dataset, which can be used for
          task-conditioned training.
          task-conditionned training.
        - hf_dataset (from datasets.Dataset), which will read any values from parquet files.
        - videos (optional) from which frames are loaded to be synchronous with data from parquet files.
@@ -519,7 +517,6 @@ class LeRobotDataset(torch.utils.data.Dataset):
        branch: str | None = None,
        tags: list | None = None,
        license: str | None = "apache-2.0",
        tag_version: bool = True,
        push_videos: bool = True,
        private: bool = False,
        allow_patterns: list[str] | str | None = None,
@@ -565,11 +562,6 @@ class LeRobotDataset(torch.utils.data.Dataset):
        )
        card.push_to_hub(repo_id=self.repo_id, repo_type="dataset", revision=branch)

        if tag_version:
            with contextlib.suppress(RevisionNotFoundError):
                hub_api.delete_tag(self.repo_id, tag=CODEBASE_VERSION, repo_type="dataset")
            hub_api.create_tag(self.repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset")

    def pull_from_repo(
        self,
        allow_patterns: list[str] | str | None = None,
@@ -856,7 +848,7 @@ class LeRobotDataset(torch.utils.data.Dataset):
        episode_buffer["index"] = np.arange(self.meta.total_frames, self.meta.total_frames + episode_length)
        episode_buffer["episode_index"] = np.full((episode_length,), episode_index)

        # Add new tasks to the tasks dictionary
        # Add new tasks to the tasks dictionnary
        for task in episode_tasks:
            task_index = self.meta.get_task_index(task)
            if task_index is None:
@@ -152,7 +152,7 @@ def download_raw(raw_dir: Path, repo_id: str):
        stacklevel=1,
    )

    # Send warning if raw_dir isn't well formatted
    # Send warning if raw_dir isn't well formated
    if raw_dir.parts[-2] != user_id or raw_dir.parts[-1] != dataset_id:
        warnings.warn(
            f"""`raw_dir` ({raw_dir}) doesn't contain a community or user id `/` the name of the dataset that

@@ -68,9 +68,9 @@ def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episod
        modality_df,
        on="timestamp_utc",
        # "nearest" is the best option over "backward", since the latter can desynchronize camera timestamps by
        # matching timestamps that are too far apart, in order to fit the backward constraints. It's not the case for "nearest".
        # matching timestamps that are too far appart, in order to fit the backward constraints. It's not the case for "nearest".
        # However, note that "nearest" might synchronize the reference camera with other cameras on slightly future timestamps.
        # are too far apart.
        # are too far appart.
        direction="nearest",
        tolerance=pd.Timedelta(f"{1 / fps} seconds"),
    )
@@ -126,7 +126,7 @@ def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episod
    videos_dir.parent.mkdir(parents=True, exist_ok=True)
    videos_dir.symlink_to((raw_dir / "videos").absolute())

    # sanity check the video paths are well formatted
    # sanity check the video paths are well formated
    for key in df:
        if "observation.images." not in key:
            continue
@@ -143,7 +143,7 @@ def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episod
        # it is the case for video_frame dictionary = [{"path": ..., "timestamp": ...}]
        data_dict[key] = [video_frame[0] for video_frame in df[key].values]

        # sanity check the video path is well formatted
        # sanity check the video path is well formated
        video_path = videos_dir.parent / data_dict[key][0]["path"]
        if not video_path.exists():
            raise ValueError(f"Video file not found in {video_path}")
@@ -17,7 +17,7 @@
For all datasets in the RLDS format.
For https://github.com/google-deepmind/open_x_embodiment (OPENX) datasets.

NOTE: You need to install tensorflow and tensorflow_datasets before running this script.
NOTE: You need to install tensorflow and tensorflow_datsets before running this script.

Example:
    python lerobot/scripts/push_dataset_to_hub.py \

@@ -31,7 +31,6 @@ import packaging.version
import torch
from datasets.table import embed_table_storage
from huggingface_hub import DatasetCard, DatasetCardData, HfApi
from huggingface_hub.errors import RevisionNotFoundError
from PIL import Image as PILImage
from torchvision import transforms

@@ -40,7 +39,7 @@ from lerobot.common.datasets.backward_compatibility import (
    BackwardCompatibilityError,
    ForwardCompatibilityError,
)
from lerobot.common.robots.utils import Robot
from lerobot.common.robot_devices.robots.utils import Robot
from lerobot.common.utils.utils import is_valid_numpy_dtype_string
from lerobot.configs.types import DictLike, FeatureType, PolicyFeature
@@ -223,7 +222,7 @@ def load_episodes(local_dir: Path) -> dict:


def write_episode_stats(episode_index: int, episode_stats: dict, local_dir: Path):
    # We wrap episode_stats in a dictionary since `episode_stats["episode_index"]`
    # We wrap episode_stats in a dictionnary since `episode_stats["episode_index"]`
    # is a dictionary of stats and not an integer.
    episode_stats = {"episode_index": episode_index, "stats": serialize_dict(episode_stats)}
    append_jsonlines(episode_stats, local_dir / EPISODES_STATS_PATH)
@@ -326,19 +325,6 @@ def get_safe_version(repo_id: str, version: str | packaging.version.Version) ->
    )
    hub_versions = get_repo_versions(repo_id)

    if not hub_versions:
        raise RevisionNotFoundError(
            f"""Your dataset must be tagged with a codebase version.
            Assuming _version_ is the codebase_version value in the info.json, you can run this:
            ```python
            from huggingface_hub import HfApi

            hub_api = HfApi()
            hub_api.create_tag("{repo_id}", tag="_version_", repo_type="dataset")
            ```
            """
        )

    if target_version in hub_versions:
        return f"v{target_version}"
@@ -459,10 +445,10 @@ def get_episode_data_index(
    if episodes is not None:
        episode_lengths = {ep_idx: episode_lengths[ep_idx] for ep_idx in episodes}

    cumulative_lengths = list(accumulate(episode_lengths.values()))
    cumulative_lenghts = list(accumulate(episode_lengths.values()))
    return {
        "from": torch.LongTensor([0] + cumulative_lengths[:-1]),
        "to": torch.LongTensor(cumulative_lengths),
        "from": torch.LongTensor([0] + cumulative_lenghts[:-1]),
        "to": torch.LongTensor(cumulative_lenghts),
    }
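A worked example of the bookkeeping above: with episode lengths `[10, 20, 30]`, the frames of episode `i` live in the half-open range `[from[i], to[i])`. The variable names mirror the hunk; the lengths are made up.

```python
# Sketch: episode boundaries from cumulative lengths, as in the hunk above.
from itertools import accumulate

import torch

episode_lengths = [10, 20, 30]
cumulative_lengths = list(accumulate(episode_lengths))  # [10, 30, 60]
episode_data_index = {
    "from": torch.LongTensor([0] + cumulative_lengths[:-1]),  # tensor([ 0, 10, 30])
    "to": torch.LongTensor(cumulative_lengths),               # tensor([10, 30, 60])
}
print(episode_data_index)
```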
@@ -27,11 +27,10 @@ from textwrap import dedent

from lerobot import available_datasets
from lerobot.common.datasets.v2.convert_dataset_v1_to_v2 import convert_dataset
from lerobot.common.robots.aloha.configuration_aloha import AlohaRobotConfig
from lerobot.common.robot_devices.robots.configs import AlohaRobotConfig

LOCAL_DIR = Path("data/")

# spellchecker:off
ALOHA_MOBILE_INFO = {
    "robot_config": AlohaRobotConfig(),
    "license": "mit",
@@ -857,7 +856,6 @@ DATASETS = {
    }""").lstrip(),
    },
}
# spellchecker:on


def batch_convert():
@@ -17,7 +17,7 @@
"""
This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 1.6 to
2.0. You will be required to provide the 'tasks', which is a short but accurate description in plain English
for each of the tasks performed in the dataset. This will allow you to easily train models with task-conditioning.
for each of the tasks performed in the dataset. This will allow you to easily train models with task-conditionning.

We support 3 different scenarios for these tasks (see instructions below):
1. Single task dataset: all episodes of your dataset have the same single task.
@@ -141,8 +141,8 @@ from lerobot.common.datasets.video_utils import (
    get_image_pixel_channels,
    get_video_info,
)
from lerobot.common.robots import RobotConfig
from lerobot.common.robots.utils import make_robot_config
from lerobot.common.robot_devices.robots.configs import RobotConfig
from lerobot.common.robot_devices.robots.utils import make_robot_config

V16 = "v1.6"
V20 = "v2.0"
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import traceback
from pathlib import Path
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 2.0 to
2.1. It will:
@@ -71,7 +57,7 @@ def convert_dataset(
    dataset.meta.info["codebase_version"] = CODEBASE_VERSION
    write_info(dataset.meta.info, dataset.root)

    dataset.push_to_hub(branch=branch, tag_version=False, allow_patterns="meta/")
    dataset.push_to_hub(branch=branch, allow_patterns="meta/")

    # delete old stats.json file
    if (dataset.root / STATS_PATH).is_file():
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

@@ -73,7 +73,7 @@ def decode_video_frames_torchvision(
    last_ts = max(timestamps)

    # access closest key frame of the first requested frame
    # Note: closest key frame timestamp is usually smaller than `first_ts` (e.g. key frame can be the first frame of the video)
    # Note: closest key frame timestamp is usally smaller than `first_ts` (e.g. key frame can be the first frame of the video)
    # for details on what `seek` is doing see: https://pyav.basswood-io.com/docs/stable/api/container.html?highlight=inputcontainer#av.container.InputContainer.seek
    reader.seek(first_ts, keyframes_only=keyframes_only)
@@ -1,15 +1 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .configs import AlohaEnv, EnvConfig, PushtEnv, XarmEnv  # noqa: F401
@@ -1,23 +1,9 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
from dataclasses import dataclass, field

import draccus

from lerobot.common.constants import ACTION, OBS_ENV_STATE, OBS_IMAGE, OBS_IMAGES, OBS_STATE
from lerobot.common.constants import ACTION, OBS_ENV, OBS_IMAGE, OBS_IMAGES, OBS_ROBOT
from lerobot.configs.types import FeatureType, PolicyFeature
@@ -53,7 +39,7 @@ class AlohaEnv(EnvConfig):
    features_map: dict[str, str] = field(
        default_factory=lambda: {
            "action": ACTION,
            "agent_pos": OBS_STATE,
            "agent_pos": OBS_ROBOT,
            "top": f"{OBS_IMAGE}.top",
            "pixels/top": f"{OBS_IMAGES}.top",
        }
@@ -94,8 +80,8 @@ class PushtEnv(EnvConfig):
    features_map: dict[str, str] = field(
        default_factory=lambda: {
            "action": ACTION,
            "agent_pos": OBS_STATE,
            "environment_state": OBS_ENV_STATE,
            "agent_pos": OBS_ROBOT,
            "environment_state": OBS_ENV,
            "pixels": OBS_IMAGE,
        }
    )
@@ -136,7 +122,7 @@ class XarmEnv(EnvConfig):
    features_map: dict[str, str] = field(
        default_factory=lambda: {
            "action": ACTION,
            "agent_pos": OBS_STATE,
            "agent_pos": OBS_ROBOT,
            "pixels": OBS_IMAGE,
        }
    )
@@ -37,12 +37,12 @@ def make_env(cfg: EnvConfig, n_envs: int = 1, use_async_envs: bool = False) -> g
    Args:
        cfg (EnvConfig): the config of the environment to instantiate.
        n_envs (int, optional): The number of parallelized env to return. Defaults to 1.
        use_async_envs (bool, optional): Whether to return an AsyncVectorEnv or a SyncVectorEnv. Defaults to
        use_async_envs (bool, optional): Wether to return an AsyncVectorEnv or a SyncVectorEnv. Defaults to
            False.

    Raises:
        ValueError: if n_envs < 1
        ModuleNotFoundError: If the requested env package is not installed
        ModuleNotFoundError: If the requested env package is not intalled

    Returns:
        gym.vector.VectorEnv: The parallelized gym.env instance.
@@ -1,17 +0,0 @@
class DeviceNotConnectedError(ConnectionError):
    """Exception raised when the device is not connected."""

    def __init__(self, message="This device is not connected. Try calling `connect()` first."):
        self.message = message
        super().__init__(self.message)


class DeviceAlreadyConnectedError(ConnectionError):
    """Exception raised when the device is already connected."""

    def __init__(
        self,
        message="This device is already connected. Try not calling `connect()` twice.",
    ):
        self.message = message
        super().__init__(self.message)
@@ -1,3 +0,0 @@
from .motors_bus import MotorsBus

__all__ = ["MotorsBus"]
@@ -1,41 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
from dataclasses import dataclass

import draccus


@dataclass
class MotorsBusConfig(draccus.ChoiceRegistry, abc.ABC):
    @property
    def type(self) -> str:
        return self.get_choice_name(self.__class__)


@MotorsBusConfig.register_subclass("dynamixel")
@dataclass
class DynamixelMotorsBusConfig(MotorsBusConfig):
    port: str
    motors: dict[str, tuple[int, str]]
    mock: bool = False


@MotorsBusConfig.register_subclass("feetech")
@dataclass
class FeetechMotorsBusConfig(MotorsBusConfig):
    port: str
    motors: dict[str, tuple[int, str]]
    mock: bool = False
@@ -1,4 +0,0 @@
from .dynamixel import DynamixelMotorsBus, TorqueMode, set_operating_mode
from .dynamixel_calibration import run_arm_calibration

__all__ = ["DynamixelMotorsBus", "TorqueMode", "set_operating_mode", "run_arm_calibration"]
@@ -1,9 +0,0 @@
from .feetech import FeetechMotorsBus, TorqueMode
from .feetech_calibration import apply_feetech_offsets_from_calibration, run_full_arm_calibration

__all__ = [
    "FeetechMotorsBus",
    "TorqueMode",
    "apply_feetech_offsets_from_calibration",
    "run_full_arm_calibration",
]
@@ -1,254 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np

from ..motors_bus import MotorsBus
from .feetech import (
    CalibrationMode,
    FeetechMotorsBus,
    TorqueMode,
)

URL_TEMPLATE = (
    "https://raw.githubusercontent.com/huggingface/lerobot/main/media/{robot}/{arm}_{position}.webp"
)


def disable_torque(arm: MotorsBus):
    if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
        raise ValueError("To run calibration, the torque must be disabled on all motors.")


def get_calibration_modes(arm: MotorsBus):
    """Returns calibration modes for each motor (DEGREE for rotational, LINEAR for gripper)."""
    return [
        CalibrationMode.LINEAR.name if name == "gripper" else CalibrationMode.DEGREE.name
        for name in arm.motor_names
    ]

def reset_offset(motor_id, motor_bus):
    # Open the write lock, changes to EEPROM do NOT persist yet
    motor_bus.write("Lock", 1)

    # Set offset to 0
    motor_name = motor_bus.motor_names[motor_id - 1]
    motor_bus.write("Offset", 0, motor_names=[motor_name])

    # Close the write lock, changes to EEPROM do persist
    motor_bus.write("Lock", 0)

    # Confirm that the offset is zero by reading it back
    confirmed_offset = motor_bus.read("Offset")[motor_id - 1]
    print(f"Offset for motor {motor_id} reset to: {confirmed_offset}")
    return confirmed_offset


def calibrate_homing_motor(motor_id, motor_bus):
    reset_offset(motor_id, motor_bus)

    home_ticks = motor_bus.read("Present_Position")[motor_id - 1]  # Read index starts at 0
    print(f"Encoder offset (present position in homing position): {home_ticks}")

    return home_ticks


def calibrate_linear_motor(motor_id, motor_bus):
    motor_names = motor_bus.motor_names
    motor_name = motor_names[motor_id - 1]

    reset_offset(motor_id, motor_bus)

    input(f"Close the {motor_name}, then press Enter...")
    start_pos = motor_bus.read("Present_Position")[motor_id - 1]  # Read index starts at 0
    print(f"  [Motor {motor_id}] start position recorded: {start_pos}")

    input(f"Open the {motor_name} fully, then press Enter...")
    end_pos = motor_bus.read("Present_Position")[motor_id - 1]  # Read index starts at 0
    print(f"  [Motor {motor_id}] end position recorded: {end_pos}")

    return start_pos, end_pos

def single_motor_calibration(arm: MotorsBus, motor_id: int):
    """Calibrates a single motor and returns its calibration data for updating the calibration file."""

    disable_torque(arm)
    print(f"\n--- Calibrating Motor {motor_id} ---")

    start_pos = 0
    end_pos = 0
    encoder_offset = 0

    if motor_id == 6:
        start_pos, end_pos = calibrate_linear_motor(motor_id, arm)
    else:
        input("Move the motor to (zero) position, then press Enter...")
        encoder_offset = calibrate_homing_motor(motor_id, arm)

    print(f"Calibration for motor ID:{motor_id} done.")

    # Create a calibration dictionary for the single motor
    calib_dict = {
        "homing_offset": int(encoder_offset),
        "drive_mode": 0,
        "start_pos": int(start_pos),
        "end_pos": int(end_pos),
        "calib_mode": get_calibration_modes(arm)[motor_id - 1],
        "motor_name": arm.motor_names[motor_id - 1],
    }

    return calib_dict

def run_full_arm_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
    """
    Runs a full calibration process for all motors in a robotic arm.

    This function calibrates each motor in the arm, determining encoder offsets and
    start/end positions for linear and rotational motors. The calibration data is then
    stored in a dictionary for later use.

    **Calibration Process:**
    - The user is prompted to move the arm to its homing position before starting.
    - Motors with rotational motion are calibrated using a homing method.
    - Linear actuators (e.g., grippers) are calibrated separately.
    - Encoder offsets, start positions, and end positions are recorded.

    **Example Usage:**
    ```python
    run_full_arm_calibration(arm, "so100", "left", "follower")
    ```
    """
    disable_torque(arm)

    print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")

    print("\nMove arm to homing position (middle)")
    print(
        "See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="zero")
    )  # TODO(pepijn): replace with new instruction homing pos (all motors in middle) in tutorial
    input("Press Enter to continue...")

    start_positions = np.zeros(len(arm.motor_indices))
    end_positions = np.zeros(len(arm.motor_indices))
    encoder_offsets = np.zeros(len(arm.motor_indices))

    modes = get_calibration_modes(arm)

    for i, motor_id in enumerate(arm.motor_indices):
        if modes[i] == CalibrationMode.DEGREE.name:
            encoder_offsets[i] = calibrate_homing_motor(motor_id, arm)
            start_positions[i] = 0
            end_positions[i] = 0

    for i, motor_id in enumerate(arm.motor_indices):
        if modes[i] == CalibrationMode.LINEAR.name:
            start_positions[i], end_positions[i] = calibrate_linear_motor(motor_id, arm)
            encoder_offsets[i] = 0

    print("\nMove arm to rest position")
    input("Press Enter to continue...")

    print(f"\nCalibration of {robot_type} {arm_name} {arm_type} done!")

    # Force drive_mode values (can be static)
    drive_modes = [0, 1, 0, 0, 1, 0]

    calib_dict = {
        "homing_offset": encoder_offsets.astype(int).tolist(),
        "drive_mode": drive_modes,
        "start_pos": start_positions.astype(int).tolist(),
        "end_pos": end_positions.astype(int).tolist(),
        "calib_mode": get_calibration_modes(arm),
        "motor_names": arm.motor_names,
    }
    return calib_dict


def run_full_auto_arm_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
    """TODO(pepijn): Add this method later as extra
    Example of usage:
    ```python
    run_full_auto_arm_calibration(arm, "so100", "left", "follower")
    ```
    """
    print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")

def apply_feetech_offsets_from_calibration(motorsbus: FeetechMotorsBus, calibration_dict: dict):
    """
    Reads 'calibration_dict' containing 'homing_offset' and 'motor_names',
    then writes each motor's offset to the servo's internal Offset (0x1F) in EPROM.

    This version is modified so each homed position (originally 0) will now read
    2047, i.e. 180° away from 0 in the 4096-count circle. Offsets are permanently
    stored in EEPROM, so the servo's Present_Position is hardware-shifted even
    after power cycling.

    Steps:
      1) Subtract 2047 from the old offset (so 0 -> 2047).
      2) Clamp to [-2047..+2047].
      3) Encode sign bit and magnitude into a 12-bit number.
    """

    homing_offsets = calibration_dict["homing_offset"]
    motor_names = calibration_dict["motor_names"]
    start_pos = calibration_dict["start_pos"]

    # Open the write lock; changes to EEPROM do NOT persist yet
    motorsbus.write("Lock", 1)

    # For each motor, set the 'Offset' parameter
    for m_name, old_offset in zip(motor_names, homing_offsets, strict=False):
        # If the bus doesn't have a motor named m_name, skip it
        if m_name not in motorsbus.motors:
            print(f"Warning: '{m_name}' not found in motorsbus.motors; skipping offset.")
            continue

        if m_name == "gripper":
            # For the gripper, use its start position as the offset; note that
            # `continue` skips the EEPROM write below for this motor.
            old_offset = start_pos
            continue

        # Shift the offset so the homed position reads 2047
        new_offset = old_offset - 2047

        # Clamp to [-2047..+2047]
        if new_offset > 2047:
            new_offset = 2047
            print(
                f"Warning: '{new_offset}' is getting clamped because it is larger than 2047; this should not happen!"
            )
        elif new_offset < -2047:
            new_offset = -2047
            print(
                f"Warning: '{new_offset}' is getting clamped because it is smaller than -2047; this should not happen!"
            )

        # Determine the direction (sign) bit and magnitude
        direction_bit = 1 if new_offset < 0 else 0
        magnitude = abs(new_offset)

        # Combine sign bit (bit 11) with the magnitude (bits 0..10)
        servo_offset = (direction_bit << 11) | magnitude

        # Write offset to servo
        motorsbus.write("Offset", servo_offset, motor_names=m_name)
        print(
            f"Set offset for {m_name}: "
            f"old_offset={old_offset}, new_offset={new_offset}, servo_encoded={magnitude} + direction={direction_bit}"
        )

    motorsbus.write("Lock", 0)
    print("Offsets have been saved to EEPROM successfully.")

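The sign-magnitude encoding in the loop above is easy to get wrong, so here is a minimal, self-contained sketch of the encode/decode pair (hypothetical helper names, not part of the codebase):

```python
def encode_servo_offset(offset: int) -> int:
    """Encode a signed offset in [-2047, 2047] as a 12-bit sign-magnitude value."""
    offset = max(-2047, min(2047, offset))  # clamp, mirroring the function above
    direction_bit = 1 if offset < 0 else 0
    return (direction_bit << 11) | abs(offset)


def decode_servo_offset(raw: int) -> int:
    """Invert the encoding: bit 11 is the sign, bits 0..10 are the magnitude."""
    magnitude = raw & 0x7FF
    return -magnitude if (raw >> 11) & 1 else magnitude


assert decode_servo_offset(encode_servo_offset(-1500)) == -1500
assert encode_servo_offset(0) == 0
```
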
@@ -1,46 +0,0 @@
import abc


class MotorsBus(abc.ABC):
    """The main LeRobot class for implementing motors buses."""

    def __init__(
        self,
        motors: dict[str, tuple[int, str]],
    ):
        self.motors = motors

    def __len__(self):
        return len(self.motors)

    @abc.abstractmethod
    def connect(self):
        pass

    @abc.abstractmethod
    def reconnect(self):
        pass

    @abc.abstractmethod
    def set_calibration(self, calibration: dict[str, list]):
        pass

    @abc.abstractmethod
    def apply_calibration(self):
        pass

    @abc.abstractmethod
    def revert_calibration(self):
        pass

    @abc.abstractmethod
    def read(self):
        pass

    @abc.abstractmethod
    def write(self):
        pass

    @abc.abstractmethod
    def disconnect(self):
        pass

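For context, the abstract base class removed above was meant to be subclassed; a minimal in-memory stand-in (purely illustrative, e.g. for tests) only has to fill in the abstract methods:

```python
class FakeMotorsBus(MotorsBus):
    """In-memory stand-in that satisfies the MotorsBus interface."""

    def connect(self):
        self._positions = dict.fromkeys(self.motors, 0)

    def reconnect(self):
        self.connect()

    def set_calibration(self, calibration: dict[str, list]):
        self.calibration = calibration

    def apply_calibration(self):
        pass

    def revert_calibration(self):
        pass

    def read(self):
        return dict(self._positions)

    def write(self, values=None):
        self._positions.update(values or {})

    def disconnect(self):
        self._positions = {}


bus = FakeMotorsBus({"shoulder_pan": (1, "sts3215")})  # made-up motor table
bus.connect()
```
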
@@ -1,56 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .configs import MotorsBusConfig
from .motors_bus import MotorsBus


def make_motors_buses_from_configs(motors_bus_configs: dict[str, MotorsBusConfig]) -> list[MotorsBus]:
    motors_buses = {}

    for key, cfg in motors_bus_configs.items():
        if cfg.type == "dynamixel":
            from .dynamixel import DynamixelMotorsBus

            motors_buses[key] = DynamixelMotorsBus(cfg)

        elif cfg.type == "feetech":
            from lerobot.common.motors.feetech.feetech import FeetechMotorsBus

            motors_buses[key] = FeetechMotorsBus(cfg)

        else:
            raise ValueError(f"The motor type '{cfg.type}' is not valid.")

    return motors_buses


def make_motors_bus(motor_type: str, **kwargs) -> MotorsBus:
    if motor_type == "dynamixel":
        from .configs import DynamixelMotorsBusConfig
        from .dynamixel import DynamixelMotorsBus

        config = DynamixelMotorsBusConfig(**kwargs)
        return DynamixelMotorsBus(config)

    elif motor_type == "feetech":
        from feetech import FeetechMotorsBus

        from .configs import FeetechMotorsBusConfig

        config = FeetechMotorsBusConfig(**kwargs)
        return FeetechMotorsBus(config)

    else:
        raise ValueError(f"The motor type '{motor_type}' is not valid.")

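For context, the removed factories were used roughly like this (port and motor table are made-up values):

```python
bus = make_motors_bus(
    "feetech",
    port="/dev/ttyACM0",  # made-up port
    motors={"shoulder_pan": (1, "sts3215")},  # made-up motor table
)
bus.connect()

# Several buses at once, from a {name: config} mapping; note the function
# builds and returns a dict keyed by name, despite the `list` annotation.
# buses = make_motors_buses_from_configs({"main": FeetechMotorsBusConfig(...)})
```
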
@@ -1,15 +1 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .optimizers import OptimizerConfig as OptimizerConfig

@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .act.configuration_act import ACTConfig as ACTConfig
from .diffusion.configuration_diffusion import DiffusionConfig as DiffusionConfig
from .pi0.configuration_pi0 import PI0Config as PI0Config

@@ -64,7 +64,7 @@ class ACTConfig(PreTrainedConfig):
        output_normalization_modes: Similar dictionary as `normalize_input_modes`, but to unnormalize to the
            original scale. Note that this is also used for normalizing the training targets.
        vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
        pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
        pretrained_backbone_weights: Pretrained weights from torchvision to initalize the backbone.
            `None` means no pretrained weights.
        replace_final_stride_with_dilation: Whether to replace the ResNet's final 2x2 stride with a dilated
            convolution.

@@ -68,7 +68,7 @@ class DiffusionConfig(PreTrainedConfig):
            within the image size. If None, no cropping is done.
        crop_is_random: Whether the crop should be random at training time (it's always a center crop in eval
            mode).
        pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
        pretrained_backbone_weights: Pretrained weights from torchvision to initalize the backbone.
            `None` means no pretrained weights.
        use_group_norm: Whether to replace batch normalization with group normalization in the backbone.
            The group sizes are set to be about 16 (to be precise, feature_dim // 16).

@@ -99,7 +99,7 @@ class DiffusionConfig(PreTrainedConfig):
        num_inference_steps: Number of reverse diffusion steps to use at inference time (steps are evenly
            spaced). If not provided, this defaults to be the same as `num_train_timesteps`.
        do_mask_loss_for_padding: Whether to mask the loss when there are copy-padded actions. See
            `LeRobotDataset` and `load_previous_and_future_frames` for more information. Note, this defaults
            `LeRobotDataset` and `load_previous_and_future_frames` for mor information. Note, this defaults
            to False as the original Diffusion Policy implementation does the same.
    """

@@ -33,7 +33,7 @@ from diffusers.schedulers.scheduling_ddim import DDIMScheduler
from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
from torch import Tensor, nn

from lerobot.common.constants import OBS_ENV_STATE, OBS_STATE
from lerobot.common.constants import OBS_ENV, OBS_ROBOT
from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig
from lerobot.common.policies.normalize import Normalize, Unnormalize
from lerobot.common.policies.pretrained import PreTrainedPolicy

@@ -238,8 +238,8 @@ class DiffusionModel(nn.Module):

    def _prepare_global_conditioning(self, batch: dict[str, Tensor]) -> Tensor:
        """Encode image features and concatenate them all together along with the state vector."""
        batch_size, n_obs_steps = batch[OBS_STATE].shape[:2]
        global_cond_feats = [batch[OBS_STATE]]
        batch_size, n_obs_steps = batch[OBS_ROBOT].shape[:2]
        global_cond_feats = [batch[OBS_ROBOT]]
        # Extract image features.
        if self.config.image_features:
            if self.config.use_separate_rgb_encoder_per_camera:

@@ -269,7 +269,7 @@ class DiffusionModel(nn.Module):
            global_cond_feats.append(img_features)

        if self.config.env_state_feature:
            global_cond_feats.append(batch[OBS_ENV_STATE])
            global_cond_feats.append(batch[OBS_ENV])

        # Concatenate features then flatten to (B, global_cond_dim).
        return torch.cat(global_cond_feats, dim=-1).flatten(start_dim=1)

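The shape bookkeeping in `_prepare_global_conditioning` is easiest to see on dummy tensors: features are concatenated along the last dimension, then the observation-step axis is folded into the feature axis:

```python
import torch

B, n_obs_steps = 2, 3
state = torch.randn(B, n_obs_steps, 7)       # robot state feature
img_feats = torch.randn(B, n_obs_steps, 64)  # image feature (illustrative size)

global_cond = torch.cat([state, img_feats], dim=-1).flatten(start_dim=1)
print(global_cond.shape)  # torch.Size([2, 213]) -> (B, n_obs_steps * (7 + 64))
```
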
@@ -16,6 +16,7 @@

import logging

import torch
from torch import nn

from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata

@@ -75,6 +76,7 @@ def make_policy_config(policy_type: str, **kwargs) -> PreTrainedConfig:

def make_policy(
    cfg: PreTrainedConfig,
    device: str | torch.device,
    ds_meta: LeRobotDatasetMetadata | None = None,
    env_cfg: EnvConfig | None = None,
) -> PreTrainedPolicy:

@@ -86,6 +88,7 @@ def make_policy(
    Args:
        cfg (PreTrainedConfig): The config of the policy to make. If `pretrained_path` is set, the policy will
            be loaded with the weights from that path.
        device (str): the device to load the policy onto.
        ds_meta (LeRobotDatasetMetadata | None, optional): Dataset metadata to take input/output shapes and
            statistics to use for (un)normalization of inputs/outputs in the policy. Defaults to None.
        env_cfg (EnvConfig | None, optional): The config of a gym environment to parse features from. Must be

@@ -93,7 +96,7 @@ def make_policy(

    Raises:
        ValueError: Either ds_meta or env and env_cfg must be provided.
        NotImplementedError: if the policy.type is 'vqbet' and the policy device 'mps' (due to an incompatibility)
        NotImplementedError: if the policy.type is 'vqbet' and the device 'mps' (due to an incompatibility)

    Returns:
        PreTrainedPolicy: _description_

@@ -108,7 +111,7 @@ def make_policy(
    # https://github.com/pytorch/pytorch/issues/77764. As a temporary fix, you can set the environment
    # variable `PYTORCH_ENABLE_MPS_FALLBACK=1` to use the CPU as a fallback for this op. WARNING: this will be
    # slower than running natively on MPS.
    if cfg.type == "vqbet" and cfg.device == "mps":
    if cfg.type == "vqbet" and str(device) == "mps":
        raise NotImplementedError(
            "Current implementation of VQBeT does not support `mps` backend. "
            "Please use `cpu` or `cuda` backend."

@@ -142,7 +145,7 @@ def make_policy(
    # Make a fresh policy.
    policy = policy_cls(**kwargs)

    policy.to(cfg.device)
    policy.to(device)
    assert isinstance(policy, nn.Module)

    # policy = torch.compile(policy, mode="reduce-overhead")

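With the new signature, call sites pass the device explicitly instead of relying on `cfg.device`; a sketch (dataset id, checkpoint path, and the `PreTrainedConfig` import path are assumptions):

```python
import torch

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.policies.factory import make_policy
from lerobot.configs.policies import PreTrainedConfig  # import path assumed

device = "cuda" if torch.cuda.is_available() else "cpu"
dataset = LeRobotDataset("lerobot/aloha_static_coffee")  # placeholder dataset id

ckpt_dir = "outputs/train/my_run/checkpoints/last/pretrained_model"  # placeholder path
cfg = PreTrainedConfig.from_pretrained(ckpt_dir)
cfg.pretrained_path = ckpt_dir
policy = make_policy(cfg, device, ds_meta=dataset.meta)
```
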
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field

from lerobot.common.optim.optimizers import AdamWConfig

@@ -90,7 +76,6 @@ class PI0Config(PreTrainedConfig):
    def __post_init__(self):
        super().__post_init__()

        # TODO(Steven): Validate device and amp? in all policy configs?
        """Input validation (not exhaustive)."""
        if self.n_action_steps > self.chunk_size:
            raise ValueError(

@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

@@ -45,7 +31,7 @@ def main():

    cfg = PreTrainedConfig.from_pretrained(ckpt_torch_dir)
    cfg.pretrained_path = ckpt_torch_dir
    policy = make_policy(cfg, ds_meta=dataset.meta)
    policy = make_policy(cfg, device, ds_meta=dataset.meta)

    # policy = torch.compile(policy, mode="reduce-overhead")

@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import pickle
from pathlib import Path

@@ -101,7 +87,7 @@ def main():

    cfg = PreTrainedConfig.from_pretrained(ckpt_torch_dir)
    cfg.pretrained_path = ckpt_torch_dir
    policy = make_policy(cfg, dataset_meta)
    policy = make_policy(cfg, device, dataset_meta)

    # loss_dict = policy.forward(batch, noise=noise, time=time_beta)
    # loss_dict["loss"].backward()

@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from transformers import GemmaConfig, PaliGemmaConfig

@@ -1,22 +1,8 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Convert pi0 parameters from Jax to Pytorch

Follow [README of openpi](https://github.com/Physical-Intelligence/openpi) to create a new environment
and install the required libraries.
and install the required librairies.

```bash
cd ~/code/openpi

@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.nn.functional as F  # noqa: N812
from packaging.version import Version

@@ -57,7 +57,7 @@ import torch.nn.functional as F  # noqa: N812
from torch import Tensor, nn
from transformers import AutoTokenizer

from lerobot.common.constants import ACTION, OBS_STATE
from lerobot.common.constants import ACTION, OBS_ROBOT
from lerobot.common.policies.normalize import Normalize, Unnormalize
from lerobot.common.policies.pi0.configuration_pi0 import PI0Config
from lerobot.common.policies.pi0.paligemma_with_expert import (

@@ -271,7 +271,7 @@ class PI0Policy(PreTrainedPolicy):
        self.eval()

        if self.config.adapt_to_pi_aloha:
            batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])
            batch[OBS_ROBOT] = self._pi_aloha_decode_state(batch[OBS_ROBOT])

        batch = self.normalize_inputs(batch)

@@ -303,7 +303,7 @@ class PI0Policy(PreTrainedPolicy):
    def forward(self, batch: dict[str, Tensor], noise=None, time=None) -> tuple[Tensor, dict[str, Tensor]]:
        """Do a full training forward pass to compute the loss"""
        if self.config.adapt_to_pi_aloha:
            batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])
            batch[OBS_ROBOT] = self._pi_aloha_decode_state(batch[OBS_ROBOT])
            batch[ACTION] = self._pi_aloha_encode_actions_inv(batch[ACTION])

        batch = self.normalize_inputs(batch)

@@ -313,7 +313,7 @@ class PI0Policy(PreTrainedPolicy):
        state = self.prepare_state(batch)
        lang_tokens, lang_masks = self.prepare_language(batch)
        actions = self.prepare_action(batch)
        actions_is_pad = batch.get("actions_is_pad")
        actions_is_pad = batch.get("actions_id_pad")

        loss_dict = {}
        losses = self.model.forward(images, img_masks, lang_tokens, lang_masks, state, actions, noise, time)

@@ -380,7 +380,7 @@ class PI0Policy(PreTrainedPolicy):

    def prepare_language(self, batch) -> tuple[Tensor, Tensor]:
        """Tokenize the text input"""
        device = batch[OBS_STATE].device
        device = batch[OBS_ROBOT].device
        tasks = batch["task"]

        # PaliGemma prompt has to end with a new line

@@ -427,7 +427,7 @@ class PI0Policy(PreTrainedPolicy):

    def prepare_state(self, batch):
        """Pad state"""
        state = pad_vector(batch[OBS_STATE], self.config.max_state_dim)
        state = pad_vector(batch[OBS_ROBOT], self.config.max_state_dim)
        return state

    def prepare_action(self, batch):

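`pad_vector` is only called here, not shown; a plausible minimal implementation consistent with this usage (zero-pad the last dimension up to `max_state_dim`) would be:

```python
import torch
from torch import Tensor


def pad_vector(vector: Tensor, new_dim: int) -> Tensor:
    """Zero-pad the last dimension of `vector` up to `new_dim` (no-op if already that size)."""
    if vector.shape[-1] == new_dim:
        return vector
    shape = list(vector.shape)
    shape[-1] = new_dim
    padded = torch.zeros(shape, dtype=vector.dtype, device=vector.device)
    padded[..., : vector.shape[-1]] = vector
    return padded


state = pad_vector(torch.randn(2, 14), 32)  # (2, 14) -> (2, 32)
```
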
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Optional, Union

import torch

@@ -1,16 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
import os

@@ -86,6 +73,7 @@ class PreTrainedPolicy(nn.Module, HubMixin, abc.ABC):
        cache_dir: str | Path | None = None,
        local_files_only: bool = False,
        revision: str | None = None,
        map_location: str = "cpu",
        strict: bool = False,
        **kwargs,
    ) -> T:

@@ -110,7 +98,7 @@ class PreTrainedPolicy(nn.Module, HubMixin, abc.ABC):
        if os.path.isdir(model_id):
            print("Loading weights from local directory")
            model_file = os.path.join(model_id, SAFETENSORS_SINGLE_FILE)
            policy = cls._load_as_safetensor(instance, model_file, config.device, strict)
            policy = cls._load_as_safetensor(instance, model_file, map_location, strict)
        else:
            try:
                model_file = hf_hub_download(

@@ -124,13 +112,13 @@ class PreTrainedPolicy(nn.Module, HubMixin, abc.ABC):
                    token=token,
                    local_files_only=local_files_only,
                )
                policy = cls._load_as_safetensor(instance, model_file, config.device, strict)
                policy = cls._load_as_safetensor(instance, model_file, map_location, strict)
            except HfHubHTTPError as e:
                raise FileNotFoundError(
                    f"{SAFETENSORS_SINGLE_FILE} not found on the HuggingFace Hub in {model_id}"
                ) from e

        policy.to(config.device)
        policy.to(map_location)
        policy.eval()
        return policy

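After this change the load-time device comes from the new `map_location` argument rather than `config.device`; a call site looks roughly like this (model id and import path are placeholders):

```python
from lerobot.common.policies.pi0.modeling_pi0 import PI0Policy  # import path assumed

policy = PI0Policy.from_pretrained(
    "lerobot/pi0",          # placeholder model id
    map_location="cuda:0",  # replaces the old config.device lookup
)
```
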
@@ -76,7 +76,7 @@ class TDMPCConfig(PreTrainedConfig):
        n_pi_samples: Number of samples to draw from the policy / world model rollout every CEM iteration. Can
            be zero.
        uncertainty_regularizer_coeff: Coefficient for the uncertainty regularization used when estimating
            trajectory values (this is the λ coefficient in eqn 4 of FOWM).
            trajectory values (this is the λ coeffiecient in eqn 4 of FOWM).
        n_elites: The number of elite samples to use for updating the gaussian parameters every CEM iteration.
        elite_weighting_temperature: The temperature to use for softmax weighting (by trajectory value) of the
            elites, when updating the gaussian parameters for CEM.

@@ -165,7 +165,7 @@ class TDMPCConfig(PreTrainedConfig):
        """Input validation (not exhaustive)."""
        if self.n_gaussian_samples <= 0:
            raise ValueError(
                f"The number of gaussian samples for CEM should be non-zero. Got `{self.n_gaussian_samples=}`"
                f"The number of guassian samples for CEM should be non-zero. Got `{self.n_gaussian_samples=}`"
            )
        if self.normalization_mapping["ACTION"] is not NormalizationMode.MIN_MAX:
            raise ValueError(

@@ -35,7 +35,7 @@ import torch.nn as nn
import torch.nn.functional as F  # noqa: N812
from torch import Tensor

from lerobot.common.constants import OBS_ENV_STATE, OBS_STATE
from lerobot.common.constants import OBS_ENV, OBS_ROBOT
from lerobot.common.policies.normalize import Normalize, Unnormalize
from lerobot.common.policies.pretrained import PreTrainedPolicy
from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig

@@ -753,9 +753,9 @@ class TDMPCObservationEncoder(nn.Module):
                )
            )
        if self.config.env_state_feature:
            feat.append(self.env_state_enc_layers(obs_dict[OBS_ENV_STATE]))
            feat.append(self.env_state_enc_layers(obs_dict[OBS_ENV]))
        if self.config.robot_state_feature:
            feat.append(self.state_enc_layers(obs_dict[OBS_STATE]))
            feat.append(self.state_enc_layers(obs_dict[OBS_ROBOT]))
        return torch.stack(feat, dim=0).mean(0)

@@ -66,7 +66,7 @@ class VQBeTConfig(PreTrainedConfig):
            within the image size. If None, no cropping is done.
        crop_is_random: Whether the crop should be random at training time (it's always a center crop in eval
            mode).
        pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
        pretrained_backbone_weights: Pretrained weights from torchvision to initalize the backbone.
            `None` means no pretrained weights.
        use_group_norm: Whether to replace batch normalization with group normalization in the backbone.
            The group sizes are set to be about 16 (to be precise, feature_dim // 16).

@@ -485,7 +485,7 @@ class VQBeTHead(nn.Module):
    def forward(self, x, **kwargs) -> dict:
        # N is the batch size, and T is the number of action query tokens, which are processed through the same GPT
        N, T, _ = x.shape
        # we calculate N and T side parallelly. Thus, the dimensions would be
        # we calculate N and T side parallely. Thus, the dimensions would be
        # (batch size * number of action query tokens, action chunk size, action dimension)
        x = einops.rearrange(x, "N T WA -> (N T) WA")

@@ -772,7 +772,7 @@ class VqVae(nn.Module):
    Encoder and decoder are MLPs consisting of an input, output layer, and hidden layer, respectively.
    The vq_layer uses residual VQs.

    This class contains functions for training the encoder and decoder along with the residual VQ layer (for training phase 1),
    This class contains functions for training the encoder and decoder along with the residual VQ layer (for trainign phase 1),
    as well as functions to help BeT training part in training phase 2.
    """

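The rearrange above folds the token axis into the batch axis so both are processed in one pass; a tiny round-trip demo:

```python
import einops
import torch

N, T, WA = 4, 5, 32
x = torch.randn(N, T, WA)

flat = einops.rearrange(x, "N T WA -> (N T) WA")  # (20, 32): batch and tokens merged
back = einops.rearrange(flat, "(N T) WA -> N T WA", N=N)
assert torch.equal(x, back)
```
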
@@ -38,7 +38,7 @@ from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig
This file is part of a VQ-BeT that utilizes code from the following repositories:

    - Vector Quantize PyTorch code is licensed under the MIT License:
        Original source: https://github.com/lucidrains/vector-quantize-pytorch
        Origianl source: https://github.com/lucidrains/vector-quantize-pytorch

    - nanoGPT part is an adaptation of Andrej Karpathy's nanoGPT implementation in PyTorch.
        Original source: https://github.com/karpathy/nanoGPT

@@ -289,7 +289,7 @@ class GPT(nn.Module):
This file is a part for Residual Vector Quantization that utilizes code from the following repository:

    - Phil Wang's vector-quantize-pytorch implementation in PyTorch.
        Original source: https://github.com/lucidrains/vector-quantize-pytorch
        Origianl source: https://github.com/lucidrains/vector-quantize-pytorch

    - The vector-quantize-pytorch code is licensed under the MIT License:

@@ -1349,9 +1349,9 @@ class EuclideanCodebook(nn.Module):

        # calculate distributed variance

        variance_number = reduce((data - batch_mean) ** 2, "h n d -> h 1 d", "sum")
        distributed.all_reduce(variance_number)
        batch_variance = variance_number / num_vectors
        variance_numer = reduce((data - batch_mean) ** 2, "h n d -> h 1 d", "sum")
        distributed.all_reduce(variance_numer)
        batch_variance = variance_numer / num_vectors

        self.update_with_decay("batch_variance", batch_variance, self.affine_param_batch_decay)

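The two-pass variance above is just Var = sum((x - mean)^2) / N, with the sum all-reduced across workers before dividing; on a single process it reduces to:

```python
import torch

data = torch.randn(1, 1000, 16)                 # (h, n, d) layout, as above
num_vectors = data.shape[1]
batch_mean = data.mean(dim=1, keepdim=True)     # (h, 1, d)
squared_dev_sum = ((data - batch_mean) ** 2).sum(dim=1, keepdim=True)
batch_variance = squared_dev_sum / num_vectors  # biased (population) variance

assert torch.allclose(batch_variance, data.var(dim=1, keepdim=True, unbiased=False))
```
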
@@ -1,35 +1,64 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
from dataclasses import dataclass

from ..configs import CameraConfig
import draccus


@CameraConfig.register_subclass("intelrealsense")
@dataclass
class RealSenseCameraConfig(CameraConfig):
class CameraConfig(draccus.ChoiceRegistry, abc.ABC):
    @property
    def type(self) -> str:
        return self.get_choice_name(self.__class__)


@CameraConfig.register_subclass("opencv")
@dataclass
class OpenCVCameraConfig(CameraConfig):
    """
    Example of tested options for Intel Real Sense D405:

    ```python
    RealSenseCameraConfig(128422271347, 30, 640, 480)
    RealSenseCameraConfig(128422271347, 60, 640, 480)
    RealSenseCameraConfig(128422271347, 90, 640, 480)
    RealSenseCameraConfig(128422271347, 30, 1280, 720)
    RealSenseCameraConfig(128422271347, 30, 640, 480, use_depth=True)
    RealSenseCameraConfig(128422271347, 30, 640, 480, rotation=90)
    OpenCVCameraConfig(0, 30, 640, 480)
    OpenCVCameraConfig(0, 60, 640, 480)
    OpenCVCameraConfig(0, 90, 640, 480)
    OpenCVCameraConfig(0, 30, 1280, 720)
    ```
    """

    camera_index: int
    fps: int | None = None
    width: int | None = None
    height: int | None = None
    color_mode: str = "rgb"
    channels: int | None = None
    rotation: int | None = None
    mock: bool = False

    def __post_init__(self):
        if self.color_mode not in ["rgb", "bgr"]:
            raise ValueError(
                f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided."
            )

        self.channels = 3

        if self.rotation not in [-90, None, 90, 180]:
            raise ValueError(f"`rotation` must be in [-90, None, 90, 180] (got {self.rotation})")


@CameraConfig.register_subclass("intelrealsense")
@dataclass
class IntelRealSenseCameraConfig(CameraConfig):
    """
    Example of tested options for Intel Real Sense D405:

    ```python
    IntelRealSenseCameraConfig(128422271347, 30, 640, 480)
    IntelRealSenseCameraConfig(128422271347, 60, 640, 480)
    IntelRealSenseCameraConfig(128422271347, 90, 640, 480)
    IntelRealSenseCameraConfig(128422271347, 30, 1280, 720)
    IntelRealSenseCameraConfig(128422271347, 30, 640, 480, use_depth=True)
    IntelRealSenseCameraConfig(128422271347, 30, 640, 480, rotation=90)
    ```
    """

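The `__post_init__` checks fire as soon as a config is constructed, for example:

```python
try:
    OpenCVCameraConfig(camera_index=0, fps=30, width=640, height=480, rotation=45)
except ValueError as e:
    print(e)  # `rotation` must be in [-90, None, 90, 180] (got 45)
```
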
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This file contains utilities for recording frames from Intel Realsense cameras.
"""

@@ -31,15 +17,14 @@ from threading import Thread
import numpy as np
from PIL import Image

from lerobot.common.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
from lerobot.common.utils.robot_utils import (
from lerobot.common.robot_devices.cameras.configs import IntelRealSenseCameraConfig
from lerobot.common.robot_devices.utils import (
    RobotDeviceAlreadyConnectedError,
    RobotDeviceNotConnectedError,
    busy_wait,
)
from lerobot.common.utils.utils import capture_timestamp_utc

from ..camera import Camera
from .configuration_realsense import RealSenseCameraConfig

SERIAL_NUMBER_INDEX = 1

@@ -49,7 +34,7 @@ def find_cameras(raise_when_empty=True, mock=False) -> list[dict]:
    connected to the computer.
    """
    if mock:
        import tests.cameras.mock_pyrealsense2 as rs
        import tests.mock_pyrealsense2 as rs
    else:
        import pyrealsense2 as rs

@@ -101,7 +86,7 @@ def save_images_from_cameras(
    serial_numbers = [cam["serial_number"] for cam in camera_infos]

    if mock:
        import tests.cameras.mock_cv2 as cv2
        import tests.mock_cv2 as cv2
    else:
        import cv2

@@ -109,11 +94,13 @@
    cameras = []
    for cam_sn in serial_numbers:
        print(f"{cam_sn=}")
        config = RealSenseCameraConfig(serial_number=cam_sn, fps=fps, width=width, height=height, mock=mock)
        camera = RealSenseCamera(config)
        config = IntelRealSenseCameraConfig(
            serial_number=cam_sn, fps=fps, width=width, height=height, mock=mock
        )
        camera = IntelRealSenseCamera(config)
        camera.connect()
        print(
            f"RealSenseCamera({camera.serial_number}, fps={camera.fps}, width={camera.capture_width}, height={camera.capture_height}, color_mode={camera.color_mode})"
            f"IntelRealSenseCamera({camera.serial_number}, fps={camera.fps}, width={camera.width}, height={camera.height}, color_mode={camera.color_mode})"
        )
        cameras.append(camera)

@@ -165,11 +152,11 @@
            camera.disconnect()


class RealSenseCamera(Camera):
class IntelRealSenseCamera:
    """
    The RealSenseCamera class is similar to OpenCVCamera class but adds additional features for Intel Real Sense cameras:
    The IntelRealSenseCamera class is similar to OpenCVCamera class but adds additional features for Intel Real Sense cameras:
    - is instantiated with the serial number of the camera - won't randomly change as it can be the case of OpenCVCamera for Linux,
    - can also be instantiated with the camera's name — if it's unique — using RealSenseCamera.init_from_name(),
    - can also be instantiated with the camera's name — if it's unique — using IntelRealSenseCamera.init_from_name(),
    - depth map can be returned.

    To find the camera indices of your cameras, you can run our utility script that will save a few frames for each camera:

@@ -177,15 +164,15 @@ class RealSenseCamera(Camera):
    python lerobot/common/robot_devices/cameras/intelrealsense.py --images-dir outputs/images_from_intelrealsense_cameras
    ```

    When a RealSenseCamera is instantiated, if no specific config is provided, the default fps, width, height and color_mode
    When an IntelRealSenseCamera is instantiated, if no specific config is provided, the default fps, width, height and color_mode
    of the given camera will be used.

    Example of instantiating with a serial number:
    ```python
    from lerobot.common.robot_devices.cameras.configs import RealSenseCameraConfig
    from lerobot.common.robot_devices.cameras.configs import IntelRealSenseCameraConfig

    config = RealSenseCameraConfig(serial_number=128422271347)
    camera = RealSenseCamera(config)
    config = IntelRealSenseCameraConfig(serial_number=128422271347)
    camera = IntelRealSenseCamera(config)
    camera.connect()
    color_image = camera.read()
    # when done using the camera, consider disconnecting

@@ -194,21 +181,21 @@ class RealSenseCamera(Camera):

    Example of instantiating with a name if it's unique:
    ```
    config = RealSenseCameraConfig(name="Intel RealSense D405")
    config = IntelRealSenseCameraConfig(name="Intel RealSense D405")
    ```

    Example of changing default fps, width, height and color_mode:
    ```python
    config = RealSenseCameraConfig(serial_number=128422271347, fps=30, width=1280, height=720)
    config = RealSenseCameraConfig(serial_number=128422271347, fps=90, width=640, height=480)
    config = RealSenseCameraConfig(serial_number=128422271347, fps=90, width=640, height=480, color_mode="bgr")
    config = IntelRealSenseCameraConfig(serial_number=128422271347, fps=30, width=1280, height=720)
    config = IntelRealSenseCameraConfig(serial_number=128422271347, fps=90, width=640, height=480)
    config = IntelRealSenseCameraConfig(serial_number=128422271347, fps=90, width=640, height=480, color_mode="bgr")
    # Note: might error out upon `camera.connect()` if these settings are not compatible with the camera
    ```

    Example of returning depth:
    ```python
    config = RealSenseCameraConfig(serial_number=128422271347, use_depth=True)
    camera = RealSenseCamera(config)
    config = IntelRealSenseCameraConfig(serial_number=128422271347, use_depth=True)
    camera = IntelRealSenseCamera(config)
    camera.connect()
    color_image, depth_map = camera.read()
    ```

@@ -216,27 +203,16 @@ class RealSenseCamera(Camera):

    def __init__(
        self,
        config: RealSenseCameraConfig,
        config: IntelRealSenseCameraConfig,
    ):
        self.config = config
        if config.name is not None:
            self.serial_number = self.find_serial_number_from_name(config.name)
        else:
            self.serial_number = config.serial_number

        # Store the raw (capture) resolution from the config.
        self.capture_width = config.width
        self.capture_height = config.height

        # If rotated by ±90, swap width and height.
        if config.rotation in [-90, 90]:
            self.width = config.height
            self.height = config.width
        else:
            self.width = config.width
            self.height = config.height

        self.fps = config.fps
        self.width = config.width
        self.height = config.height
        self.channels = config.channels
        self.color_mode = config.color_mode
        self.use_depth = config.use_depth

@@ -252,10 +228,11 @@ class RealSenseCamera(Camera):
        self.logs = {}

        if self.mock:
            import tests.cameras.mock_cv2 as cv2
            import tests.mock_cv2 as cv2
        else:
            import cv2

        # TODO(alibets): Do we keep original width/height or do we define them after rotation?
        self.rotation = None
        if config.rotation == -90:
            self.rotation = cv2.ROTATE_90_COUNTERCLOCKWISE

@@ -281,29 +258,27 @@

    def connect(self):
        if self.is_connected:
            raise DeviceAlreadyConnectedError(f"RealSenseCamera({self.serial_number}) is already connected.")
            raise RobotDeviceAlreadyConnectedError(
                f"IntelRealSenseCamera({self.serial_number}) is already connected."
            )

        if self.mock:
            import tests.cameras.mock_pyrealsense2 as rs
            import tests.mock_pyrealsense2 as rs
        else:
            import pyrealsense2 as rs

        config = rs.config()
        config.enable_device(str(self.serial_number))

        if self.fps and self.capture_width and self.capture_height:
        if self.fps and self.width and self.height:
            # TODO(rcadene): can we set rgb8 directly?
            config.enable_stream(
                rs.stream.color, self.capture_width, self.capture_height, rs.format.rgb8, self.fps
            )
            config.enable_stream(rs.stream.color, self.width, self.height, rs.format.rgb8, self.fps)
        else:
            config.enable_stream(rs.stream.color)

        if self.use_depth:
            if self.fps and self.capture_width and self.capture_height:
                config.enable_stream(
                    rs.stream.depth, self.capture_width, self.capture_height, rs.format.z16, self.fps
                )
            if self.fps and self.width and self.height:
                config.enable_stream(rs.stream.depth, self.width, self.height, rs.format.z16, self.fps)
            else:
                config.enable_stream(rs.stream.depth)

@@ -327,7 +302,7 @@
                "To find the serial number you should use, run `python lerobot/common/robot_devices/cameras/intelrealsense.py`."
            )

            raise OSError(f"Can't access RealSenseCamera({self.serial_number}).")
            raise OSError(f"Can't access IntelRealSenseCamera({self.serial_number}).")

        color_stream = profile.get_stream(rs.stream.color)
        color_profile = color_stream.as_video_stream_profile()

@@ -339,20 +314,20 @@
        if self.fps is not None and not math.isclose(self.fps, actual_fps, rel_tol=1e-3):
            # Using `OSError` since it's a broad error that encompasses issues related to device communication
            raise OSError(
                f"Can't set {self.fps=} for RealSenseCamera({self.serial_number}). Actual value is {actual_fps}."
                f"Can't set {self.fps=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_fps}."
            )
        if self.capture_width is not None and self.capture_width != actual_width:
        if self.width is not None and self.width != actual_width:
            raise OSError(
                f"Can't set {self.capture_width=} for RealSenseCamera({self.serial_number}). Actual value is {actual_width}."
                f"Can't set {self.width=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_width}."
            )
        if self.capture_height is not None and self.capture_height != actual_height:
        if self.height is not None and self.height != actual_height:
            raise OSError(
                f"Can't set {self.capture_height=} for RealSenseCamera({self.serial_number}). Actual value is {actual_height}."
                f"Can't set {self.height=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_height}."
            )

        self.fps = round(actual_fps)
        self.capture_width = round(actual_width)
        self.capture_height = round(actual_height)
        self.width = round(actual_width)
        self.height = round(actual_height)

        self.is_connected = True

@@ -367,12 +342,12 @@
        If you are reading data from other sensors, we advise to use `camera.async_read()` which is non blocking version of `camera.read()`.
        """
        if not self.is_connected:
            raise DeviceNotConnectedError(
                f"RealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
            raise RobotDeviceNotConnectedError(
                f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
            )

        if self.mock:
            import tests.cameras.mock_cv2 as cv2
            import tests.mock_cv2 as cv2
        else:
            import cv2

@@ -383,7 +358,7 @@
        color_frame = frame.get_color_frame()

        if not color_frame:
            raise OSError(f"Can't capture color image from RealSenseCamera({self.serial_number}).")
            raise OSError(f"Can't capture color image from IntelRealSenseCamera({self.serial_number}).")

        color_image = np.asanyarray(color_frame.get_data())

@@ -398,7 +373,7 @@
            color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)

        h, w, _ = color_image.shape
        if h != self.capture_height or w != self.capture_width:
        if h != self.height or w != self.width:
            raise OSError(
                f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
            )

@@ -415,12 +390,12 @@
        if self.use_depth:
            depth_frame = frame.get_depth_frame()
            if not depth_frame:
                raise OSError(f"Can't capture depth image from RealSenseCamera({self.serial_number}).")
                raise OSError(f"Can't capture depth image from IntelRealSenseCamera({self.serial_number}).")

            depth_map = np.asanyarray(depth_frame.get_data())

            h, w = depth_map.shape
            if h != self.capture_height or w != self.capture_width:
            if h != self.height or w != self.width:
                raise OSError(
                    f"Can't capture depth map with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
                )

@@ -442,8 +417,8 @@
    def async_read(self):
        """Access the latest color image"""
        if not self.is_connected:
            raise DeviceNotConnectedError(
                f"RealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
            raise RobotDeviceNotConnectedError(
                f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
            )

        if self.thread is None:

@@ -469,8 +444,8 @@

    def disconnect(self):
        if not self.is_connected:
            raise DeviceNotConnectedError(
                f"RealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
            raise RobotDeviceNotConnectedError(
                f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
            )

        if self.thread is not None and self.thread.is_alive():

@@ -492,14 +467,14 @@

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Save a few frames using `RealSenseCamera` for all cameras connected to the computer, or a selected subset."
        description="Save a few frames using `IntelRealSenseCamera` for all cameras connected to the computer, or a selected subset."
    )
    parser.add_argument(
        "--serial-numbers",
        type=int,
        nargs="*",
        default=None,
        help="List of serial numbers used to instantiate the `RealSenseCamera`. If not provided, find and use all available camera indices.",
        help="List of serial numbers used to instantiate the `IntelRealSenseCamera`. If not provided, find and use all available camera indices.",
    )
    parser.add_argument(
        "--fps",

@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This file contains utilities for recording frames from cameras. For more info look at `OpenCVCamera` docstring.
"""

@@ -29,15 +15,14 @@ from threading import Thread
import numpy as np
from PIL import Image

from lerobot.common.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
from lerobot.common.utils.robot_utils import (
from lerobot.common.robot_devices.cameras.configs import OpenCVCameraConfig
from lerobot.common.robot_devices.utils import (
    RobotDeviceAlreadyConnectedError,
    RobotDeviceNotConnectedError,
    busy_wait,
)
from lerobot.common.utils.utils import capture_timestamp_utc

from ..camera import Camera
from .configuration_opencv import OpenCVCameraConfig

# The maximum opencv device index depends on your operating system. For instance,
# if you have 3 cameras, they should be associated to index 0, 1, and 2. This is the case
# on MacOS. However, on Ubuntu, the indices are different like 6, 16, 23.

@@ -81,7 +66,7 @@ def _find_cameras(
    possible_camera_ids: list[int | str], raise_when_empty=False, mock=False
) -> list[int | str]:
    if mock:
        import tests.cameras.mock_cv2 as cv2
        import tests.mock_cv2 as cv2
    else:
        import cv2

@@ -145,8 +130,8 @@ def save_images_from_cameras(
        camera = OpenCVCamera(config)
        camera.connect()
        print(
            f"OpenCVCamera({camera.camera_index}, fps={camera.fps}, width={camera.capture_width}, "
            f"height={camera.capture_height}, color_mode={camera.color_mode})"
            f"OpenCVCamera({camera.camera_index}, fps={camera.fps}, width={camera.width}, "
            f"height={camera.height}, color_mode={camera.color_mode})"
        )
        cameras.append(camera)

@@ -191,7 +176,7 @@ def save_images_from_cameras(
    print(f"Images have been saved to {images_dir}")


class OpenCVCamera(Camera):
class OpenCVCamera:
    """
    The OpenCVCamera class allows you to efficiently record images from cameras. It relies on opencv2 to communicate
    with the cameras. Most cameras are compatible. For more info, see the [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html).

@@ -245,19 +230,9 @@ class OpenCVCamera(Camera):
        else:
            raise ValueError(f"Please check the provided camera_index: {self.camera_index}")

        # Store the raw (capture) resolution from the config.
        self.capture_width = config.width
        self.capture_height = config.height

        # If rotated by ±90, swap width and height.
        if config.rotation in [-90, 90]:
            self.width = config.height
            self.height = config.width
        else:
            self.width = config.width
            self.height = config.height

        self.fps = config.fps
        self.width = config.width
        self.height = config.height
        self.channels = config.channels
        self.color_mode = config.color_mode
        self.mock = config.mock

@@ -270,10 +245,11 @@ class OpenCVCamera(Camera):
        self.logs = {}

        if self.mock:
            import tests.cameras.mock_cv2 as cv2
            import tests.mock_cv2 as cv2
        else:
            import cv2

        # TODO(aliberts): Do we keep original width/height or do we define them after rotation?
        self.rotation = None
        if config.rotation == -90:
            self.rotation = cv2.ROTATE_90_COUNTERCLOCKWISE

@@ -284,10 +260,10 @@ class OpenCVCamera(Camera):

    def connect(self):
        if self.is_connected:
            raise DeviceAlreadyConnectedError(f"OpenCVCamera({self.camera_index}) is already connected.")
            raise RobotDeviceAlreadyConnectedError(f"OpenCVCamera({self.camera_index}) is already connected.")

        if self.mock:
            import tests.cameras.mock_cv2 as cv2
            import tests.mock_cv2 as cv2
        else:
            import cv2

@@ -295,20 +271,10 @@ class OpenCVCamera(Camera):
        # when other threads are used to save the images.
        cv2.setNumThreads(1)

        backend = (
            cv2.CAP_V4L2
            if platform.system() == "Linux"
            else cv2.CAP_DSHOW
            if platform.system() == "Windows"
            else cv2.CAP_AVFOUNDATION
            if platform.system() == "Darwin"
            else cv2.CAP_ANY
        )

        camera_idx = f"/dev/video{self.camera_index}" if platform.system() == "Linux" else self.camera_index
        # First create a temporary camera trying to access `camera_index`,
        # and verify it is a valid camera by calling `isOpened`.
        tmp_camera = cv2.VideoCapture(camera_idx, backend)
        tmp_camera = cv2.VideoCapture(camera_idx)
        is_camera_open = tmp_camera.isOpened()
        # Release camera to make it accessible for `find_camera_indices`
        tmp_camera.release()

@@ -331,14 +297,14 @@ class OpenCVCamera(Camera):
        # Secondly, create the camera that will be used downstream.
        # Note: For some unknown reason, calling `isOpened` blocks the camera which then
        # needs to be re-created.
        self.camera = cv2.VideoCapture(camera_idx, backend)
        self.camera = cv2.VideoCapture(camera_idx)

        if self.fps is not None:
            self.camera.set(cv2.CAP_PROP_FPS, self.fps)
        if self.capture_width is not None:
            self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.capture_width)
        if self.capture_height is not None:
            self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.capture_height)
        if self.width is not None:
            self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        if self.height is not None:
            self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)

        actual_fps = self.camera.get(cv2.CAP_PROP_FPS)
        actual_width = self.camera.get(cv2.CAP_PROP_FRAME_WIDTH)

@@ -350,22 +316,19 @@ class OpenCVCamera(Camera):
            raise OSError(
                f"Can't set {self.fps=} for OpenCVCamera({self.camera_index}). Actual value is {actual_fps}."
            )
        if self.capture_width is not None and not math.isclose(
            self.capture_width, actual_width, rel_tol=1e-3
        ):
        if self.width is not None and not math.isclose(self.width, actual_width, rel_tol=1e-3):
            raise OSError(
                f"Can't set {self.capture_width=} for OpenCVCamera({self.camera_index}). Actual value is {actual_width}."
                f"Can't set {self.width=} for OpenCVCamera({self.camera_index}). Actual value is {actual_width}."
            )
        if self.capture_height is not None and not math.isclose(
            self.capture_height, actual_height, rel_tol=1e-3
        ):
        if self.height is not None and not math.isclose(self.height, actual_height, rel_tol=1e-3):
            raise OSError(
                f"Can't set {self.capture_height=} for OpenCVCamera({self.camera_index}). Actual value is {actual_height}."
                f"Can't set {self.height=} for OpenCVCamera({self.camera_index}). Actual value is {actual_height}."
            )

        self.fps = round(actual_fps)
        self.capture_width = round(actual_width)
        self.capture_height = round(actual_height)
        self.width = round(actual_width)
        self.height = round(actual_height)

        self.is_connected = True

    def read(self, temporary_color_mode: str | None = None) -> np.ndarray:

@@ -376,7 +339,7 @@ class OpenCVCamera(Camera):
        If you are reading data from other sensors, we advise to use `camera.async_read()` which is non blocking version of `camera.read()`.
        """
        if not self.is_connected:
            raise DeviceNotConnectedError(
            raise RobotDeviceNotConnectedError(
                f"OpenCVCamera({self.camera_index}) is not connected. Try running `camera.connect()` first."
            )

@@ -399,14 +362,14 @@ class OpenCVCamera(Camera):
        # so we convert the image color from BGR to RGB.
        if requested_color_mode == "rgb":
            if self.mock:
                import tests.cameras.mock_cv2 as cv2
                import tests.mock_cv2 as cv2
            else:
                import cv2

            color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)

        h, w, _ = color_image.shape
        if h != self.capture_height or w != self.capture_width:
        if h != self.height or w != self.width:
            raise OSError(
                f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
            )

@@ -433,7 +396,7 @@ class OpenCVCamera(Camera):

    def async_read(self):
        if not self.is_connected:
            raise DeviceNotConnectedError(
            raise RobotDeviceNotConnectedError(
                f"OpenCVCamera({self.camera_index}) is not connected. Try running `camera.connect()` first."
            )

@@ -455,7 +418,7 @@ class OpenCVCamera(Camera):

    def disconnect(self):
        if not self.is_connected:
            raise DeviceNotConnectedError(
            raise RobotDeviceNotConnectedError(
                f"OpenCVCamera({self.camera_index}) is not connected. Try running `camera.connect()` first."
            )

53
lerobot/common/robot_devices/cameras/utils.py
Normal file
@@ -0,0 +1,53 @@
from typing import Protocol

import numpy as np

from lerobot.common.robot_devices.cameras.configs import (
    CameraConfig,
    IntelRealSenseCameraConfig,
    OpenCVCameraConfig,
)


# Defines a camera type
class Camera(Protocol):
    def connect(self): ...
    def read(self, temporary_color: str | None = None) -> np.ndarray: ...
    def async_read(self) -> np.ndarray: ...
    def disconnect(self): ...


def make_cameras_from_configs(camera_configs: dict[str, CameraConfig]) -> dict[str, Camera]:
    cameras = {}

    for key, cfg in camera_configs.items():
        if cfg.type == "opencv":
            from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera

            cameras[key] = OpenCVCamera(cfg)

        elif cfg.type == "intelrealsense":
            from lerobot.common.robot_devices.cameras.intelrealsense import IntelRealSenseCamera

            cameras[key] = IntelRealSenseCamera(cfg)
        else:
            raise ValueError(f"The camera type '{cfg.type}' is not valid.")

    return cameras


def make_camera(camera_type, **kwargs) -> Camera:
    if camera_type == "opencv":
        from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera

        config = OpenCVCameraConfig(**kwargs)
        return OpenCVCamera(config)

    elif camera_type == "intelrealsense":
        from lerobot.common.robot_devices.cameras.intelrealsense import IntelRealSenseCamera

        config = IntelRealSenseCameraConfig(**kwargs)
        return IntelRealSenseCamera(config)

    else:
        raise ValueError(f"The camera type '{camera_type}' is not valid.")
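For orientation, the two factories in this new file are the module's entry points. A minimal, hedged usage sketch (the parameter values are illustrative; the keyword arguments mirror the OpenCVCameraConfig fields used elsewhere in this PR):

from lerobot.common.robot_devices.cameras.utils import make_camera

# make_camera wraps the kwargs in an OpenCVCameraConfig before instantiating the camera.
camera = make_camera("opencv", camera_index=0, fps=30, width=640, height=480)
camera.connect()
frame = camera.read()  # np.ndarray; BGR or RGB depending on the configured color mode
camera.disconnect()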
@@ -1,25 +1,14 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from dataclasses import dataclass
from pathlib import Path

import draccus

from lerobot.common.robots import RobotConfig
from lerobot.common.robot_devices.robots.configs import RobotConfig
from lerobot.common.utils.utils import auto_select_torch_device, is_amp_available, is_torch_device_available
from lerobot.configs import parser
from lerobot.configs.policies import PreTrainedConfig
from lerobot.configs.train import TrainPipelineConfig


@dataclass
@@ -54,6 +43,11 @@ class RecordControlConfig(ControlConfig):
    # Root directory where the dataset will be stored (e.g. 'dataset/path').
    root: str | Path | None = None
    policy: PreTrainedConfig | None = None
    # TODO(rcadene, aliberts): By default, use device and use_amp values from policy checkpoint.
    device: str | None = None  # cuda | cpu | mps
    # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP,
    # automatic gradient scaling is used.
    use_amp: bool | None = None
    # Limit the frames per second. By default, uses the policy fps.
    fps: int | None = None
    # Number of seconds before starting data collection. It allows the robot devices to warmup and synchronize.
@@ -72,7 +66,7 @@ class RecordControlConfig(ControlConfig):
    private: bool = False
    # Add tags to your dataset on the hub.
    tags: list[str] | None = None
    # Number of subprocesses handling the saving of frames as PNG. Set to 0 to use threads only;
    # Number of subprocesses handling the saving of frames as PNGs. Set to 0 to use threads only;
    # set to ≥1 to use subprocesses, each using threads to write images. The best number of processes
    # and threads depends on your system. We recommend 4 threads per camera with 0 processes.
    # If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses.
@@ -96,6 +90,27 @@ class RecordControlConfig(ControlConfig):
        self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides)
        self.policy.pretrained_path = policy_path

        # When no device or use_amp are given, use the one from training config.
        if self.device is None or self.use_amp is None:
            train_cfg = TrainPipelineConfig.from_pretrained(policy_path)
            if self.device is None:
                self.device = train_cfg.device
            if self.use_amp is None:
                self.use_amp = train_cfg.use_amp

        # Automatically switch to available device if necessary
        if not is_torch_device_available(self.device):
            auto_device = auto_select_torch_device()
            logging.warning(f"Device '{self.device}' is not available. Switching to '{auto_device}'.")
            self.device = auto_device

        # Automatically deactivate AMP if necessary
        if self.use_amp and not is_amp_available(self.device):
            logging.warning(
                f"Automatic Mixed Precision (amp) is not available on device '{self.device}'. Deactivating AMP."
            )
            self.use_amp = False

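The hunk above resolves `device` and `use_amp` in a fixed order: explicit value, then the value stored with the policy checkpoint's training config, then an automatic fallback. A condensed sketch of the same cascade; `resolve_device` is a hypothetical helper for illustration, not part of this PR:

import logging

from lerobot.common.utils.utils import auto_select_torch_device, is_torch_device_available


def resolve_device(requested: str | None, train_cfg_device: str) -> str:
    # Mirrors RecordControlConfig.__post_init__ above: prefer the explicit value,
    # fall back to the training config, then to whatever device is available.
    device = requested if requested is not None else train_cfg_device
    if not is_torch_device_available(device):
        device = str(auto_select_torch_device())
        logging.warning(f"Requested device is not available. Switching to '{device}'.")
    return device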

@ControlConfig.register_subclass("replay")
@dataclass
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

########################################################################################
# Utilities
########################################################################################
@@ -32,9 +18,8 @@ from termcolor import colored
from lerobot.common.datasets.image_writer import safe_stop_image_writer
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.datasets.utils import get_features_from_robot
from lerobot.common.policies.pretrained import PreTrainedPolicy
from lerobot.common.robots.utils import Robot
from lerobot.common.utils.robot_utils import busy_wait
from lerobot.common.robot_devices.robots.utils import Robot
from lerobot.common.robot_devices.utils import busy_wait
from lerobot.common.utils.utils import get_safe_torch_device, has_method


@@ -194,6 +179,8 @@ def record_episode(
    episode_time_s,
    display_cameras,
    policy,
    device,
    use_amp,
    fps,
    single_task,
):
@@ -204,6 +191,8 @@ def record_episode(
        dataset=dataset,
        events=events,
        policy=policy,
        device=device,
        use_amp=use_amp,
        fps=fps,
        teleoperate=policy is None,
        single_task=single_task,
@@ -218,7 +207,9 @@ def control_loop(
    display_cameras=False,
    dataset: LeRobotDataset | None = None,
    events=None,
    policy: PreTrainedPolicy = None,
    policy=None,
    device: torch.device | str | None = None,
    use_amp: bool | None = None,
    fps: int | None = None,
    single_task: str | None = None,
):
@@ -241,6 +232,9 @@ def control_loop(
    if dataset is not None and fps is not None and dataset.fps != fps:
        raise ValueError(f"The dataset fps should be equal to requested fps ({dataset.fps} != {fps}).")

    if isinstance(device, str):
        device = get_safe_torch_device(device)

    timestamp = 0
    start_episode_t = time.perf_counter()
    while timestamp < control_time_s:
@@ -252,9 +246,7 @@ def control_loop(
        observation = robot.capture_observation()

        if policy is not None:
            pred_action = predict_action(
                observation, policy, get_safe_torch_device(policy.config.device), policy.config.use_amp
            )
            pred_action = predict_action(observation, policy, device, use_amp)
            # Action can eventually be clipped using `max_relative_target`,
            # so action actually sent is saved in the dataset.
            action = robot.send_action(pred_action)
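`control_loop` paces each iteration with `busy_wait` so the robot is sampled at the requested fps. A hedged sketch of the timing pattern, with the import path taken from this file's head revision:

import time

from lerobot.common.robot_devices.utils import busy_wait

fps = 30
start_t = time.perf_counter()
# ... capture an observation, optionally predict and send an action ...
dt_s = time.perf_counter() - start_t
busy_wait(1 / fps - dt_s)  # burn the remainder of the 1/fps time budget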
27
lerobot/common/robot_devices/motors/configs.py
Normal file
@@ -0,0 +1,27 @@
import abc
from dataclasses import dataclass

import draccus


@dataclass
class MotorsBusConfig(draccus.ChoiceRegistry, abc.ABC):
    @property
    def type(self) -> str:
        return self.get_choice_name(self.__class__)


@MotorsBusConfig.register_subclass("dynamixel")
@dataclass
class DynamixelMotorsBusConfig(MotorsBusConfig):
    port: str
    motors: dict[str, tuple[int, str]]
    mock: bool = False


@MotorsBusConfig.register_subclass("feetech")
@dataclass
class FeetechMotorsBusConfig(MotorsBusConfig):
    port: str
    motors: dict[str, tuple[int, str]]
    mock: bool = False
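Because `MotorsBusConfig` is a `draccus.ChoiceRegistry`, each subclass is keyed by its registered name and exposes it through `.type`. A small sketch (the port and motor entry are copied from the robot configs later in this diff):

from lerobot.common.robot_devices.motors.configs import FeetechMotorsBusConfig

config = FeetechMotorsBusConfig(
    port="/dev/tty.usbmodem58760431091",
    motors={
        # name: (index, model)
        "gripper": (6, "sts3215"),
    },
)
assert config.type == "feetech"  # resolved via the ChoiceRegistry registration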
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import enum
import logging
import math
@@ -22,7 +8,8 @@ from copy import deepcopy
import numpy as np
import tqdm

from lerobot.common.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
from lerobot.common.robot_devices.motors.configs import DynamixelMotorsBusConfig
from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
from lerobot.common.utils.utils import capture_timestamp_utc

PROTOCOL_VERSION = 2.0
@@ -255,7 +242,7 @@ class DriveMode(enum.Enum):
class CalibrationMode(enum.Enum):
    # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180]
    DEGREE = 0
    # Joints with linear motions (like gripper of Aloha) are expressed in nominal range of [0, 100]
    # Joints with linear motions (like gripper of Aloha) are experessed in nominal range of [0, 100]
    LINEAR = 1


@@ -287,10 +274,11 @@ class DynamixelMotorsBus:
    motor_index = 6
    motor_model = "xl330-m288"

    motors_bus = DynamixelMotorsBus(
    config = DynamixelMotorsBusConfig(
        port="/dev/tty.usbmodem575E0031751",
        motors={motor_name: (motor_index, motor_model)},
    )
    motors_bus = DynamixelMotorsBus(config)
    motors_bus.connect()

    position = motors_bus.read("Present_Position")
@@ -306,13 +294,11 @@ class DynamixelMotorsBus:

    def __init__(
        self,
        port: str,
        motors: dict[str, tuple[int, str]],
        mock: bool = False,
        config: DynamixelMotorsBusConfig,
    ):
        self.port = port
        self.motors = motors
        self.mock = mock
        self.port = config.port
        self.motors = config.motors
        self.mock = config.mock

        self.model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE)
        self.model_resolution = deepcopy(MODEL_RESOLUTION)
@@ -327,12 +313,12 @@ class DynamixelMotorsBus:

    def connect(self):
        if self.is_connected:
            raise DeviceAlreadyConnectedError(
            raise RobotDeviceAlreadyConnectedError(
                f"DynamixelMotorsBus({self.port}) is already connected. Do not call `motors_bus.connect()` twice."
            )

        if self.mock:
            import tests.motors.mock_dynamixel_sdk as dxl
            import tests.mock_dynamixel_sdk as dxl
        else:
            import dynamixel_sdk as dxl

@@ -356,7 +342,7 @@ class DynamixelMotorsBus:

    def reconnect(self):
        if self.mock:
            import tests.motors.mock_dynamixel_sdk as dxl
            import tests.mock_dynamixel_sdk as dxl
        else:
            import dynamixel_sdk as dxl

@@ -624,7 +610,7 @@ class DynamixelMotorsBus:
                # 0-centered resolution range (e.g. [-2048, 2048] for resolution=4096)
                values[i] = values[i] / HALF_TURN_DEGREE * (resolution // 2)

                # Subtract the homing offsets to come back to actual motor range of values
                # Substract the homing offsets to come back to actual motor range of values
                # which can be arbitrary.
                values[i] -= homing_offset

@@ -646,7 +632,7 @@ class DynamixelMotorsBus:

    def read_with_motor_ids(self, motor_models, motor_ids, data_name, num_retry=NUM_READ_RETRY):
        if self.mock:
            import tests.motors.mock_dynamixel_sdk as dxl
            import tests.mock_dynamixel_sdk as dxl
        else:
            import dynamixel_sdk as dxl

@@ -684,14 +670,14 @@ class DynamixelMotorsBus:

    def read(self, data_name, motor_names: str | list[str] | None = None):
        if not self.is_connected:
            raise DeviceNotConnectedError(
            raise RobotDeviceNotConnectedError(
                f"DynamixelMotorsBus({self.port}) is not connected. You need to run `motors_bus.connect()`."
            )

        start_time = time.perf_counter()

        if self.mock:
            import tests.motors.mock_dynamixel_sdk as dxl
            import tests.mock_dynamixel_sdk as dxl
        else:
            import dynamixel_sdk as dxl

@@ -757,7 +743,7 @@ class DynamixelMotorsBus:

    def write_with_motor_ids(self, motor_models, motor_ids, data_name, values, num_retry=NUM_WRITE_RETRY):
        if self.mock:
            import tests.motors.mock_dynamixel_sdk as dxl
            import tests.mock_dynamixel_sdk as dxl
        else:
            import dynamixel_sdk as dxl

@@ -786,14 +772,14 @@ class DynamixelMotorsBus:

    def write(self, data_name, values: int | float | np.ndarray, motor_names: str | list[str] | None = None):
        if not self.is_connected:
            raise DeviceNotConnectedError(
            raise RobotDeviceNotConnectedError(
                f"DynamixelMotorsBus({self.port}) is not connected. You need to run `motors_bus.connect()`."
            )

        start_time = time.perf_counter()

        if self.mock:
            import tests.motors.mock_dynamixel_sdk as dxl
            import tests.mock_dynamixel_sdk as dxl
        else:
            import dynamixel_sdk as dxl

@@ -855,7 +841,7 @@ class DynamixelMotorsBus:

    def disconnect(self):
        if not self.is_connected:
            raise DeviceNotConnectedError(
            raise RobotDeviceNotConnectedError(
                f"DynamixelMotorsBus({self.port}) is not connected. Try running `motors_bus.connect()` first."
            )

@@ -871,25 +857,3 @@ class DynamixelMotorsBus:
    def __del__(self):
        if getattr(self, "is_connected", False):
            self.disconnect()


def set_operating_mode(arm: DynamixelMotorsBus):
    if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
        raise ValueError("To run set robot preset, the torque must be disabled on all motors.")

    # Use 'extended position mode' for all motors except gripper, because in joint mode the servos can't
    # rotate more than 360 degrees (from 0 to 4095) And some mistake can happen while assembling the arm,
    # you could end up with a servo with a position 0 or 4095 at a crucial point See [
    # https://emanual.robotis.com/docs/en/dxl/x/x_series/#operating-mode11]
    all_motors_except_gripper = [name for name in arm.motor_names if name != "gripper"]
    if len(all_motors_except_gripper) > 0:
        # 4 corresponds to Extended Position on Koch motors
        arm.write("Operating_Mode", 4, all_motors_except_gripper)

    # Use 'position control current based' for gripper to be limited by the limit of the current.
    # For the follower gripper, it means it can grasp an object without forcing too much even tho,
    # it's goal position is a complete grasp (both gripper fingers are ordered to join and reach a touch).
    # For the leader gripper, it means we can use it as a physical trigger, since we can force with our finger
    # to make it move, and it will move back to its original target position when we release the force.
    # 5 corresponds to Current Controlled Position on Koch gripper motors "xl330-m077, xl330-m288"
    arm.write("Operating_Mode", 5, "gripper")
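`set_operating_mode` requires torque to be disabled first, as the guard at its top enforces. A hedged calling sequence (the bus construction mirrors the class docstring above; `TorqueMode` is defined in this same file):

from lerobot.common.robot_devices.motors.configs import DynamixelMotorsBusConfig
from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus, TorqueMode, set_operating_mode

config = DynamixelMotorsBusConfig(
    port="/dev/tty.usbmodem575E0031751",
    motors={"gripper": (6, "xl330-m288")},
)
arm = DynamixelMotorsBus(config)
arm.connect()

arm.write("Torque_Enable", TorqueMode.DISABLED.value)  # modes can only change with torque off
set_operating_mode(arm)  # extended position for joints, current-based position for the gripper
arm.write("Torque_Enable", TorqueMode.ENABLED.value)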
@@ -1,18 +1,6 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import enum
import logging
import math
import time
import traceback
from copy import deepcopy
@@ -20,7 +8,8 @@ from copy import deepcopy
import numpy as np
import tqdm

from lerobot.common.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
from lerobot.common.robot_devices.motors.configs import FeetechMotorsBusConfig
from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
from lerobot.common.utils.utils import capture_timestamp_utc

PROTOCOL_VERSION = 0
@@ -29,6 +18,13 @@ TIMEOUT_MS = 1000

MAX_ID_RANGE = 252

# The following bounds define the lower and upper joints range (after calibration).
# For joints in degree (i.e. revolute joints), their nominal range is [-180, 180] degrees
# which corresponds to a half rotation on the left and half rotation on the right.
# Some joints might require higher range, so we allow up to [-270, 270] degrees until
# an error is raised.
LOWER_BOUND_DEGREE = -270
UPPER_BOUND_DEGREE = 270
# For joints in percentage (i.e. joints that move linearly like the prismatic joint of a gripper),
# their nominal range is [0, 100] %. For instance, for Aloha gripper, 0% is fully
# closed, and 100% is fully open. To account for slight calibration issue, we allow up to
@@ -38,6 +34,7 @@ UPPER_BOUND_LINEAR = 110

HALF_TURN_DEGREE = 180


# See this link for STS3215 Memory Table:
# https://docs.google.com/spreadsheets/d/1GVs7W1VS1PqdhA1nW-abeyAHhTUxKUdR/edit?usp=sharing&ouid=116566590112741600240&rtpof=true&sd=true
# data_name: (address, size_byte)
@@ -103,6 +100,8 @@ SCS_SERIES_BAUDRATE_TABLE = {
}

CALIBRATION_REQUIRED = ["Goal_Position", "Present_Position"]
CONVERT_UINT32_TO_INT32_REQUIRED = ["Goal_Position", "Present_Position"]


MODEL_CONTROL_TABLE = {
    "scs_series": SCS_SERIES_CONTROL_TABLE,
@@ -124,63 +123,15 @@ NUM_READ_RETRY = 20
NUM_WRITE_RETRY = 20


def convert_ticks_to_degrees(ticks, model):
    resolutions = MODEL_RESOLUTION[model]
    # Convert the ticks to degrees
    return ticks * (360.0 / resolutions)


def convert_degrees_to_ticks(degrees, model):
    resolutions = MODEL_RESOLUTION[model]
    # Convert degrees to motor ticks
    return int(degrees * (resolutions / 360.0))


def adjusted_to_homing_ticks(raw_motor_ticks: int, model: str, motorbus, motor_id: int) -> int:
def convert_degrees_to_steps(degrees: float | np.ndarray, models: str | list[str]) -> np.ndarray:
    """This function converts the degree range to the step range for indicating motors rotation.
    It assumes a motor achieves a full rotation by going from -180 degree position to +180.
    The motor resolution (e.g. 4096) corresponds to the number of steps needed to achieve a full rotation.
    """
    Takes a raw reading [0..(res-1)] (e.g. 0..4095) and shifts it so that '2048'
    becomes 0 in the homed coordinate system ([-2048..+2047] for 4096 resolution).
    """
    resolutions = MODEL_RESOLUTION[model]

    # Shift raw ticks by half-resolution so 2048 -> 0, then wrap [0..res-1].
    ticks = (raw_motor_ticks - (resolutions // 2)) % resolutions

    # If above halfway, fold it into negative territory => [-2048..+2047].
    if ticks > (resolutions // 2):
        ticks -= resolutions

    # Flip sign if drive_mode is set.
    drive_mode = 0
    if motorbus.calibration is not None:
        drive_mode = motorbus.calibration["drive_mode"][motor_id - 1]

    if drive_mode:
        ticks *= -1

    return ticks


def adjusted_to_motor_ticks(adjusted_pos: int, model: str, motorbus, motor_id: int) -> int:
    """
    Inverse of adjusted_to_homing_ticks(). Takes a 'homed' position in [-2048..+2047]
    and recovers the raw [0..(res-1)] ticks with 2048 as midpoint.
    """
    # Flip sign if drive_mode was set.
    drive_mode = 0
    if motorbus.calibration is not None:
        drive_mode = motorbus.calibration["drive_mode"][motor_id - 1]

    if drive_mode:
        adjusted_pos *= -1

    resolutions = MODEL_RESOLUTION[model]

    # Shift by +half-resolution and wrap into [0..res-1].
    # This undoes the earlier shift by -half-resolution.
    ticks = (adjusted_pos + (resolutions // 2)) % resolutions

    return ticks
    resolutions = [MODEL_RESOLUTION[model] for model in models]
    steps = degrees / 180 * np.array(resolutions) / 2
    steps = steps.astype(int)
    return steps


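A quick numeric check on `convert_degrees_to_steps`, assuming the sts3215's resolution of 4096 from MODEL_RESOLUTION: a quarter turn of 90 degrees maps to 90 / 180 * 4096 / 2 = 1024 steps.

from lerobot.common.robot_devices.motors.feetech import convert_degrees_to_steps

steps = convert_degrees_to_steps(90, ["sts3215"])
assert steps[0] == 1024  # 90 / 180 * 4096 / 2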
def convert_to_bytes(value, bytes, mock=False):
@@ -270,7 +221,7 @@ class DriveMode(enum.Enum):
class CalibrationMode(enum.Enum):
    # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180]
    DEGREE = 0
    # Joints with linear motions (like gripper of Aloha) are expressed in nominal range of [0, 100]
    # Joints with linear motions (like gripper of Aloha) are experessed in nominal range of [0, 100]
    LINEAR = 1


@@ -302,10 +253,11 @@ class FeetechMotorsBus:
    motor_index = 6
    motor_model = "sts3215"

    motors_bus = FeetechMotorsBus(
    config = FeetechMotorsBusConfig(
        port="/dev/tty.usbmodem575E0031751",
        motors={motor_name: (motor_index, motor_model)},
    )
    motors_bus = FeetechMotorsBus(config)
    motors_bus.connect()

    position = motors_bus.read("Present_Position")
@@ -321,13 +273,11 @@ class FeetechMotorsBus:

    def __init__(
        self,
        port: str,
        motors: dict[str, tuple[int, str]],
        mock: bool = False,
        config: FeetechMotorsBusConfig,
    ):
        self.port = port
        self.motors = motors
        self.mock = mock
        self.port = config.port
        self.motors = config.motors
        self.mock = config.mock

        self.model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE)
        self.model_resolution = deepcopy(MODEL_RESOLUTION)
@@ -340,14 +290,16 @@ class FeetechMotorsBus:
        self.group_writers = {}
        self.logs = {}

        self.track_positions = {}

    def connect(self):
        if self.is_connected:
            raise DeviceAlreadyConnectedError(
            raise RobotDeviceAlreadyConnectedError(
                f"FeetechMotorsBus({self.port}) is already connected. Do not call `motors_bus.connect()` twice."
            )

        if self.mock:
            import tests.motors.mock_scservo_sdk as scs
            import tests.mock_scservo_sdk as scs
        else:
            import scservo_sdk as scs

@@ -371,7 +323,7 @@ class FeetechMotorsBus:

    def reconnect(self):
        if self.mock:
            import tests.motors.mock_scservo_sdk as scs
            import tests.mock_scservo_sdk as scs
        else:
            import scservo_sdk as scs

@@ -436,7 +388,33 @@ class FeetechMotorsBus:
    def set_calibration(self, calibration: dict[str, list]):
        self.calibration = calibration

    def apply_calibration_autocorrect(self, values: np.ndarray | list, motor_names: list[str] | None):
        """This function applies the calibration, automatically detects out-of-range errors for motor values and attempts to correct them.

        For more info, see docstring of `apply_calibration` and `autocorrect_calibration`.
        """
        try:
            values = self.apply_calibration(values, motor_names)
        except JointOutOfRangeError as e:
            print(e)
            self.autocorrect_calibration(values, motor_names)
            values = self.apply_calibration(values, motor_names)
        return values

    def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
        a "zero position" at 0 degree.

        Note: We say "nominal degree range" since the motors can take values outside this range. For instance, 190 degrees, if the motor
        rotate more than a half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.

        Joints values are original in [0, 2**32[ (unsigned int32). Each motor are expected to complete a full rotation
        when given a goal position that is + or - their resolution. For instance, feetech xl330-m077 have a resolution of 4096, and
        at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
        or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between each motor.
        To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
        in the centered nominal degree range ]-180, 180[.
        """
        if motor_names is None:
            motor_names = self.motor_names

@@ -448,11 +426,34 @@ class FeetechMotorsBus:
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
                motor_idx, model = self.motors[name]
                drive_mode = self.calibration["drive_mode"][calib_idx]
                homing_offset = self.calibration["homing_offset"][calib_idx]
                _, model = self.motors[name]
                resolution = self.model_resolution[model]

                # Convert raw motor ticks to homed ticks, then convert the homed ticks to degrees
                values[i] = adjusted_to_homing_ticks(values[i], model, self, motor_idx)
                values[i] = convert_ticks_to_degrees(values[i], model)
                # Update direction of rotation of the motor to match between leader and follower.
                # In fact, the motor of the leader for a given joint can be assembled in an
                # opposite direction in term of rotation than the motor of the follower on the same joint.
                if drive_mode:
                    values[i] *= -1

                # Convert from range [-2**31, 2**31[ to
                # nominal range ]-resolution, resolution[ (e.g. ]-2048, 2048[)
                values[i] += homing_offset

                # Convert from range ]-resolution, resolution[ to
                # universal float32 centered degree range ]-180, 180[
                values[i] = values[i] / (resolution // 2) * HALF_TURN_DEGREE

                if (values[i] < LOWER_BOUND_DEGREE) or (values[i] > UPPER_BOUND_DEGREE):
                    raise JointOutOfRangeError(
                        f"Wrong motor position range detected for {name}. "
                        f"Expected to be in nominal range of [-{HALF_TURN_DEGREE}, {HALF_TURN_DEGREE}] degrees (a full rotation), "
                        f"with a maximum range of [{LOWER_BOUND_DEGREE}, {UPPER_BOUND_DEGREE}] degrees to account for joints that can rotate a bit more, "
                        f"but present value is {values[i]} degree. "
                        "This might be due to a cable connection issue creating an artificial 360 degrees jump in motor values. "
                        "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
                    )

            elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
@@ -474,6 +475,103 @@ class FeetechMotorsBus:

        return values

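Plugging numbers into the DEGREE branch above (resolution 4096, so `resolution // 2 == 2048`): a signed reading of 1024 ticks with a homing offset of -512 maps to (1024 - 512) / 2048 * 180 = 45 degrees, well inside the [-270, 270] safety bounds. A self-contained restatement of that arithmetic:

HALF_TURN_DEGREE = 180
resolution = 4096

raw_ticks, homing_offset = 1024, -512
degrees = (raw_ticks + homing_offset) / (resolution // 2) * HALF_TURN_DEGREE
assert degrees == 45.0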
    def autocorrect_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """This function automatically detects issues with values of motors after calibration, and corrects for these issues.

        Some motors might have values outside of expected maximum bounds after calibration.
        For instance, for a joint in degree, its value can be outside [-270, 270] degrees, which is totally unexpected given
        a nominal range of [-180, 180] degrees, which represents half a turn to the left or right starting from zero position.

        Known issues:
        #1: Motor value randomly shifts of a full turn, caused by hardware/connection errors.
        #2: Motor internal homing offset is shifted of a full turn, caused by using default calibration (e.g Aloha).
        #3: motor internal homing offset is shifted of less or more than a full turn, caused by using default calibration
            or by human error during manual calibration.

        Issues #1 and #2 can be solved by shifting the calibration homing offset by a full turn.
        Issue #3 will be visually detected by user and potentially captured by the safety feature `max_relative_target`,
        that will slow down the motor, raise an error asking to recalibrate. Manual recalibrating will solve the issue.

        Note: A full turn corresponds to 360 degrees but also to 4096 steps for a motor resolution of 4096.
        """
        if motor_names is None:
            motor_names = self.motor_names

        # Convert from unsigned int32 original range [0, 2**32] to signed float32 range
        values = values.astype(np.float32)

        for i, name in enumerate(motor_names):
            calib_idx = self.calibration["motor_names"].index(name)
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
                drive_mode = self.calibration["drive_mode"][calib_idx]
                homing_offset = self.calibration["homing_offset"][calib_idx]
                _, model = self.motors[name]
                resolution = self.model_resolution[model]

                if drive_mode:
                    values[i] *= -1

                # Convert from initial range to range [-180, 180] degrees
                calib_val = (values[i] + homing_offset) / (resolution // 2) * HALF_TURN_DEGREE
                in_range = (calib_val > LOWER_BOUND_DEGREE) and (calib_val < UPPER_BOUND_DEGREE)

                # Solve this inequality to find the factor to shift the range into [-180, 180] degrees
                # values[i] = (values[i] + homing_offset + resolution * factor) / (resolution // 2) * HALF_TURN_DEGREE
                # - HALF_TURN_DEGREE <= (values[i] + homing_offset + resolution * factor) / (resolution // 2) * HALF_TURN_DEGREE <= HALF_TURN_DEGREE
                # (- HALF_TURN_DEGREE / HALF_TURN_DEGREE * (resolution // 2) - values[i] - homing_offset) / resolution <= factor <= (HALF_TURN_DEGREE / HALF_TURN_DEGREE * (resolution // 2) - values[i] - homing_offset) / resolution
                low_factor = (
                    -HALF_TURN_DEGREE / HALF_TURN_DEGREE * (resolution // 2) - values[i] - homing_offset
                ) / resolution
                upp_factor = (
                    HALF_TURN_DEGREE / HALF_TURN_DEGREE * (resolution // 2) - values[i] - homing_offset
                ) / resolution

            elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
                end_pos = self.calibration["end_pos"][calib_idx]

                # Convert from initial range to range [0, 100] in %
                calib_val = (values[i] - start_pos) / (end_pos - start_pos) * 100
                in_range = (calib_val > LOWER_BOUND_LINEAR) and (calib_val < UPPER_BOUND_LINEAR)

                # Solve this inequality to find the factor to shift the range into [0, 100] %
                # values[i] = (values[i] - start_pos + resolution * factor) / (end_pos + resolution * factor - start_pos - resolution * factor) * 100
                # values[i] = (values[i] - start_pos + resolution * factor) / (end_pos - start_pos) * 100
                # 0 <= (values[i] - start_pos + resolution * factor) / (end_pos - start_pos) * 100 <= 100
                # (start_pos - values[i]) / resolution <= factor <= (end_pos - values[i]) / resolution
                low_factor = (start_pos - values[i]) / resolution
                upp_factor = (end_pos - values[i]) / resolution

            if not in_range:
                # Get first integer between the two bounds
                if low_factor < upp_factor:
                    factor = math.ceil(low_factor)

                    if factor > upp_factor:
                        raise ValueError(f"No integer found between bounds [{low_factor=}, {upp_factor=}]")
                else:
                    factor = math.ceil(upp_factor)

                    if factor > low_factor:
                        raise ValueError(f"No integer found between bounds [{low_factor=}, {upp_factor=}]")

                if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
                    out_of_range_str = f"{LOWER_BOUND_DEGREE} < {calib_val} < {UPPER_BOUND_DEGREE} degrees"
                    in_range_str = f"{LOWER_BOUND_DEGREE} < {calib_val} < {UPPER_BOUND_DEGREE} degrees"
                elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                    out_of_range_str = f"{LOWER_BOUND_LINEAR} < {calib_val} < {UPPER_BOUND_LINEAR} %"
                    in_range_str = f"{LOWER_BOUND_LINEAR} < {calib_val} < {UPPER_BOUND_LINEAR} %"

                logging.warning(
                    f"Auto-correct calibration of motor '{name}' by shifting value by {abs(factor)} full turns, "
                    f"from '{out_of_range_str}' to '{in_range_str}'."
                )

                # A full turn corresponds to 360 degrees but also to 4096 steps for a motor resolution of 4096.
                self.calibration["homing_offset"][calib_idx] += resolution * factor

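To make the factor search concrete: suppose a degree-mode joint lands at `calib_val = 410` degrees on a 4096-resolution motor, outside the [-270, 270] bounds. The integer factor found in the bracket is -1, and shifting the homing offset by one full turn subtracts 360 degrees, leaving 50 degrees. A hedged numeric check:

resolution, HALF_TURN_DEGREE = 4096, 180

calib_val = 410.0  # out of range (> 270)
factor = -1        # one full turn back
corrected = calib_val + factor * resolution / (resolution // 2) * HALF_TURN_DEGREE
assert corrected == 50.0  # back inside [-270, 270]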
    def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """Inverse of `apply_calibration`."""
        if motor_names is None:
@@ -484,11 +582,23 @@ class FeetechMotorsBus:
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
                motor_idx, model = self.motors[name]
                drive_mode = self.calibration["drive_mode"][calib_idx]
                homing_offset = self.calibration["homing_offset"][calib_idx]
                _, model = self.motors[name]
                resolution = self.model_resolution[model]

                # Convert degrees to homed ticks, then convert the homed ticks to raw ticks
                values[i] = convert_degrees_to_ticks(values[i], model)
                values[i] = adjusted_to_motor_ticks(values[i], model, self, motor_idx)
                # Convert from nominal 0-centered degree range [-180, 180] to
                # 0-centered resolution range (e.g. [-2048, 2048] for resolution=4096)
                values[i] = values[i] / HALF_TURN_DEGREE * (resolution // 2)

                # Subtract the homing offsets to come back to actual motor range of values
                # which can be arbitrary.
                values[i] -= homing_offset

                # Remove drive mode, which is the rotation direction of the motor, to come back to
                # actual motor rotation direction which can be arbitrary.
                if drive_mode:
                    values[i] *= -1

            elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
@@ -501,9 +611,46 @@ class FeetechMotorsBus:
        values = np.round(values).astype(np.int32)
        return values

    def avoid_rotation_reset(self, values, motor_names, data_name):
        if data_name not in self.track_positions:
            self.track_positions[data_name] = {
                "prev": [None] * len(self.motor_names),
                # Assume False at initialization
                "below_zero": [False] * len(self.motor_names),
                "above_max": [False] * len(self.motor_names),
            }

        track = self.track_positions[data_name]

        if motor_names is None:
            motor_names = self.motor_names

        for i, name in enumerate(motor_names):
            idx = self.motor_names.index(name)

            if track["prev"][idx] is None:
                track["prev"][idx] = values[i]
                continue

            # Detect that a full rotation occurred
            if abs(track["prev"][idx] - values[i]) > 2048:
                # Position went below 0 and got reset to 4095
                if track["prev"][idx] < values[i]:
                    # So we set a negative value by subtracting a full rotation
                    values[i] -= 4096

                # Position went above 4095 and got reset to 0
                elif track["prev"][idx] > values[i]:
                    # So we add a full rotation
                    values[i] += 4096

            track["prev"][idx] = values[i]

        return values

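A short trace of the wrap-around handling above: with a previous reading of 100 ticks and a new raw reading of 4000, the jump of 3900 exceeds the half-resolution threshold of 2048, so the position is treated as having crossed zero and one full rotation is subtracted:

prev, new = 100, 4000
if abs(prev - new) > 2048:  # 3900: a wrap, not a genuine jump
    if prev < new:          # went below 0 and reset to the top of the range
        new -= 4096
assert new == -96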
    def read_with_motor_ids(self, motor_models, motor_ids, data_name, num_retry=NUM_READ_RETRY):
        if self.mock:
            import tests.motors.mock_scservo_sdk as scs
            import tests.mock_scservo_sdk as scs
        else:
            import scservo_sdk as scs

@@ -541,12 +688,12 @@ class FeetechMotorsBus:

    def read(self, data_name, motor_names: str | list[str] | None = None):
        if self.mock:
            import tests.motors.mock_scservo_sdk as scs
            import tests.mock_scservo_sdk as scs
        else:
            import scservo_sdk as scs

        if not self.is_connected:
            raise DeviceNotConnectedError(
            raise RobotDeviceNotConnectedError(
                f"FeetechMotorsBus({self.port}) is not connected. You need to run `motors_bus.connect()`."
            )

@@ -574,7 +721,7 @@ class FeetechMotorsBus:
            self.port_handler.ser.reset_output_buffer()
            self.port_handler.ser.reset_input_buffer()

            # Create new group reader
            # create new group reader
            self.group_readers[group_key] = scs.GroupSyncRead(
                self.port_handler, self.packet_handler, addr, bytes
            )
@@ -599,8 +746,15 @@ class FeetechMotorsBus:

        values = np.array(values)

        # Convert to signed int to use range [-2048, 2048] for our motor positions.
        if data_name in CONVERT_UINT32_TO_INT32_REQUIRED:
            values = values.astype(np.int32)

        if data_name in CALIBRATION_REQUIRED:
            values = self.avoid_rotation_reset(values, motor_names, data_name)

        if data_name in CALIBRATION_REQUIRED and self.calibration is not None:
            values = self.apply_calibration(values, motor_names)
            values = self.apply_calibration_autocorrect(values, motor_names)

        # log the number of seconds it took to read the data from the motors
        delta_ts_name = get_log_name("delta_timestamp_s", "read", data_name, motor_names)
@@ -614,7 +768,7 @@ class FeetechMotorsBus:

    def write_with_motor_ids(self, motor_models, motor_ids, data_name, values, num_retry=NUM_WRITE_RETRY):
        if self.mock:
            import tests.motors.mock_scservo_sdk as scs
            import tests.mock_scservo_sdk as scs
        else:
            import scservo_sdk as scs

@@ -643,14 +797,14 @@ class FeetechMotorsBus:

    def write(self, data_name, values: int | float | np.ndarray, motor_names: str | list[str] | None = None):
        if not self.is_connected:
            raise DeviceNotConnectedError(
            raise RobotDeviceNotConnectedError(
                f"FeetechMotorsBus({self.port}) is not connected. You need to run `motors_bus.connect()`."
            )

        start_time = time.perf_counter()

        if self.mock:
            import tests.motors.mock_scservo_sdk as scs
            import tests.mock_scservo_sdk as scs
        else:
            import scservo_sdk as scs

@@ -712,7 +866,7 @@ class FeetechMotorsBus:

    def disconnect(self):
        if not self.is_connected:
            raise DeviceNotConnectedError(
            raise RobotDeviceNotConnectedError(
                f"FeetechMotorsBus({self.port}) is not connected. Try running `motors_bus.connect()` first."
            )

53
lerobot/common/robot_devices/motors/utils.py
Normal file
@@ -0,0 +1,53 @@
from typing import Protocol

from lerobot.common.robot_devices.motors.configs import (
    DynamixelMotorsBusConfig,
    FeetechMotorsBusConfig,
    MotorsBusConfig,
)


class MotorsBus(Protocol):
    def motor_names(self): ...
    def set_calibration(self): ...
    def apply_calibration(self): ...
    def revert_calibration(self): ...
    def read(self): ...
    def write(self): ...


def make_motors_buses_from_configs(motors_bus_configs: dict[str, MotorsBusConfig]) -> dict[str, MotorsBus]:
    motors_buses = {}

    for key, cfg in motors_bus_configs.items():
        if cfg.type == "dynamixel":
            from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus

            motors_buses[key] = DynamixelMotorsBus(cfg)

        elif cfg.type == "feetech":
            from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus

            motors_buses[key] = FeetechMotorsBus(cfg)

        else:
            raise ValueError(f"The motor type '{cfg.type}' is not valid.")

    return motors_buses


def make_motors_bus(motor_type: str, **kwargs) -> MotorsBus:
    if motor_type == "dynamixel":
        from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus

        config = DynamixelMotorsBusConfig(**kwargs)
        return DynamixelMotorsBus(config)

    elif motor_type == "feetech":
        from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus

        config = FeetechMotorsBusConfig(**kwargs)
        return FeetechMotorsBus(config)

    else:
        raise ValueError(f"The motor type '{motor_type}' is not valid.")
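The dict-of-configs form is how the robot configs below feed this factory. A hedged sketch:

from lerobot.common.robot_devices.motors.configs import FeetechMotorsBusConfig
from lerobot.common.robot_devices.motors.utils import make_motors_buses_from_configs

configs = {
    "main": FeetechMotorsBusConfig(
        port="/dev/tty.usbmodem58760431091",
        motors={"gripper": (6, "sts3215")},
    ),
}
buses = make_motors_buses_from_configs(configs)  # {"main": FeetechMotorsBus(...)}
buses["main"].connect()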
599
lerobot/common/robot_devices/robots/configs.py
Normal file
@@ -0,0 +1,599 @@
import abc
from dataclasses import dataclass, field
from typing import Sequence

import draccus

from lerobot.common.robot_devices.cameras.configs import (
    CameraConfig,
    IntelRealSenseCameraConfig,
    OpenCVCameraConfig,
)
from lerobot.common.robot_devices.motors.configs import (
    DynamixelMotorsBusConfig,
    FeetechMotorsBusConfig,
    MotorsBusConfig,
)


@dataclass
class RobotConfig(draccus.ChoiceRegistry, abc.ABC):
    @property
    def type(self) -> str:
        return self.get_choice_name(self.__class__)


# TODO(rcadene, aliberts): remove ManipulatorRobotConfig abstraction
@dataclass
class ManipulatorRobotConfig(RobotConfig):
    leader_arms: dict[str, MotorsBusConfig] = field(default_factory=lambda: {})
    follower_arms: dict[str, MotorsBusConfig] = field(default_factory=lambda: {})
    cameras: dict[str, CameraConfig] = field(default_factory=lambda: {})

    # Optionally limit the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length
    # as the number of motors in your follower arms (assumes all follower arms have the same number of
    # motors).
    max_relative_target: list[float] | float | None = None

    # Optionally set the leader arm in torque mode with the gripper motor set to this angle. This makes it
    # possible to squeeze the gripper and have it spring back to an open position on its own. If None, the
    # gripper is not put in torque mode.
    gripper_open_degree: float | None = None

    mock: bool = False

    def __post_init__(self):
        if self.mock:
            for arm in self.leader_arms.values():
                if not arm.mock:
                    arm.mock = True
            for arm in self.follower_arms.values():
                if not arm.mock:
                    arm.mock = True
            for cam in self.cameras.values():
                if not cam.mock:
                    cam.mock = True

        if self.max_relative_target is not None and isinstance(self.max_relative_target, Sequence):
            for name in self.follower_arms:
                if len(self.follower_arms[name].motors) != len(self.max_relative_target):
                    raise ValueError(
                        f"len(max_relative_target)={len(self.max_relative_target)} but the follower arm with name {name} has "
                        f"{len(self.follower_arms[name].motors)} motors. Please make sure that the "
                        f"`max_relative_target` list has as many parameters as there are motors per arm. "
                        "Note: This feature does not yet work with robots where different follower arms have "
                        "different numbers of motors."
                    )

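Note how `__post_init__` pushes `mock=True` down into every nested arm and camera config, so one flag flips a whole robot into test mode. A small illustration using a config class defined below in this same file:

from lerobot.common.robot_devices.robots.configs import KochRobotConfig

cfg = KochRobotConfig(mock=True)
# Propagated to the nested configs during __post_init__:
assert all(arm.mock for arm in cfg.leader_arms.values())
assert all(arm.mock for arm in cfg.follower_arms.values())
assert all(cam.mock for cam in cfg.cameras.values())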
@RobotConfig.register_subclass("aloha")
|
||||
@dataclass
|
||||
class AlohaRobotConfig(ManipulatorRobotConfig):
|
||||
# Specific to Aloha, LeRobot comes with default calibration files. Assuming the motors have been
|
||||
# properly assembled, no manual calibration step is expected. If you need to run manual calibration,
|
||||
# simply update this path to ".cache/calibration/aloha"
|
||||
calibration_dir: str = ".cache/calibration/aloha_default"
|
||||
|
||||
# /!\ FOR SAFETY, READ THIS /!\
|
||||
# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
|
||||
# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
|
||||
# the number of motors in your follower arms.
|
||||
# For Aloha, for every goal position request, motor rotations are capped at 5 degrees by default.
|
||||
# When you feel more confident with teleoperation or running the policy, you can extend
|
||||
# this safety limit and even removing it by setting it to `null`.
|
||||
# Also, everything is expected to work safely out-of-the-box, but we highly advise to
|
||||
# first try to teleoperate the grippers only (by commenting out the rest of the motors in this yaml),
|
||||
# then to gradually add more motors (by uncommenting), until you can teleoperate both arms fully
|
||||
max_relative_target: int | None = 5
|
||||
|
||||
leader_arms: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"left": DynamixelMotorsBusConfig(
|
||||
# window_x
|
||||
port="/dev/ttyDXL_leader_left",
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"waist": [1, "xm430-w350"],
|
||||
"shoulder": [2, "xm430-w350"],
|
||||
"shoulder_shadow": [3, "xm430-w350"],
|
||||
"elbow": [4, "xm430-w350"],
|
||||
"elbow_shadow": [5, "xm430-w350"],
|
||||
"forearm_roll": [6, "xm430-w350"],
|
||||
"wrist_angle": [7, "xm430-w350"],
|
||||
"wrist_rotate": [8, "xl430-w250"],
|
||||
"gripper": [9, "xc430-w150"],
|
||||
},
|
||||
),
|
||||
"right": DynamixelMotorsBusConfig(
|
||||
# window_x
|
||||
port="/dev/ttyDXL_leader_right",
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"waist": [1, "xm430-w350"],
|
||||
"shoulder": [2, "xm430-w350"],
|
||||
"shoulder_shadow": [3, "xm430-w350"],
|
||||
"elbow": [4, "xm430-w350"],
|
||||
"elbow_shadow": [5, "xm430-w350"],
|
||||
"forearm_roll": [6, "xm430-w350"],
|
||||
"wrist_angle": [7, "xm430-w350"],
|
||||
"wrist_rotate": [8, "xl430-w250"],
|
||||
"gripper": [9, "xc430-w150"],
|
||||
},
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
follower_arms: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"left": DynamixelMotorsBusConfig(
|
||||
port="/dev/ttyDXL_follower_left",
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"waist": [1, "xm540-w270"],
|
||||
"shoulder": [2, "xm540-w270"],
|
||||
"shoulder_shadow": [3, "xm540-w270"],
|
||||
"elbow": [4, "xm540-w270"],
|
||||
"elbow_shadow": [5, "xm540-w270"],
|
||||
"forearm_roll": [6, "xm540-w270"],
|
||||
"wrist_angle": [7, "xm540-w270"],
|
||||
"wrist_rotate": [8, "xm430-w350"],
|
||||
"gripper": [9, "xm430-w350"],
|
||||
},
|
||||
),
|
||||
"right": DynamixelMotorsBusConfig(
|
||||
port="/dev/ttyDXL_follower_right",
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"waist": [1, "xm540-w270"],
|
||||
"shoulder": [2, "xm540-w270"],
|
||||
"shoulder_shadow": [3, "xm540-w270"],
|
||||
"elbow": [4, "xm540-w270"],
|
||||
"elbow_shadow": [5, "xm540-w270"],
|
||||
"forearm_roll": [6, "xm540-w270"],
|
||||
"wrist_angle": [7, "xm540-w270"],
|
||||
"wrist_rotate": [8, "xm430-w350"],
|
||||
"gripper": [9, "xm430-w350"],
|
||||
},
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
# Troubleshooting: If one of your IntelRealSense cameras freeze during
|
||||
# data recording due to bandwidth limit, you might need to plug the camera
|
||||
# on another USB hub or PCIe card.
|
||||
cameras: dict[str, CameraConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"cam_high": IntelRealSenseCameraConfig(
|
||||
serial_number=128422271347,
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
),
|
||||
"cam_low": IntelRealSenseCameraConfig(
|
||||
serial_number=130322270656,
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
),
|
||||
"cam_left_wrist": IntelRealSenseCameraConfig(
|
||||
serial_number=218622272670,
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
),
|
||||
"cam_right_wrist": IntelRealSenseCameraConfig(
|
||||
serial_number=130322272300,
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
mock: bool = False
|
||||
|
||||
|
||||
@RobotConfig.register_subclass("koch")
|
||||
@dataclass
|
||||
class KochRobotConfig(ManipulatorRobotConfig):
|
||||
calibration_dir: str = ".cache/calibration/koch"
|
||||
# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
|
||||
# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
|
||||
# the number of motors in your follower arms.
|
||||
max_relative_target: int | None = None
|
||||
|
||||
leader_arms: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"main": DynamixelMotorsBusConfig(
|
||||
port="/dev/tty.usbmodem585A0085511",
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"shoulder_pan": [1, "xl330-m077"],
|
||||
"shoulder_lift": [2, "xl330-m077"],
|
||||
"elbow_flex": [3, "xl330-m077"],
|
||||
"wrist_flex": [4, "xl330-m077"],
|
||||
"wrist_roll": [5, "xl330-m077"],
|
||||
"gripper": [6, "xl330-m077"],
|
||||
},
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
follower_arms: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"main": DynamixelMotorsBusConfig(
|
||||
port="/dev/tty.usbmodem585A0076891",
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"shoulder_pan": [1, "xl430-w250"],
|
||||
"shoulder_lift": [2, "xl430-w250"],
|
||||
"elbow_flex": [3, "xl330-m288"],
|
||||
"wrist_flex": [4, "xl330-m288"],
|
||||
"wrist_roll": [5, "xl330-m288"],
|
||||
"gripper": [6, "xl330-m288"],
|
||||
},
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
cameras: dict[str, CameraConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"laptop": OpenCVCameraConfig(
|
||||
camera_index=0,
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
),
|
||||
"phone": OpenCVCameraConfig(
|
||||
camera_index=1,
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
# ~ Koch specific settings ~
|
||||
# Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible
|
||||
# to squeeze the gripper and have it spring back to an open position on its own.
|
||||
gripper_open_degree: float = 35.156
|
||||
|
||||
mock: bool = False


@RobotConfig.register_subclass("koch_bimanual")
@dataclass
class KochBimanualRobotConfig(ManipulatorRobotConfig):
    calibration_dir: str = ".cache/calibration/koch_bimanual"
    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
    # the number of motors in your follower arms.
    max_relative_target: int | None = None

    leader_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "left": DynamixelMotorsBusConfig(
                port="/dev/tty.usbmodem585A0085511",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "xl330-m077"],
                    "shoulder_lift": [2, "xl330-m077"],
                    "elbow_flex": [3, "xl330-m077"],
                    "wrist_flex": [4, "xl330-m077"],
                    "wrist_roll": [5, "xl330-m077"],
                    "gripper": [6, "xl330-m077"],
                },
            ),
            "right": DynamixelMotorsBusConfig(
                port="/dev/tty.usbmodem575E0031751",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "xl330-m077"],
                    "shoulder_lift": [2, "xl330-m077"],
                    "elbow_flex": [3, "xl330-m077"],
                    "wrist_flex": [4, "xl330-m077"],
                    "wrist_roll": [5, "xl330-m077"],
                    "gripper": [6, "xl330-m077"],
                },
            ),
        }
    )

    follower_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "left": DynamixelMotorsBusConfig(
                port="/dev/tty.usbmodem585A0076891",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "xl430-w250"],
                    "shoulder_lift": [2, "xl430-w250"],
                    "elbow_flex": [3, "xl330-m288"],
                    "wrist_flex": [4, "xl330-m288"],
                    "wrist_roll": [5, "xl330-m288"],
                    "gripper": [6, "xl330-m288"],
                },
            ),
            "right": DynamixelMotorsBusConfig(
                port="/dev/tty.usbmodem575E0032081",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "xl430-w250"],
                    "shoulder_lift": [2, "xl430-w250"],
                    "elbow_flex": [3, "xl330-m288"],
                    "wrist_flex": [4, "xl330-m288"],
                    "wrist_roll": [5, "xl330-m288"],
                    "gripper": [6, "xl330-m288"],
                },
            ),
        }
    )

    cameras: dict[str, CameraConfig] = field(
        default_factory=lambda: {
            "laptop": OpenCVCameraConfig(
                camera_index=0,
                fps=30,
                width=640,
                height=480,
            ),
            "phone": OpenCVCameraConfig(
                camera_index=1,
                fps=30,
                width=640,
                height=480,
            ),
        }
    )

    # ~ Koch specific settings ~
    # Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible
    # to squeeze the gripper and have it spring back to an open position on its own.
    gripper_open_degree: float = 35.156

    mock: bool = False


@RobotConfig.register_subclass("moss")
@dataclass
class MossRobotConfig(ManipulatorRobotConfig):
    calibration_dir: str = ".cache/calibration/moss"
    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
    # the number of motors in your follower arms.
    max_relative_target: int | None = None

    leader_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
                port="/dev/tty.usbmodem58760431091",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                },
            ),
        }
    )

    follower_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
                port="/dev/tty.usbmodem585A0076891",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                },
            ),
        }
    )

    cameras: dict[str, CameraConfig] = field(
        default_factory=lambda: {
            "laptop": OpenCVCameraConfig(
                camera_index=0,
                fps=30,
                width=640,
                height=480,
            ),
            "phone": OpenCVCameraConfig(
                camera_index=1,
                fps=30,
                width=640,
                height=480,
            ),
        }
    )

    mock: bool = False


@RobotConfig.register_subclass("so100")
@dataclass
class So100RobotConfig(ManipulatorRobotConfig):
    calibration_dir: str = ".cache/calibration/so100"
    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
    # the number of motors in your follower arms.
    max_relative_target: int | None = None

    leader_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
                port="/dev/tty.usbmodem58760431091",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                },
            ),
        }
    )

    follower_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
                port="/dev/tty.usbmodem585A0076891",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                },
            ),
        }
    )

    cameras: dict[str, CameraConfig] = field(
        default_factory=lambda: {
            "laptop": OpenCVCameraConfig(
                camera_index=0,
                fps=30,
                width=640,
                height=480,
            ),
            "phone": OpenCVCameraConfig(
                camera_index=1,
                fps=30,
                width=640,
                height=480,
            ),
        }
    )

    mock: bool = False


@RobotConfig.register_subclass("stretch")
@dataclass
class StretchRobotConfig(RobotConfig):
    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
    # the number of motors in your follower arms.
    max_relative_target: int | None = None

    cameras: dict[str, CameraConfig] = field(
        default_factory=lambda: {
            "navigation": OpenCVCameraConfig(
                camera_index="/dev/hello-nav-head-camera",
                fps=10,
                width=1280,
                height=720,
                rotation=-90,
            ),
            "head": IntelRealSenseCameraConfig(
                name="Intel RealSense D435I",
                fps=30,
                width=640,
                height=480,
                rotation=90,
            ),
            "wrist": IntelRealSenseCameraConfig(
                name="Intel RealSense D405",
                fps=30,
                width=640,
                height=480,
            ),
        }
    )

    mock: bool = False


@RobotConfig.register_subclass("lekiwi")
@dataclass
class LeKiwiRobotConfig(RobotConfig):
    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
    # the number of motors in your follower arms.
    max_relative_target: int | None = None

    # Network Configuration
    ip: str = "192.168.0.193"
    port: int = 5555
    video_port: int = 5556

    cameras: dict[str, CameraConfig] = field(
        default_factory=lambda: {
            "front": OpenCVCameraConfig(
                camera_index="/dev/video0", fps=30, width=640, height=480, rotation=90
            ),
            "wrist": OpenCVCameraConfig(
                camera_index="/dev/video2", fps=30, width=640, height=480, rotation=180
            ),
        }
    )

    calibration_dir: str = ".cache/calibration/lekiwi"

    leader_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
                port="/dev/tty.usbmodem585A0077581",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                },
            ),
        }
    )

    follower_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
                port="/dev/ttyACM0",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                    "left_wheel": (7, "sts3215"),
                    "back_wheel": (8, "sts3215"),
                    "right_wheel": (9, "sts3215"),
                },
            ),
        }
    )

    teleop_keys: dict[str, str] = field(
        default_factory=lambda: {
            # Movement
            "forward": "w",
            "backward": "s",
            "left": "a",
            "right": "d",
            "rotate_left": "z",
            "rotate_right": "x",
            # Speed control
            "speed_up": "r",
            "speed_down": "f",
            # quit teleop
            "quit": "q",
        }
    )

    mock: bool = False
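
# NOTE (illustrative sketch, not part of the commit): the mobile base is reached over
# the network, so `ip`, `port`, and `video_port` are the fields you would typically
# override. The address below is hypothetical:
#
#     config = LeKiwiRobotConfig(ip="192.168.1.42", port=5555, video_port=5556)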
@@ -1,28 +1,14 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Logic to calibrate a robot arm built with dynamixel motors"""
# TODO(rcadene, aliberts): move this logic into the robot code when refactoring

import numpy as np

from ..motors_bus import MotorsBus
from .dynamixel import (
from lerobot.common.robot_devices.motors.dynamixel import (
    CalibrationMode,
    TorqueMode,
    convert_degrees_to_steps,
)
from lerobot.common.robot_devices.motors.utils import MotorsBus

URL_TEMPLATE = (
    "https://raw.githubusercontent.com/huggingface/lerobot/main/media/{robot}/{arm}_{position}.webp"
@@ -101,7 +87,7 @@ def run_arm_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type
    # For instance, if the motor rotates 90 degrees, and its value is -90 after applying the homing offset, then we know its rotation direction
    # is inverted. However, for the calibration to be successful, we need everyone to follow the same target position.
    # Sometimes, there is only one possible rotation direction. For instance, if the gripper is closed, there is only one direction which
    # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarily rotate clockwise from the point of view
    # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarely rotate clockwise from the point of view
    # of the previous motor in the kinematic chain.
    print("\nMove arm to rotated target position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rotated"))
@@ -129,7 +115,7 @@ def run_arm_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type

    # TODO(rcadene): make type of joints (DEGREE or LINEAR) configurable from yaml?
    if robot_type in ["aloha"] and "gripper" in arm.motor_names:
        # Joints with linear motions (like gripper of Aloha) are expressed in nominal range of [0, 100]
        # Joints with linear motions (like gripper of Aloha) are experessed in nominal range of [0, 100]
        calib_idx = arm.motor_names.index("gripper")
        calib_mode[calib_idx] = CalibrationMode.LINEAR.name

484 lerobot/common/robot_devices/robots/feetech_calibration.py Normal file
@@ -0,0 +1,484 @@
"""Logic to calibrate a robot arm built with feetech motors"""
# TODO(rcadene, aliberts): move this logic into the robot code when refactoring

import time

import numpy as np

from lerobot.common.robot_devices.motors.feetech import (
    CalibrationMode,
    TorqueMode,
    convert_degrees_to_steps,
)
from lerobot.common.robot_devices.motors.utils import MotorsBus

URL_TEMPLATE = (
    "https://raw.githubusercontent.com/huggingface/lerobot/main/media/{robot}/{arm}_{position}.webp"
)

# The following positions are provided in nominal degree range ]-180, +180[
# For more info on these constants, see comments in the code where they get used.
ZERO_POSITION_DEGREE = 0
ROTATED_POSITION_DEGREE = 90


def assert_drive_mode(drive_mode):
    # `drive_mode` is in [0, 1], where 0 means the original rotation direction for the motor and 1 means inverted.
    if not np.all(np.isin(drive_mode, [0, 1])):
        raise ValueError(f"`drive_mode` contains values other than 0 or 1: ({drive_mode})")


def apply_drive_mode(position, drive_mode):
    assert_drive_mode(drive_mode)
    # Convert `drive_mode` from [0, 1], where 0 indicates the original rotation direction and 1 inverted,
    # to [-1, 1], where 1 indicates the original rotation direction and -1 inverted.
    signed_drive_mode = -(drive_mode * 2 - 1)
    position *= signed_drive_mode
    return position
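
# NOTE (illustrative values): quick numeric check of the conversion above:
# position = [100, -200] with drive_mode = [0, 1] gives signed_drive_mode = [1, -1],
# so apply_drive_mode returns [100, 200]: the inverted motor's reading is flipped.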


def move_until_block(arm, motor_name, positive_direction=True, while_move_hook=None):
    count = 0
    while True:
        present_pos = arm.read("Present_Position", motor_name)
        if positive_direction:
            # Move +100 steps every time. Lower the steps to lower the speed at which the arm moves.
            arm.write("Goal_Position", present_pos + 100, motor_name)
        else:
            arm.write("Goal_Position", present_pos - 100, motor_name)

        if while_move_hook is not None:
            while_move_hook()

        present_pos = arm.read("Present_Position", motor_name).item()
        present_speed = arm.read("Present_Speed", motor_name).item()
        present_current = arm.read("Present_Current", motor_name).item()
        # present_load = arm.read("Present_Load", motor_name).item()
        # present_voltage = arm.read("Present_Voltage", motor_name).item()
        # present_temperature = arm.read("Present_Temperature", motor_name).item()

        # print(f"{present_pos=}")
        # print(f"{present_speed=}")
        # print(f"{present_current=}")
        # print(f"{present_load=}")
        # print(f"{present_voltage=}")
        # print(f"{present_temperature=}")

        # Stall detection: the motor is considered blocked when it reports zero speed
        # while still drawing current, for many consecutive reads or on a hard current spike.
        if present_speed == 0 and present_current > 40:
            count += 1
            if count > 100 or present_current > 300:
                return present_pos
        else:
            count = 0


def move_to_calibrate(
    arm,
    motor_name,
    invert_drive_mode=False,
    positive_first=True,
    in_between_move_hook=None,
    while_move_hook=None,
):
    initial_pos = arm.read("Present_Position", motor_name)

    if positive_first:
        p_present_pos = move_until_block(
            arm, motor_name, positive_direction=True, while_move_hook=while_move_hook
        )
    else:
        n_present_pos = move_until_block(
            arm, motor_name, positive_direction=False, while_move_hook=while_move_hook
        )

    if in_between_move_hook is not None:
        in_between_move_hook()

    if positive_first:
        n_present_pos = move_until_block(
            arm, motor_name, positive_direction=False, while_move_hook=while_move_hook
        )
    else:
        p_present_pos = move_until_block(
            arm, motor_name, positive_direction=True, while_move_hook=while_move_hook
        )

    zero_pos = (n_present_pos + p_present_pos) / 2

    calib_data = {
        "initial_pos": initial_pos,
        "homing_offset": zero_pos if invert_drive_mode else -zero_pos,
        "invert_drive_mode": invert_drive_mode,
        "drive_mode": -1 if invert_drive_mode else 0,
        "zero_pos": zero_pos,
        "start_pos": n_present_pos if invert_drive_mode else p_present_pos,
        "end_pos": p_present_pos if invert_drive_mode else n_present_pos,
    }
    return calib_data
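
# NOTE (illustrative values): for one motor, if the end stops are found at
# p_present_pos=1600 and n_present_pos=-1200, then zero_pos = (1600 - 1200) / 2 = 200
# and, with invert_drive_mode=False: homing_offset=-200, drive_mode=0,
# start_pos=1600, end_pos=-1200.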


def apply_offset(calib, offset):
    calib["zero_pos"] += offset
    if calib["drive_mode"]:
        calib["homing_offset"] += offset
    else:
        calib["homing_offset"] -= offset
    return calib
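
# NOTE (illustrative values): with drive_mode=0, apply_offset(calib, 80) moves
# zero_pos from 200 to 280 and homing_offset from -200 to -280; with a non-zero
# drive_mode the homing offset shifts in the same direction as zero_pos instead.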


def run_arm_auto_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
    if robot_type == "so100":
        return run_arm_auto_calibration_so100(arm, robot_type, arm_name, arm_type)
    elif robot_type == "moss":
        return run_arm_auto_calibration_moss(arm, robot_type, arm_name, arm_type)
    else:
        raise ValueError(robot_type)


def run_arm_auto_calibration_so100(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
    """All the offsets and magic numbers are hand tuned, and are unique to SO-100 follower arms"""
    if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
        raise ValueError("To run calibration, the torque must be disabled on all motors.")

    if not (robot_type == "so100" and arm_type == "follower"):
        raise NotImplementedError("Auto calibration only supports the follower of so100 arms for now.")

    print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")

    print("\nMove arm to initial position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="initial"))
    input("Press Enter to continue...")

    # Lower the acceleration of the motors (in [0,254])
    initial_acceleration = arm.read("Acceleration")
    arm.write("Lock", 0)
    arm.write("Acceleration", 10)
    time.sleep(1)

    arm.write("Torque_Enable", TorqueMode.ENABLED.value)

    print(f'{arm.read("Present_Position", "elbow_flex")=}')

    calib = {}

    init_wf_pos = arm.read("Present_Position", "wrist_flex")
    init_sl_pos = arm.read("Present_Position", "shoulder_lift")
    init_ef_pos = arm.read("Present_Position", "elbow_flex")
    arm.write("Goal_Position", init_wf_pos - 800, "wrist_flex")
    arm.write("Goal_Position", init_sl_pos + 150 + 1024, "shoulder_lift")
    arm.write("Goal_Position", init_ef_pos - 2048, "elbow_flex")
    time.sleep(2)

    print("Calibrate shoulder_pan")
    calib["shoulder_pan"] = move_to_calibrate(arm, "shoulder_pan")
    arm.write("Goal_Position", calib["shoulder_pan"]["zero_pos"], "shoulder_pan")
    time.sleep(1)

    print("Calibrate gripper")
    calib["gripper"] = move_to_calibrate(arm, "gripper", invert_drive_mode=True)
    time.sleep(1)

    print("Calibrate wrist_flex")
    calib["wrist_flex"] = move_to_calibrate(arm, "wrist_flex")
    calib["wrist_flex"] = apply_offset(calib["wrist_flex"], offset=80)

    def in_between_move_hook():
        nonlocal arm, calib
        time.sleep(2)
        ef_pos = arm.read("Present_Position", "elbow_flex")
        sl_pos = arm.read("Present_Position", "shoulder_lift")
        arm.write("Goal_Position", ef_pos + 1024, "elbow_flex")
        arm.write("Goal_Position", sl_pos - 1024, "shoulder_lift")
        time.sleep(2)

    print("Calibrate elbow_flex")
    calib["elbow_flex"] = move_to_calibrate(
        arm, "elbow_flex", positive_first=False, in_between_move_hook=in_between_move_hook
    )
    calib["elbow_flex"] = apply_offset(calib["elbow_flex"], offset=80 - 1024)

    arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] + 1024 + 512, "elbow_flex")
    time.sleep(1)

    def in_between_move_hook():
        nonlocal arm, calib
        arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"], "elbow_flex")

    print("Calibrate shoulder_lift")
    calib["shoulder_lift"] = move_to_calibrate(
        arm,
        "shoulder_lift",
        invert_drive_mode=True,
        positive_first=False,
        in_between_move_hook=in_between_move_hook,
    )
    # Add an offset to align the arm with the body
    calib["shoulder_lift"] = apply_offset(calib["shoulder_lift"], offset=1024 - 50)

    def while_move_hook():
        nonlocal arm, calib
        positions = {
            "shoulder_lift": round(calib["shoulder_lift"]["zero_pos"] - 1600),
            "elbow_flex": round(calib["elbow_flex"]["zero_pos"] + 1700),
            "wrist_flex": round(calib["wrist_flex"]["zero_pos"] + 800),
            "gripper": round(calib["gripper"]["end_pos"]),
        }
        arm.write("Goal_Position", list(positions.values()), list(positions.keys()))

    arm.write("Goal_Position", round(calib["shoulder_lift"]["zero_pos"] - 1600), "shoulder_lift")
    time.sleep(2)
    arm.write("Goal_Position", round(calib["elbow_flex"]["zero_pos"] + 1700), "elbow_flex")
    time.sleep(2)
    arm.write("Goal_Position", round(calib["wrist_flex"]["zero_pos"] + 800), "wrist_flex")
    time.sleep(2)
    arm.write("Goal_Position", round(calib["gripper"]["end_pos"]), "gripper")
    time.sleep(2)

    print("Calibrate wrist_roll")
    calib["wrist_roll"] = move_to_calibrate(
        arm, "wrist_roll", invert_drive_mode=True, positive_first=False, while_move_hook=while_move_hook
    )

    arm.write("Goal_Position", calib["wrist_roll"]["zero_pos"], "wrist_roll")
    time.sleep(1)
    arm.write("Goal_Position", calib["gripper"]["start_pos"], "gripper")
    time.sleep(1)
    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"], "wrist_flex")
    time.sleep(1)
    arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] + 2048, "elbow_flex")
    arm.write("Goal_Position", calib["shoulder_lift"]["zero_pos"] - 2048, "shoulder_lift")
    time.sleep(1)
    arm.write("Goal_Position", calib["shoulder_pan"]["zero_pos"], "shoulder_pan")
    time.sleep(1)

    calib_modes = []
    for name in arm.motor_names:
        if name == "gripper":
            calib_modes.append(CalibrationMode.LINEAR.name)
        else:
            calib_modes.append(CalibrationMode.DEGREE.name)

    calib_dict = {
        "homing_offset": [calib[name]["homing_offset"] for name in arm.motor_names],
        "drive_mode": [calib[name]["drive_mode"] for name in arm.motor_names],
        "start_pos": [calib[name]["start_pos"] for name in arm.motor_names],
        "end_pos": [calib[name]["end_pos"] for name in arm.motor_names],
        "calib_mode": calib_modes,
        "motor_names": arm.motor_names,
    }

    # Re-enable the original acceleration
    arm.write("Lock", 0)
    arm.write("Acceleration", initial_acceleration)
    time.sleep(1)

    return calib_dict


def run_arm_auto_calibration_moss(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
    """All the offsets and magic numbers are hand tuned, and are unique to Moss follower arms"""
    if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
        raise ValueError("To run calibration, the torque must be disabled on all motors.")

    if not (robot_type == "moss" and arm_type == "follower"):
        raise NotImplementedError("Auto calibration only supports the follower of moss arms for now.")

    print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")

    print("\nMove arm to initial position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="initial"))
    input("Press Enter to continue...")

    # Lower the acceleration of the motors (in [0,254])
    initial_acceleration = arm.read("Acceleration")
    arm.write("Lock", 0)
    arm.write("Acceleration", 10)
    time.sleep(1)

    arm.write("Torque_Enable", TorqueMode.ENABLED.value)

    sl_pos = arm.read("Present_Position", "shoulder_lift")
    arm.write("Goal_Position", sl_pos - 1024 - 450, "shoulder_lift")
    ef_pos = arm.read("Present_Position", "elbow_flex")
    arm.write("Goal_Position", ef_pos + 1024 + 450, "elbow_flex")
    time.sleep(2)

    calib = {}

    print("Calibrate shoulder_pan")
    calib["shoulder_pan"] = move_to_calibrate(arm, "shoulder_pan")
    arm.write("Goal_Position", calib["shoulder_pan"]["zero_pos"], "shoulder_pan")
    time.sleep(1)

    print("Calibrate gripper")
    calib["gripper"] = move_to_calibrate(arm, "gripper", invert_drive_mode=True)
    time.sleep(1)

    print("Calibrate wrist_flex")
    calib["wrist_flex"] = move_to_calibrate(arm, "wrist_flex", invert_drive_mode=True)
    calib["wrist_flex"] = apply_offset(calib["wrist_flex"], offset=-210 + 1024)

    wr_pos = arm.read("Present_Position", "wrist_roll")
    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex")
    time.sleep(1)
    arm.write("Goal_Position", wr_pos - 1024, "wrist_roll")
    time.sleep(1)
    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 2048, "wrist_flex")
    time.sleep(1)
    arm.write("Goal_Position", calib["gripper"]["end_pos"], "gripper")
    time.sleep(1)

    print("Calibrate wrist_roll")
    calib["wrist_roll"] = move_to_calibrate(arm, "wrist_roll", invert_drive_mode=True)
    calib["wrist_roll"] = apply_offset(calib["wrist_roll"], offset=790)

    arm.write("Goal_Position", calib["wrist_roll"]["zero_pos"] - 1024, "wrist_roll")
    arm.write("Goal_Position", calib["gripper"]["start_pos"], "gripper")
    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex")
    time.sleep(1)
    arm.write("Goal_Position", calib["wrist_roll"]["zero_pos"], "wrist_roll")
    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 2048, "wrist_flex")

    def in_between_move_elbow_flex_hook():
        nonlocal arm, calib
        arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"], "wrist_flex")

    print("Calibrate elbow_flex")
    calib["elbow_flex"] = move_to_calibrate(
        arm,
        "elbow_flex",
        invert_drive_mode=True,
        in_between_move_hook=in_between_move_elbow_flex_hook,
    )
    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex")

    def in_between_move_shoulder_lift_hook():
        nonlocal arm, calib
        sl = arm.read("Present_Position", "shoulder_lift")
        arm.write("Goal_Position", sl - 1500, "shoulder_lift")
        time.sleep(1)
        arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] + 1536, "elbow_flex")
        time.sleep(1)
        arm.write("Goal_Position", calib["wrist_flex"]["start_pos"], "wrist_flex")
        time.sleep(1)

    print("Calibrate shoulder_lift")
    calib["shoulder_lift"] = move_to_calibrate(
        arm, "shoulder_lift", in_between_move_hook=in_between_move_shoulder_lift_hook
    )
    calib["shoulder_lift"] = apply_offset(calib["shoulder_lift"], offset=-1024)

    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex")
    time.sleep(1)
    arm.write("Goal_Position", calib["shoulder_lift"]["zero_pos"] + 2048, "shoulder_lift")
    arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] - 1024 - 400, "elbow_flex")
    time.sleep(2)

    calib_modes = []
    for name in arm.motor_names:
        if name == "gripper":
            calib_modes.append(CalibrationMode.LINEAR.name)
        else:
            calib_modes.append(CalibrationMode.DEGREE.name)

    calib_dict = {
        "homing_offset": [calib[name]["homing_offset"] for name in arm.motor_names],
        "drive_mode": [calib[name]["drive_mode"] for name in arm.motor_names],
        "start_pos": [calib[name]["start_pos"] for name in arm.motor_names],
        "end_pos": [calib[name]["end_pos"] for name in arm.motor_names],
        "calib_mode": calib_modes,
        "motor_names": arm.motor_names,
    }

    # Re-enable the original acceleration
    arm.write("Lock", 0)
    arm.write("Acceleration", initial_acceleration)
    time.sleep(1)

    return calib_dict


def run_arm_manual_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
    """This function ensures that a neural network trained on data collected on a given robot
    can work on another robot. For instance, before calibration, setting the same goal position
    for each motor of two different robots will result in two very different positions. But after calibration,
    the two robots will move to the same position. To this end, this function computes the homing offset
    and the drive mode for each motor of a given robot.

    Homing offset is used to shift the motor position to a ]-2048, +2048[ nominal range (when the motor uses 2048 steps
    to complete half a turn). This range is set around an arbitrary "zero position" corresponding to all motor positions
    being 0. During the calibration process, you will need to manually move the robot to this "zero position".

    Drive mode is used to invert the rotation direction of the motor. This is useful when some motors have been assembled
    in the opposite orientation for some robots. During the calibration process, you will need to manually move the robot
    to the "rotated position".

    After calibration, the homing offsets and drive modes are stored in a cache.

    Example of usage:
    ```python
    run_arm_manual_calibration(arm, "so100", "left", "follower")
    ```
    """
    if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
        raise ValueError("To run calibration, the torque must be disabled on all motors.")

    print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")

    print("\nMove arm to zero position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="zero"))
    input("Press Enter to continue...")

    # We arbitrarily chose our zero target position to be a straight horizontal position with gripper upwards and closed.
    # It is easy to identify and all motors are in a "quarter turn" position. Once calibration is done, this position will
    # correspond to every motor angle being 0. If you set all 0 as Goal Position, the arm will move to this position.
    zero_target_pos = convert_degrees_to_steps(ZERO_POSITION_DEGREE, arm.motor_models)

    # Compute homing offset so that `present_position + homing_offset ~= target_position`.
    zero_pos = arm.read("Present_Position")
    homing_offset = zero_target_pos - zero_pos

    # The rotated target position corresponds to a rotation of a quarter turn from the zero position.
    # This makes it possible to identify the rotation direction of each motor.
    # For instance, if the motor rotates 90 degrees, and its value is -90 after applying the homing offset, then we know its rotation direction
    # is inverted. However, for the calibration to be successful, we need everyone to follow the same target position.
    # Sometimes, there is only one possible rotation direction. For instance, if the gripper is closed, there is only one direction which
    # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarily rotate clockwise from the point of view
    # of the previous motor in the kinematic chain.
    print("\nMove arm to rotated target position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rotated"))
    input("Press Enter to continue...")

    rotated_target_pos = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, arm.motor_models)

    # Find drive mode by rotating each motor by a quarter of a turn.
    # Drive mode indicates if the motor rotation direction should be inverted (=1) or not (=0).
    rotated_pos = arm.read("Present_Position")
    drive_mode = (rotated_pos < zero_pos).astype(np.int32)

    # Re-compute homing offset to take into account drive mode
    rotated_drived_pos = apply_drive_mode(rotated_pos, drive_mode)
    homing_offset = rotated_target_pos - rotated_drived_pos

    print("\nMove arm to rest position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rest"))
    input("Press Enter to continue...")
    print()

    # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180]
    calib_modes = []
    for name in arm.motor_names:
        if name == "gripper":
            calib_modes.append(CalibrationMode.LINEAR.name)
        else:
            calib_modes.append(CalibrationMode.DEGREE.name)

    calib_dict = {
        "homing_offset": homing_offset.tolist(),
        "drive_mode": drive_mode.tolist(),
        "start_pos": zero_pos.tolist(),
        "end_pos": rotated_pos.tolist(),
        "calib_mode": calib_modes,
        "motor_names": arm.motor_names,
    }
    return calib_dict
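
To make the homing-offset arithmetic above concrete, here is a minimal standalone sketch (all numbers are illustrative, not readings from a real arm):

```python
import numpy as np

# Target for the "rotated" pose: +1024 steps (90° at 4096 steps per turn).
rotated_target_pos = np.array([1024, 1024])
zero_pos = np.array([30, -15])         # raw readings at the zero pose (hypothetical)
rotated_pos = np.array([1060, -1050])  # raw readings at the rotated pose (hypothetical)

# A motor whose reading decreased between the two poses is mounted inverted.
drive_mode = (rotated_pos < zero_pos).astype(np.int32)  # -> [0, 1]

# Flip inverted readings, then solve `present + offset ~= target`.
signed = -(drive_mode * 2 - 1)                             # -> [1, -1]
homing_offset = rotated_target_pos - rotated_pos * signed  # -> [-36, -26]
```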
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import base64
import json
import threading
@@ -21,7 +7,7 @@ from pathlib import Path
import cv2
import zmq

from lerobot.common.robots.mobile_manipulator import LeKiwi
from lerobot.common.robot_devices.robots.mobile_manipulator import LeKiwi


def setup_zmq_sockets(config):
@@ -61,7 +47,7 @@ def calibrate_follower_arm(motors_bus, calib_dir_str):
    calib_dir.mkdir(parents=True, exist_ok=True)
    calib_file = calib_dir / "main_follower.json"
    try:
        from lerobot.common.motors.feetech.feetech_calibration import run_full_arm_calibration
        from lerobot.common.robot_devices.robots.feetech_calibration import run_arm_manual_calibration
    except ImportError:
        print("[WARNING] Calibration function not available. Skipping calibration.")
        return
@@ -72,7 +58,7 @@ def calibrate_follower_arm(motors_bus, calib_dir_str):
        print(f"[INFO] Loaded calibration from {calib_file}")
    else:
        print("[INFO] Calibration file not found. Running manual calibration...")
        calibration = run_full_arm_calibration(motors_bus, "lekiwi", "follower_arm", "follower")
        calibration = run_arm_manual_calibration(motors_bus, "lekiwi", "follower_arm", "follower")
        print(f"[INFO] Calibration complete. Saving to {calib_file}")
        with open(calib_file, "w") as f:
            json.dump(calibration, f)
@@ -93,8 +79,8 @@ def run_lekiwi(robot_config):
    - Processes incoming commands (arm and wheel commands) and sends back sensor and camera data.
    """
    # Import helper functions and classes
    from lerobot.common.cameras.utils import make_cameras_from_configs
    from lerobot.common.motors.feetech.feetech import FeetechMotorsBus, TorqueMode
    from lerobot.common.robot_devices.cameras.utils import make_cameras_from_configs
    from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus, TorqueMode

    # Initialize cameras from the robot configuration.
    cameras = make_cameras_from_configs(robot_config.cameras)
@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains logic to instantiate a robot, read information from its motors and cameras,
and send orders to its motors.
"""
@@ -19,140 +5,46 @@ and send orders to its motors.
# calibration procedure, to make it easy for people to add their own robot.

import json
import logging
import time
import warnings
from dataclasses import dataclass, field
from pathlib import Path
from typing import Sequence

import numpy as np
import torch

from lerobot.common.cameras.configs import CameraConfig
from lerobot.common.cameras.utils import make_cameras_from_configs
from lerobot.common.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
from lerobot.common.motors.configs import MotorsBusConfig
from lerobot.common.motors.motors_bus import MotorsBus
from lerobot.common.motors.utils import make_motors_buses_from_configs
from lerobot.common.robots.config import RobotConfig
from lerobot.common.robots.utils import ensure_safe_goal_position, get_arm_id
from lerobot.common.robot_devices.cameras.utils import make_cameras_from_configs
from lerobot.common.robot_devices.motors.utils import MotorsBus, make_motors_buses_from_configs
from lerobot.common.robot_devices.robots.configs import ManipulatorRobotConfig
from lerobot.common.robot_devices.robots.utils import get_arm_id
from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError


@dataclass
class ManipulatorRobotConfig(RobotConfig):
    leader_arms: dict[str, MotorsBusConfig] = field(default_factory=lambda: {})
    follower_arms: dict[str, MotorsBusConfig] = field(default_factory=lambda: {})
    cameras: dict[str, CameraConfig] = field(default_factory=lambda: {})
def ensure_safe_goal_position(
    goal_pos: torch.Tensor, present_pos: torch.Tensor, max_relative_target: float | list[float]
):
    # Cap relative action target magnitude for safety.
    diff = goal_pos - present_pos
    max_relative_target = torch.tensor(max_relative_target)
    safe_diff = torch.minimum(diff, max_relative_target)
    safe_diff = torch.maximum(safe_diff, -max_relative_target)
    safe_goal_pos = present_pos + safe_diff

    # Optionally limit the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length
    # as the number of motors in your follower arms (assumes all follower arms have the same number of
    # motors).
    max_relative_target: list[float] | float | None = None

    # Optionally set the leader arm in torque mode with the gripper motor set to this angle. This makes it
    # possible to squeeze the gripper and have it spring back to an open position on its own. If None, the
    # gripper is not put in torque mode.
    gripper_open_degree: float | None = None

    mock: bool = False

    def __post_init__(self):
        if self.mock:
            for arm in self.leader_arms.values():
                if not arm.mock:
                    arm.mock = True
            for arm in self.follower_arms.values():
                if not arm.mock:
                    arm.mock = True
            for cam in self.cameras.values():
                if not cam.mock:
                    cam.mock = True

        if self.max_relative_target is not None and isinstance(self.max_relative_target, Sequence):
            for name in self.follower_arms:
                if len(self.follower_arms[name].motors) != len(self.max_relative_target):
                    raise ValueError(
                        f"len(max_relative_target)={len(self.max_relative_target)} but the follower arm with name {name} has "
                        f"{len(self.follower_arms[name].motors)} motors. Please make sure that the "
                        f"`max_relative_target` list has as many parameters as there are motors per arm. "
                        "Note: This feature does not yet work with robots where different follower arms have "
                        "different numbers of motors."
                    )


def apply_feetech_offsets_from_calibration(motorsbus, calibration_dict: dict):
    """
    Reads 'calibration_dict' containing 'homing_offset' and 'motor_names',
    then writes each motor's offset to the servo's internal Offset (0x1F) in EPROM.

    This version is modified so each homed position (originally 0) will now read
    2047, i.e. 180° away from 0 in the 4096-count circle. Offsets are permanently
    stored in EEPROM, so the servo's Present_Position is hardware-shifted even
    after power cycling.

    Steps:
      1) Subtract 2047 from the old offset (so 0 -> 2047).
      2) Clamp to [-2047..+2047].
      3) Encode sign bit and magnitude into a 12-bit number.
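
    Worked example (illustrative numbers, not from hardware):
      old_offset=500 -> new_offset=500-2047=-1547 -> direction_bit=1, magnitude=1547,
      servo_offset=(1 << 11) | 1547 = 3595, which is the value written to "Offset".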
    """

    homing_offsets = calibration_dict["homing_offset"]
    motor_names = calibration_dict["motor_names"]
    start_pos = calibration_dict["start_pos"]

    # Open the write lock; changes to EEPROM do NOT persist yet
    motorsbus.write("Lock", 1)

    # For each motor, set the 'Offset' parameter
    for m_name, old_offset in zip(motor_names, homing_offsets, strict=False):
        # If the bus doesn’t have a motor named m_name, skip
        if m_name not in motorsbus.motors:
            print(f"Warning: '{m_name}' not found in motorsbus.motors; skipping offset.")
            continue

        if m_name == "gripper":
            old_offset = start_pos  # If gripper, set the offset to the start position of the gripper
            continue

        # Shift the offset so the homed position reads 2047
        new_offset = old_offset - 2047

        # Clamp to [-2047..+2047]
        if new_offset > 2047:
            new_offset = 2047
            print(
                f"Warning: '{new_offset}' is getting clamped because it is larger than 2047; this should not happen!"
            )
        elif new_offset < -2047:
            new_offset = -2047
            print(
                f"Warning: '{new_offset}' is getting clamped because it is smaller than -2047; this should not happen!"
            )

        # Determine the direction (sign) bit and magnitude
        direction_bit = 1 if new_offset < 0 else 0
        magnitude = abs(new_offset)

        # Combine sign bit (bit 11) with the magnitude (bits 0..10)
        servo_offset = (direction_bit << 11) | magnitude

        # Write offset to servo
        motorsbus.write("Offset", servo_offset, motor_names=m_name)
        print(
            f"Set offset for {m_name}: "
            f"old_offset={old_offset}, new_offset={new_offset}, servo_encoded={magnitude} + direction={direction_bit}"
    if not torch.allclose(goal_pos, safe_goal_pos):
        logging.warning(
            "Relative goal position magnitude had to be clamped to be safe.\n"
            f"  requested relative goal position target: {diff}\n"
            f"    clamped relative goal position target: {safe_diff}"
        )

    motorsbus.write("Lock", 0)
    print("Offsets have been saved to EEPROM successfully.")
    return safe_goal_pos
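
# NOTE (illustrative values): clamping behavior of ensure_safe_goal_position above:
# present_pos=[0, 10], goal_pos=[50, 12], max_relative_target=20 -> diff=[50, 2] is
# clamped to safe_diff=[20, 2], so the returned safe goal is [20, 12] and a warning
# is logged for the clamped motor.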


class ManipulatorRobot:
    # TODO(rcadene): Implement force feedback
    """This class allows one to control any manipulator robot with various numbers of motors.

    Non exhaustive list of robots:
    Non exaustive list of robots:
    - [Koch v1.0](https://github.com/AlexanderKoch-Koch/low_cost_robot), with and without the wrist-to-elbow expansion, developed
    by Alexander Koch from [Tau Robotics](https://tau-robotics.com)
    - [Koch v1.1](https://github.com/jess-moss/koch-v1-1) developed by Jess Moss
@@ -163,7 +55,7 @@ class ManipulatorRobot:
    robot = ManipulatorRobot(KochRobotConfig())
    ```

    Example of overwriting motors during instantiation:
    Example of overwritting motors during instantiation:
    ```python
    # Defines how to communicate with the motors of the leader and follower arms
    leader_arms = {
@@ -198,7 +90,7 @@ class ManipulatorRobot:
    robot = ManipulatorRobot(robot_config)
    ```

    Example of overwriting cameras during instantiation:
    Example of overwritting cameras during instantiation:
    ```python
    # Defines how to communicate with 2 cameras connected to the computer.
    # Here, the webcam of the laptop and the phone (connected in USB to the laptop)
@@ -318,7 +210,7 @@ class ManipulatorRobot:

    def connect(self):
        if self.is_connected:
            raise DeviceAlreadyConnectedError(
            raise RobotDeviceAlreadyConnectedError(
                "ManipulatorRobot is already connected. Do not run `robot.connect()` twice."
            )

@@ -336,9 +228,9 @@ class ManipulatorRobot:
            self.leader_arms[name].connect()

        if self.robot_type in ["koch", "koch_bimanual", "aloha"]:
            from lerobot.common.motors.dynamixel.dynamixel import TorqueMode
            from lerobot.common.robot_devices.motors.dynamixel import TorqueMode
        elif self.robot_type in ["so100", "moss", "lekiwi"]:
            from lerobot.common.motors.feetech.feetech import TorqueMode
            from lerobot.common.robot_devices.motors.feetech import TorqueMode

        # We assume that at connection time, arms are in a rest position, and torque can
        # be safely disabled to run calibration and/or set robot preset configurations.
@@ -403,16 +295,16 @@ class ManipulatorRobot:
                print(f"Missing calibration file '{arm_calib_path}'")

                if self.robot_type in ["koch", "koch_bimanual", "aloha"]:
                    from lerobot.common.motors.dynamixel.dynamixel_calibration import run_arm_calibration
                    from lerobot.common.robot_devices.robots.dynamixel_calibration import run_arm_calibration

                    calibration = run_arm_calibration(arm, self.robot_type, name, arm_type)

                elif self.robot_type in ["so100", "moss", "lekiwi"]:
                    from lerobot.common.motors.feetech.feetech_calibration import (
                        run_full_arm_calibration,
                    from lerobot.common.robot_devices.robots.feetech_calibration import (
                        run_arm_manual_calibration,
                    )

                    calibration = run_full_arm_calibration(arm, self.robot_type, name, arm_type)
                    calibration = run_arm_manual_calibration(arm, self.robot_type, name, arm_type)

                print(f"Calibration is done! Saving calibration file '{arm_calib_path}'")
                arm_calib_path.parent.mkdir(parents=True, exist_ok=True)
@@ -421,28 +313,16 @@ class ManipulatorRobot:

            return calibration

        # For each follower arm

        for name, arm_bus in self.follower_arms.items():
            calibration = load_or_run_calibration_(name, arm_bus, "follower")
            arm_bus.set_calibration(calibration)

            # If this is a Feetech robot, also set the servo offset into EEPROM
            if self.robot_type in ["so100", "lekiwi"]:
                apply_feetech_offsets_from_calibration(arm_bus, calibration)

        # For each leader arm
        for name, arm_bus in self.leader_arms.items():
            calibration = load_or_run_calibration_(name, arm_bus, "leader")
            arm_bus.set_calibration(calibration)

            # Optionally also set offset for leader if you want the servo offsets as well
            if self.robot_type in ["so100", "lekiwi"]:
                apply_feetech_offsets_from_calibration(arm_bus, calibration)
        for name, arm in self.follower_arms.items():
            calibration = load_or_run_calibration_(name, arm, "follower")
            arm.set_calibration(calibration)
        for name, arm in self.leader_arms.items():
            calibration = load_or_run_calibration_(name, arm, "leader")
            arm.set_calibration(calibration)

    def set_koch_robot_preset(self):
        def set_operating_mode_(arm):
            from lerobot.common.motors.dynamixel.dynamixel import TorqueMode
            from lerobot.common.robot_devices.motors.dynamixel import TorqueMode

            if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
                raise ValueError("To run set robot preset, the torque must be disabled on all motors.")
@@ -468,7 +348,7 @@ class ManipulatorRobot:
            set_operating_mode_(self.follower_arms[name])

            # Set better PID values to close the gap between recorded states and actions
            # TODO(rcadene): Implement an automatic procedure to set optimal PID values for each motor
            # TODO(rcadene): Implement an automatic procedure to set optimial PID values for each motor
            self.follower_arms[name].write("Position_P_Gain", 1500, "elbow_flex")
            self.follower_arms[name].write("Position_I_Gain", 0, "elbow_flex")
            self.follower_arms[name].write("Position_D_Gain", 600, "elbow_flex")
@@ -552,7 +432,7 @@ class ManipulatorRobot:
        self, record_data=False
    ) -> None | tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]:
        if not self.is_connected:
            raise DeviceNotConnectedError(
            raise RobotDeviceNotConnectedError(
                "ManipulatorRobot is not connected. You need to run `robot.connect()`."
            )

@@ -620,7 +500,7 @@ class ManipulatorRobot:
            self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
            self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

        # Populate output dictionaries
        # Populate output dictionnaries
        obs_dict, action_dict = {}, {}
        obs_dict["observation.state"] = state
        action_dict["action"] = action
@@ -632,7 +512,7 @@ class ManipulatorRobot:
    def capture_observation(self):
        """The returned observations do not have a batch dimension."""
        if not self.is_connected:
            raise DeviceNotConnectedError(
            raise RobotDeviceNotConnectedError(
                "ManipulatorRobot is not connected. You need to run `robot.connect()`."
            )

@@ -660,7 +540,7 @@ class ManipulatorRobot:
            self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
            self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

        # Populate output dictionaries and format to pytorch
        # Populate output dictionnaries and format to pytorch
        obs_dict = {}
        obs_dict["observation.state"] = state
        for name in self.cameras:
@@ -678,7 +558,7 @@ class ManipulatorRobot:
        action: tensor containing the concatenated goal positions for the follower arms.
        """
        if not self.is_connected:
            raise DeviceNotConnectedError(
            raise RobotDeviceNotConnectedError(
                "ManipulatorRobot is not connected. You need to run `robot.connect()`."
            )

@@ -713,7 +593,7 @@ class ManipulatorRobot:

    def disconnect(self):
        if not self.is_connected:
            raise DeviceNotConnectedError(
            raise RobotDeviceNotConnectedError(
                "ManipulatorRobot is not connected. You need to run `robot.connect()` before disconnecting."
            )

@@ -1,17 +1,3 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import base64
import json
import os
@@ -23,14 +9,13 @@ import numpy as np
import torch
import zmq

from lerobot.common.cameras.utils import make_cameras_from_configs
from lerobot.common.errors import DeviceNotConnectedError
from lerobot.common.motors.feetech.feetech import TorqueMode
from lerobot.common.motors.feetech.feetech_calibration import run_full_arm_calibration
from lerobot.common.motors.motors_bus import MotorsBus
from lerobot.common.motors.utils import make_motors_buses_from_configs
from lerobot.common.robots.lekiwi.configuration_lekiwi import LeKiwiRobotConfig
from lerobot.common.robots.utils import get_arm_id
from lerobot.common.robot_devices.cameras.utils import make_cameras_from_configs
from lerobot.common.robot_devices.motors.feetech import TorqueMode
from lerobot.common.robot_devices.motors.utils import MotorsBus, make_motors_buses_from_configs
from lerobot.common.robot_devices.robots.configs import LeKiwiRobotConfig
from lerobot.common.robot_devices.robots.feetech_calibration import run_arm_manual_calibration
from lerobot.common.robot_devices.robots.utils import get_arm_id
from lerobot.common.robot_devices.utils import RobotDeviceNotConnectedError

PYNPUT_AVAILABLE = True
try:
@@ -267,7 +252,7 @@ class MobileManipulator:
            calibration = json.load(f)
        else:
            print(f"Missing calibration file '{arm_calib_path}'")
            calibration = run_full_arm_calibration(arm, self.robot_type, name, arm_type)
            calibration = run_arm_manual_calibration(arm, self.robot_type, name, arm_type)
            print(f"Calibration is done! Saving calibration file '{arm_calib_path}'")
            arm_calib_path.parent.mkdir(parents=True, exist_ok=True)
            with open(arm_calib_path, "w") as f:
@@ -396,7 +381,7 @@ class MobileManipulator:
        self, record_data: bool = False
    ) -> None | tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]:
        if not self.is_connected:
            raise DeviceNotConnectedError("MobileManipulator is not connected. Run `connect()` first.")
            raise RobotDeviceNotConnectedError("MobileManipulator is not connected. Run `connect()` first.")

        speed_setting = self.speed_levels[self.speed_index]
        xy_speed = speed_setting["xy"]  # e.g. 0.1, 0.25, or 0.4
@@ -407,19 +392,21 @@ class MobileManipulator:
        for name in self.leader_arms:
            pos = self.leader_arms[name].read("Present_Position")
            pos_tensor = torch.from_numpy(pos).float()
            # Instead of pos_tensor.item(), use tolist() to convert the entire tensor to a list
            arm_positions.extend(pos_tensor.tolist())

        y_cmd = 0.0  # m/s forward/backward
        x_cmd = 0.0  # m/s lateral
        # (The rest of your code for generating wheel commands remains unchanged)
        x_cmd = 0.0  # m/s forward/backward
        y_cmd = 0.0  # m/s lateral
        theta_cmd = 0.0  # deg/s rotation
        if self.pressed_keys["forward"]:
            y_cmd += xy_speed
        if self.pressed_keys["backward"]:
            y_cmd -= xy_speed
        if self.pressed_keys["left"]:
            x_cmd += xy_speed
        if self.pressed_keys["right"]:
        if self.pressed_keys["backward"]:
            x_cmd -= xy_speed
        if self.pressed_keys["left"]:
            y_cmd += xy_speed
        if self.pressed_keys["right"]:
            y_cmd -= xy_speed
        if self.pressed_keys["rotate_left"]:
            theta_cmd += theta_speed
        if self.pressed_keys["rotate_right"]:
@@ -456,7 +443,7 @@ class MobileManipulator:
        and a camera frame.
        """
        if not self.is_connected:
            raise DeviceNotConnectedError("Not connected. Run `connect()` first.")
            raise RobotDeviceNotConnectedError("Not connected. Run `connect()` first.")

        frames, present_speed, remote_arm_state_tensor = self._get_data()

@@ -480,7 +467,7 @@ class MobileManipulator:

    def send_action(self, action: torch.Tensor) -> torch.Tensor:
        if not self.is_connected:
            raise DeviceNotConnectedError("Not connected. Run `connect()` first.")
            raise RobotDeviceNotConnectedError("Not connected. Run `connect()` first.")

        # Ensure the action tensor has at least 9 elements:
        # - First 6: arm positions.
@@ -518,7 +505,7 @@ class MobileManipulator:

    def disconnect(self):
        if not self.is_connected:
            raise DeviceNotConnectedError("Not connected.")
            raise RobotDeviceNotConnectedError("Not connected.")
        if self.cmd_socket:
            stop_cmd = {
                "raw_velocity": {"left_wheel": 0, "back_wheel": 0, "right_wheel": 0},
@@ -597,8 +584,8 @@ class MobileManipulator:
        # Create the body velocity vector [x, y, theta_rad].
        velocity_vector = np.array([x_cmd, y_cmd, theta_rad])

        # Define the wheel mounting angles (defined from y axis cw)
        angles = np.radians(np.array([300, 180, 60]))
        # Define the wheel mounting angles with a -90° offset.
        angles = np.radians(np.array([240, 120, 0]) - 90)
        # Build the kinematic matrix: each row maps body velocities to a wheel’s linear speed.
        # The third column (base_radius) accounts for the effect of rotation.
        m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])
@@ -654,8 +641,8 @@ class MobileManipulator:
        # Compute each wheel’s linear speed (m/s) from its angular speed.
        wheel_linear_speeds = wheel_radps * wheel_radius

        # Define the wheel mounting angles (defined from y axis cw)
        angles = np.radians(np.array([300, 180, 60]))
        # Define the wheel mounting angles with a -90° offset.
        angles = np.radians(np.array([240, 120, 0]) - 90)
        m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])

        # Solve the inverse kinematics: body_velocity = M⁻¹ · wheel_linear_speeds.
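Note on the two kinematics hunks above: both variants build the same 3×3 matrix m that projects a body velocity [x, y, theta] onto each wheel's drive direction, and invert it to recover body velocity from wheel speeds. A minimal standalone sketch of that round-trip follows; the wheel_radius and base_radius values are illustrative assumptions, not taken from this diff.

import numpy as np

# Illustrative values; the real wheel_radius and base_radius are defined
# elsewhere in mobile_manipulator.py (these numbers are assumptions).
wheel_radius = 0.05  # m
base_radius = 0.125  # m

# Wheel mounting angles, as in the added lines of the diff above.
angles = np.radians(np.array([240, 120, 0]) - 90)

# Each row maps body velocity [x (m/s), y (m/s), theta (rad/s)] to one
# wheel's linear speed along its drive direction; the third column
# (base_radius) accounts for rotation about the base center.
m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])

body_velocity = np.array([0.2, 0.0, 0.5])  # drive forward while turning
wheel_linear_speeds = m @ body_velocity  # m/s per wheel
wheel_radps = wheel_linear_speeds / wheel_radius  # rad/s per wheel

# Inverse kinematics: recover the body velocity from wheel speeds.
recovered = np.linalg.inv(m) @ wheel_linear_speeds
assert np.allclose(recovered, body_velocity)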
208 lerobot/common/robot_devices/robots/stretch.py Normal file
@@ -0,0 +1,208 @@
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
from dataclasses import replace

import torch
from stretch_body.gamepad_teleop import GamePadTeleop
from stretch_body.robot import Robot as StretchAPI
from stretch_body.robot_params import RobotParams

from lerobot.common.robot_devices.robots.configs import StretchRobotConfig


class StretchRobot(StretchAPI):
    """Wrapper of stretch_body.robot.Robot"""

    def __init__(self, config: StretchRobotConfig | None = None, **kwargs):
        super().__init__()
        if config is None:
            self.config = StretchRobotConfig(**kwargs)
        else:
            # Overwrite config arguments using kwargs
            self.config = replace(config, **kwargs)

        self.robot_type = self.config.type
        self.cameras = self.config.cameras
        self.is_connected = False
        self.teleop = None
        self.logs = {}

        # TODO(aliberts): test this
        RobotParams.set_logging_level("WARNING")
        RobotParams.set_logging_formatter("brief_console_formatter")

        self.state_keys = None
        self.action_keys = None

    def connect(self) -> None:
        self.is_connected = self.startup()
        if not self.is_connected:
            print("Another process is already using Stretch. Try running 'stretch_free_robot_process.py'")
            raise ConnectionError()

        for name in self.cameras:
            self.cameras[name].connect()
            self.is_connected = self.is_connected and self.cameras[name].is_connected

        if not self.is_connected:
            print("Could not connect to the cameras, check that all cameras are plugged in.")
            raise ConnectionError()

        self.run_calibration()

    def run_calibration(self) -> None:
        if not self.is_homed():
            self.home()

    def teleop_step(
        self, record_data=False
    ) -> None | tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]:
        # TODO(aliberts): return ndarrays instead of torch.Tensors
        if not self.is_connected:
            raise ConnectionError()

        if self.teleop is None:
            self.teleop = GamePadTeleop(robot_instance=False)
            self.teleop.startup(robot=self)

        before_read_t = time.perf_counter()
        state = self.get_state()
        action = self.teleop.gamepad_controller.get_state()
        self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t

        before_write_t = time.perf_counter()
        self.teleop.do_motion(robot=self)
        self.push_command()
        self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t

        if self.state_keys is None:
            self.state_keys = list(state)

        if not record_data:
            return

        state = torch.as_tensor(list(state.values()))
        action = torch.as_tensor(list(action.values()))

        # Capture images from cameras
        images = {}
        for name in self.cameras:
            before_camread_t = time.perf_counter()
            images[name] = self.cameras[name].async_read()
            images[name] = torch.from_numpy(images[name])
            self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
            self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

        # Populate output dictionaries
        obs_dict, action_dict = {}, {}
        obs_dict["observation.state"] = state
        action_dict["action"] = action
        for name in self.cameras:
            obs_dict[f"observation.images.{name}"] = images[name]

        return obs_dict, action_dict

    def get_state(self) -> dict:
        status = self.get_status()
        return {
            "head_pan.pos": status["head"]["head_pan"]["pos"],
            "head_tilt.pos": status["head"]["head_tilt"]["pos"],
            "lift.pos": status["lift"]["pos"],
            "arm.pos": status["arm"]["pos"],
            "wrist_pitch.pos": status["end_of_arm"]["wrist_pitch"]["pos"],
            "wrist_roll.pos": status["end_of_arm"]["wrist_roll"]["pos"],
            "wrist_yaw.pos": status["end_of_arm"]["wrist_yaw"]["pos"],
            "gripper.pos": status["end_of_arm"]["stretch_gripper"]["pos"],
            "base_x.vel": status["base"]["x_vel"],
            "base_y.vel": status["base"]["y_vel"],
            "base_theta.vel": status["base"]["theta_vel"],
        }

    def capture_observation(self) -> dict:
        # TODO(aliberts): return ndarrays instead of torch.Tensors
        before_read_t = time.perf_counter()
        state = self.get_state()
        self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t

        if self.state_keys is None:
            self.state_keys = list(state)

        state = torch.as_tensor(list(state.values()))

        # Capture images from cameras
        images = {}
        for name in self.cameras:
            before_camread_t = time.perf_counter()
            images[name] = self.cameras[name].async_read()
            images[name] = torch.from_numpy(images[name])
            self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
            self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

        # Populate output dictionaries
        obs_dict = {}
        obs_dict["observation.state"] = state
        for name in self.cameras:
            obs_dict[f"observation.images.{name}"] = images[name]

        return obs_dict

    def send_action(self, action: torch.Tensor) -> torch.Tensor:
        # TODO(aliberts): return ndarrays instead of torch.Tensors
        if not self.is_connected:
            raise ConnectionError()

        if self.teleop is None:
            self.teleop = GamePadTeleop(robot_instance=False)
            self.teleop.startup(robot=self)

        if self.action_keys is None:
            dummy_action = self.teleop.gamepad_controller.get_state()
            self.action_keys = list(dummy_action.keys())

        action_dict = dict(zip(self.action_keys, action.tolist(), strict=True))

        before_write_t = time.perf_counter()
        self.teleop.do_motion(state=action_dict, robot=self)
        self.push_command()
        self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t

        # TODO(aliberts): return action_sent when motion is limited
        return action

    def print_logs(self) -> None:
        pass
        # TODO(aliberts): move robot-specific logs logic here

    def teleop_safety_stop(self) -> None:
        if self.teleop is not None:
            self.teleop._safety_stop(robot=self)

    def disconnect(self) -> None:
        self.stop()
        if self.teleop is not None:
            self.teleop.gamepad_controller.stop()
            self.teleop.stop()

        if len(self.cameras) > 0:
            for cam in self.cameras.values():
                cam.disconnect()

        self.is_connected = False

    def __del__(self):
        self.disconnect()
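A minimal sketch of driving the StretchRobot wrapper above in a teleop loop; it assumes real Stretch hardware with a gamepad and connected cameras, and the 30 Hz rate and step count are illustrative choices, not taken from this diff.

import time

from lerobot.common.robot_devices.robots.stretch import StretchRobot

robot = StretchRobot()
robot.connect()  # starts up the robot, connects cameras, homes if needed
try:
    for _ in range(100):  # illustrative number of teleop steps
        # With record_data=True, teleop_step returns (obs_dict, action_dict).
        obs, action = robot.teleop_step(record_data=True)
        print(obs["observation.state"], action["action"])
        time.sleep(1 / 30)  # illustrative 30 Hz loop rate
finally:
    robot.disconnect()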
72 lerobot/common/robot_devices/robots/utils.py Normal file
@@ -0,0 +1,72 @@
from typing import Protocol

from lerobot.common.robot_devices.robots.configs import (
    AlohaRobotConfig,
    KochBimanualRobotConfig,
    KochRobotConfig,
    LeKiwiRobotConfig,
    ManipulatorRobotConfig,
    MossRobotConfig,
    RobotConfig,
    So100RobotConfig,
    StretchRobotConfig,
)


def get_arm_id(name, arm_type):
    """Returns the string identifier of a robot arm. For instance, for a bimanual manipulator
    like Aloha, it could be left_follower, right_follower, left_leader, or right_leader.
    """
    return f"{name}_{arm_type}"


class Robot(Protocol):
    # TODO(rcadene, aliberts): Add unit test checking the protocol is implemented in the corresponding classes
    robot_type: str
    features: dict

    def connect(self): ...
    def run_calibration(self): ...
    def teleop_step(self, record_data=False): ...
    def capture_observation(self): ...
    def send_action(self, action): ...
    def disconnect(self): ...


def make_robot_config(robot_type: str, **kwargs) -> RobotConfig:
    if robot_type == "aloha":
        return AlohaRobotConfig(**kwargs)
    elif robot_type == "koch":
        return KochRobotConfig(**kwargs)
    elif robot_type == "koch_bimanual":
        return KochBimanualRobotConfig(**kwargs)
    elif robot_type == "moss":
        return MossRobotConfig(**kwargs)
    elif robot_type == "so100":
        return So100RobotConfig(**kwargs)
    elif robot_type == "stretch":
        return StretchRobotConfig(**kwargs)
    elif robot_type == "lekiwi":
        return LeKiwiRobotConfig(**kwargs)
    else:
        raise ValueError(f"Robot type '{robot_type}' is not available.")


def make_robot_from_config(config: RobotConfig):
    if isinstance(config, ManipulatorRobotConfig):
        from lerobot.common.robot_devices.robots.manipulator import ManipulatorRobot

        return ManipulatorRobot(config)
    elif isinstance(config, LeKiwiRobotConfig):
        from lerobot.common.robot_devices.robots.mobile_manipulator import MobileManipulator

        return MobileManipulator(config)
    else:
        from lerobot.common.robot_devices.robots.stretch import StretchRobot

        return StretchRobot(config)


def make_robot(robot_type: str, **kwargs) -> Robot:
    config = make_robot_config(robot_type, **kwargs)
    return make_robot_from_config(config)
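For reference, a minimal sketch of the factory round-trip defined in utils.py above; "so100" is just one of the supported robot types, and building a separate config first is optional.

from lerobot.common.robot_devices.robots.utils import make_robot, make_robot_config

# Build a config explicitly if you want to inspect or tweak it first...
config = make_robot_config("so100")

# ...or let make_robot compose make_robot_config and make_robot_from_config.
robot = make_robot("so100")
robot.connect()
obs = robot.capture_observation()
robot.disconnect()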
Some files were not shown because too many files have changed in this diff.