Compare commits

55 commits: test/add_c ... lerobot-xh

| SHA1 |
|---|
| 1752309c77 |
| 63a8d74733 |
| a87dce9e3f |
| 3685542bf1 |
| 7c1699898b |
| b3e9e11e11 |
| b04e6e0c7b |
| 96804bc86c |
| ef45ea9649 |
| bc351a0134 |
| 68986f6fc0 |
| 2f124e34de |
| c28e774234 |
| 80b1a97e4c |
| f4fec8f51c |
| f4f82c916f |
| ecbe154709 |
| d00c154db9 |
| 55f284b306 |
| cf8df17d3a |
| e079566597 |
| 83d6419d70 |
| a0ec9e1cb1 |
| 3eede4447d |
| 9c6a7d9701 |
| 7b201773f3 |
| bfd26eef5a |
| 1537d0ab90 |
| 2be7f3a3ff |
| 0cf864870c |
| 1786916a16 |
| 0507ad4f68 |
| bed90e3a41 |
| 6163daaaa4 |
| 8e2a394442 |
| a445d9c9da |
| f24030d4d8 |
| 7598aeaad7 |
| 4485cc0b5b |
| 8cfab38824 |
| ee5525fea1 |
| a1daeaf0c4 |
| 6d723c45a9 |
| 674e784aa9 |
| 42bf1e8b9d |
| a75d00970f |
| 4df18de636 |
| 8dc69c6126 |
| 7d481e6048 |
| b43ece8934 |
| c10c5a0e64 |
| a8db91c40e |
| 0f5f7ac780 |
| 768e36660d |
| 790d6740ba |

.github/workflows/build-docker-images.yml (24 changes)

```diff
@@ -40,24 +40,24 @@ jobs:
           git lfs install

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
         with:
           cache-binary: false

       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           lfs: true
           persist-credentials: false

       - name: Login to DockerHub
-        uses: docker/login-action@v3
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}

       - name: Build and Push CPU
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
         with:
           context: .
           file: ./docker/lerobot-cpu/Dockerfile
@@ -78,24 +78,24 @@ jobs:
           git lfs install

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
         with:
           cache-binary: false

       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           lfs: true
           persist-credentials: false

       - name: Login to DockerHub
-        uses: docker/login-action@v3
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}

       - name: Build and Push GPU
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
         with:
           context: .
           file: ./docker/lerobot-gpu/Dockerfile
@@ -110,23 +110,23 @@ jobs:
       group: aws-general-8-plus
     steps:
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
         with:
           cache-binary: false

       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           persist-credentials: false

       - name: Login to DockerHub
-        uses: docker/login-action@v3
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}

       - name: Build and Push GPU dev
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
         with:
           context: .
           file: ./docker/lerobot-gpu-dev/Dockerfile
```
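
The pattern in this and the following workflow diffs is to pin third-party actions to full commit SHAs, keeping the human-readable tag as a trailing comment. A quick way to double-check that a pinned SHA really belongs to the advertised tag is plain `git` (a sketch, not part of this diff; `actions/checkout` v4.2.2 is used as the example because it is pinned above):

```bash
# List the remote tags matching v4.2.2; for an annotated tag the peeled "^{}" line
# carries the commit hash, which should match the SHA pinned in the workflow (11bd7190...).
git ls-remote --tags https://github.com/actions/checkout | grep "v4.2.2"
```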

.github/workflows/build_documentation.yml (new file, 23 lines)

```yaml
name: Build documentation

on:
  workflow_dispatch:
  push:
    paths:
      - "docs/**"
    branches:
      - main
      - doc-builder*
      - v*-release


jobs:
  build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers
    uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
    with:
      commit_sha: ${{ github.sha }}
      package: lerobot
      additional_args: --not_python_module
    secrets:
      token: ${{ secrets.HUGGINGFACE_PUSH }}
      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
```

.github/workflows/build_pr_documentation.yml (new file, 19 lines)

```yaml
name: Build PR Documentation

on:
  pull_request:
    paths:
      - "docs/**"

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers
    uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
    with:
      commit_sha: ${{ github.event.pull_request.head.sha }}
      pr_number: ${{ github.event.number }}
      package: lerobot
      additional_args: --not_python_module
```

.github/workflows/nightly-tests.yml (4 changes)

```diff
@@ -33,7 +33,7 @@ jobs:
     runs-on:
       group: aws-general-8-plus
     container:
-      image: huggingface/lerobot-cpu:latest
+      image: huggingface/lerobot-cpu:latest # zizmor: ignore[unpinned-images]
       options: --shm-size "16gb"
       credentials:
         username: ${{ secrets.DOCKERHUB_USERNAME }}
@@ -60,7 +60,7 @@ jobs:
       CUDA_VISIBLE_DEVICES: "0"
       TEST_TYPE: "single_gpu"
     container:
-      image: huggingface/lerobot-gpu:latest
+      image: huggingface/lerobot-gpu:latest # zizmor: ignore[unpinned-images]
       options: --gpus all --shm-size "16gb"
       credentials:
         username: ${{ secrets.DOCKERHUB_USERNAME }}
```

.github/workflows/quality.yml (8 changes)

```diff
@@ -33,12 +33,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           persist-credentials: false

       - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@7f4fc3e22c37d6ff65e88745f38bd3157c663f7c # v4.9.1
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -64,9 +64,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           persist-credentials: false

       - name: typos-action
-        uses: crate-ci/typos@v1.29.10
+        uses: crate-ci/typos@db35ee91e80fbb447f33b0e5fbddb24d2a1a884f # v1.29.10
```

.github/workflows/test-docker-build.yml (8 changes)

```diff
@@ -35,7 +35,7 @@ jobs:
       matrix: ${{ steps.set-matrix.outputs.matrix }}
     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           persist-credentials: false

@@ -64,17 +64,17 @@ jobs:
         docker-file: ${{ fromJson(needs.get_changed_files.outputs.matrix) }}
     steps:
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
         with:
           cache-binary: false

       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           persist-credentials: false

       - name: Build Docker image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
         with:
           file: ${{ matrix.docker-file }}
           context: .
```

.github/workflows/test.yml (12 changes)

```diff
@@ -50,7 +50,7 @@ jobs:
     env:
       MUJOCO_GL: egl
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          lfs: true # Ensure LFS files are pulled
          persist-credentials: false
@@ -62,7 +62,7 @@ jobs:
           sudo apt-get install -y libegl1-mesa-dev ffmpeg portaudio19-dev

       - name: Install uv and python
-        uses: astral-sh/setup-uv@v5
+        uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
         with:
           enable-cache: true
           version: ${{ env.UV_VERSION }}
@@ -85,7 +85,7 @@ jobs:
     env:
       MUJOCO_GL: egl
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          lfs: true # Ensure LFS files are pulled
          persist-credentials: false
@@ -94,7 +94,7 @@ jobs:
       run: sudo apt-get update && sudo apt-get install -y ffmpeg

       - name: Install uv and python
-        uses: astral-sh/setup-uv@v5
+        uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
         with:
           enable-cache: true
           version: ${{ env.UV_VERSION }}
@@ -117,7 +117,7 @@ jobs:
     env:
       MUJOCO_GL: egl
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          lfs: true # Ensure LFS files are pulled
          persist-credentials: false
@@ -129,7 +129,7 @@ jobs:
           sudo apt-get install -y libegl1-mesa-dev ffmpeg portaudio19-dev

       - name: Install uv and python
-        uses: astral-sh/setup-uv@v5
+        uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
         with:
           enable-cache: true
           version: ${{ env.UV_VERSION }}
```

.github/workflows/trufflehog.yml (4 changes)

```diff
@@ -24,12 +24,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           fetch-depth: 0
           persist-credentials: false

       - name: Secret Scanning
-        uses: trufflesecurity/trufflehog@main
+        uses: trufflesecurity/trufflehog@90694bf9af66e7536abc5824e7a87246dbf933cb # v3.88.35
         with:
           extra_args: --only-verified
```

.github/workflows/upload_pr_documentation.yml (new file, 16 lines)

```yaml
name: Upload PR Documentation

on: # zizmor: ignore[dangerous-triggers] We follow the same pattern as in Transformers
  workflow_run:
    workflows: [ "Build PR Documentation" ]
    types:
      - completed

jobs:
  build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers
    uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
    with:
      package_name: lerobot
    secrets:
      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
      comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
```

```diff
@@ -37,18 +37,18 @@ repos:
       - id: trailing-whitespace

   - repo: https://github.com/adhtruong/mirrors-typos
-    rev: v1.31.1
+    rev: v1.32.0
     hooks:
       - id: typos
         args: [--force-exclude]

   - repo: https://github.com/asottile/pyupgrade
-    rev: v3.19.1
+    rev: v3.20.0
     hooks:
       - id: pyupgrade

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.11.4
+    rev: v0.11.11
     hooks:
       - id: ruff
         args: [--fix]
@@ -57,12 +57,12 @@ repos:

   ##### Security #####
   - repo: https://github.com/gitleaks/gitleaks
-    rev: v8.24.2
+    rev: v8.26.0
     hooks:
       - id: gitleaks

   - repo: https://github.com/woodruffw/zizmor-pre-commit
-    rev: v1.5.2
+    rev: v1.8.0
     hooks:
       - id: zizmor
```
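
A side note on the hook revision bumps above: this is the kind of change `pre-commit autoupdate` generates, and the updated hooks can be exercised locally before pushing. A minimal sketch, assuming `pre-commit` is installed in the environment:

```bash
# Re-resolve every hook in the repository's pre-commit config to its latest tagged revision
pre-commit autoupdate

# Run all configured hooks (typos, pyupgrade, ruff, gitleaks, zizmor, ...) against the whole repo
pre-commit run --all-files
```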

README.md (46 changes)

````diff
@@ -23,21 +23,35 @@
 </div>

 <h2 align="center">
-    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">
-        Build Your Own SO-100 Robot!</a></p>
+    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/12_use_so101.md">
+        Build Your Own SO-101 Robot!</a></p>
 </h2>

 <div align="center">
-    <img src="media/so100/leader_follower.webp?raw=true" alt="SO-100 leader and follower arms" title="SO-100 leader and follower arms" width="50%">
+  <div style="display: flex; gap: 1rem; justify-content: center; align-items: center;" >
+    <img
+      src="media/so101/so101.webp?raw=true"
+      alt="SO-101 follower arm"
+      title="SO-101 follower arm"
+      style="width: 40%;"
+    />
+    <img
+      src="media/so101/so101-leader.webp?raw=true"
+      alt="SO-101 leader arm"
+      title="SO-101 leader arm"
+      style="width: 40%;"
+    />
+  </div>

-    <p><strong>Meet the SO-100 – Just $110 per arm!</strong></p>
+    <p><strong>Meet the updated SO100, the SO-101 – Just €114 per arm!</strong></p>
     <p>Train it in minutes with a few simple moves on your laptop.</p>
     <p>Then sit back and watch your creation act autonomously! 🤯</p>

-    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">
-        Get the full SO-100 tutorial here.</a></p>
+    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/12_use_so101.md">
+        See the full SO-101 tutorial here.</a></p>

-    <p>Want to take it to the next level? Make your SO-100 mobile by building LeKiwi!</p>
+    <p>Want to take it to the next level? Make your SO-101 mobile by building LeKiwi!</p>
     <p>Check out the <a href="https://github.com/huggingface/lerobot/blob/main/examples/11_use_lekiwi.md">LeKiwi tutorial</a> and bring your robot to life on wheels.</p>

     <img src="media/lekiwi/kiwi.webp?raw=true" alt="LeKiwi mobile robot" title="LeKiwi mobile robot" width="50%">
@@ -51,7 +65,6 @@
 ---

-
 🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier to entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models.

 🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning.
@@ -103,13 +116,20 @@ When using `miniconda`, install `ffmpeg` in your environment:
 conda install ffmpeg -c conda-forge
 ```

+> **NOTE:** This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can:
+> - _[On any platform]_ Explicitly install `ffmpeg 7.X` using:
+> ```bash
+> conda install ffmpeg=7.1.1 -c conda-forge
+> ```
+> - _[On Linux only]_ Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`.
+
 Install 🤗 LeRobot:
 ```bash
 pip install -e .
 ```

 > **NOTE:** If you encounter build errors, you may need to install additional dependencies (`cmake`, `build-essential`, and `ffmpeg libs`). On Linux, run:
-`sudo apt-get install cmake build-essential python-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev pkg-config`. For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg)
+`sudo apt-get install cmake build-essential python3-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev pkg-config`. For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg)

 For simulations, 🤗 LeRobot comes with gymnasium environments that can be installed as extras:
 - [aloha](https://github.com/huggingface/gym-aloha)
@@ -201,7 +221,7 @@ dataset attributes:
 │   ├ episode_index (int64): index of the episode for this sample
 │   ├ frame_index (int64): index of the frame for this sample in the episode ; starts at 0 for each episode
 │   ├ timestamp (float32): timestamp in the episode
-│   ├ next.done (bool): indicates the end of en episode ; True for the last frame in each episode
+│   ├ next.done (bool): indicates the end of an episode ; True for the last frame in each episode
 │   └ index (int64): general index in the whole dataset
 ├ episode_data_index: contains 2 tensors with the start and end indices of each episode
 │   ├ from (1D int64 tensor): first frame index for each episode — shape (num episodes,) starts with 0
@@ -250,7 +270,7 @@ See `python lerobot/scripts/eval.py --help` for more instructions.

 ### Train your own policy

-Check out [example 3](./examples/3_train_policy.py) that illustrate how to train a model using our core library in python, and [example 4](./examples/4_train_policy_with_script.md) that shows how to use our training script from command line.
+Check out [example 3](./examples/3_train_policy.py) that illustrates how to train a model using our core library in python, and [example 4](./examples/4_train_policy_with_script.md) that shows how to use our training script from command line.

 To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding `--wandb.enable=true`.
@@ -301,7 +321,7 @@ Once you have trained a policy you may upload it to the Hugging Face hub using a
 You first need to find the checkpoint folder located inside your experiment directory (e.g. `outputs/train/2024-05-05/20-21-12_aloha_act_default/checkpoints/002500`). Within that there is a `pretrained_model` directory which should contain:
 - `config.json`: A serialized version of the policy configuration (following the policy's dataclass config).
 - `model.safetensors`: A set of `torch.nn.Module` parameters, saved in [Hugging Face Safetensors](https://huggingface.co/docs/safetensors/index) format.
-- `train_config.json`: A consolidated configuration containing all parameter userd for training. The policy configuration should match `config.json` exactly. Thisis useful for anyone who wants to evaluate your policy or for reproducibility.
+- `train_config.json`: A consolidated configuration containing all parameters used for training. The policy configuration should match `config.json` exactly. This is useful for anyone who wants to evaluate your policy or for reproducibility.

 To upload these to the hub, run the following:
 ```bash
@@ -340,7 +360,7 @@ with profile(
 If you want, you can cite this work with:
 ```bibtex
 @misc{cadene2024lerobot,
-    author = {Cadene, Remi and Alibert, Simon and Soare, Alexander and Gallouedec, Quentin and Zouitine, Adil and Wolf, Thomas},
+    author = {Cadene, Remi and Alibert, Simon and Soare, Alexander and Gallouedec, Quentin and Zouitine, Adil and Palma, Steven and Kooijmans, Pepijn and Aractingi, Michel and Shukor, Mustafa and Aubakirova, Dana and Russi, Martino and Capuano, Francesco and Pascale, Caroline and Choghari, Jade and Moss, Jess and Wolf, Thomas},
     title = {LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch},
     howpublished = "\url{https://github.com/huggingface/lerobot}",
     year = {2024}
````

```diff
@@ -416,7 +416,7 @@ if __name__ == "__main__":
         "--vcodec",
         type=str,
         nargs="*",
-        default=["libx264", "libx265", "libsvtav1"],
+        default=["libx264", "hevc", "libsvtav1"],
         help="Video codecs to be tested",
     )
     parser.add_argument(
@@ -446,7 +446,7 @@ if __name__ == "__main__":
     #     nargs="*",
     #     default=[0, 1],
     #     help="Use the fastdecode tuning option. 0 disables it. "
-    #     "For libx264 and libx265, only 1 is possible. "
+    #     "For libx264 and libx265/hevc, only 1 is possible. "
     #     "For libsvtav1, 1, 2 or 3 are possible values with a higher number meaning a faster decoding optimization",
     # )
     parser.add_argument(
```

```diff
@@ -14,7 +14,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     tcpdump sysstat screen tmux \
     libglib2.0-0 libgl1-mesa-glx libegl1-mesa \
     speech-dispatcher portaudio19-dev libgeos-dev \
-    python${PYTHON_VERSION} python${PYTHON_VERSION}-venv \
+    python${PYTHON_VERSION} python${PYTHON_VERSION}-venv python${PYTHON_VERSION}-dev \
     && apt-get clean && rm -rf /var/lib/apt/lists/*

 # Install ffmpeg build dependencies. See:
```

docs/README.md (new file, 137 lines)

<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Generating the documentation

To generate the documentation, you first have to build it. Several packages are necessary to build the doc;
you can install them with the following command, at the root of the code repository:

```bash
pip install -e ".[docs]"
```

You will also need `nodejs`. Please refer to its [installation page](https://nodejs.org/en/download).

---
**NOTE**

You only need to generate the documentation to inspect it locally (if you're planning changes and want to
check how they look before committing, for instance). You don't have to `git commit` the built documentation.

---

## Building the documentation

Once you have set up the `doc-builder` and additional packages, you can generate the documentation by
typing the following command:

```bash
doc-builder build lerobot docs/source/ --build_dir ~/tmp/test-build
```

You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
Markdown editor.

## Previewing the documentation

To preview the docs, first install the `watchdog` module with:

```bash
pip install watchdog
```

Then run the following command:

```bash
doc-builder preview lerobot docs/source/
```

The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment with a link to where the documentation with your changes lives.

---
**NOTE**

The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` and restart the `preview` command (`ctrl-c` to stop it, then call `doc-builder preview ...` again).

---

## Adding a new element to the navigation bar

Accepted files are Markdown (.md).

Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/lerobot/blob/main/docs/source/_toctree.yml) file.

## Renaming section headers and moving sections

It helps to keep the old links working when renaming a section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and social media, and it makes for a much better user experience if users reading those months later can still easily navigate to the originally intended information.

Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.

So if you renamed a section from "Section A" to "Section B", then you can add at the end of the file:

```
Sections that were moved:

[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
```
and of course, if you moved it to another file, then:

```
Sections that were moved:

[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
```

Use the relative style to link to the new file so that the versioned docs continue to work.

For an example of a rich moved-sections set, please see the very end of [the transformers Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md).

### Adding a new tutorial

Adding a new tutorial or section is done in two steps:

- Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md).
- Link that file in `./source/_toctree.yml` on the correct toc-tree.

Make sure to put your new file under the proper section. If you have a doubt, feel free to ask in a GitHub Issue or PR.

### Writing source documentation

Values that should be put in `code` should be surrounded by backticks: \`like so\`. Note that argument names
and objects like True, None, or any strings should usually be put in `code`.

#### Writing a multi-line code block

Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:


````
```
# first line of code
# second line
# etc
```
````

#### Adding an image

Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like
the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference
them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
For an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images
to this dataset.

docs/source/_toctree.yml (new file, 12 lines)

```yaml
- sections:
  - local: index
    title: LeRobot
  - local: installation
    title: Installation
  title: Get started
- sections:
  - local: assemble_so101
    title: Assemble SO-101
  - local: getting_started_real_world_robot
    title: Getting Started with Real-World Robots
  title: "Tutorials"
```
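
Per the docs/README.md instructions above, adding another page to the navigation only requires dropping a new `.mdx` file in `docs/source/` and appending a `local`/`title` pair to the relevant section of this toctree. A sketch (the `cameras` page is hypothetical, not part of this PR):

```yaml
- sections:
  - local: assemble_so101
    title: Assemble SO-101
  - local: getting_started_real_world_robot
    title: Getting Started with Real-World Robots
  - local: cameras   # hypothetical new page: docs/source/cameras.mdx
    title: Cameras
  title: "Tutorials"
```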

docs/source/assemble_so101.mdx (new file, 348 lines)

# Assemble SO-101

In the steps below we explain how to assemble our flagship robot, the SO-101.

## Source the parts

Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with a link to source the parts, as well as the instructions to 3D print the parts, and advice if it's your first time printing or if you don't own a 3D printer.

Before assembling, you will first need to configure your motors. To this end, we provide a script, so let's first install LeRobot. After configuration, we will also guide you through assembly.

## Install LeRobot

To install LeRobot, follow our [Installation Guide](./installation).

## Configure motors

To configure the motors, designate one bus servo adapter and 6 motors for your leader arm, and similarly the other bus servo adapter and 6 motors for the follower arm. It's convenient to label them and write on each motor whether it's for the follower (`F`) or the leader (`L`), along with its ID from 1 to 6.

You should now plug the 5V or 12V power supply into the motor bus: 5V for the STS3215 7.4V motors and 12V for the STS3215 12V motors. Note that the leader arm always uses the 7.4V motors, so watch out that you plug in the right power supply if you have both 12V and 7.4V motors, otherwise you might burn your motors! Now, connect the motor bus to your computer via USB. Note that USB doesn't provide any power; both the power supply and the USB cable have to be plugged in.

### Find the USB ports associated to each arm

To find the port for each bus servo adapter, run this script:
```bash
python lerobot/scripts/find_motors_bus_port.py
```
##### Example outputs of the script

<hfoptions id="example">
<hfoption id="Mac">

Example output for the leader arm's port: `/dev/tty.usbmodem575E0031751`

```bash
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your MotorsBus and press Enter when done.

[...Disconnect leader arm and press Enter...]

The port of this MotorsBus is /dev/tty.usbmodem575E0031751
Reconnect the usb cable.
```

Example output for the follower arm's port: `/dev/tty.usbmodem575E0032081`

```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your MotorsBus and press Enter when done.

[...Disconnect follower arm and press Enter...]

The port of this MotorsBus is /dev/tty.usbmodem575E0032081
Reconnect the usb cable.
```

</hfoption>
<hfoption id="Linux">

On Linux, you might need to give access to the USB ports by running:
```bash
sudo chmod 666 /dev/ttyACM0
sudo chmod 666 /dev/ttyACM1
```

Example output for the leader arm's port: `/dev/ttyACM0`

```bash
Finding all available ports for the MotorBus.
['/dev/ttyACM0', '/dev/ttyACM1']
Remove the usb cable from your MotorsBus and press Enter when done.

[...Disconnect leader arm and press Enter...]

The port of this MotorsBus is /dev/ttyACM0
Reconnect the usb cable.
```

Example output for the follower arm's port: `/dev/ttyACM1`

```
Finding all available ports for the MotorBus.
['/dev/ttyACM0', '/dev/ttyACM1']
Remove the usb cable from your MotorsBus and press Enter when done.

[...Disconnect follower arm and press Enter...]

The port of this MotorsBus is /dev/ttyACM1
Reconnect the usb cable.
```
</hfoption>
</hfoptions>

#### Update config file

Now that you have your ports, update the **port** default values of [`SO101RobotConfig`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/robot_devices/robots/configs.py).
You will find the class registered as `so101`, where you can update the `port` values with your actual motor ports:
```diff
@RobotConfig.register_subclass("so101")
@dataclass
class So101RobotConfig(ManipulatorRobotConfig):
    calibration_dir: str = ".cache/calibration/so101"
    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
    # the number of motors in your follower arms.
    max_relative_target: int | None = None

    leader_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
-                port="/dev/tty.usbmodem58760431091",
+                port="{ADD YOUR LEADER PORT}",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                },
            ),
        }
    )

    follower_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
-                port="/dev/tty.usbmodem585A0076891",
+                port="{ADD YOUR FOLLOWER PORT}",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                },
            ),
        }
    )
```

Here is a video of the process:
<div class="video-container">
  <video controls width="600">
    <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/lerobot-find-motorbus.mp4" type="video/mp4" />
  </video>
</div>

## Step-by-Step Assembly Instructions

The follower arm uses 6x STS3215 motors with 1/345 gearing. The leader, however, uses three differently geared motor variants to make sure it can both sustain its own weight and be moved without requiring much force. Which motor is needed for which joint is shown in the table below.

| Leader-Arm Axis | Motor | Gear Ratio |
|-----------------|:-------:|:----------:|
| Base / Shoulder Yaw | 1 | 1 / 191 |
| Shoulder Pitch | 2 | 1 / 345 |
| Elbow | 3 | 1 / 191 |
| Wrist Roll | 4 | 1 / 147 |
| Wrist Pitch | 5 | 1 / 147 |
| Gripper | 6 | 1 / 147 |

### Set motor IDs

Plug your motor into one of the two ports of the motor bus and run this script to set its ID to 1. Replace the text after `--port` with the corresponding control board port.
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand feetech \
  --model sts3215 \
  --baudrate 1000000 \
  --ID 1
```

Then unplug that motor, plug in the second motor, and set its ID to 2.
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand feetech \
  --model sts3215 \
  --baudrate 1000000 \
  --ID 2
```

Repeat this process for all your motors until ID 6. Do the same for the 6 motors of the leader arm, but make sure to change the power supply if you use motors with a different voltage, and make sure you give the right ID to the right motor according to the table above.

Here is a video of the process:
<div class="video-container">
  <video controls width="600">
    <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/lerobot-configure-motor.mp4" type="video/mp4" />
  </video>
</div>

### Clean Parts
Remove all support material from the 3D-printed parts; the easiest way to do this is using a small screwdriver to get underneath the support material.

### Joint 1

- Place the first motor into the base.
- Fasten the motor with 4 M2x6mm screws (the smallest screws), two from the top and two from the bottom.
- Slide over the first motor holder and fasten it using two M2x6mm screws (one on each side).
- Install both motor horns, securing the top horn with an M3x6mm screw.
- Attach the shoulder part.
- Tighten the shoulder part with 4 M3x6mm screws on top and 4 M3x6mm screws on the bottom.
- Add the shoulder motor holder.

<div class="video-container">
  <video controls width="600">
    <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint1_v2.mp4" type="video/mp4" />
  </video>
</div>

### Joint 2

- Slide the second motor in from the top.
- Fasten the second motor with 4 M2x6mm screws.
- Attach both motor horns to motor 2, again using the M3x6mm horn screw.
- Attach the upper arm with 4 M3x6mm screws on each side.

<div class="video-container">
  <video controls width="600">
    <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint2_v2.mp4" type="video/mp4" />
  </video>
</div>

### Joint 3

- Insert motor 3 and fasten it using 4 M2x6mm screws.
- Attach both motor horns to motor 3 and secure one again with an M3x6mm horn screw.
- Connect the forearm to motor 3 using 4 M3x6mm screws on each side.

<div class="video-container">
  <video controls width="600">
    <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint3_v2.mp4" type="video/mp4" />
  </video>
</div>

### Joint 4

- Slide over motor holder 4.
- Slide in motor 4.
- Fasten motor 4 with 4 M2x6mm screws and attach its motor horns, using an M3x6mm horn screw.

<div class="video-container">
  <video controls width="600">
    <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint4_v2.mp4" type="video/mp4" />
  </video>
</div>

### Joint 5

- Insert motor 5 into the wrist holder and secure it with 2 M2x6mm front screws.
- Install only one motor horn on the wrist motor and secure it with an M3x6mm horn screw.
- Secure the wrist to motor 4 using 4 M3x6mm screws on both sides.

<div class="video-container">
  <video controls width="600">
    <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Joint5_v2.mp4" type="video/mp4" />
  </video>
</div>

### Gripper / Handle

<hfoptions id="assembly">
<hfoption id="Follower">

- Attach the gripper to motor 5: attach it to the motor horn on the wrist using 4 M3x6mm screws.
- Insert the gripper motor and secure it with 2 M2x6mm screws on each side.
- Attach the motor horns, again using an M3x6mm horn screw.
- Install the gripper claw and secure it with 4 M3x6mm screws on both sides.

<div class="video-container">
  <video controls width="600">
    <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Gripper_v2.mp4" type="video/mp4" />
  </video>
</div>

</hfoption>
<hfoption id="Leader">

- Mount the leader holder onto the wrist and secure it with 4 M3x6mm screws.
- Attach the handle to motor 5 using 1 M2x6mm screw.
- Insert the gripper motor, secure it with 2 M2x6mm screws on each side, and attach a motor horn using an M3x6mm horn screw.
- Attach the follower trigger with 4 M3x6mm screws.

<div class="video-container">
  <video controls width="600">
    <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Leader_v2.mp4" type="video/mp4" />
  </video>
</div>

</hfoption>
</hfoptions>

##### Wiring

- Attach the motor controller on the back.
- Then insert all wires; use the wire guides everywhere to make sure the wires don't unplug themselves and stay in place.

<div class="video-container">
  <video controls width="600">
    <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/Wiring_v2.mp4" type="video/mp4" />
  </video>
</div>

## Calibrate

Next, you'll need to calibrate your SO-101 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position.
The calibration process is very important because it allows a neural network trained on one SO-101 robot to work on another.

#### Manual calibration of follower arm

You will need to move the follower arm to these positions sequentially. Note that the rotated position is on the right side of the robot and you have to open the gripper fully.

| 1. Middle position | 2. Zero position | 3. Rotated position | 4. Rest position |
| --- | --- | --- | --- |
| <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/follower_middle.webp?raw=true" alt="SO-101 follower arm middle position" title="SO-101 follower arm middle position" style="width:100%;"> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/follower_zero.webp?raw=true" alt="SO-101 follower arm zero position" title="SO-101 follower arm zero position" style="width:100%;"> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/follower_rotated.webp?raw=true" alt="SO-101 follower arm rotated position" title="SO-101 follower arm rotated position" style="width:100%;"> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/follower_rest.webp?raw=true" alt="SO-101 follower arm rest position" title="SO-101 follower arm rest position" style="width:100%;"> |

Make sure both arms are connected and run this script to launch manual calibration:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so101 \
  --robot.cameras='{}' \
  --control.type=calibrate \
  --control.arms='["main_follower"]'
```

#### Manual calibration of leader arm
You will also need to move the leader arm to these positions sequentially:

| 1. Middle position | 2. Zero position | 3. Rotated position | 4. Rest position |
| --- | --- | --- | --- |
| <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/leader_middle.webp?raw=true" alt="SO-101 leader arm middle position" title="SO-101 leader arm middle position" style="width:100%;"> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/leader_zero.webp?raw=true" alt="SO-101 leader arm zero position" title="SO-101 leader arm zero position" style="width:100%;"> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/leader_rotated.webp?raw=true" alt="SO-101 leader arm rotated position" title="SO-101 leader arm rotated position" style="width:100%;"> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/leader_rest.webp?raw=true" alt="SO-101 leader arm rest position" title="SO-101 leader arm rest position" style="width:100%;"> |

Run this script to launch manual calibration:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so101 \
  --robot.cameras='{}' \
  --control.type=calibrate \
  --control.arms='["main_leader"]'
```

Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following this tutorial: [Getting started with real-world robots](./getting_started_real_world_robot)

docs/source/getting_started_real_world_robot.mdx (new file, 370 lines)

# Getting Started with Real-World Robots

This tutorial will explain how to train a neural network to autonomously control a real robot.

**You'll learn:**
1. How to record and visualize your dataset.
2. How to train a policy using your data and prepare it for evaluation.
3. How to evaluate your policy and visualize the results.

By following these steps, you'll be able to replicate tasks like picking up a Lego block and placing it in a bin with a high success rate, as demonstrated in [this video](https://x.com/RemiCadene/status/1814680760592572934).

This tutorial is specifically made for the affordable [SO-101](https://github.com/TheRobotStudio/SO-ARM100) robot, but it contains additional information so it can easily be adapted to various types of robots, like the [Aloha bimanual robot](https://aloha-2.github.io), by changing some configurations. The SO-101 consists of a leader arm and a follower arm, each with 6 motors. It can work with one or several cameras to record the scene, which serve as visual sensors for the robot.

During the data collection phase, you will control the follower arm by moving the leader arm. This process is known as "teleoperation." This technique is used to collect robot trajectories. Afterward, you'll train a neural network to imitate these trajectories and deploy the network to enable your robot to operate autonomously.

If you encounter any issues at any step of the tutorial, feel free to seek help on [Discord](https://discord.com/invite/s3KuuzsPFb), or don't hesitate to iterate with us on the tutorial by creating issues or pull requests.

## Setup and Calibrate

If you haven't yet set up and calibrated the SO-101, follow these steps:
1. [Find ports and update config file](./assemble_so101#find-the-usb-ports-associated-to-each-arm)
2. [Calibrate](./assemble_so101#calibrate)

## Teleoperate

Run this simple script to teleoperate your robot (it won't connect to or display the cameras):
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so101 \
  --robot.cameras='{}' \
  --control.type=teleoperate
```

The teleoperate command will automatically:
1. Identify any missing calibrations and initiate the calibration procedure.
2. Connect the robot and start teleoperation.

## Setup Cameras

To connect a camera you have three options:
1. OpenCVCamera, which allows us to use any camera: USB, RealSense, laptop webcam
2. iPhone camera with macOS
3. Phone camera on Linux

### Use OpenCVCamera

The [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py) class allows you to efficiently record frames from most cameras using the [`opencv2`](https://docs.opencv.org) library. For more details on compatibility, see [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html).

To instantiate an [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py), you need a camera index (e.g. `OpenCVCamera(camera_index=0)`). When you only have one camera, like a laptop webcam, the camera index is usually `0`, but it might differ, and the camera index might change if you reboot your computer or re-plug your camera. This behavior depends on your operating system.

To find the camera indices, run the following utility script, which will save a few frames from each detected camera:
```bash
python lerobot/common/robot_devices/cameras/opencv.py \
  --images-dir outputs/images_from_opencv_cameras
```

The output will look something like this if you have two cameras connected:
```
Mac or Windows detected. Finding available camera indices through scanning all indices from 0 to 60
[...]
Camera found at index 0
Camera found at index 1
[...]
Connecting cameras
OpenCVCamera(0, fps=30.0, width=1920.0, height=1080.0, color_mode=rgb)
OpenCVCamera(1, fps=24.0, width=1920.0, height=1080.0, color_mode=rgb)
Saving images to outputs/images_from_opencv_cameras
Frame: 0000 Latency (ms): 39.52
[...]
Frame: 0046 Latency (ms): 40.07
Images have been saved to outputs/images_from_opencv_cameras
```

Check the saved images in `outputs/images_from_opencv_cameras` to identify which camera index corresponds to which physical camera (e.g. `0` for `camera_00` or `1` for `camera_01`):
```
camera_00_frame_000000.png
[...]
camera_00_frame_000047.png
camera_01_frame_000000.png
[...]
camera_01_frame_000047.png
```

Note: Some cameras may take a few seconds to warm up, and the first frame might be black or green.

Now that you have the camera indices, you should specify the cameras in the config, as shown in the sketch below.
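
For reference, this is roughly what the camera entries in [`So101RobotConfig`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/robot_devices/robots/configs.py) could look like. A sketch only, assuming `OpenCVCameraConfig` exposes `camera_index`, `fps`, `width`, and `height` fields; adapt the names and indices to the cameras you found above:

```python
# Sketch: cameras section of the so101 robot config (field names assumed, not from this PR).
cameras: dict[str, CameraConfig] = field(
    default_factory=lambda: {
        "laptop": OpenCVCameraConfig(
            camera_index=0,   # index found with the utility script above
            fps=30,
            width=640,
            height=480,
        ),
        "phone": OpenCVCameraConfig(
            camera_index=1,
            fps=30,
            width=640,
            height=480,
        ),
    }
)
```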
### Use your phone
|
||||
<hfoptions id="use phone">
|
||||
<hfoption id="Mac">
|
||||
|
||||
To use your iPhone as a camera on macOS, enable the Continuity Camera feature:
|
||||
- Ensure your Mac is running macOS 13 or later, and your iPhone is on iOS 16 or later.
|
||||
- Sign in both devices with the same Apple ID.
|
||||
- Connect your devices with a USB cable or turn on Wi-Fi and Bluetooth for a wireless connection.
|
||||
|
||||
For more details, visit [Apple support](https://support.apple.com/en-gb/guide/mac-help/mchl77879b8a/mac).
|
||||
|
||||
Your iPhone should be detected automatically when running the camera setup script in the next section.
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Linux">
|
||||
|
||||
If you want to use your phone as a camera on Linux, follow these steps to set up a virtual camera
|
||||
|
||||
1. *Install `v4l2loopback-dkms` and `v4l-utils`*. Those packages are required to create virtual camera devices (`v4l2loopback`) and verify their settings with the `v4l2-ctl` utility from `v4l-utils`. Install them using:
|
||||
```python
|
||||
sudo apt install v4l2loopback-dkms v4l-utils
|
||||
```
|
||||
2. *Install [DroidCam](https://droidcam.app) on your phone*. This app is available for both iOS and Android.
|
||||
3. *Install [OBS Studio](https://obsproject.com)*. This software will help you manage the camera feed. Install it using [Flatpak](https://flatpak.org):
|
||||
```python
|
||||
flatpak install flathub com.obsproject.Studio
|
||||
```
|
||||
4. *Install the DroidCam OBS plugin*. This plugin integrates DroidCam with OBS Studio. Install it with:
|
||||
```python
|
||||
flatpak install flathub com.obsproject.Studio.Plugin.DroidCam
|
||||
```
|
||||
5. *Start OBS Studio*. Launch with:
|
||||
```python
|
||||
flatpak run com.obsproject.Studio
|
||||
```
|
||||
6. *Add your phone as a source*. Follow the instructions [here](https://droidcam.app/obs/usage). Be sure to set the resolution to `640x480`.
|
||||
7. *Adjust resolution settings*. In OBS Studio, go to `File > Settings > Video`. Change the `Base(Canvas) Resolution` and the `Output(Scaled) Resolution` to `640x480` by manually typing it in.
|
||||
8. *Start virtual camera*. In OBS Studio, follow the instructions [here](https://obsproject.com/kb/virtual-camera-guide).
|
||||
9. *Verify the virtual camera setup*. Use `v4l2-ctl` to list the devices:
|
||||
```python
|
||||
v4l2-ctl --list-devices
|
||||
```
|
||||
You should see an entry like:
|
||||
```
|
||||
VirtualCam (platform:v4l2loopback-000):
|
||||
/dev/video1
|
||||
```
|
||||
10. *Check the camera resolution*. Use `v4l2-ctl` to ensure that the virtual camera output resolution is `640x480`. Change `/dev/video1` to the port of your virtual camera from the output of `v4l2-ctl --list-devices`.
|
||||
```python
|
||||
v4l2-ctl -d /dev/video1 --get-fmt-video
|
||||
```
|
||||
You should see an entry like:
|
||||
```
|
||||
>>> Format Video Capture:
|
||||
>>> Width/Height : 640/480
|
||||
>>> Pixel Format : 'YUYV' (YUYV 4:2:2)
|
||||
```
|
||||
|
||||
Troubleshooting: If the resolution is not correct you will have to delete the Virtual Camera port and try again as it cannot be changed.
|
||||
|
||||
If everything is set up correctly, you can proceed with the rest of the tutorial.
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## Teleoperate with cameras
|
||||
|
||||
We can now teleoperate again while at the same time visualizing the cameras and joint positions with `rerun`.
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so101 \
|
||||
--control.type=teleoperate
|
||||
--control.display_data=true
|
||||
```
|
||||
|
||||
## Record a dataset
|
||||
|
||||
Once you're familiar with teleoperation, you can record your first dataset with SO-101.
|
||||
|
||||
We use the Hugging Face hub features for uploading your dataset. If you haven't previously used the Hub, make sure you can login via the cli using a write-access token, this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens).
|
||||
|
||||
Add your token to the cli by running this command:
|
||||
```bash
|
||||
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
|
||||
```
|
||||
|
||||
Then store your Hugging Face repository name in a variable:
|
||||
```bash
|
||||
HF_USER=$(huggingface-cli whoami | head -n 1)
|
||||
echo $HF_USER
|
||||
```
|
||||
|
||||
Now you can record a dataset, to record 2 episodes and upload your dataset to the hub execute this command:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so101 \
|
||||
--control.type=record \
|
||||
--control.fps=30 \
|
||||
--control.single_task="Grasp a lego block and put it in the bin." \
|
||||
--control.repo_id=${HF_USER}/so101_test \
|
||||
--control.tags='["so101","tutorial"]' \
|
||||
--control.warmup_time_s=5 \
|
||||
--control.episode_time_s=30 \
|
||||
--control.reset_time_s=30 \
|
||||
--control.num_episodes=2 \
|
||||
--control.push_to_hub=true
|
||||
```
|
||||
|
||||
You will see a lot of lines appearing like this one:
|
||||
```
|
||||
INFO 2024-08-10 15:02:58 ol_robot.py:219 dt:33.34 (30.0hz) dtRlead: 5.06 (197.5hz) dtWfoll: 0.25 (3963.7hz) dtRfoll: 6.22 (160.7hz) dtRlaptop: 32.57 (30.7hz) dtRphone: 33.84 (29.5hz)
|
||||
```
|
||||
|
||||
| Field | Meaning |
|
||||
|:---|:---|
|
||||
| `2024-08-10 15:02:58` | Timestamp when `print` was called. |
|
||||
| `ol_robot.py:219` | Source file and line number of the `print` call (`lerobot/scripts/control_robot.py` at line `219`). |
|
||||
| `dt: 33.34 (30.0 Hz)` | Delta time (ms) between teleop steps (target: 30.0 Hz, `--fps 30`). Yellow if step is too slow. |
|
||||
| `dtRlead: 5.06 (197.5 Hz)` | Delta time (ms) for reading present position from the **leader arm**. |
|
||||
| `dtWfoll: 0.25 (3963.7 Hz)` | Delta time (ms) for writing goal position to the **follower arm** (asynchronous). |
|
||||
| `dtRfoll: 6.22 (160.7 Hz)` | Delta time (ms) for reading present position from the **follower arm**. |
|
||||
| `dtRlaptop: 32.57 (30.7 Hz)` | Delta time (ms) for capturing an image from the **laptop camera** (async thread). |
|
||||
| `dtRphone: 33.84 (29.5 Hz)` | Delta time (ms) for capturing an image from the **phone camera** (async thread). |
|
||||
|
||||
|
||||
#### Dataset upload
|
||||
Locally, your dataset is stored in this folder: `~/.cache/huggingface/lerobot/{repo-id}` (e.g. `~/.cache/huggingface/lerobot/cadene/so101_test`). At the end of data recording, your dataset will be uploaded to your Hugging Face page (e.g. https://huggingface.co/datasets/cadene/so101_test), which you can obtain by running:
|
||||
```bash
|
||||
echo https://huggingface.co/datasets/${HF_USER}/so101_test
|
||||
```
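If you want to double-check what was saved locally before or after the upload, you can simply list the cache folder mentioned above:

```bash
# Inspect what was written locally during recording (path from the paragraph above).
ls ~/.cache/huggingface/lerobot/${HF_USER}/so101_test
```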
|
||||
Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).
|
||||
|
||||
You can look for other LeRobot datasets on the hub by searching for `LeRobot` [tags](https://huggingface.co/datasets?other=LeRobot).
|
||||
|
||||
#### Record function
|
||||
|
||||
The `record` function provides a suite of tools for capturing and managing data during robot operation:
|
||||
|
||||
##### 1. Frame Capture and Video Encoding
|
||||
- Frames from cameras are saved to disk during recording.
|
||||
- At the end of each episode, frames are encoded into video files.
|
||||
|
||||
##### 2. Data Storage
|
||||
- Data is stored using the `LeRobotDataset` format.
|
||||
- By default, the dataset is pushed to your Hugging Face page.
|
||||
- To disable uploading, use `--control.push_to_hub=false`.
|
||||
|
||||
##### 3. Checkpointing and Resuming
|
||||
- Checkpoints are automatically created during recording.
|
||||
- If an issue occurs, you can resume by re-running the same command with `--control.resume=true`.
|
||||
- To start recording from scratch, **manually delete** the dataset directory.
|
||||
|
||||
##### 4. Recording Parameters
|
||||
Set the flow of data recording using command-line arguments (a short test-run sketch follows this list):
|
||||
- `--control.warmup_time_s=10`
|
||||
Number of seconds before starting data collection (default: **10 seconds**).
|
||||
Allows devices to warm up and synchronize.
|
||||
- `--control.episode_time_s=60`
|
||||
Duration of each data recording episode (default: **60 seconds**).
|
||||
- `--control.reset_time_s=60`
|
||||
Duration for resetting the environment after each episode (default: **60 seconds**).
|
||||
- `--control.num_episodes=50`
|
||||
Total number of episodes to record (default: **50**).
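For example, a quick smoke test before committing to a long session might look like the sketch below. The flag values and the `so101_smoke_test` repo id are illustrative only; the flags themselves are the same ones documented above.

```bash
# Hypothetical smoke test: one short episode, kept local (not pushed to the Hub).
python lerobot/scripts/control_robot.py \
  --robot.type=so101 \
  --control.type=record \
  --control.fps=30 \
  --control.single_task="Grasp a lego block and put it in the bin." \
  --control.repo_id=${HF_USER}/so101_smoke_test \
  --control.warmup_time_s=2 \
  --control.episode_time_s=10 \
  --control.reset_time_s=5 \
  --control.num_episodes=1 \
  --control.push_to_hub=false
```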
|
||||
|
||||
##### 5. Keyboard Controls During Recording
|
||||
Control the data recording flow using keyboard shortcuts:
|
||||
- Press **Right Arrow (`→`)**: End the current episode (or the reset period) early and move on to the next.
|
||||
- Press **Left Arrow (`←`)**: Cancel the current episode and re-record it.
|
||||
- Press **Escape (`ESC`)**: Immediately stop the session, encode videos, and upload the dataset.
|
||||
|
||||
#### Tips for gathering data
|
||||
|
||||
Once you're comfortable with data recording, you can create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings. Also make sure the object you are manipulating is visible on the cameras. A good rule of thumb is that you should be able to do the task yourself by only looking at the camera images.
|
||||
|
||||
In the following sections, you’ll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altering camera positions.
|
||||
|
||||
Avoid adding too much variation too quickly, as it may hinder your results.
|
||||
|
||||
|
||||
#### Troubleshooting
|
||||
- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).
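  For example, a quick way to check and set the variable; the `:0` value is an assumption for a typical local X session and yours may differ:

```bash
# Check whether $DISPLAY is set; if it prints nothing, the keyboard listener cannot work.
echo "DISPLAY=$DISPLAY"
# ":0" is a common value for a local X session, but yours may differ.
export DISPLAY=:0
```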
|
||||
|
||||
## Visualize a dataset
|
||||
|
||||
If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
|
||||
```bash
|
||||
echo ${HF_USER}/so101_test
|
||||
```
|
||||
|
||||
If you didn't upload it (i.e. you used `--control.push_to_hub=false`), you can visualize it locally instead (the visualization tool serves a browser window at `http://127.0.0.1:9090`):
|
||||
```bash
|
||||
python lerobot/scripts/visualize_dataset_html.py \
|
||||
--repo-id ${HF_USER}/so101_test \
|
||||
--local-files-only 1
|
||||
```
|
||||
|
||||
This will launch a local web server that looks like this:
|
||||
<div style="text-align:center;">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/visualize_dataset_html.webp?raw=true" alt="Koch v1.1 leader and follower arms" title="Koch v1.1 leader and follower arms" width="100%"></img>
|
||||
</div>
|
||||
|
||||
## Replay an episode
|
||||
|
||||
A useful feature is the `replay` function, which allows you to replay on your robot any episode that you've recorded, or episodes from any other dataset. This function helps you test the repeatability of your robot's actions and assess transferability across robots of the same model.
|
||||
|
||||
You can replay the first episode on your robot with:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so101 \
|
||||
--control.type=replay \
|
||||
--control.fps=30 \
|
||||
--control.repo_id=${HF_USER}/so101_test \
|
||||
--control.episode=0
|
||||
```
|
||||
|
||||
Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on an Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com).
|
||||
|
||||
## Train a policy
|
||||
|
||||
To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
--dataset.repo_id=${HF_USER}/so101_test \
|
||||
--policy.type=act \
|
||||
--output_dir=outputs/train/act_so101_test \
|
||||
--job_name=act_so101_test \
|
||||
--policy.device=cuda \
|
||||
--wandb.enable=true
|
||||
```
|
||||
|
||||
Let's explain the command:
|
||||
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so101_test`.
|
||||
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
3. We provided `output_dir=outputs/train/act_so101_test`, which sets where the checkpoints and logs are written, and `job_name=act_so101_test` to name the training run.
4. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
|
||||
|
||||
Training should take several hours. You will find checkpoints in `outputs/train/act_so101_test/checkpoints`.
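For instance, to see which checkpoints have been written so far (the `last` entry is the one referenced in the commands below):

```bash
# List the saved checkpoints of this training run.
ls outputs/train/act_so101_test/checkpoints
```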
|
||||
|
||||
If you want to resume training from a checkpoint, here is an example command to resume from the `last` checkpoint of the `act_so101_test` run:
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
--config_path=outputs/train/act_so101_test/checkpoints/last/pretrained_model/train_config.json \
|
||||
--resume=true
|
||||
```
|
||||
|
||||
#### Upload policy checkpoints
|
||||
|
||||
Once training is done, upload the latest checkpoint with:
|
||||
```bash
|
||||
huggingface-cli upload ${HF_USER}/act_so101_test \
|
||||
outputs/train/act_so101_test/checkpoints/last/pretrained_model
|
||||
```
|
||||
|
||||
You can also upload intermediate checkpoints with:
|
||||
```bash
|
||||
CKPT=010000
|
||||
huggingface-cli upload ${HF_USER}/act_so101_test${CKPT} \
|
||||
outputs/train/act_so101_test/checkpoints/${CKPT}/pretrained_model
|
||||
```
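Conversely, to fetch an uploaded checkpoint onto another machine, here is a sketch; it assumes a recent `huggingface-cli` with the `download` command, and the target path is only an example:

```bash
# Download the uploaded policy checkpoint from the Hub into a local folder.
huggingface-cli download ${HF_USER}/act_so101_test \
  --local-dir outputs/downloaded/act_so101_test
```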
|
||||
|
||||
## Evaluate your policy
|
||||
|
||||
You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so101 \
|
||||
--control.type=record \
|
||||
--control.fps=30 \
|
||||
--control.single_task="Grasp a lego block and put it in the bin." \
|
||||
--control.repo_id=${HF_USER}/eval_act_so101_test \
|
||||
--control.tags='["tutorial"]' \
|
||||
--control.warmup_time_s=5 \
|
||||
--control.episode_time_s=30 \
|
||||
--control.reset_time_s=30 \
|
||||
--control.num_episodes=10 \
|
||||
--control.push_to_hub=true \
|
||||
--control.policy.path=outputs/train/act_so101_test/checkpoints/last/pretrained_model
|
||||
```
|
||||
|
||||
As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
|
||||
1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/act_so101_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so101_test`).
|
||||
2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so101_test`).
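Once the evaluation episodes are recorded, you can browse them with the same visualization script used earlier, for example:

```bash
# Browse the recorded evaluation episodes locally with the visualization tool.
python lerobot/scripts/visualize_dataset_html.py \
  --repo-id ${HF_USER}/eval_act_so101_test \
  --local-files-only 1
```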
|
||||
19
docs/source/index.mdx
Normal file
@@ -0,0 +1,19 @@
|
||||
<div class="flex justify-center">
|
||||
<a target="_blank" href="https://huggingface.co/lerobot">
|
||||
<img alt="HuggingFace Expert Acceleration Program" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/lerobot-logo-thumbnail.png" style="width: 100%"></img>
|
||||
</a>
|
||||
</div>
|
||||
|
||||
# LeRobot
|
||||
|
||||
**State-of-the-art machine learning for real-world robotics**
|
||||
|
||||
🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier for entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models.
|
||||
|
||||
🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning.
|
||||
|
||||
🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulated environments so that everyone can get started.
|
||||
|
||||
🤗 LeRobot hosts pretrained models and datasets on the LeRobot HuggingFace page.
|
||||
|
||||
Join the LeRobot community on [Discord](https://discord.gg/s3KuuzsPFb)
|
||||
84
docs/source/installation.mdx
Normal file
@@ -0,0 +1,84 @@
|
||||
# Installation
|
||||
|
||||
## Install LeRobot
|
||||
|
||||
Download our source code:
|
||||
```bash
|
||||
git clone https://github.com/huggingface/lerobot.git
|
||||
cd lerobot
|
||||
```
|
||||
|
||||
Create a virtual environment with Python 3.10, using [`Miniconda`](https://docs.anaconda.com/miniconda/install/#quick-command-line-install)
|
||||
```bash
|
||||
conda create -y -n lerobot python=3.10
|
||||
```
|
||||
|
||||
Now restart the shell by running:
|
||||
<hfoptions id="shell_restart">
|
||||
<hfoption id="Windows">
|
||||
|
||||
```bash
|
||||
source ~/.bashrc
|
||||
```
|
||||
</hfoption>
|
||||
<hfoption id="Mac">
|
||||
|
||||
```bash
|
||||
source ~/.bash_profile
|
||||
```
|
||||
</hfoption>
|
||||
<hfoption id="zshell">
|
||||
|
||||
```bash
|
||||
source ~/.zshrc
|
||||
```
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
Then activate your conda environment (you have to do this each time you open a shell to use lerobot):
|
||||
```bash
|
||||
conda activate lerobot
|
||||
```
|
||||
|
||||
When using `miniconda`, install `ffmpeg` in your environment:
|
||||
```bash
|
||||
conda install ffmpeg -c conda-forge
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can:
|
||||
> - _[On any platform]_ Explicitly install `ffmpeg 7.X` using:
|
||||
> ```bash
|
||||
> conda install ffmpeg=7.1.1 -c conda-forge
|
||||
> ```
|
||||
> - _[On Linux only]_ Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`.
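For example, a quick way to check for the encoder mentioned above; an empty result means `libsvtav1` is not available in your `ffmpeg` build:

```bash
# The banner goes to stderr, so suppress it and look for the libsvtav1 encoder.
ffmpeg -encoders 2>/dev/null | grep -i svtav1
```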
|
||||
|
||||
Install 🤗 LeRobot:
|
||||
```bash
|
||||
cd lerobot && pip install -e ".[feetech]"
|
||||
```
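To confirm the editable install worked, here is a minimal sanity check; the `__version__` attribute is an assumption, so a fallback string is printed if it does not exist:

```bash
# Import the package and print its version (or a fallback message).
python -c "import lerobot; print(getattr(lerobot, '__version__', 'lerobot imported'))"
```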
|
||||
|
||||
## Troubleshooting
|
||||
If you encounter build errors, you may need to install additional dependencies: `cmake`, `build-essential`, and the `ffmpeg` libraries.
|
||||
To install these on Linux, run:
|
||||
```bash
|
||||
sudo apt-get install cmake build-essential python3-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev
|
||||
```
|
||||
For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg)
|
||||
|
||||
## Sim
|
||||
For simulations, 🤗 LeRobot comes with gymnasium environments that can be installed as extras:
|
||||
- [aloha](https://github.com/huggingface/gym-aloha)
|
||||
- [xarm](https://github.com/huggingface/gym-xarm)
|
||||
- [pusht](https://github.com/huggingface/gym-pusht)
|
||||
|
||||
For instance, to install 🤗 LeRobot with aloha and pusht, use:
|
||||
```bash
|
||||
pip install -e ".[aloha, pusht]"
|
||||
```
|
||||
|
||||
## W&B
|
||||
To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with
|
||||
```bash
|
||||
wandb login
|
||||
```
|
||||
@@ -128,7 +128,7 @@ sudo chmod 666 /dev/ttyACM1
|
||||
#### d. Update config file
|
||||
|
||||
IMPORTANTLY: Now that you have your ports, update the **port** default values of [`SO100RobotConfig`](../lerobot/common/robot_devices/robots/configs.py). You will find something like:
|
||||
```python
|
||||
```diff
|
||||
@RobotConfig.register_subclass("so100")
|
||||
@dataclass
|
||||
class So100RobotConfig(ManipulatorRobotConfig):
|
||||
@@ -141,7 +141,8 @@ class So100RobotConfig(ManipulatorRobotConfig):
|
||||
leader_arms: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"main": FeetechMotorsBusConfig(
|
||||
port="/dev/tty.usbmodem58760431091", <-- UPDATE HERE
|
||||
- port="/dev/tty.usbmodem58760431091",
|
||||
+ port="{ADD YOUR LEADER PORT}",
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"shoulder_pan": [1, "sts3215"],
|
||||
@@ -158,7 +159,8 @@ class So100RobotConfig(ManipulatorRobotConfig):
|
||||
follower_arms: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"main": FeetechMotorsBusConfig(
|
||||
port="/dev/tty.usbmodem585A0076891", <-- UPDATE HERE
|
||||
- port="/dev/tty.usbmodem585A0076891",
|
||||
+ port="{ADD YOUR FOLLOWER PORT}",
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"shoulder_pan": [1, "sts3215"],
|
||||
@@ -445,18 +447,16 @@ For the leader configuration, perform **Steps 1–23**. Make sure that you remov
|
||||
|
||||
## E. Calibrate
|
||||
|
||||
Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one SO-100 robot to work on another.
|
||||
Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position.
|
||||
The calibration process is very important because it allows a neural network trained on one SO-100 robot to work on another.
|
||||
|
||||
#### a. Manual calibration of follower arm
|
||||
#### Manual calibration of follower arm
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Contrarily to step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724) which illustrates the auto calibration, we will actually do manual calibration of follower for now.
|
||||
You will need to move the follower arm to these positions sequentially, note that the rotated position is on the right side of the robot and you have to open the gripper fully.
|
||||
|
||||
You will need to move the follower arm to these positions sequentially:
|
||||
|
||||
| 1. Zero position | 2. Rotated position | 3. Rest position |
|
||||
| ------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| <img src="../media/so100/follower_zero.webp?raw=true" alt="SO-100 follower arm zero position" title="SO-100 follower arm zero position" style="width:100%;"> | <img src="../media/so100/follower_rotated.webp?raw=true" alt="SO-100 follower arm rotated position" title="SO-100 follower arm rotated position" style="width:100%;"> | <img src="../media/so100/follower_rest.webp?raw=true" alt="SO-100 follower arm rest position" title="SO-100 follower arm rest position" style="width:100%;"> |
|
||||
| 1. Middle position | 2. Zero position | 3. Rotated position | 4. Rest position |
|
||||
| ------------ |------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| <img src="../media/so101/follower_middle.webp?raw=true" alt="SO-101 leader arm middle position" title="SO-101 leader arm middle position" style="width:100%;"> | <img src="../media/so101/follower_zero.webp?raw=true" alt="SO-101 leader arm zero position" title="SO-101 leader arm zero position" style="width:100%;"> | <img src="../media/so101/follower_rotated.webp?raw=true" alt="SO-101 leader arm rotated position" title="SO-101 leader arm rotated position" style="width:100%;"> | <img src="../media/so101/follower_rest.webp?raw=true" alt="SO-101 leader arm rest position" title="SO-101 leader arm rest position" style="width:100%;"> |
|
||||
|
||||
Make sure both arms are connected and run this script to launch manual calibration:
|
||||
```bash
|
||||
@@ -467,12 +467,12 @@ python lerobot/scripts/control_robot.py \
|
||||
--control.arms='["main_follower"]'
|
||||
```
|
||||
|
||||
#### b. Manual calibration of leader arm
|
||||
Follow step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
|
||||
#### Manual calibration of leader arm
|
||||
You will also need to move the leader arm to these positions sequentially:
|
||||
|
||||
| 1. Zero position | 2. Rotated position | 3. Rest position |
|
||||
| ------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| <img src="../media/so100/leader_zero.webp?raw=true" alt="SO-100 leader arm zero position" title="SO-100 leader arm zero position" style="width:100%;"> | <img src="../media/so100/leader_rotated.webp?raw=true" alt="SO-100 leader arm rotated position" title="SO-100 leader arm rotated position" style="width:100%;"> | <img src="../media/so100/leader_rest.webp?raw=true" alt="SO-100 leader arm rest position" title="SO-100 leader arm rest position" style="width:100%;"> |
|
||||
| 1. Middle position | 2. Zero position | 3. Rotated position | 4. Rest position |
|
||||
| ------------ |------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| <img src="../media/so101/leader_middle.webp?raw=true" alt="SO-100 leader arm middle position" title="SO-100 leader arm middle position" style="width:100%;"> | <img src="../media/so101/leader_zero.webp?raw=true" alt="SO-100 leader arm zero position" title="SO-100 leader arm zero position" style="width:100%;"> | <img src="../media/so101/leader_rotated.webp?raw=true" alt="SO-100 leader arm rotated position" title="SO-100 leader arm rotated position" style="width:100%;"> | <img src="../media/so101/leader_rest.webp?raw=true" alt="SO-100 leader arm rest position" title="SO-100 leader arm rest position" style="width:100%;"> |
|
||||
|
||||
Run this script to launch manual calibration:
|
||||
```bash
|
||||
@@ -580,7 +580,7 @@ python lerobot/scripts/train.py \
|
||||
|
||||
Let's explain it:
|
||||
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so100_test`.
|
||||
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor sates, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
|
||||
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
|
||||
4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
|
||||
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
|
||||
|
||||
|
||||
@@ -134,7 +134,7 @@ First we will assemble the two SO100 arms. One to attach to the mobile base and
|
||||
|
||||
## SO100 Arms
|
||||
### Configure motors
|
||||
The instructions for configuring the motors can be found [Here](https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md#c-configure-the-motors) in step C of the SO100 tutorial. Besides the ID's for the arm motors we also need to set the motor ID's for the mobile base. These needs to be in a specific order to work. Below an image of the motor ID's and motor mounting positions for the mobile base. Note that we only use one Motor Control board on LeKiwi. This means the motor ID's for the wheels are 7, 8 and 9.
|
||||
The instructions for configuring the motors can be found [Here](https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md#c-configure-the-motors) in step C of the SO100 tutorial. Besides the ID's for the arm motors we also need to set the motor ID's for the mobile base. These need to be in a specific order to work. Below an image of the motor ID's and motor mounting positions for the mobile base. Note that we only use one Motor Control board on LeKiwi. This means the motor ID's for the wheels are 7, 8 and 9.
|
||||
|
||||
<img src="../media/lekiwi/motor_ids.webp?raw=true" alt="Motor ID's for mobile robot" title="Motor ID's for mobile robot" width="60%">
|
||||
|
||||
@@ -567,7 +567,7 @@ python lerobot/scripts/train.py \
|
||||
|
||||
Let's explain it:
|
||||
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/lekiwi_test`.
|
||||
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor sates, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
|
||||
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
|
||||
4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
|
||||
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
|
||||
|
||||
|
||||
@@ -44,7 +44,7 @@ cd ~/lerobot && pip install -e ".[feetech]"
|
||||
|
||||
## Configure the motors
|
||||
|
||||
Follow steps 1 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the use of our scripts below.
|
||||
Follow step 1 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the use of our scripts below.
|
||||
|
||||
**Find USB ports associated to your arms**
|
||||
To find the correct ports for each arm, run the utility script twice:
|
||||
@@ -141,7 +141,7 @@ python lerobot/scripts/configure_motor.py \
|
||||
--ID 1
|
||||
```
|
||||
|
||||
Note: These motors are currently limitated. They can take values between 0 and 4096 only, which corresponds to a full turn. They can't turn more than that. 2048 is at the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) and reach the maximum range, or take +2048 steps (180 degrees clockwise) and reach the maximum range. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift up to ± 2048 steps (± 180 degrees).
|
||||
Note: These motors are currently limited. They can take values between 0 and 4096 only, which corresponds to a full turn. They can't turn more than that. 2048 is at the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) and reach the maximum range, or take +2048 steps (180 degrees clockwise) and reach the maximum range. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift up to ± 2048 steps (± 180 degrees).
|
||||
|
||||
Then unplug your motor and plug the second motor and set its ID to 2.
|
||||
```bash
|
||||
@@ -164,7 +164,7 @@ Try to avoid rotating the motor while doing so to keep position 2048 set during
|
||||
|
||||
## Assemble the arms
|
||||
|
||||
Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). The first arm should take a bit more than 1 hour to assemble, but once you get use to it, you can do it under 1 hour for the second arm.
|
||||
Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can do it under 1 hour for the second arm.
|
||||
|
||||
## Calibrate
|
||||
|
||||
@@ -301,7 +301,7 @@ python lerobot/scripts/train.py \
|
||||
|
||||
Let's explain it:
|
||||
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/moss_test`.
|
||||
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor sates, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
|
||||
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
|
||||
4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
|
||||
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
|
||||
|
||||
|
||||
711
examples/12_use_so101.md
Normal file
@@ -0,0 +1,711 @@
|
||||
# Assemble and use SO-101
|
||||
|
||||
In the steps below we explain how to assemble and use our flagship robot, the SO-101 with LeRobot 🤗.
|
||||
|
||||
## Source the parts
|
||||
|
||||
Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with a link to source the parts, as well as the instructions to 3D print the parts,
|
||||
and advice if it's your first time printing or if you don't own a 3D printer.
|
||||
|
||||
Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.
|
||||
|
||||
## Install LeRobot
|
||||
|
||||
> [!TIP]
|
||||
> We use the Command Prompt (cmd) quite a lot. If you are not comfortable using the cmd or want to brush up using the command line you can have a look here: [Command line crash course](https://developer.mozilla.org/en-US/docs/Learn_web_development/Getting_started/Environment_setup/Command_line)
|
||||
|
||||
Download our source code:
|
||||
```bash
|
||||
git clone https://github.com/huggingface/lerobot.git
|
||||
cd lerobot
|
||||
```
|
||||
|
||||
Create a virtual environment with Python 3.10 and activate it, e.g. with [`miniconda`](https://docs.anaconda.com/miniconda/install/#quick-command-line-install):
|
||||
```bash
|
||||
conda create -y -n lerobot python=3.10
|
||||
```
|
||||
Now restart the shell by running:
|
||||
|
||||
##### Windows:
|
||||
```bash
|
||||
source ~/.bashrc
|
||||
```
|
||||
|
||||
##### Mac:
|
||||
```bash
|
||||
source ~/.bash_profile
|
||||
```
|
||||
|
||||
##### zshell:
|
||||
```bash
|
||||
source ~/.zshrc
|
||||
```
|
||||
|
||||
Then activate your conda environment (you have to do this each time you open a shell to use lerobot):
|
||||
```bash
|
||||
conda activate lerobot
|
||||
```
|
||||
|
||||
When using `miniconda`, install `ffmpeg` in your environment:
|
||||
```bash
|
||||
conda install ffmpeg -c conda-forge
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can:
|
||||
> - _[On any platform]_ Explicitly install `ffmpeg 7.X` using:
|
||||
> ```bash
|
||||
> conda install ffmpeg=7.1.1 -c conda-forge
|
||||
> ```
|
||||
> - _[On Linux only]_ Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`.
|
||||
|
||||
Install 🤗 LeRobot:
|
||||
```bash
|
||||
cd lerobot && pip install -e ".[feetech]"
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> If you encounter build errors, you may need to install additional dependencies (`cmake`, `build-essential`, and `ffmpeg libs`). On Linux, run: `sudo apt-get install cmake build-essential python3-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev pkg-config`. For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg)
|
||||
|
||||
|
||||
## Configure motors
|
||||
|
||||
To configure the motors, designate one bus servo adapter and 6 motors for your leader arm, and similarly the other bus servo adapter and 6 motors for the follower arm. It's convenient to label them and write on each motor whether it's for the follower (`F`) or the leader (`L`), along with its ID from 1 to 6.
|
||||
|
||||
You should now plug the 5V or 12V power supply into the motor bus: 5V for the STS3215 7.4V motors and 12V for the STS3215 12V motors. Note that the leader arm always uses the 7.4V motors, so if you have both 12V and 7.4V motors, make sure you plug in the right power supply, otherwise you might burn your motors! Now, connect the motor bus to your computer via USB. Note that USB doesn't provide any power; both the power supply and the USB cable have to be plugged in.
|
||||
|
||||
### Find the USB ports associated to each arm
|
||||
|
||||
To find the port for each bus servo adapter, run this script:
|
||||
```bash
|
||||
python lerobot/scripts/find_motors_bus_port.py
|
||||
```
|
||||
#### Example outputs of script
|
||||
|
||||
##### Mac:
|
||||
Example output leader arm's port: `/dev/tty.usbmodem575E0031751`
|
||||
|
||||
```bash
|
||||
Finding all available ports for the MotorBus.
|
||||
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
|
||||
Remove the usb cable from your MotorsBus and press Enter when done.
|
||||
|
||||
[...Disconnect leader arm and press Enter...]
|
||||
|
||||
The port of this MotorsBus is /dev/tty.usbmodem575E0031751
|
||||
Reconnect the usb cable.
|
||||
```
|
||||
|
||||
Example output follower arm port: `/dev/tty.usbmodem575E0032081`
|
||||
|
||||
```
|
||||
Finding all available ports for the MotorBus.
|
||||
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
|
||||
Remove the usb cable from your MotorsBus and press Enter when done.
|
||||
|
||||
[...Disconnect follower arm and press Enter...]
|
||||
|
||||
The port of this MotorsBus is /dev/tty.usbmodem575E0032081
|
||||
Reconnect the usb cable.
|
||||
```
|
||||
|
||||
##### Linux:
|
||||
On Linux, you might need to give access to the USB ports by running:
|
||||
```bash
|
||||
sudo chmod 666 /dev/ttyACM0
|
||||
sudo chmod 666 /dev/ttyACM1
|
||||
```
|
||||
|
||||
Example output leader arm port: `/dev/ttyACM0`
|
||||
|
||||
```bash
|
||||
Finding all available ports for the MotorBus.
|
||||
['/dev/ttyACM0', '/dev/ttyACM1']
|
||||
Remove the usb cable from your MotorsBus and press Enter when done.
|
||||
|
||||
[...Disconnect leader arm and press Enter...]
|
||||
|
||||
The port of this MotorsBus is /dev/ttyACM0
|
||||
Reconnect the usb cable.
|
||||
```
|
||||
|
||||
Example output follower arm port: `/dev/ttyACM1`
|
||||
|
||||
```
|
||||
Finding all available ports for the MotorBus.
|
||||
['/dev/ttyACM0', '/dev/ttyACM1']
|
||||
Remove the usb cable from your MotorsBus and press Enter when done.
|
||||
|
||||
[...Disconnect follower arm and press Enter...]
|
||||
|
||||
The port of this MotorsBus is /dev/ttyACM1
|
||||
Reconnect the usb cable.
|
||||
```
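Note that `chmod 666` only lasts until the device is unplugged or the machine reboots. A more persistent sketch, assuming your distribution uses the `dialout` group for serial devices:

```bash
# Add your user to the serial-device group; log out and back in for it to take effect.
sudo usermod -aG dialout $USER
```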
|
||||
|
||||
#### Update config file
|
||||
|
||||
Now that you have your ports, update the **port** default values of [`SO101RobotConfig`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/robot_devices/robots/configs.py).
|
||||
You will find a class called `so101` where you can update the `port` values with your actual motor ports:
|
||||
```diff
|
||||
@RobotConfig.register_subclass("so101")
|
||||
@dataclass
|
||||
class So101RobotConfig(ManipulatorRobotConfig):
|
||||
calibration_dir: str = ".cache/calibration/so101"
|
||||
# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
|
||||
# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
|
||||
# the number of motors in your follower arms.
|
||||
max_relative_target: int | None = None
|
||||
|
||||
leader_arms: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"main": FeetechMotorsBusConfig(
|
||||
- port="/dev/tty.usbmodem58760431091",
|
||||
+ port="{ADD YOUR LEADER PORT}",
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"shoulder_pan": [1, "sts3215"],
|
||||
"shoulder_lift": [2, "sts3215"],
|
||||
"elbow_flex": [3, "sts3215"],
|
||||
"wrist_flex": [4, "sts3215"],
|
||||
"wrist_roll": [5, "sts3215"],
|
||||
"gripper": [6, "sts3215"],
|
||||
},
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
follower_arms: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"main": FeetechMotorsBusConfig(
|
||||
- port="/dev/tty.usbmodem585A0076891",
|
||||
+ port="{ADD YOUR FOLLOWER PORT}",
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"shoulder_pan": [1, "sts3215"],
|
||||
"shoulder_lift": [2, "sts3215"],
|
||||
"elbow_flex": [3, "sts3215"],
|
||||
"wrist_flex": [4, "sts3215"],
|
||||
"wrist_roll": [5, "sts3215"],
|
||||
"gripper": [6, "sts3215"],
|
||||
},
|
||||
),
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
Here is a video of the process:
|
||||
|
||||
<video controls width="640" src="https://github.com/user-attachments/assets/fc45d756-31bb-4a61-b973-a87d633d08a7" type="video/mp4"></video>
|
||||
|
||||
### Set motor IDs
|
||||
|
||||
Now we need to set the motor ID for each motor. Plug your motor into only one of the two ports of the motor bus and run this script to set its ID to 1. Replace the text after `--port` with the port of your control board.
|
||||
```bash
|
||||
python lerobot/scripts/configure_motor.py \
|
||||
--port /dev/tty.usbmodem58760432961 \
|
||||
--brand feetech \
|
||||
--model sts3215 \
|
||||
--baudrate 1000000 \
|
||||
--ID 1
|
||||
```
|
||||
|
||||
Then unplug your motor and plug the second motor and set its ID to 2.
|
||||
```bash
|
||||
python lerobot/scripts/configure_motor.py \
|
||||
--port /dev/tty.usbmodem58760432961 \
|
||||
--brand feetech \
|
||||
--model sts3215 \
|
||||
--baudrate 1000000 \
|
||||
--ID 2
|
||||
```
|
||||
|
||||
Repeat this process for all your motors until ID 6 (a loop sketch is shown below). Do the same for the 6 motors of the leader arm, but make sure to change the power supply if you use motors with a different voltage.
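As an optional convenience, here is a sketch that configures the remaining IDs one motor at a time instead of re-typing the command; the port is the example one from above, so replace it with the port you found for your control board:

```bash
# Configure motor IDs 3 to 6, plugging in one motor at a time when prompted.
for MOTOR_ID in 3 4 5 6; do
  read -p "Plug in ONLY the motor that should get ID ${MOTOR_ID}, then press Enter..."
  python lerobot/scripts/configure_motor.py \
    --port /dev/tty.usbmodem58760432961 \
    --brand feetech \
    --model sts3215 \
    --baudrate 1000000 \
    --ID ${MOTOR_ID}
done
```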
|
||||
|
||||
Here is a video of the process:
|
||||
|
||||
<video controls width="640" src="https://github.com/user-attachments/assets/b31c115f-e706-4dcd-b7f1-4535da62416d" type="video/mp4"></video>
|
||||
|
||||
## Step-by-Step Assembly Instructions
|
||||
|
||||
The follower arm uses 6x STS3215 motors with 1/345 gearing. The leader, however, uses three differently geared motor variants so that it can both sustain its own weight and be moved without requiring much force. The gear ratio needed for each joint is shown in the table below.
|
||||
|
||||
| Leader-Arm Axis | Motor | Gear Ratio |
|
||||
|-----------------|:-------:|:----------:|
|
||||
| Base / Shoulder Yaw | 1 | 1 / 191 |
|
||||
| Shoulder Pitch | 2 | 1 / 345 |
|
||||
| Elbow | 3 | 1 / 191 |
|
||||
| Wrist Roll | 4 | 1 / 147 |
|
||||
| Wrist Pitch | 5 | 1 / 147 |
|
||||
| Gripper | 6 | 1 / 147 |
|
||||
|
||||
|
||||
### Clean Parts
|
||||
Remove all support material from the 3D-printed parts.
|
||||
|
||||
### Joint 1
|
||||
|
||||
- Place the first motor into the base.
|
||||
- Fasten the motor with 4 M2x6mm screws (the smallest screws), two from the top and two from the bottom.
|
||||
- Slide over the first motor holder and fasten it using two M2x6mm screws (one on each side).
|
||||
- Install both motor horns, securing the top horn with a M3x6mm screw.
|
||||
- Attach the shoulder part.
|
||||
- Tighten the shoulder part with 4 M3x6mm screws on top and 4 M3x6mm screws on the bottom
|
||||
- Add the shoulder motor holder.
|
||||
|
||||
<video controls width="640" src="https://github.com/user-attachments/assets/b0ee9dee-a2d0-445b-8489-02ebecb3d639" type="video/mp4"></video>
|
||||
|
||||
### Joint 2
|
||||
|
||||
- Slide the second motor in from the top.
|
||||
- Fasten the second motor with 4 M2x6mm screws.
|
||||
- Attach both motor horns to motor 2, again use the M3x6mm horn screw.
|
||||
- Attach the upper arm with 4 M3x6mm screws on each side.
|
||||
|
||||
<video controls width="640" src="https://github.com/user-attachments/assets/32453dc2-5006-4140-9f56-f0d78eae5155" type="video/mp4"></video>
|
||||
|
||||
### Joint 3
|
||||
|
||||
- Insert motor 3 and fasten using 4 M2x6mm screws
|
||||
- Attach both motor horns to motor 3 and secure one again with a M3x6mm horn screw.
|
||||
- Connect the forearm to motor 3 using 4 M3x6mm screws on each side.
|
||||
|
||||
<video controls width="640" src="https://github.com/user-attachments/assets/7384b9a7-a946-440c-b292-91391bcc4d6b" type="video/mp4"></video>
|
||||
|
||||
### Joint 4
|
||||
|
||||
- Slide over motor holder 4.
|
||||
- Slide in motor 4.
|
||||
- Fasten motor 4 with 4 M2x6mm screws and attach its motor horns, use a M3x6mm horn screw.
|
||||
|
||||
<video controls width="640" src="https://github.com/user-attachments/assets/dca78ad0-7c36-4bdf-8162-c9ac42a1506f" type="video/mp4"></video>
|
||||
|
||||
### Joint 5
|
||||
|
||||
- Insert motor 5 into the wrist holder and secure it with 2 M2x6mm front screws.
|
||||
- Install only one motor horn on the wrist motor and secure it with a M3x6mm horn screw.
|
||||
- Secure the wrist to motor 4 using 4 M3x6mm screws on both sides.
|
||||
|
||||
<video controls width="640" src="https://github.com/user-attachments/assets/55f5d245-976d-49ff-8b4a-59843c441b12" type="video/mp4"></video>
|
||||
|
||||
### Gripper / Handle
|
||||
|
||||
#### Follower:
|
||||
|
||||
- Attach the gripper to motor 5, attach it to the motor horn on the wrist using 4 M3x6mm screws.
|
||||
- Insert the gripper motor and secure it with 2 M2x6mm screws on each side.
|
||||
- Attach the motor horns and again use a M3x6mm horn screw.
|
||||
- Install the gripper claw and secure it with 4 M3x6mm screws on both sides.
|
||||
|
||||
<video controls width="640" src="https://github.com/user-attachments/assets/6f766aa9-cfae-4388-89e7-0247f198c086" type="video/mp4"></video>
|
||||
|
||||
#### Leader:
|
||||
|
||||
- Mount the leader holder onto the wrist and secure it with 4 M3x6mm screws.
|
||||
- Attach the handle to motor 5 using 1 M2x6mm screw.
|
||||
- Insert the gripper motor, secure it with 2 M2x6mm screws on each side, attach a motor horn using a M3x6mm horn screw.
|
||||
- Attach the follower trigger with 4 M3x6mm screws.
|
||||
|
||||
<video controls width="640" src="https://github.com/user-attachments/assets/1308c93d-2ef1-4560-8e93-a3812568a202" type="video/mp4"></video>
|
||||
|
||||
##### Wiring
|
||||
|
||||
- Attach the motor controller on the back.
|
||||
- Then insert all wires, use the wire guides everywhere to make sure the wires don't unplug themselves and stay in place.
|
||||
|
||||
<video controls width="640" src="https://github.com/user-attachments/assets/4c2cacfd-9276-4ee4-8bf2-ba2492667b78" type="video/mp4"></video>
|
||||
|
||||
## Calibrate
|
||||
|
||||
Next, you'll need to calibrate your SO-101 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position.
|
||||
The calibration process is very important because it allows a neural network trained on one SO-101 robot to work on another.
|
||||
|
||||
#### Manual calibration of follower arm
|
||||
|
||||
You will need to move the follower arm to these positions sequentially, note that the rotated position is on the right side of the robot and you have to open the gripper fully.
|
||||
|
||||
| 1. Middle position | 2. Zero position | 3. Rotated position | 4. Rest position |
|
||||
| ------------ |------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| <img src="../media/so101/follower_middle.webp?raw=true" alt="SO-101 leader arm middle position" title="SO-101 leader arm middle position" style="width:100%;"> | <img src="../media/so101/follower_zero.webp?raw=true" alt="SO-101 leader arm zero position" title="SO-101 leader arm zero position" style="width:100%;"> | <img src="../media/so101/follower_rotated.webp?raw=true" alt="SO-101 leader arm rotated position" title="SO-101 leader arm rotated position" style="width:100%;"> | <img src="../media/so101/follower_rest.webp?raw=true" alt="SO-101 leader arm rest position" title="SO-101 leader arm rest position" style="width:100%;"> |
|
||||
|
||||
Make sure both arms are connected and run this script to launch manual calibration:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so101 \
|
||||
--robot.cameras='{}' \
|
||||
--control.type=calibrate \
|
||||
--control.arms='["main_follower"]'
|
||||
```
|
||||
|
||||
#### Manual calibration of leader arm
|
||||
You will also need to move the leader arm to these positions sequentially:
|
||||
|
||||
| 1. Middle position | 2. Zero position | 3. Rotated position | 4. Rest position |
|
||||
| ------------ |------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| <img src="../media/so101/leader_middle.webp?raw=true" alt="SO-101 leader arm middle position" title="SO-101 leader arm middle position" style="width:100%;"> | <img src="../media/so101/leader_zero.webp?raw=true" alt="SO-101 leader arm zero position" title="SO-101 leader arm zero position" style="width:100%;"> | <img src="../media/so101/leader_rotated.webp?raw=true" alt="SO-101 leader arm rotated position" title="SO-101 leader arm rotated position" style="width:100%;"> | <img src="../media/so101/leader_rest.webp?raw=true" alt="SO-101 leader arm rest position" title="SO-101 leader arm rest position" style="width:100%;"> |
|
||||
|
||||
Run this script to launch manual calibration:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so101 \
|
||||
--robot.cameras='{}' \
|
||||
--control.type=calibrate \
|
||||
--control.arms='["main_leader"]'
|
||||
```
|
||||
## Control your robot
|
||||
|
||||
Congrats 🎉, your robot is all set to learn a task on its own. Next we will explain to you how to train a neural network to autonomously control a real robot.
|
||||
|
||||
**You'll learn:**
|
||||
1. How to record and visualize your dataset.
|
||||
2. How to train a policy using your data and prepare it for evaluation.
|
||||
3. How to evaluate your policy and visualize the results.
|
||||
|
||||
By following these steps, you'll be able to replicate tasks like picking up a Lego block and placing it in a bin with a high success rate, as demonstrated in [this video](https://x.com/RemiCadene/status/1814680760592572934).
|
||||
|
||||
This tutorial is specifically made for the affordable [SO-101](https://github.com/TheRobotStudio/SO-ARM100) robot, but it contains additional information so it can easily be adapted to various types of robots, like the [Aloha bimanual robot](https://aloha-2.github.io), by changing some configurations. The SO-101 consists of a leader arm and a follower arm, each with 6 motors. It can work with one or several cameras to record the scene, which serve as visual sensors for the robot.
|
||||
|
||||
During the data collection phase, you will control the follower arm by moving the leader arm. This process is known as "teleoperation." This technique is used to collect robot trajectories. Afterward, you'll train a neural network to imitate these trajectories and deploy the network to enable your robot to operate autonomously.
|
||||
|
||||
If you encounter any issues at any step of the tutorial, feel free to seek help on [Discord](https://discord.com/invite/s3KuuzsPFb) or don't hesitate to iterate with us on the tutorial by creating issues or pull requests.
|
||||
|
||||
## Teleoperate
|
||||
|
||||
Run this simple script to teleoperate your robot (it will not connect to or display the cameras):
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so101 \
|
||||
--robot.cameras='{}' \
|
||||
--control.type=teleoperate
|
||||
```
|
||||
|
||||
The teleoperate command will automatically:
|
||||
1. Identify any missing calibrations and initiate the calibration procedure.
|
||||
2. Connect the robot and start teleoperation.
|
||||
|
||||
## Setup Cameras
|
||||
|
||||
To connect a camera you have three options:
|
||||
1. OpenCVCamera, which allows us to use most cameras: USB, RealSense, laptop webcam
|
||||
2. iPhone camera with MacOS
|
||||
3. Phone camera on Linux
|
||||
|
||||
### Use OpenCVCamera
|
||||
|
||||
The [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py) class allows you to efficiently record frames from most cameras using the [`opencv2`](https://docs.opencv.org) library. For more details on compatibility, see [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html).
|
||||
|
||||
To instantiate an [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py), you need a camera index (e.g. `OpenCVCamera(camera_index=0)`). When you only have one camera, like a laptop webcam, the camera index is usually `0`, but it might differ, and it can change if you reboot your computer or re-plug your camera; this behavior depends on your operating system.
|
||||
|
||||
To find the camera indices, run the following utility script, which will save a few frames from each detected camera:
|
||||
```bash
|
||||
python lerobot/common/robot_devices/cameras/opencv.py \
|
||||
--images-dir outputs/images_from_opencv_cameras
|
||||
```
|
||||
|
||||
The output will look something like this if you have two cameras connected:
|
||||
```
|
||||
Mac or Windows detected. Finding available camera indices through scanning all indices from 0 to 60
|
||||
[...]
|
||||
Camera found at index 0
|
||||
Camera found at index 1
|
||||
[...]
|
||||
Connecting cameras
|
||||
OpenCVCamera(0, fps=30.0, width=1920.0, height=1080.0, color_mode=rgb)
|
||||
OpenCVCamera(1, fps=24.0, width=1920.0, height=1080.0, color_mode=rgb)
|
||||
Saving images to outputs/images_from_opencv_cameras
|
||||
Frame: 0000 Latency (ms): 39.52
|
||||
[...]
|
||||
Frame: 0046 Latency (ms): 40.07
|
||||
Images have been saved to outputs/images_from_opencv_cameras
|
||||
```
|
||||
|
||||
Check the saved images in `outputs/images_from_opencv_cameras` to identify which camera index corresponds to which physical camera (e.g. `0` for `camera_00` or `1` for `camera_01`):
|
||||
```
|
||||
camera_00_frame_000000.png
|
||||
[...]
|
||||
camera_00_frame_000047.png
|
||||
camera_01_frame_000000.png
|
||||
[...]
|
||||
camera_01_frame_000047.png
|
||||
```
|
||||
|
||||
Note: Some cameras may take a few seconds to warm up, and the first frame might be black or green.
|
||||
|
||||
Now that you have the camera indices, you should update them in the config. You can also change the fps, width, or height of the camera.
|
||||
|
||||
The camera config is defined per robot; it can be found in [`RobotConfig`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/robot_devices/robots/configs.py) and looks like this:
|
||||
```python
|
||||
cameras: dict[str, CameraConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"wrist": OpenCVCameraConfig(
|
||||
camera_index=0, <-- UPDATE HERE
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
),
|
||||
"base": OpenCVCameraConfig(
|
||||
camera_index=1, <-- UPDATE HERE
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
),
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
### Use your phone
|
||||
#### Mac:
|
||||
|
||||
To use your iPhone as a camera on macOS, enable the Continuity Camera feature:
|
||||
- Ensure your Mac is running macOS 13 or later, and your iPhone is on iOS 16 or later.
|
||||
- Sign in both devices with the same Apple ID.
|
||||
- Connect your devices with a USB cable or turn on Wi-Fi and Bluetooth for a wireless connection.
|
||||
|
||||
For more details, visit [Apple support](https://support.apple.com/en-gb/guide/mac-help/mchl77879b8a/mac).
|
||||
|
||||
Your iPhone should be detected automatically when running the camera setup script in the next section.
|
||||
|
||||
#### Linux:
|
||||
|
||||
If you want to use your phone as a camera on Linux, follow these steps to set up a virtual camera:
|
||||
|
||||
1. *Install `v4l2loopback-dkms` and `v4l-utils`*. Those packages are required to create virtual camera devices (`v4l2loopback`) and verify their settings with the `v4l2-ctl` utility from `v4l-utils`. Install them using:
|
||||
```bash
|
||||
sudo apt install v4l2loopback-dkms v4l-utils
|
||||
```
|
||||
2. *Install [DroidCam](https://droidcam.app) on your phone*. This app is available for both iOS and Android.
|
||||
3. *Install [OBS Studio](https://obsproject.com)*. This software will help you manage the camera feed. Install it using [Flatpak](https://flatpak.org):
|
||||
```bash
|
||||
flatpak install flathub com.obsproject.Studio
|
||||
```
|
||||
4. *Install the DroidCam OBS plugin*. This plugin integrates DroidCam with OBS Studio. Install it with:
|
||||
```bash
|
||||
flatpak install flathub com.obsproject.Studio.Plugin.DroidCam
|
||||
```
|
||||
5. *Start OBS Studio*. Launch with:
|
||||
```bash
|
||||
flatpak run com.obsproject.Studio
|
||||
```
|
||||
6. *Add your phone as a source*. Follow the instructions [here](https://droidcam.app/obs/usage). Be sure to set the resolution to `640x480`.
|
||||
7. *Adjust resolution settings*. In OBS Studio, go to `File > Settings > Video`. Change the `Base(Canvas) Resolution` and the `Output(Scaled) Resolution` to `640x480` by manually typing it in.
|
||||
8. *Start virtual camera*. In OBS Studio, follow the instructions [here](https://obsproject.com/kb/virtual-camera-guide).
|
||||
9. *Verify the virtual camera setup*. Use `v4l2-ctl` to list the devices:
|
||||
```bash
|
||||
v4l2-ctl --list-devices
|
||||
```
|
||||
You should see an entry like:
|
||||
```
|
||||
VirtualCam (platform:v4l2loopback-000):
|
||||
/dev/video1
|
||||
```
|
||||
10. *Check the camera resolution*. Use `v4l2-ctl` to ensure that the virtual camera output resolution is `640x480`. Change `/dev/video1` to the port of your virtual camera from the output of `v4l2-ctl --list-devices`.
|
||||
```python
|
||||
v4l2-ctl -d /dev/video1 --get-fmt-video
|
||||
```
|
||||
You should see an entry like:
|
||||
```
|
||||
>>> Format Video Capture:
|
||||
>>> Width/Height : 640/480
|
||||
>>> Pixel Format : 'YUYV' (YUYV 4:2:2)
|
||||
```
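
As an extra check, you can confirm that OpenCV (which LeRobot uses for `OpenCVCameraConfig`) can read frames from the virtual camera at the expected resolution. This is a minimal sketch; adjust the device index (`1` for `/dev/video1`) to match the output of `v4l2-ctl --list-devices`:

```python
import cv2

# /dev/video1 usually corresponds to index 1; change it to match your virtual camera
cap = cv2.VideoCapture(1)
ok, frame = cap.read()
if ok:
    height, width = frame.shape[:2]
    print(f"Got a {width}x{height} frame")  # expect 640x480
else:
    print("Could not read a frame from the virtual camera")
cap.release()
```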

Troubleshooting: If the resolution is not correct, you will have to delete the virtual camera port and try again, as it cannot be changed once created.

If everything is set up correctly, you can proceed with the rest of the tutorial.

### Add wrist camera

If you have an additional camera, you can add a wrist camera to the SO101. There are already many premade wrist camera holders in the SO101 repo: [Wrist cameras](https://github.com/TheRobotStudio/SO-ARM100#wrist-cameras)

## Teleoperate with cameras

We can now teleoperate again while visualizing the cameras and joint positions with `rerun`.

```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so101 \
  --control.type=teleoperate \
  --control.display_data=true
```

## Record a dataset

Once you're familiar with teleoperation, you can record your first dataset with SO-101.

We use the Hugging Face hub features for uploading your dataset. If you haven't previously used the Hub, make sure you can log in via the CLI using a write-access token; this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens).

Add your token to the CLI by running this command:
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```

Then store your Hugging Face repository name in a variable:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```

Now you can record a dataset. To record 2 episodes and upload your dataset to the hub, execute this command:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so101 \
  --control.type=record \
  --control.fps=30 \
  --control.single_task="Grasp a lego block and put it in the bin." \
  --control.repo_id=${HF_USER}/so101_test \
  --control.tags='["so101","tutorial"]' \
  --control.warmup_time_s=5 \
  --control.episode_time_s=30 \
  --control.reset_time_s=30 \
  --control.num_episodes=2 \
  --control.display_data=true \
  --control.push_to_hub=true
```

You will see a lot of lines appearing like this one:
```
INFO 2024-08-10 15:02:58 ol_robot.py:219 dt:33.34 (30.0hz) dtRlead: 5.06 (197.5hz) dtWfoll: 0.25 (3963.7hz) dtRfoll: 6.22 (160.7hz) dtRlaptop: 32.57 (30.7hz) dtRphone: 33.84 (29.5hz)
```
It contains:
- `2024-08-10 15:02:58` which is the date and time of the call to the print function,
- `ol_robot.py:219` which is the end of the file name and the line number where the print function is called (`lerobot/scripts/control_robot.py` line `219`).
- `dt:33.34 (30.0hz)` which is the "delta time" or the number of milliseconds spent between the previous call to `robot.teleop_step(record_data=True)` and the current one, together with the corresponding frequency (33.34 ms equals 30.0 Hz); note that we use `--control.fps=30` so we expect 30.0 Hz; when a step takes more time, the line appears in yellow.
- `dtRlead: 5.06 (197.5hz)` which is the delta time of reading the present position of the leader arm.
- `dtWfoll: 0.25 (3963.7hz)` which is the delta time of writing the goal position on the follower arm; writing is asynchronous so it takes less time than reading.
- `dtRfoll: 6.22 (160.7hz)` which is the delta time of reading the present position on the follower arm.
- `dtRlaptop: 32.57 (30.7hz)` which is the delta time of capturing an image from the laptop camera in the thread running asynchronously.
- `dtRphone: 33.84 (29.5hz)` which is the delta time of capturing an image from the phone camera in the thread running asynchronously.

#### Dataset upload

Locally, your dataset is stored in `~/.cache/huggingface/lerobot/{repo-id}` (e.g. `~/.cache/huggingface/lerobot/cadene/so101_test`). At the end of data recording, your dataset will be uploaded to your Hugging Face page (e.g. https://huggingface.co/datasets/cadene/so101_test), whose URL you can obtain by running:
```bash
echo https://huggingface.co/datasets/${HF_USER}/so101_test
```
Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).

You can look for other LeRobot datasets on the hub by searching for `LeRobot` [tags](https://huggingface.co/datasets?other=LeRobot).
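
If you want to double-check the recorded data from Python, for example to count frames or inspect the keys of one sample, you can load it with `LeRobotDataset`. This is a minimal sketch; the import path follows the `lerobot.common.datasets` layout used in this repo, and the repo id is the one you just pushed:

```python
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

repo_id = "cadene/so101_test"  # replace with f"{HF_USER}/so101_test"
dataset = LeRobotDataset(repo_id)

print(f"Number of frames: {len(dataset)}")
frame = dataset[0]
print(f"Keys of one frame: {list(frame.keys())}")  # observations, action, timestamps, ...
```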

#### Record function

The `record` function provides a suite of tools for capturing and managing data during robot operation:
1. Set the flow of data recording using command line arguments:
   - `--control.warmup_time_s=10` defines the number of seconds before starting data collection. It allows the robot devices to warm up and synchronize (10 seconds by default).
   - `--control.episode_time_s=60` defines the number of seconds of data recording for each episode (60 seconds by default).
   - `--control.reset_time_s=60` defines the number of seconds for resetting the environment after each episode (60 seconds by default).
   - `--control.num_episodes=50` defines the number of episodes to record (50 by default).
2. Control the flow during data recording using keyboard keys:
   - Press the right arrow `->` at any time during episode recording to stop early and go to resetting. Do the same during resetting to stop early and go to the next episode recording.
   - Press the left arrow `<-` at any time during episode recording or resetting to stop early, cancel the current episode, and re-record it.
   - Press escape `ESC` at any time during episode recording to end the session early and go straight to video encoding and dataset uploading.
3. Checkpoints are automatically set during recording, so if any issue occurs, you can resume by re-running the same command with `--control.resume=true` (see the example below). You will need to manually delete the dataset directory if you want to start recording from scratch.
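
For example, resuming the 2-episode recording from above would look like this (the same command as before, with the resume flag added):

```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so101 \
  --control.type=record \
  --control.fps=30 \
  --control.single_task="Grasp a lego block and put it in the bin." \
  --control.repo_id=${HF_USER}/so101_test \
  --control.tags='["so101","tutorial"]' \
  --control.warmup_time_s=5 \
  --control.episode_time_s=30 \
  --control.reset_time_s=30 \
  --control.num_episodes=2 \
  --control.display_data=true \
  --control.push_to_hub=true \
  --control.resume=true
```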

#### Tips for gathering data

Once you're comfortable with data recording, you can create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings. Also make sure the object you are manipulating is visible in the cameras. A good rule of thumb is that you should be able to do the task yourself by only looking at the camera images.

In the following sections, you'll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altering camera positions.

Avoid adding too much variation too quickly, as it may hinder your results.

#### Troubleshooting:
- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable (see the quick check below). See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).
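
A quick way to check this from the terminal where you launch the recording (a minimal sketch; the actual display value, commonly `:0` or `:1`, depends on your session):

```bash
# Print the display currently visible to this shell
echo $DISPLAY

# If it is empty, export the display of your desktop session before recording
export DISPLAY=:0
```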

## Visualize a dataset

If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id given by:
```bash
echo ${HF_USER}/so101_test
```

If you didn't upload your dataset (i.e. you used `--control.push_to_hub=false`), you can visualize it locally with the following command (a browser window will open at `http://127.0.0.1:9090` with the visualization tool):
```bash
python lerobot/scripts/visualize_dataset_html.py \
  --repo-id ${HF_USER}/so101_test \
  --local-files-only 1
```

This will launch a local web server that looks like this:

<div style="text-align:center;">
    <img src="../media/tutorial/visualize_dataset_html.webp?raw=true" alt="Dataset visualization tool" title="Dataset visualization tool" width="100%"></img>
</div>

## Replay an episode

A useful feature is the `replay` function, which allows you to replay on your robot any episode that you've recorded, or episodes from any other dataset. This function helps you test the repeatability of your robot's actions and assess transferability across robots of the same model.

You can replay the first episode on your robot with:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so101 \
  --control.type=replay \
  --control.fps=30 \
  --control.repo_id=${HF_USER}/so101_test \
  --control.episode=0
```

Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on an Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com).

## Train a policy

To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
python lerobot/scripts/train.py \
  --dataset.repo_id=${HF_USER}/so101_test \
  --policy.type=act \
  --output_dir=outputs/train/act_so101_test \
  --job_name=act_so101_test \
  --policy.device=cuda \
  --wandb.enable=true
```

Let's explain the command:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so101_test`.
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
3. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon (see the example command after this list).
4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
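
For example, on a Mac with Apple silicon the same training run would look like this (only the device flag changes; training on `mps` or `cpu` will be noticeably slower than on a CUDA GPU):

```bash
python lerobot/scripts/train.py \
  --dataset.repo_id=${HF_USER}/so101_test \
  --policy.type=act \
  --output_dir=outputs/train/act_so101_test \
  --job_name=act_so101_test \
  --policy.device=mps \
  --wandb.enable=true
```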

Training should take several hours. You will find checkpoints in `outputs/train/act_so101_test/checkpoints`.

To resume training from a checkpoint, below is an example command to resume from the `last` checkpoint of the `act_so101_test` policy:
```bash
python lerobot/scripts/train.py \
  --config_path=outputs/train/act_so101_test/checkpoints/last/pretrained_model/train_config.json \
  --resume=true
```

#### Upload policy checkpoints

Once training is done, upload the latest checkpoint with:
```bash
huggingface-cli upload ${HF_USER}/act_so101_test \
  outputs/train/act_so101_test/checkpoints/last/pretrained_model
```

You can also upload intermediate checkpoints with:
```bash
CKPT=010000
huggingface-cli upload ${HF_USER}/act_so101_test${CKPT} \
  outputs/train/act_so101_test/checkpoints/${CKPT}/pretrained_model
```
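
If you want to load the trained policy in your own Python code, for instance to inspect its configuration or run inference outside the `record` script, a minimal sketch could look like this. `from_pretrained` follows the pattern used by LeRobot's pretrained policies, but double-check the import path for your version:

```python
from lerobot.common.policies.act.modeling_act import ACTPolicy

# Load either the local checkpoint or the repo you pushed to the hub,
# e.g. ACTPolicy.from_pretrained("<hf_user>/act_so101_test")
policy = ACTPolicy.from_pretrained("outputs/train/act_so101_test/checkpoints/last/pretrained_model")
print(policy.config)
```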

## Evaluate your policy

You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so101 \
  --control.type=record \
  --control.fps=30 \
  --control.single_task="Grasp a lego block and put it in the bin." \
  --control.repo_id=${HF_USER}/eval_act_so101_test \
  --control.tags='["tutorial"]' \
  --control.warmup_time_s=5 \
  --control.episode_time_s=30 \
  --control.reset_time_s=30 \
  --control.num_episodes=10 \
  --control.push_to_hub=true \
  --control.policy.path=outputs/train/act_so101_test/checkpoints/last/pretrained_model
```

As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/act_so101_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so101_test`).
2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so101_test`).
@@ -13,7 +13,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
This scripts demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local
|
||||
This script demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local
|
||||
training outputs directory. In the latter case, you might want to run examples/3_train_policy.py first.
|
||||
|
||||
It requires the installation of the 'gym_pusht' simulation environment. Install it by running:
|
||||
@@ -119,7 +119,7 @@ while not done:
|
||||
rewards.append(reward)
|
||||
frames.append(env.render())
|
||||
|
||||
# The rollout is considered done when the success state is reach (i.e. terminated is True),
|
||||
# The rollout is considered done when the success state is reached (i.e. terminated is True),
|
||||
# or the maximum number of iterations is reached (i.e. truncated is True)
|
||||
done = terminated | truncated | done
|
||||
step += 1
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""This scripts demonstrates how to train Diffusion Policy on the PushT environment.
|
||||
"""This script demonstrates how to train Diffusion Policy on the PushT environment.
|
||||
|
||||
Once you have trained a model with this script, you can try to evaluate it on
|
||||
examples/2_evaluate_pretrained_policy.py
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
This tutorial will explain the training script, how to use it, and particularly how to configure everything needed for the training run.
|
||||
> **Note:** The following assume you're running these commands on a machine equipped with a cuda GPU. If you don't have one (or if you're using a Mac), you can add `--policy.device=cpu` (`--policy.device=mps` respectively). However, be advised that the code executes much slower on cpu.
|
||||
> **Note:** The following assumes you're running these commands on a machine equipped with a cuda GPU. If you don't have one (or if you're using a Mac), you can add `--policy.device=cpu` (`--policy.device=mps` respectively). However, be advised that the code executes much slower on cpu.
|
||||
|
||||
|
||||
## The training script
|
||||
|
||||
LeRobot offers a training script at [`lerobot/scripts/train.py`](../../lerobot/scripts/train.py). At a high level it does the following:
|
||||
LeRobot offers a training script at [`lerobot/scripts/train.py`](../lerobot/scripts/train.py). At a high level it does the following:
|
||||
|
||||
- Initialize/load a configuration for the following steps using.
|
||||
- Instantiates a dataset.
|
||||
@@ -21,9 +21,9 @@ In the training script, the main function `train` expects a `TrainPipelineConfig
|
||||
def train(cfg: TrainPipelineConfig):
|
||||
```
|
||||
|
||||
You can inspect the `TrainPipelineConfig` defined in [`lerobot/configs/train.py`](../../lerobot/configs/train.py) (which is heavily commented and meant to be a reference to understand any option)
|
||||
You can inspect the `TrainPipelineConfig` defined in [`lerobot/configs/train.py`](../lerobot/configs/train.py) (which is heavily commented and meant to be a reference to understand any option)
|
||||
|
||||
When running the script, inputs for the command line are parsed thanks to the `@parser.wrap()` decorator and an instance of this class is automatically generated. Under the hood, this is done with [Draccus](https://github.com/dlwh/draccus) which is a tool dedicated for this purpose. If you're familiar with Hydra, Draccus can similarly load configurations from config files (.json, .yaml) and also override their values through command line inputs. Unlike Hydra, these configurations are pre-defined in the code through dataclasses rather than being defined entirely in config files. This allows for more rigorous serialization/deserialization, typing, and to manipulate configuration as objects directly in the code and not as dictionaries or namespaces (which enables nice features in an IDE such as autocomplete, jump-to-def, etc.)
|
||||
When running the script, inputs for the command line are parsed thanks to the `@parser.wrap()` decorator and an instance of this class is automatically generated. Under the hood, this is done with [Draccus](https://github.com/dlwh/draccus) which is a tool dedicated to this purpose. If you're familiar with Hydra, Draccus can similarly load configurations from config files (.json, .yaml) and also override their values through command line inputs. Unlike Hydra, these configurations are pre-defined in the code through dataclasses rather than being defined entirely in config files. This allows for more rigorous serialization/deserialization, typing, and to manipulate configuration as objects directly in the code and not as dictionaries or namespaces (which enables nice features in an IDE such as autocomplete, jump-to-def, etc.)
|
||||
|
||||
Let's have a look at a simplified example. Amongst other attributes, the training config has the following attributes:
|
||||
```python
|
||||
@@ -43,14 +43,14 @@ class DatasetConfig:
|
||||
```
|
||||
|
||||
This creates a hierarchical relationship where, for example assuming we have a `cfg` instance of `TrainPipelineConfig`, we can access the `repo_id` value with `cfg.dataset.repo_id`.
|
||||
From the command line, we can specify this value with using a very similar syntax `--dataset.repo_id=repo/id`.
|
||||
From the command line, we can specify this value by using a very similar syntax `--dataset.repo_id=repo/id`.
|
||||
|
||||
By default, every field takes its default value specified in the dataclass. If a field doesn't have a default value, it needs to be specified either from the command line or from a config file – which path is also given in the command line (more in this below). In the example above, the `dataset` field doesn't have a default value which means it must be specified.
|
||||
|
||||
|
||||
## Specifying values from the CLI
|
||||
|
||||
Let's say that we want to train [Diffusion Policy](../../lerobot/common/policies/diffusion) on the [pusht](https://huggingface.co/datasets/lerobot/pusht) dataset, using the [gym_pusht](https://github.com/huggingface/gym-pusht) environment for evaluation. The command to do so would look like this:
|
||||
Let's say that we want to train [Diffusion Policy](../lerobot/common/policies/diffusion) on the [pusht](https://huggingface.co/datasets/lerobot/pusht) dataset, using the [gym_pusht](https://github.com/huggingface/gym-pusht) environment for evaluation. The command to do so would look like this:
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
--dataset.repo_id=lerobot/pusht \
|
||||
@@ -60,10 +60,10 @@ python lerobot/scripts/train.py \
|
||||
|
||||
Let's break this down:
|
||||
- To specify the dataset, we just need to specify its `repo_id` on the hub which is the only required argument in the `DatasetConfig`. The rest of the fields have default values and in this case we are fine with those so we can just add the option `--dataset.repo_id=lerobot/pusht`.
|
||||
- To specify the policy, we can just select diffusion policy using `--policy` appended with `.type`. Here, `.type` is a special argument which allows us to select config classes inheriting from `draccus.ChoiceRegistry` and that have been decorated with the `register_subclass()` method. To have a better explanation of this feature, have a look at this [Draccus demo](https://github.com/dlwh/draccus?tab=readme-ov-file#more-flexible-configuration-with-choice-types). In our code, we use this mechanism mainly to select policies, environments, robots, and some other components like optimizers. The policies available to select are located in [lerobot/common/policies](../../lerobot/common/policies)
|
||||
- Similarly, we select the environment with `--env.type=pusht`. The different environment configs are available in [`lerobot/common/envs/configs.py`](../../lerobot/common/envs/configs.py)
|
||||
- To specify the policy, we can just select diffusion policy using `--policy` appended with `.type`. Here, `.type` is a special argument which allows us to select config classes inheriting from `draccus.ChoiceRegistry` and that have been decorated with the `register_subclass()` method. To have a better explanation of this feature, have a look at this [Draccus demo](https://github.com/dlwh/draccus?tab=readme-ov-file#more-flexible-configuration-with-choice-types). In our code, we use this mechanism mainly to select policies, environments, robots, and some other components like optimizers. The policies available to select are located in [lerobot/common/policies](../lerobot/common/policies)
|
||||
- Similarly, we select the environment with `--env.type=pusht`. The different environment configs are available in [`lerobot/common/envs/configs.py`](../lerobot/common/envs/configs.py)
|
||||
|
||||
Let's see another example. Let's say you've been training [ACT](../../lerobot/common/policies/act) on [lerobot/aloha_sim_insertion_human](https://huggingface.co/datasets/lerobot/aloha_sim_insertion_human) using the [gym-aloha](https://github.com/huggingface/gym-aloha) environment for evaluation with:
|
||||
Let's see another example. Let's say you've been training [ACT](../lerobot/common/policies/act) on [lerobot/aloha_sim_insertion_human](https://huggingface.co/datasets/lerobot/aloha_sim_insertion_human) using the [gym-aloha](https://github.com/huggingface/gym-aloha) environment for evaluation with:
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
--policy.type=act \
|
||||
@@ -74,7 +74,7 @@ python lerobot/scripts/train.py \
|
||||
> Notice we added `--output_dir` to explicitly tell where to write outputs from this run (checkpoints, training state, configs etc.). This is not mandatory and if you don't specify it, a default directory will be created from the current date and time, env.type and policy.type. This will typically look like `outputs/train/2025-01-24/16-10-05_aloha_act`.
|
||||
|
||||
We now want to train a different policy for aloha on another task. We'll change the dataset and use [lerobot/aloha_sim_transfer_cube_human](https://huggingface.co/datasets/lerobot/aloha_sim_transfer_cube_human) instead. Of course, we also need to change the task of the environment as well to match this other task.
|
||||
Looking at the [`AlohaEnv`](../../lerobot/common/envs/configs.py) config, the task is `"AlohaInsertion-v0"` by default, which corresponds to the task we trained on in the command above. The [gym-aloha](https://github.com/huggingface/gym-aloha?tab=readme-ov-file#description) environment also has the `AlohaTransferCube-v0` task which corresponds to this other task we want to train on. Putting this together, we can train this new policy on this different task using:
|
||||
Looking at the [`AlohaEnv`](../lerobot/common/envs/configs.py) config, the task is `"AlohaInsertion-v0"` by default, which corresponds to the task we trained on in the command above. The [gym-aloha](https://github.com/huggingface/gym-aloha?tab=readme-ov-file#description) environment also has the `AlohaTransferCube-v0` task which corresponds to this other task we want to train on. Putting this together, we can train this new policy on this different task using:
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
--policy.type=act \
|
||||
@@ -135,7 +135,7 @@ will start a training run with the same configuration used for training [lerobot
|
||||
|
||||
## Resume training
|
||||
|
||||
Being able to resume a training run is important in case it crashed or aborted for any reason. We'll demonstrate how to that here.
|
||||
Being able to resume a training run is important in case it crashed or aborted for any reason. We'll demonstrate how to do that here.
|
||||
|
||||
Let's reuse the command from the previous run and add a few more options:
|
||||
```bash
|
||||
|
||||
@@ -377,7 +377,7 @@ robot = ManipulatorRobot(robot_config)
|
||||
|
||||
The `KochRobotConfig` is used to set the associated settings and calibration process. For instance, we activate the torque of the gripper of the leader Koch v1.1 arm and position it at a 40 degree angle to use it as a trigger.
|
||||
|
||||
For the [Aloha bimanual robot](https://aloha-2.github.io), we would use `AlohaRobotConfig` to set different settings such as a secondary ID for shadow joints (shoulder, elbow). Specific to Aloha, LeRobot comes with default calibration files stored in in `.cache/calibration/aloha_default`. Assuming the motors have been properly assembled, no manual calibration step is expected for Aloha.
|
||||
For the [Aloha bimanual robot](https://aloha-2.github.io), we would use `AlohaRobotConfig` to set different settings such as a secondary ID for shadow joints (shoulder, elbow). Specific to Aloha, LeRobot comes with default calibration files stored in `.cache/calibration/aloha_default`. Assuming the motors have been properly assembled, no manual calibration step is expected for Aloha.
|
||||
|
||||
**Calibrate and Connect the ManipulatorRobot**
|
||||
|
||||
@@ -399,7 +399,7 @@ And here are the corresponding positions for the leader arm:
|
||||
|
||||
You can watch a [video tutorial of the calibration procedure](https://youtu.be/8drnU9uRY24) for more details.
|
||||
|
||||
During calibration, we count the number of full 360-degree rotations your motors have made since they were first used. That's why we ask yo to move to this arbitrary "zero" position. We don't actually "set" the zero position, so you don't need to be accurate. After calculating these "offsets" to shift the motor values around 0, we need to assess the rotation direction of each motor, which might differ. That's why we ask you to rotate all motors to roughly 90 degrees, to measure if the values changed negatively or positively.
|
||||
During calibration, we count the number of full 360-degree rotations your motors have made since they were first used. That's why we ask you to move to this arbitrary "zero" position. We don't actually "set" the zero position, so you don't need to be accurate. After calculating these "offsets" to shift the motor values around 0, we need to assess the rotation direction of each motor, which might differ. That's why we ask you to rotate all motors to roughly 90 degrees, to measure if the values changed negatively or positively.
|
||||
|
||||
Finally, the rest position ensures that the follower and leader arms are roughly aligned after calibration, preventing sudden movements that could damage the motors when starting teleoperation.
|
||||
|
||||
@@ -622,7 +622,7 @@ camera_01_frame_000047.png
|
||||
|
||||
Note: Some cameras may take a few seconds to warm up, and the first frame might be black or green.
|
||||
|
||||
Finally, run this code to instantiate and connectyour camera:
|
||||
Finally, run this code to instantiate and connect your camera:
|
||||
```python
|
||||
from lerobot.common.robot_devices.cameras.configs import OpenCVCameraConfig
|
||||
from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera
|
||||
@@ -830,11 +830,6 @@ It contains:
|
||||
- `dtRphone:33.84 (29.5hz)` which is the delta time of capturing an image from the phone camera in the thread running asynchronously.
|
||||
|
||||
Troubleshooting:
|
||||
- On Linux, if you encounter any issue during video encoding with `ffmpeg: unknown encoder libsvtav1`, you can:
|
||||
- install with conda-forge by running `conda install -c conda-forge ffmpeg` (it should be compiled with `libsvtav1`),
|
||||
> **NOTE:** This usually installs `ffmpeg 7.X` for your platform (check the version installed with `ffmpeg -encoders | grep libsvtav1`). If it isn't `ffmpeg 7.X` or lacks `libsvtav1` support, you can explicitly install `ffmpeg 7.X` using: `conda install ffmpeg=7.1.1 -c conda-forge`
|
||||
- or, install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1),
|
||||
- and, make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`.
|
||||
- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).
|
||||
|
||||
At the end of data recording, your dataset will be uploaded on your Hugging Face page (e.g. https://huggingface.co/datasets/cadene/koch_test) that you can obtain by running:
|
||||
|
||||
@@ -99,7 +99,7 @@ This is equivalent to running `stretch_robot_home.py`
|
||||
> **Note:** If you run any of the LeRobot scripts below and Stretch is not properly homed, it will automatically home/calibrate first.
|
||||
|
||||
**Teleoperate**
|
||||
Before trying teleoperation, you need activate the gamepad controller by pressing the middle button. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/hello_robot/#gamepad-teleoperation).
|
||||
Before trying teleoperation, you need to activate the gamepad controller by pressing the middle button. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/hello_robot/#gamepad-teleoperation).
|
||||
|
||||
Now try out teleoperation (see above documentation to learn about the gamepad controls):
|
||||
|
||||
|
||||
@@ -142,7 +142,7 @@ python lerobot/scripts/train.py \
|
||||
|
||||
Let's explain it:
|
||||
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/aloha_test`.
|
||||
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor sates, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
|
||||
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
|
||||
4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
|
||||
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
|
||||
|
||||
|
||||
@@ -66,7 +66,7 @@ def main():
|
||||
print(f"Number of episodes in full dataset: {total_episodes}")
|
||||
print(f"Number of episodes in training dataset (90% subset): {len(train_episodes)}")
|
||||
print(f"Number of episodes in validation dataset (10% subset): {len(val_episodes)}")
|
||||
# - Load train an val datasets
|
||||
# - Load train and val datasets
|
||||
train_dataset = LeRobotDataset(
|
||||
"lerobot/pusht", episodes=train_episodes, delta_timestamps=delta_timestamps
|
||||
)
|
||||
|
||||
@@ -168,12 +168,7 @@ available_datasets = sorted(
|
||||
)
|
||||
|
||||
# lists all available policies from `lerobot/common/policies`
|
||||
available_policies = [
|
||||
"act",
|
||||
"diffusion",
|
||||
"tdmpc",
|
||||
"vqbet",
|
||||
]
|
||||
available_policies = ["act", "diffusion", "tdmpc", "vqbet"]
|
||||
|
||||
# lists all available robots from `lerobot/common/robot_devices/robots`
|
||||
available_robots = [
|
||||
@@ -181,6 +176,7 @@ available_robots = [
|
||||
"koch_bimanual",
|
||||
"aloha",
|
||||
"so100",
|
||||
"so101",
|
||||
"moss",
|
||||
]
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ def resolve_delta_timestamps(
|
||||
"observation.state": [-0.04, -0.02, 0]
|
||||
"observation.action": [-0.02, 0, 0.02]
|
||||
}
|
||||
returns `None` if the the resulting dict is empty.
|
||||
returns `None` if the resulting dict is empty.
|
||||
"""
|
||||
delta_timestamps = {}
|
||||
for key in ds_meta.features:
|
||||
|
||||
@@ -106,7 +106,7 @@ def worker_process(queue: queue.Queue, num_threads: int):
|
||||
class AsyncImageWriter:
|
||||
"""
|
||||
This class abstract away the initialisation of processes or/and threads to
|
||||
save images on disk asynchrounously, which is critical to control a robot and record data
|
||||
save images on disk asynchronously, which is critical to control a robot and record data
|
||||
at a high frame rate.
|
||||
|
||||
When `num_processes=0`, it creates a threads pool of size `num_threads`.
|
||||
|
||||
@@ -944,7 +944,7 @@ class LeRobotDataset(torch.utils.data.Dataset):
|
||||
def stop_image_writer(self) -> None:
|
||||
"""
|
||||
Whenever wrapping this dataset inside a parallelized DataLoader, this needs to be called first to
|
||||
remove the image_writer in order for the LeRobotDataset object to be pickleable and parallelized.
|
||||
remove the image_writer in order for the LeRobotDataset object to be picklable and parallelized.
|
||||
"""
|
||||
if self.image_writer is not None:
|
||||
self.image_writer.stop()
|
||||
|
||||
@@ -128,7 +128,7 @@ class SharpnessJitter(Transform):
|
||||
raise TypeError(f"{sharpness=} should be a single number or a sequence with length 2.")
|
||||
|
||||
if not 0.0 <= sharpness[0] <= sharpness[1]:
|
||||
raise ValueError(f"sharpnesss values should be between (0., inf), but got {sharpness}.")
|
||||
raise ValueError(f"sharpness values should be between (0., inf), but got {sharpness}.")
|
||||
|
||||
return float(sharpness[0]), float(sharpness[1])
|
||||
|
||||
|
||||
@@ -13,16 +13,15 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import glob
|
||||
import importlib
|
||||
import json
|
||||
import logging
|
||||
import subprocess
|
||||
import warnings
|
||||
from collections import OrderedDict
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any, ClassVar
|
||||
|
||||
import av
|
||||
import pyarrow as pa
|
||||
import torch
|
||||
import torchvision
|
||||
@@ -102,7 +101,7 @@ def decode_video_frames_torchvision(
|
||||
keyframes_only = False
|
||||
torchvision.set_video_backend(backend)
|
||||
if backend == "pyav":
|
||||
keyframes_only = True # pyav doesnt support accuracte seek
|
||||
keyframes_only = True # pyav doesn't support accurate seek
|
||||
|
||||
# set a video stream reader
|
||||
# TODO(rcadene): also load audio stream at the same time
|
||||
@@ -252,51 +251,83 @@ def encode_video_frames(
|
||||
g: int | None = 2,
|
||||
crf: int | None = 30,
|
||||
fast_decode: int = 0,
|
||||
log_level: str | None = "error",
|
||||
log_level: int | None = av.logging.ERROR,
|
||||
overwrite: bool = False,
|
||||
) -> None:
|
||||
"""More info on ffmpeg arguments tuning on `benchmark/video/README.md`"""
|
||||
# Check encoder availability
|
||||
if vcodec not in ["h264", "hevc", "libsvtav1"]:
|
||||
raise ValueError(f"Unsupported video codec: {vcodec}. Supported codecs are: h264, hevc, libsvtav1.")
|
||||
|
||||
video_path = Path(video_path)
|
||||
imgs_dir = Path(imgs_dir)
|
||||
video_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
ffmpeg_args = OrderedDict(
|
||||
[
|
||||
("-f", "image2"),
|
||||
("-r", str(fps)),
|
||||
("-i", str(imgs_dir / "frame_%06d.png")),
|
||||
("-vcodec", vcodec),
|
||||
("-pix_fmt", pix_fmt),
|
||||
]
|
||||
video_path.parent.mkdir(parents=True, exist_ok=overwrite)
|
||||
|
||||
# Encoders/pixel formats incompatibility check
|
||||
if (vcodec == "libsvtav1" or vcodec == "hevc") and pix_fmt == "yuv444p":
|
||||
logging.warning(
|
||||
f"Incompatible pixel format 'yuv444p' for codec {vcodec}, auto-selecting format 'yuv420p'"
|
||||
)
|
||||
pix_fmt = "yuv420p"
|
||||
|
||||
# Get input frames
|
||||
template = "frame_" + ("[0-9]" * 6) + ".png"
|
||||
input_list = sorted(
|
||||
glob.glob(str(imgs_dir / template)), key=lambda x: int(x.split("_")[-1].split(".")[0])
|
||||
)
|
||||
|
||||
# Define video output frame size (assuming all input frames are the same size)
|
||||
if len(input_list) == 0:
|
||||
raise FileNotFoundError(f"No images found in {imgs_dir}.")
|
||||
dummy_image = Image.open(input_list[0])
|
||||
width, height = dummy_image.size
|
||||
|
||||
# Define video codec options
|
||||
video_options = {}
|
||||
|
||||
if g is not None:
|
||||
ffmpeg_args["-g"] = str(g)
|
||||
video_options["g"] = str(g)
|
||||
|
||||
if crf is not None:
|
||||
ffmpeg_args["-crf"] = str(crf)
|
||||
video_options["crf"] = str(crf)
|
||||
|
||||
if fast_decode:
|
||||
key = "-svtav1-params" if vcodec == "libsvtav1" else "-tune"
|
||||
key = "svtav1-params" if vcodec == "libsvtav1" else "tune"
|
||||
value = f"fast-decode={fast_decode}" if vcodec == "libsvtav1" else "fastdecode"
|
||||
ffmpeg_args[key] = value
|
||||
video_options[key] = value
|
||||
|
||||
# Set logging level
|
||||
if log_level is not None:
|
||||
ffmpeg_args["-loglevel"] = str(log_level)
|
||||
# "While less efficient, it is generally preferable to modify logging with Python’s logging"
|
||||
logging.getLogger("libav").setLevel(log_level)
|
||||
|
||||
ffmpeg_args = [item for pair in ffmpeg_args.items() for item in pair]
|
||||
if overwrite:
|
||||
ffmpeg_args.append("-y")
|
||||
# Create and open output file (overwrite by default)
|
||||
with av.open(str(video_path), "w") as output:
|
||||
output_stream = output.add_stream(vcodec, fps, options=video_options)
|
||||
output_stream.pix_fmt = pix_fmt
|
||||
output_stream.width = width
|
||||
output_stream.height = height
|
||||
|
||||
ffmpeg_cmd = ["ffmpeg"] + ffmpeg_args + [str(video_path)]
|
||||
# redirect stdin to subprocess.DEVNULL to prevent reading random keyboard inputs from terminal
|
||||
subprocess.run(ffmpeg_cmd, check=True, stdin=subprocess.DEVNULL)
|
||||
# Loop through input frames and encode them
|
||||
for input_data in input_list:
|
||||
input_image = Image.open(input_data).convert("RGB")
|
||||
input_frame = av.VideoFrame.from_image(input_image)
|
||||
packet = output_stream.encode(input_frame)
|
||||
if packet:
|
||||
output.mux(packet)
|
||||
|
||||
# Flush the encoder
|
||||
packet = output_stream.encode()
|
||||
if packet:
|
||||
output.mux(packet)
|
||||
|
||||
# Reset logging level
|
||||
if log_level is not None:
|
||||
av.logging.restore_default_callback()
|
||||
|
||||
if not video_path.exists():
|
||||
raise OSError(
|
||||
f"Video encoding did not work. File not found: {video_path}. "
|
||||
f"Try running the command manually to debug: `{''.join(ffmpeg_cmd)}`"
|
||||
)
|
||||
raise OSError(f"Video encoding did not work. File not found: {video_path}.")
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -332,78 +363,68 @@ with warnings.catch_warnings():
|
||||
|
||||
|
||||
def get_audio_info(video_path: Path | str) -> dict:
|
||||
ffprobe_audio_cmd = [
|
||||
"ffprobe",
|
||||
"-v",
|
||||
"error",
|
||||
"-select_streams",
|
||||
"a:0",
|
||||
"-show_entries",
|
||||
"stream=channels,codec_name,bit_rate,sample_rate,bit_depth,channel_layout,duration",
|
||||
"-of",
|
||||
"json",
|
||||
str(video_path),
|
||||
]
|
||||
result = subprocess.run(ffprobe_audio_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
||||
if result.returncode != 0:
|
||||
raise RuntimeError(f"Error running ffprobe: {result.stderr}")
|
||||
# Set logging level
|
||||
logging.getLogger("libav").setLevel(av.logging.ERROR)
|
||||
|
||||
info = json.loads(result.stdout)
|
||||
audio_stream_info = info["streams"][0] if info.get("streams") else None
|
||||
if audio_stream_info is None:
|
||||
return {"has_audio": False}
|
||||
# Getting audio stream information
|
||||
audio_info = {}
|
||||
with av.open(str(video_path), "r") as audio_file:
|
||||
try:
|
||||
audio_stream = audio_file.streams.audio[0]
|
||||
except IndexError:
|
||||
# Reset logging level
|
||||
av.logging.restore_default_callback()
|
||||
return {"has_audio": False}
|
||||
|
||||
# Return the information, defaulting to None if no audio stream is present
|
||||
return {
|
||||
"has_audio": True,
|
||||
"audio.channels": audio_stream_info.get("channels", None),
|
||||
"audio.codec": audio_stream_info.get("codec_name", None),
|
||||
"audio.bit_rate": int(audio_stream_info["bit_rate"]) if audio_stream_info.get("bit_rate") else None,
|
||||
"audio.sample_rate": int(audio_stream_info["sample_rate"])
|
||||
if audio_stream_info.get("sample_rate")
|
||||
else None,
|
||||
"audio.bit_depth": audio_stream_info.get("bit_depth", None),
|
||||
"audio.channel_layout": audio_stream_info.get("channel_layout", None),
|
||||
}
|
||||
audio_info["audio.channels"] = audio_stream.channels
|
||||
audio_info["audio.codec"] = audio_stream.codec.canonical_name
|
||||
# In an ideal loseless case : bit depth x sample rate x channels = bit rate.
|
||||
# In an actual compressed case, the bit rate is set according to the compression level : the lower the bit rate, the more compression is applied.
|
||||
audio_info["audio.bit_rate"] = audio_stream.bit_rate
|
||||
audio_info["audio.sample_rate"] = audio_stream.sample_rate # Number of samples per second
|
||||
# In an ideal loseless case : fixed number of bits per sample.
|
||||
# In an actual compressed case : variable number of bits per sample (often reduced to match a given depth rate).
|
||||
audio_info["audio.bit_depth"] = audio_stream.format.bits
|
||||
audio_info["audio.channel_layout"] = audio_stream.layout.name
|
||||
audio_info["has_audio"] = True
|
||||
|
||||
# Reset logging level
|
||||
av.logging.restore_default_callback()
|
||||
|
||||
return audio_info
|
||||
|
||||
|
||||
def get_video_info(video_path: Path | str) -> dict:
|
||||
ffprobe_video_cmd = [
|
||||
"ffprobe",
|
||||
"-v",
|
||||
"error",
|
||||
"-select_streams",
|
||||
"v:0",
|
||||
"-show_entries",
|
||||
"stream=r_frame_rate,width,height,codec_name,nb_frames,duration,pix_fmt",
|
||||
"-of",
|
||||
"json",
|
||||
str(video_path),
|
||||
]
|
||||
result = subprocess.run(ffprobe_video_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
||||
if result.returncode != 0:
|
||||
raise RuntimeError(f"Error running ffprobe: {result.stderr}")
|
||||
# Set logging level
|
||||
logging.getLogger("libav").setLevel(av.logging.ERROR)
|
||||
|
||||
info = json.loads(result.stdout)
|
||||
video_stream_info = info["streams"][0]
|
||||
# Getting video stream information
|
||||
video_info = {}
|
||||
with av.open(str(video_path), "r") as video_file:
|
||||
try:
|
||||
video_stream = video_file.streams.video[0]
|
||||
except IndexError:
|
||||
# Reset logging level
|
||||
av.logging.restore_default_callback()
|
||||
return {}
|
||||
|
||||
# Calculate fps from r_frame_rate
|
||||
r_frame_rate = video_stream_info["r_frame_rate"]
|
||||
num, denom = map(int, r_frame_rate.split("/"))
|
||||
fps = num / denom
|
||||
video_info["video.height"] = video_stream.height
|
||||
video_info["video.width"] = video_stream.width
|
||||
video_info["video.codec"] = video_stream.codec.canonical_name
|
||||
video_info["video.pix_fmt"] = video_stream.pix_fmt
|
||||
video_info["video.is_depth_map"] = False
|
||||
|
||||
pixel_channels = get_video_pixel_channels(video_stream_info["pix_fmt"])
|
||||
# Calculate fps from r_frame_rate
|
||||
video_info["video.fps"] = int(video_stream.base_rate)
|
||||
|
||||
video_info = {
|
||||
"video.fps": fps,
|
||||
"video.height": video_stream_info["height"],
|
||||
"video.width": video_stream_info["width"],
|
||||
"video.channels": pixel_channels,
|
||||
"video.codec": video_stream_info["codec_name"],
|
||||
"video.pix_fmt": video_stream_info["pix_fmt"],
|
||||
"video.is_depth_map": False,
|
||||
**get_audio_info(video_path),
|
||||
}
|
||||
pixel_channels = get_video_pixel_channels(video_stream.pix_fmt)
|
||||
video_info["video.channels"] = pixel_channels
|
||||
|
||||
# Reset logging level
|
||||
av.logging.restore_default_callback()
|
||||
|
||||
# Adding audio stream information
|
||||
video_info.update(**get_audio_info(video_path))
|
||||
|
||||
return video_info
|
||||
|
||||
|
||||
@@ -15,5 +15,6 @@
|
||||
from .act.configuration_act import ACTConfig as ACTConfig
|
||||
from .diffusion.configuration_diffusion import DiffusionConfig as DiffusionConfig
|
||||
from .pi0.configuration_pi0 import PI0Config as PI0Config
|
||||
from .smolvla.configuration_smolvla import SmolVLAConfig as SmolVLAConfig
|
||||
from .tdmpc.configuration_tdmpc import TDMPCConfig as TDMPCConfig
|
||||
from .vqbet.configuration_vqbet import VQBeTConfig as VQBeTConfig
|
||||
|
||||
@@ -27,6 +27,7 @@ from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionC
|
||||
from lerobot.common.policies.pi0.configuration_pi0 import PI0Config
|
||||
from lerobot.common.policies.pi0fast.configuration_pi0fast import PI0FASTConfig
|
||||
from lerobot.common.policies.pretrained import PreTrainedPolicy
|
||||
from lerobot.common.policies.smolvla.configuration_smolvla import SmolVLAConfig
|
||||
from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig
|
||||
from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig
|
||||
from lerobot.configs.policies import PreTrainedConfig
|
||||
@@ -59,6 +60,10 @@ def get_policy_class(name: str) -> PreTrainedPolicy:
|
||||
from lerobot.common.policies.pi0fast.modeling_pi0fast import PI0FASTPolicy
|
||||
|
||||
return PI0FASTPolicy
|
||||
elif name == "smolvla":
|
||||
from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy
|
||||
|
||||
return SmolVLAPolicy
|
||||
else:
|
||||
raise NotImplementedError(f"Policy with name {name} is not implemented.")
|
||||
|
||||
@@ -76,6 +81,8 @@ def make_policy_config(policy_type: str, **kwargs) -> PreTrainedConfig:
|
||||
return PI0Config(**kwargs)
|
||||
elif policy_type == "pi0fast":
|
||||
return PI0FASTConfig(**kwargs)
|
||||
elif policy_type == "smolvla":
|
||||
return SmolVLAConfig(**kwargs)
|
||||
else:
|
||||
raise ValueError(f"Policy type '{policy_type}' is not available.")
|
||||
|
||||
|
||||
@@ -357,7 +357,7 @@ class PI0Policy(PreTrainedPolicy):
|
||||
if self.config.resize_imgs_with_padding is not None:
|
||||
img = resize_with_pad(img, *self.config.resize_imgs_with_padding, pad_value=0)
|
||||
|
||||
# Normalize from range [0,1] to [-1,1] as expacted by siglip
|
||||
# Normalize from range [0,1] to [-1,1] as expected by siglip
|
||||
img = img * 2.0 - 1.0
|
||||
|
||||
bsize = img.shape[0]
|
||||
|
||||
@@ -516,7 +516,7 @@ class PI0FAST(nn.Module):
|
||||
interpolate_like_pi=self.config.interpolate_like_pi,
|
||||
)
|
||||
|
||||
# Normalize from range [0,1] to [-1,1] as expacted by siglip
|
||||
# Normalize from range [0,1] to [-1,1] as expected by siglip
|
||||
img = img * 2.0 - 1.0
|
||||
|
||||
bsize = img.shape[0]
|
||||
|
||||
154
lerobot/common/policies/smolvla/configuration_smolvla.py
Normal file
@@ -0,0 +1,154 @@
|
||||
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
from lerobot.common.optim.optimizers import AdamWConfig
|
||||
from lerobot.common.optim.schedulers import (
|
||||
CosineDecayWithWarmupSchedulerConfig,
|
||||
)
|
||||
from lerobot.configs.policies import PreTrainedConfig
|
||||
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
|
||||
|
||||
|
||||
@PreTrainedConfig.register_subclass("smolvla")
|
||||
@dataclass
|
||||
class SmolVLAConfig(PreTrainedConfig):
|
||||
# Input / output structure.
|
||||
n_obs_steps: int = 1
|
||||
chunk_size: int = 50
|
||||
n_action_steps: int = 50
|
||||
|
||||
normalization_mapping: dict[str, NormalizationMode] = field(
|
||||
default_factory=lambda: {
|
||||
"VISUAL": NormalizationMode.IDENTITY,
|
||||
"STATE": NormalizationMode.MEAN_STD,
|
||||
"ACTION": NormalizationMode.MEAN_STD,
|
||||
}
|
||||
)
|
||||
|
||||
# Shorter state and action vectors will be padded
|
||||
max_state_dim: int = 32
|
||||
max_action_dim: int = 32
|
||||
|
||||
# Image preprocessing
|
||||
resize_imgs_with_padding: tuple[int, int] = (512, 512)
|
||||
|
||||
# Add empty images. Used by smolvla_aloha_sim which adds the empty
|
||||
# left and right wrist cameras in addition to the top camera.
|
||||
empty_cameras: int = 0
|
||||
|
||||
# Converts the joint and gripper values from the standard Aloha space to
|
||||
# the space used by the pi internal runtime which was used to train the base model.
|
||||
adapt_to_pi_aloha: bool = False
|
||||
|
||||
# Converts joint dimensions to deltas with respect to the current state before passing to the model.
|
||||
# Gripper dimensions will remain in absolute values.
|
||||
use_delta_joint_actions_aloha: bool = False
|
||||
|
||||
# Tokenizer
|
||||
tokenizer_max_length: int = 48
|
||||
|
||||
# Decoding
|
||||
num_steps: int = 10
|
||||
|
||||
# Attention utils
|
||||
use_cache: bool = True
|
||||
|
||||
# Finetuning settings
|
||||
freeze_vision_encoder: bool = True
|
||||
train_expert_only: bool = True
|
||||
train_state_proj: bool = True
|
||||
|
||||
# Training presets
|
||||
optimizer_lr: float = 1e-4
|
||||
optimizer_betas: tuple[float, float] = (0.9, 0.95)
|
||||
optimizer_eps: float = 1e-8
|
||||
optimizer_weight_decay: float = 1e-10
|
||||
optimizer_grad_clip_norm: float = 10
|
||||
|
||||
scheduler_warmup_steps: int = 1_000
|
||||
scheduler_decay_steps: int = 30_000
|
||||
scheduler_decay_lr: float = 2.5e-6
|
||||
|
||||
vlm_model_name: str = "HuggingFaceTB/SmolVLM2-500M-Video-Instruct" # Select the VLM backbone.
|
||||
load_vlm_weights: bool = False # Set to True in case of training the expert from scratch. True when init from pretrained SmolVLA weights
|
||||
|
||||
add_image_special_tokens: bool = False # Whether to use special image tokens around image features.
|
||||
|
||||
attention_mode: str = "cross_attn"
|
||||
|
||||
prefix_length: int = -1
|
||||
|
||||
pad_language_to: str = "longest" # "max_length"
|
||||
|
||||
num_expert_layers: int = -1 # Less or equal to 0 is the default where the action expert has the same number of layers of VLM. Otherwise the expert have less layers.
|
||||
num_vlm_layers: int = 16 # Number of layers used in the VLM (first num_vlm_layers layers)
|
||||
self_attn_every_n_layers: int = 2 # Interleave SA layers each self_attn_every_n_layers
|
||||
expert_width_multiplier: float = 0.75 # The action expert hidden size (wrt to the VLM)
|
||||
|
||||
min_period: float = 4e-3 # sensitivity range for the timestep used in sine-cosine positional encoding
|
||||
max_period: float = 4.0
|
||||
|
||||
def __post_init__(self):
|
||||
super().__post_init__()
|
||||
|
||||
"""Input validation (not exhaustive)."""
|
||||
if self.n_action_steps > self.chunk_size:
|
||||
raise ValueError(
|
||||
f"The chunk size is the upper bound for the number of action steps per model invocation. Got "
|
||||
f"{self.n_action_steps} for `n_action_steps` and {self.chunk_size} for `chunk_size`."
|
||||
)
|
||||
if self.use_delta_joint_actions_aloha:
|
||||
raise NotImplementedError(
|
||||
"`use_delta_joint_actions_aloha` is used by smolvla for aloha real models. It is not ported yet in LeRobot."
|
||||
)
|
||||
|
||||
def validate_features(self) -> None:
|
||||
for i in range(self.empty_cameras):
|
||||
key = f"observation.images.empty_camera_{i}"
|
||||
empty_camera = PolicyFeature(
|
||||
type=FeatureType.VISUAL,
|
||||
shape=(3, 480, 640),
|
||||
)
|
||||
self.input_features[key] = empty_camera
|
||||
|
||||
def get_optimizer_preset(self) -> AdamWConfig:
|
||||
return AdamWConfig(
|
||||
lr=self.optimizer_lr,
|
||||
betas=self.optimizer_betas,
|
||||
eps=self.optimizer_eps,
|
||||
weight_decay=self.optimizer_weight_decay,
|
||||
grad_clip_norm=self.optimizer_grad_clip_norm,
|
||||
)
|
||||
|
||||
def get_scheduler_preset(self):
|
||||
return CosineDecayWithWarmupSchedulerConfig(
|
||||
peak_lr=self.optimizer_lr,
|
||||
decay_lr=self.scheduler_decay_lr,
|
||||
num_warmup_steps=self.scheduler_warmup_steps,
|
||||
num_decay_steps=self.scheduler_decay_steps,
|
||||
)
|
||||
|
||||
@property
|
||||
def observation_delta_indices(self) -> list:
|
||||
return [0]
|
||||
|
||||
@property
|
||||
def action_delta_indices(self) -> list:
|
||||
return list(range(self.chunk_size))
|
||||
|
||||
@property
|
||||
def reward_delta_indices(self) -> None:
|
||||
return None
|
||||
801
lerobot/common/policies/smolvla/modeling_smolvla.py
Normal file
@@ -0,0 +1,801 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2025 HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
SmolVLA:
|
||||
|
||||
[Paper](https://huggingface.co/papers/2506.01844)
|
||||
|
||||
Designed by Hugging Face.
|
||||
|
||||
Install smolvla extra dependencies:
|
||||
```bash
|
||||
pip install -e ".[smolvla]"
|
||||
```
|
||||
|
||||
Example of finetuning the smolvla pretrained model (`smolvla_base`):
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
--policy.path=lerobot/smolvla_base \
|
||||
--dataset.repo_id=danaaubakirova/svla_so100_task1_v3 \
|
||||
--batch_size=64 \
|
||||
--steps=200000
|
||||
```
|
||||
|
||||
Example of finetuning a smolVLA. SmolVLA is composed of a pretrained VLM,
|
||||
and an action expert.
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
--policy.type=smolvla \
|
||||
--dataset.repo_id=danaaubakirova/svla_so100_task1_v3 \
|
||||
--batch_size=64 \
|
||||
--steps=200000
|
||||
```
|
||||
|
||||
Example of using the smolvla pretrained model outside LeRobot training framework:
|
||||
```python
|
||||
policy = SmolVLAPolicy.from_pretrained("lerobot/smolvla_base")
|
||||
```
|
||||
|
||||
"""
|
||||
|
||||
import math
|
||||
from collections import deque
|
||||
|
||||
import torch
|
||||
import torch.nn.functional as F # noqa: N812
|
||||
from torch import Tensor, nn
|
||||
from transformers import AutoProcessor
|
||||
|
||||
from lerobot.common.constants import ACTION, OBS_ROBOT
|
||||
from lerobot.common.policies.normalize import (
|
||||
Normalize,
|
||||
Unnormalize,
|
||||
)
|
||||
from lerobot.common.policies.pretrained import PreTrainedPolicy
|
||||
from lerobot.common.policies.smolvla.configuration_smolvla import SmolVLAConfig
|
||||
from lerobot.common.policies.smolvla.smolvlm_with_expert import SmolVLMWithExpertModel
|
||||
from lerobot.common.policies.utils import (
|
||||
populate_queues,
|
||||
)
|
||||
from lerobot.common.utils.utils import get_safe_dtype
|
||||
|
||||
|
||||
def create_sinusoidal_pos_embedding(
|
||||
time: torch.tensor, dimension: int, min_period: float, max_period: float, device="cpu"
|
||||
) -> Tensor:
|
||||
"""Computes sine-cosine positional embedding vectors for scalar positions."""
|
||||
if dimension % 2 != 0:
|
||||
raise ValueError(f"dimension ({dimension}) must be divisible by 2")
|
||||
|
||||
if time.ndim != 1:
|
||||
raise ValueError("The time tensor is expected to be of shape `(batch_size, )`.")
|
||||
|
||||
dtype = get_safe_dtype(torch.float64, device.type)
|
||||
fraction = torch.linspace(0.0, 1.0, dimension // 2, dtype=dtype, device=device)
|
||||
period = min_period * (max_period / min_period) ** fraction
|
||||
|
||||
# Compute the outer product
|
||||
scaling_factor = 1.0 / period * 2 * math.pi
|
||||
sin_input = scaling_factor[None, :] * time[:, None]
|
||||
pos_emb = torch.cat([torch.sin(sin_input), torch.cos(sin_input)], dim=1)
|
||||
return pos_emb
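As a standalone illustration of the embedding computed above (the period bounds are illustrative; the policy reads them from `config.min_period` and `config.max_period`):
```python
import math

import torch


def sincos_time_embedding(t: torch.Tensor, dim: int, min_period: float, max_period: float) -> torch.Tensor:
    # t: (batch,) scalar timesteps in [0, 1]; returns (batch, dim)
    fraction = torch.linspace(0.0, 1.0, dim // 2, dtype=torch.float64, device=t.device)
    period = min_period * (max_period / min_period) ** fraction
    angles = (2 * math.pi / period)[None, :] * t[:, None].double()
    return torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)


emb = sincos_time_embedding(torch.tensor([0.0, 0.5, 1.0]), dim=64, min_period=4e-3, max_period=4.0)
print(emb.shape)  # torch.Size([3, 64])
```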
|
||||
|
||||
|
||||
def sample_beta(alpha, beta, bsize, device):
|
||||
gamma1 = torch.empty((bsize,), device=device).uniform_(0, 1).pow(1 / alpha)
|
||||
gamma2 = torch.empty((bsize,), device=device).uniform_(0, 1).pow(1 / beta)
|
||||
return gamma1 / (gamma1 + gamma2)
|
||||
|
||||
|
||||
def make_att_2d_masks(pad_masks, att_masks):
|
||||
"""Copied from big_vision.
|
||||
|
||||
Tokens can attend to valid input tokens whose cumulative mask_ar is
smaller than or equal to theirs. This way `mask_ar` int[B, N] can be used to
|
||||
setup several types of attention, for example:
|
||||
|
||||
[[1 1 1 1 1 1]]: pure causal attention.
|
||||
|
||||
[[0 0 0 1 1 1]]: prefix-lm attention. The first 3 tokens can attend between
|
||||
themselves and the last 3 tokens have a causal attention. The first
|
||||
entry could also be a 1 without changing behaviour.
|
||||
|
||||
[[1 0 1 0 1 0 0 1 0 0]]: causal attention between 4 blocks. Tokens of a
|
||||
block can attend all previous blocks and all tokens on the same block.
|
||||
|
||||
Args:
|
||||
input_mask: bool[B, N] true if its part of the input, false if padding.
|
||||
mask_ar: int32[B, N] mask that's 1 where previous tokens cannot depend on
|
||||
it and 0 where it shares the same attention mask as the previous token.
|
||||
"""
|
||||
if att_masks.ndim != 2:
|
||||
raise ValueError(att_masks.ndim)
|
||||
if pad_masks.ndim != 2:
|
||||
raise ValueError(pad_masks.ndim)
|
||||
|
||||
cumsum = torch.cumsum(att_masks, dim=1)
|
||||
att_2d_masks = cumsum[:, None, :] <= cumsum[:, :, None]
|
||||
pad_2d_masks = pad_masks[:, None, :] * pad_masks[:, :, None]
|
||||
att_2d_masks = att_2d_masks & pad_2d_masks
|
||||
return att_2d_masks
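A tiny worked example of this mask construction (assuming `make_att_2d_masks` is imported from this module):
```python
import torch

pad = torch.ones(1, 5, dtype=torch.bool)  # no padding
att = torch.tensor([[0, 0, 0, 1, 1]])     # 3 prefix-LM tokens followed by 2 causal tokens
mask_2d = make_att_2d_masks(pad, att)
print(mask_2d.int())
# tensor([[[1, 1, 1, 0, 0],
#          [1, 1, 1, 0, 0],
#          [1, 1, 1, 0, 0],
#          [1, 1, 1, 1, 0],
#          [1, 1, 1, 1, 1]]])
```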
|
||||
|
||||
|
||||
def resize_with_pad(img, width, height, pad_value=-1):
|
||||
# effectively a no-op when the image already matches the target width and height
|
||||
if img.ndim != 4:
|
||||
raise ValueError(f"(b,c,h,w) expected, but got {img.shape}")
|
||||
|
||||
cur_height, cur_width = img.shape[2:]
|
||||
|
||||
ratio = max(cur_width / width, cur_height / height)
|
||||
resized_height = int(cur_height / ratio)
|
||||
resized_width = int(cur_width / ratio)
|
||||
resized_img = F.interpolate(
|
||||
img, size=(resized_height, resized_width), mode="bilinear", align_corners=False
|
||||
)
|
||||
|
||||
pad_height = max(0, int(height - resized_height))
|
||||
pad_width = max(0, int(width - resized_width))
|
||||
|
||||
# pad on left and top of image
|
||||
padded_img = F.pad(resized_img, (pad_width, 0, pad_height, 0), value=pad_value)
|
||||
return padded_img
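For example, padding a 480x640 batch of images to a square target (assuming the function is imported from this module):
```python
import torch

img = torch.rand(1, 3, 480, 640)  # (b, c, h, w) in [0, 1]
out = resize_with_pad(img, width=512, height=512, pad_value=0)
print(out.shape)  # torch.Size([1, 3, 512, 512]) -> resized to 384x512, then padded on the top
```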
|
||||
|
||||
|
||||
def pad_vector(vector, new_dim):
|
||||
"""Can be (batch_size x sequence_length x features_dimension)
|
||||
or (batch_size x features_dimension)
|
||||
"""
|
||||
if vector.shape[-1] == new_dim:
|
||||
return vector
|
||||
shape = list(vector.shape)
|
||||
current_dim = shape[-1]
|
||||
shape[-1] = new_dim
|
||||
new_vector = torch.zeros(*shape, dtype=vector.dtype, device=vector.device)
|
||||
new_vector[..., :current_dim] = vector
|
||||
return new_vector
|
||||
|
||||
|
||||
def normalize(x, min_val, max_val):
|
||||
return (x - min_val) / (max_val - min_val)
|
||||
|
||||
|
||||
def unnormalize(x, min_val, max_val):
|
||||
return x * (max_val - min_val) + min_val
|
||||
|
||||
|
||||
def safe_arcsin(value):
|
||||
# This ensures that the input stays within
|
||||
# [−1,1] to avoid invalid values for arcsin
|
||||
return torch.arcsin(torch.clamp(value, -1.0, 1.0))
|
||||
|
||||
|
||||
def aloha_gripper_to_angular(value):
|
||||
# Aloha transforms the gripper positions into a linear space. The following code
|
||||
# reverses this transformation to be consistent with smolvla which is pretrained in
|
||||
# angular space.
|
||||
#
|
||||
# These values are coming from the Aloha code:
|
||||
# PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED
|
||||
value = unnormalize(value, min_val=0.01844, max_val=0.05800)
|
||||
|
||||
# This is the inverse of the angular to linear transformation inside the Interbotix code.
|
||||
def linear_to_radian(linear_position, arm_length, horn_radius):
|
||||
value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position)
|
||||
return safe_arcsin(value)
|
||||
|
||||
# The constants are taken from the Interbotix code.
|
||||
value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022)
|
||||
|
||||
# Normalize to [0, 1].
|
||||
# The values 0.4 and 1.5 were measured on an actual Trossen robot.
|
||||
return normalize(value, min_val=0.4, max_val=1.5)
|
||||
|
||||
|
||||
def aloha_gripper_from_angular(value):
|
||||
# Convert from the gripper position used by smolvla to the gripper position that is used by Aloha.
|
||||
# Note that the units are still angular but the range is different.
|
||||
|
||||
# The values 0.4 and 1.5 were measured on an actual Trossen robot.
|
||||
value = unnormalize(value, min_val=0.4, max_val=1.5)
|
||||
|
||||
# These values are coming from the Aloha code:
|
||||
# PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE
|
||||
return normalize(value, min_val=-0.6213, max_val=1.4910)
|
||||
|
||||
|
||||
def aloha_gripper_from_angular_inv(value):
|
||||
# Directly inverts the gripper_from_angular function.
|
||||
value = unnormalize(value, min_val=-0.6213, max_val=1.4910)
|
||||
return normalize(value, min_val=0.4, max_val=1.5)
|
||||
|
||||
|
||||
class SmolVLAPolicy(PreTrainedPolicy):
|
||||
"""Wrapper class around VLAFlowMatching model to train and run inference within LeRobot."""
|
||||
|
||||
config_class = SmolVLAConfig
|
||||
name = "smolvla"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
config: SmolVLAConfig,
|
||||
dataset_stats: dict[str, dict[str, Tensor]] | None = None,
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
config: Policy configuration class instance or None, in which case the default instantiation of
|
||||
the configuration class is used.
|
||||
dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
|
||||
that they will be passed with a call to `load_state_dict` before the policy is used.
|
||||
"""
|
||||
|
||||
super().__init__(config)
|
||||
config.validate_features()
|
||||
self.config = config
|
||||
self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
|
||||
self.normalize_targets = Normalize(
|
||||
config.output_features, config.normalization_mapping, dataset_stats
|
||||
)
|
||||
self.unnormalize_outputs = Unnormalize(
|
||||
config.output_features, config.normalization_mapping, dataset_stats
|
||||
)
|
||||
|
||||
self.language_tokenizer = AutoProcessor.from_pretrained(self.config.vlm_model_name).tokenizer
|
||||
self.model = VLAFlowMatching(config)
|
||||
self.reset()
|
||||
|
||||
def reset(self):
|
||||
"""This should be called whenever the environment is reset."""
|
||||
self._queues = {
|
||||
ACTION: deque(maxlen=self.config.n_action_steps),
|
||||
}
|
||||
|
||||
def get_optim_params(self) -> dict:
|
||||
return self.parameters()
|
||||
|
||||
@torch.no_grad
|
||||
def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
|
||||
"""Select a single action given environment observations.
|
||||
|
||||
This method wraps `select_actions` in order to return one action at a time for execution in the
|
||||
environment. It works by managing the actions in a queue and only calling `select_actions` when the
|
||||
queue is empty.
|
||||
"""
|
||||
self.eval()
|
||||
|
||||
if self.config.adapt_to_pi_aloha:
|
||||
batch[OBS_ROBOT] = self._pi_aloha_decode_state(batch[OBS_ROBOT])
|
||||
|
||||
batch = self.normalize_inputs(batch)
|
||||
|
||||
self._queues = populate_queues(self._queues, batch, exclude_keys=[ACTION])
|
||||
# Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by
|
||||
# querying the policy.
|
||||
if len(self._queues[ACTION]) == 0:
|
||||
for k in batch:
|
||||
if k in self._queues:
|
||||
batch[k] = torch.stack(list(self._queues[k]), dim=1)
|
||||
images, img_masks = self.prepare_images(batch)
|
||||
state = self.prepare_state(batch)
|
||||
lang_tokens, lang_masks = self.prepare_language(batch)
|
||||
|
||||
actions = self.model.sample_actions(
|
||||
images, img_masks, lang_tokens, lang_masks, state, noise=noise
|
||||
)
|
||||
# Unpad actions
|
||||
original_action_dim = self.config.action_feature.shape[0]
|
||||
actions = actions[:, :, :original_action_dim]
|
||||
|
||||
actions = self.unnormalize_outputs({"action": actions})["action"]
|
||||
|
||||
if self.config.adapt_to_pi_aloha:
|
||||
actions = self._pi_aloha_encode_actions(actions)
|
||||
|
||||
# `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue
|
||||
# effectively has shape (n_action_steps, batch_size, *), hence the transpose.
|
||||
self._queues[ACTION].extend(actions.transpose(0, 1)[: self.config.n_action_steps])
|
||||
return self._queues[ACTION].popleft()
|
||||
|
||||
def forward(self, batch: dict[str, Tensor], noise=None, time=None) -> dict[str, Tensor]:
|
||||
"""Do a full training forward pass to compute the loss"""
|
||||
if self.config.adapt_to_pi_aloha:
|
||||
batch[OBS_ROBOT] = self._pi_aloha_decode_state(batch[OBS_ROBOT])
|
||||
batch[ACTION] = self._pi_aloha_encode_actions_inv(batch[ACTION])
|
||||
batch = self.normalize_inputs(batch)
|
||||
batch = self.normalize_targets(batch)
|
||||
images, img_masks = self.prepare_images(batch)
|
||||
state = self.prepare_state(batch)
|
||||
lang_tokens, lang_masks = self.prepare_language(batch)
|
||||
actions = self.prepare_action(batch)
|
||||
actions_is_pad = batch.get("action_is_pad")
|
||||
loss_dict = {}
|
||||
losses = self.model.forward(images, img_masks, lang_tokens, lang_masks, state, actions, noise, time)
|
||||
loss_dict["losses_after_forward"] = losses.clone()
|
||||
|
||||
if actions_is_pad is not None:
|
||||
in_episode_bound = ~actions_is_pad
|
||||
losses = losses * in_episode_bound.unsqueeze(-1)
|
||||
loss_dict["losses_after_in_ep_bound"] = losses.clone()
|
||||
|
||||
# Remove padding
|
||||
losses = losses[:, :, : self.config.max_action_dim]
|
||||
loss_dict["losses_after_rm_padding"] = losses.clone()
|
||||
|
||||
# For backward pass
|
||||
loss = losses.mean()
|
||||
# For backward pass
|
||||
loss_dict["loss"] = loss
|
||||
return loss, loss_dict
|
||||
|
||||
def prepare_images(self, batch):
|
||||
"""Apply SmolVLA preprocessing to the images, like resizing to 224x224 and padding to keep aspect ratio, and
|
||||
convert pixel range from [0.0, 1.0] to [-1.0, 1.0] as requested by SigLIP.
|
||||
"""
|
||||
images = []
|
||||
img_masks = []
|
||||
present_img_keys = [key for key in self.config.image_features if key in batch]
|
||||
missing_img_keys = [key for key in self.config.image_features if key not in batch]
|
||||
|
||||
if len(present_img_keys) == 0:
|
||||
raise ValueError(
|
||||
f"All image features are missing from the batch. At least one expected. (batch: {batch.keys()}) (image_features:{self.config.image_features})"
|
||||
)
|
||||
# Preprocess image features present in the batch
|
||||
for key in present_img_keys:
|
||||
img = batch[key][:, -1, :, :, :] if batch[key].ndim == 5 else batch[key]
|
||||
if self.config.resize_imgs_with_padding is not None:
|
||||
img = resize_with_pad(img, *self.config.resize_imgs_with_padding, pad_value=0)
|
||||
|
||||
# Normalize from range [0,1] to [-1,1] as expected by SigLIP
|
||||
img = img * 2.0 - 1.0
|
||||
|
||||
bsize = img.shape[0]
|
||||
device = img.device
|
||||
if f"{key}_padding_mask" in batch:
|
||||
mask = batch[f"{key}_padding_mask"].bool()
|
||||
else:
|
||||
mask = torch.ones(bsize, dtype=torch.bool, device=device)
|
||||
images.append(img)
|
||||
img_masks.append(mask)
|
||||
|
||||
# Create image features not present in the batch
|
||||
# as fully 0 padded images.
|
||||
for num_empty_cameras in range(len(missing_img_keys)):
|
||||
if num_empty_cameras >= self.config.empty_cameras:
|
||||
break
|
||||
img = torch.ones_like(img) * -1
|
||||
mask = torch.zeros_like(mask)
|
||||
images.append(img)
|
||||
img_masks.append(mask)
|
||||
return images, img_masks
|
||||
|
||||
def prepare_language(self, batch) -> tuple[Tensor, Tensor]:
|
||||
"""Tokenize the text input"""
|
||||
device = batch[OBS_ROBOT].device
|
||||
tasks = batch["task"]
|
||||
if len(tasks) == 1:
|
||||
tasks = [tasks[0] for _ in range(batch[OBS_ROBOT].shape[0])]
|
||||
|
||||
tasks = [task if task.endswith("\n") else f"{task}\n" for task in tasks]
|
||||
tokenized_prompt = self.language_tokenizer.__call__(
|
||||
tasks,
|
||||
padding=self.config.pad_language_to,
|
||||
padding_side="right",
|
||||
max_length=self.config.tokenizer_max_length,
|
||||
return_tensors="pt",
|
||||
)
|
||||
lang_tokens = tokenized_prompt["input_ids"].to(device=device)
|
||||
lang_masks = tokenized_prompt["attention_mask"].to(device=device, dtype=torch.bool)
|
||||
|
||||
return lang_tokens, lang_masks
|
||||
|
||||
def _pi_aloha_decode_state(self, state):
|
||||
# Flip the joints.
|
||||
for motor_idx in [1, 2, 8, 9]:
|
||||
state[:, motor_idx] *= -1
|
||||
# Reverse the gripper transformation that is being applied by the Aloha runtime.
|
||||
for motor_idx in [6, 13]:
|
||||
state[:, motor_idx] = aloha_gripper_to_angular(state[:, motor_idx])
|
||||
return state
|
||||
|
||||
def _pi_aloha_encode_actions(self, actions):
|
||||
# Flip the joints.
|
||||
for motor_idx in [1, 2, 8, 9]:
|
||||
actions[:, :, motor_idx] *= -1
|
||||
# Reverse the gripper transformation that is being applied by the Aloha runtime.
|
||||
for motor_idx in [6, 13]:
|
||||
actions[:, :, motor_idx] = aloha_gripper_from_angular(actions[:, :, motor_idx])
|
||||
return actions
|
||||
|
||||
def _pi_aloha_encode_actions_inv(self, actions):
|
||||
# Flip the joints again.
|
||||
for motor_idx in [1, 2, 8, 9]:
|
||||
actions[:, :, motor_idx] *= -1
|
||||
# Reverse the gripper transformation that is being applied by the Aloha runtime.
|
||||
for motor_idx in [6, 13]:
|
||||
actions[:, :, motor_idx] = aloha_gripper_from_angular_inv(actions[:, :, motor_idx])
|
||||
return actions
|
||||
|
||||
def prepare_state(self, batch):
|
||||
"""Pad state"""
|
||||
state = batch[OBS_ROBOT][:, -1, :] if batch[OBS_ROBOT].ndim > 2 else batch[OBS_ROBOT]
|
||||
state = pad_vector(state, self.config.max_state_dim)
|
||||
return state
|
||||
|
||||
def prepare_action(self, batch):
|
||||
"""Pad action"""
|
||||
actions = pad_vector(batch[ACTION], self.config.max_action_dim)
|
||||
return actions
|
||||
|
||||
|
||||
def pad_tensor(tensor, max_len, pad_value=0):
|
||||
"""
|
||||
Efficiently pads a tensor along sequence dimension to match max_len.
|
||||
|
||||
Args:
|
||||
tensor (torch.Tensor): Shape (B, L, ...) or (B, L).
|
||||
max_len (int): Fixed sequence length.
|
||||
pad_value (int/float): Value for padding.
|
||||
|
||||
Returns:
|
||||
torch.Tensor: Shape (B, max_len, ...) or (B, max_len).
|
||||
"""
|
||||
b, d = tensor.shape[:2]
|
||||
|
||||
# Create a padded tensor of max_len and copy the existing values
|
||||
padded_tensor = torch.full(
|
||||
(b, max_len, *tensor.shape[2:]), pad_value, dtype=tensor.dtype, device=tensor.device
|
||||
)
|
||||
padded_tensor[:, :d] = tensor # Efficient in-place copy
|
||||
|
||||
return padded_tensor
|
||||
|
||||
|
||||
class VLAFlowMatching(nn.Module):
|
||||
"""
|
||||
SmolVLA
|
||||
|
||||
[Paper](https://huggingface.co/papers/2506.01844)
|
||||
|
||||
Designed by Hugging Face.
|
||||
┌──────────────────────────────┐
|
||||
│ actions │
|
||||
│ ▲ │
|
||||
│ ┌─────────┐ ┌─|────┐ │
|
||||
│ | │────► │ │ │
|
||||
│ | │ kv │ │ │
|
||||
│ | │────► │Action│ │
|
||||
│ | VLM │cache │Expert│ |
|
||||
│ │ │────► | │ │
|
||||
│ │ │ │ │ │
|
||||
│ └▲──▲───▲─┘ └───▲──┘ |
|
||||
│ │ | | │ |
|
||||
│ | | | noise │
|
||||
│ │ │ state │
|
||||
│ │ language tokens │
|
||||
│ image(s) │
|
||||
└──────────────────────────────┘
|
||||
"""
|
||||
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.config = config
|
||||
|
||||
self.vlm_with_expert = SmolVLMWithExpertModel(
|
||||
model_id=self.config.vlm_model_name,
|
||||
freeze_vision_encoder=self.config.freeze_vision_encoder,
|
||||
train_expert_only=self.config.train_expert_only,
|
||||
load_vlm_weights=self.config.load_vlm_weights,
|
||||
attention_mode=self.config.attention_mode,
|
||||
num_expert_layers=self.config.num_expert_layers,
|
||||
num_vlm_layers=self.config.num_vlm_layers,
|
||||
self_attn_every_n_layers=self.config.self_attn_every_n_layers,
|
||||
expert_width_multiplier=self.config.expert_width_multiplier,
|
||||
)
|
||||
self.state_proj = nn.Linear(
|
||||
self.config.max_state_dim, self.vlm_with_expert.config.text_config.hidden_size
|
||||
)
|
||||
self.action_in_proj = nn.Linear(self.config.max_action_dim, self.vlm_with_expert.expert_hidden_size)
|
||||
self.action_out_proj = nn.Linear(self.vlm_with_expert.expert_hidden_size, self.config.max_action_dim)
|
||||
|
||||
self.action_time_mlp_in = nn.Linear(
|
||||
self.vlm_with_expert.expert_hidden_size * 2, self.vlm_with_expert.expert_hidden_size
|
||||
)
|
||||
self.action_time_mlp_out = nn.Linear(
|
||||
self.vlm_with_expert.expert_hidden_size, self.vlm_with_expert.expert_hidden_size
|
||||
)
|
||||
|
||||
self.set_requires_grad()
|
||||
self.fake_image_token = self.vlm_with_expert.processor.tokenizer.fake_image_token_id
|
||||
self.global_image_token = self.vlm_with_expert.processor.tokenizer.global_image_token_id
|
||||
self.global_image_start_token = torch.tensor(
|
||||
[self.fake_image_token, self.global_image_token], dtype=torch.long
|
||||
)
|
||||
|
||||
self.add_image_special_tokens = self.config.add_image_special_tokens
|
||||
self.image_end_token = torch.tensor([self.fake_image_token], dtype=torch.long)
|
||||
self.prefix_length = self.config.prefix_length
|
||||
|
||||
def set_requires_grad(self):
|
||||
for params in self.state_proj.parameters():
|
||||
params.requires_grad = self.config.train_state_proj
|
||||
|
||||
def sample_noise(self, shape, device):
|
||||
noise = torch.normal(
|
||||
mean=0.0,
|
||||
std=1.0,
|
||||
size=shape,
|
||||
dtype=torch.float32,
|
||||
device=device,
|
||||
)
|
||||
return noise
|
||||
|
||||
def sample_time(self, bsize, device):
|
||||
time_beta = sample_beta(1.5, 1.0, bsize, device)
|
||||
time = time_beta * 0.999 + 0.001
|
||||
return time.to(dtype=torch.float32, device=device)
|
||||
|
||||
def embed_prefix(
|
||||
self, images, img_masks, lang_tokens, lang_masks, state: torch.Tensor = None
|
||||
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
"""Embed images with SigLIP and language tokens with embedding layer to prepare
|
||||
for SmolVLM transformer processing.
|
||||
"""
|
||||
embs = []
|
||||
pad_masks = []
|
||||
att_masks = []
|
||||
for _img_idx, (
|
||||
img,
|
||||
img_mask,
|
||||
) in enumerate(zip(images, img_masks, strict=False)):
|
||||
if self.add_image_special_tokens:
|
||||
image_start_token = (
|
||||
self.vlm_with_expert.embed_language_tokens(
|
||||
self.global_image_start_token.to(device=self.vlm_with_expert.vlm.device)
|
||||
)
|
||||
.unsqueeze(0)
|
||||
.expand(img.shape[0], -1, -1)
|
||||
)
|
||||
image_start_mask = torch.ones_like(
|
||||
image_start_token[:, :, 0], dtype=torch.bool, device=image_start_token.device
|
||||
)
|
||||
att_masks += [0] * (image_start_mask.shape[-1])
|
||||
embs.append(image_start_token)
|
||||
pad_masks.append(image_start_mask)
|
||||
|
||||
img_emb = self.vlm_with_expert.embed_image(img)
|
||||
img_emb = img_emb
|
||||
|
||||
# Normalize image embeddings
|
||||
img_emb_dim = img_emb.shape[-1]
|
||||
img_emb = img_emb * torch.tensor(img_emb_dim**0.5, dtype=img_emb.dtype, device=img_emb.device)
|
||||
|
||||
bsize, num_img_embs = img_emb.shape[:2]
|
||||
img_mask = img_mask[:, None].expand(bsize, num_img_embs)
|
||||
|
||||
embs.append(img_emb)
|
||||
pad_masks.append(img_mask)
|
||||
|
||||
att_masks += [0] * (num_img_embs)
|
||||
if self.add_image_special_tokens:
|
||||
image_end_token = (
|
||||
self.vlm_with_expert.embed_language_tokens(
|
||||
self.image_end_token.to(device=self.vlm_with_expert.vlm.device)
|
||||
)
|
||||
.unsqueeze(0)
|
||||
.expand(img.shape[0], -1, -1)
|
||||
)
|
||||
image_end_mask = torch.ones_like(
|
||||
image_end_token[:, :, 0], dtype=torch.bool, device=image_end_token.device
|
||||
)
|
||||
embs.append(image_end_token)
|
||||
pad_masks.append(image_end_mask)
|
||||
att_masks += [0] * (image_end_mask.shape[1])
|
||||
lang_emb = self.vlm_with_expert.embed_language_tokens(lang_tokens)
|
||||
# Normalize language embeddings
|
||||
lang_emb_dim = lang_emb.shape[-1]
|
||||
lang_emb = lang_emb * math.sqrt(lang_emb_dim)
|
||||
|
||||
embs.append(lang_emb)
|
||||
pad_masks.append(lang_masks)
|
||||
|
||||
num_lang_embs = lang_emb.shape[1]
|
||||
att_masks += [0] * num_lang_embs
|
||||
|
||||
state_emb = self.state_proj(state)
|
||||
state_emb = state_emb[:, None, :] if state_emb.ndim == 2 else state_emb
|
||||
embs.append(state_emb)
|
||||
bsize = state_emb.shape[0]
|
||||
device = state_emb.device
|
||||
|
||||
states_seq_len = state_emb.shape[1]
|
||||
state_mask = torch.ones(bsize, states_seq_len, dtype=torch.bool, device=device)
|
||||
pad_masks.append(state_mask)
|
||||
|
||||
# Set attention masks so that image and language inputs do not attend to state or actions
|
||||
att_masks += [1] * (states_seq_len)
|
||||
embs = torch.cat(embs, dim=1)
|
||||
pad_masks = torch.cat(pad_masks, dim=1)
|
||||
att_masks = torch.tensor(att_masks, dtype=torch.bool, device=pad_masks.device)
|
||||
att_masks = att_masks[None, :]
|
||||
|
||||
seq_len = pad_masks.shape[1]
|
||||
if seq_len < self.prefix_length:
|
||||
embs = pad_tensor(embs, self.prefix_length, pad_value=0)
|
||||
pad_masks = pad_tensor(pad_masks, self.prefix_length, pad_value=0)
|
||||
att_masks = pad_tensor(att_masks, self.prefix_length, pad_value=0)
|
||||
|
||||
att_masks = att_masks.expand(bsize, -1)
|
||||
|
||||
return embs, pad_masks, att_masks
|
||||
|
||||
def embed_suffix(self, noisy_actions, timestep):
|
||||
"""Embed state, noisy_actions, timestep to prepare for Expert Gemma processing."""
|
||||
embs = []
|
||||
pad_masks = []
|
||||
att_masks = []
|
||||
|
||||
# Fuse timestep + action information using an MLP
|
||||
action_emb = self.action_in_proj(noisy_actions)
|
||||
device = action_emb.device
|
||||
bsize = action_emb.shape[0]
|
||||
dtype = action_emb.dtype
|
||||
# Embed timestep using sine-cosine positional encoding with sensitivity in the range [0, 1]
|
||||
time_emb = create_sinusoidal_pos_embedding(
|
||||
timestep,
|
||||
self.vlm_with_expert.expert_hidden_size,
|
||||
self.config.min_period,
|
||||
self.config.max_period,
|
||||
device=device,
|
||||
)
|
||||
time_emb = time_emb.type(dtype=dtype)
|
||||
|
||||
time_emb = time_emb[:, None, :].expand_as(action_emb)
|
||||
action_time_emb = torch.cat([action_emb, time_emb], dim=2)
|
||||
|
||||
action_time_emb = self.action_time_mlp_in(action_time_emb)
|
||||
action_time_emb = F.silu(action_time_emb) # swish == silu
|
||||
action_time_emb = self.action_time_mlp_out(action_time_emb)
|
||||
|
||||
# Add to input tokens
|
||||
embs.append(action_time_emb)
|
||||
|
||||
bsize, action_time_dim = action_time_emb.shape[:2]
|
||||
action_time_mask = torch.ones(bsize, action_time_dim, dtype=torch.bool, device=device)
|
||||
pad_masks.append(action_time_mask)
|
||||
|
||||
# Set attention masks so that image, language and state inputs do not attend to action tokens
|
||||
att_masks += [1] * self.config.chunk_size
|
||||
embs = torch.cat(embs, dim=1)
|
||||
pad_masks = torch.cat(pad_masks, dim=1)
|
||||
att_masks = torch.tensor(att_masks, dtype=embs.dtype, device=embs.device)
|
||||
att_masks = att_masks[None, :].expand(bsize, len(att_masks))
|
||||
return embs, pad_masks, att_masks
|
||||
|
||||
def forward(
|
||||
self, images, img_masks, lang_tokens, lang_masks, state, actions, noise=None, time=None
|
||||
) -> Tensor:
|
||||
"""Do a full training forward pass and compute the loss (batch_size x num_steps x num_motors)"""
|
||||
if noise is None:
|
||||
noise = self.sample_noise(actions.shape, actions.device)
|
||||
|
||||
if time is None:
|
||||
time = self.sample_time(actions.shape[0], actions.device)
|
||||
|
||||
time_expanded = time[:, None, None]
|
||||
x_t = time_expanded * noise + (1 - time_expanded) * actions
|
||||
u_t = noise - actions
|
||||
prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(
|
||||
images, img_masks, lang_tokens, lang_masks, state=state
|
||||
)
|
||||
suffix_embs, suffix_pad_masks, suffix_att_masks = self.embed_suffix(x_t, time)
|
||||
|
||||
pad_masks = torch.cat([prefix_pad_masks, suffix_pad_masks], dim=1)
|
||||
att_masks = torch.cat([prefix_att_masks, suffix_att_masks], dim=1)
|
||||
|
||||
att_2d_masks = make_att_2d_masks(pad_masks, att_masks)
|
||||
position_ids = torch.cumsum(pad_masks, dim=1) - 1
|
||||
(_, suffix_out), _ = self.vlm_with_expert.forward(
|
||||
attention_mask=att_2d_masks,
|
||||
position_ids=position_ids,
|
||||
past_key_values=None,
|
||||
inputs_embeds=[prefix_embs, suffix_embs],
|
||||
use_cache=False,
|
||||
fill_kv_cache=False,
|
||||
)
|
||||
suffix_out = suffix_out[:, -self.config.chunk_size :]
|
||||
# Original openpi code, upcast attention output
|
||||
suffix_out = suffix_out.to(dtype=torch.float32)
|
||||
v_t = self.action_out_proj(suffix_out)
|
||||
losses = F.mse_loss(u_t, v_t, reduction="none")
|
||||
return losses
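The training target above is plain conditional flow matching; stripped of the model, it reduces to the following standalone sketch (toy shapes):
```python
import torch

actions = torch.randn(2, 50, 6)      # (batch, chunk_size, action_dim)
noise = torch.randn_like(actions)
t = torch.rand(2)[:, None, None]     # timestep in (0, 1), broadcast over chunk and action dims

x_t = t * noise + (1 - t) * actions  # noisy interpolant fed to the action expert
u_t = noise - actions                # regression target (velocity field)
# the expert predicts v_t from x_t and t; training minimizes MSE(u_t, v_t)
```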
|
||||
|
||||
def sample_actions(self, images, img_masks, lang_tokens, lang_masks, state, noise=None) -> Tensor:
|
||||
"""Do a full inference forward and compute the action (batch_size x num_steps x num_motors)"""
|
||||
bsize = state.shape[0]
|
||||
device = state.device
|
||||
|
||||
if noise is None:
|
||||
actions_shape = (bsize, self.config.chunk_size, self.config.max_action_dim)
|
||||
noise = self.sample_noise(actions_shape, device)
|
||||
|
||||
prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(
|
||||
images, img_masks, lang_tokens, lang_masks, state=state
|
||||
)
|
||||
prefix_att_2d_masks = make_att_2d_masks(prefix_pad_masks, prefix_att_masks)
|
||||
prefix_position_ids = torch.cumsum(prefix_pad_masks, dim=1) - 1
|
||||
# Compute image and language key value cache
|
||||
_, past_key_values = self.vlm_with_expert.forward(
|
||||
attention_mask=prefix_att_2d_masks,
|
||||
position_ids=prefix_position_ids,
|
||||
past_key_values=None,
|
||||
inputs_embeds=[prefix_embs, None],
|
||||
use_cache=self.config.use_cache,
|
||||
fill_kv_cache=True,
|
||||
)
|
||||
dt = -1.0 / self.config.num_steps
|
||||
dt = torch.tensor(dt, dtype=torch.float32, device=device)
|
||||
|
||||
x_t = noise
|
||||
time = torch.tensor(1.0, dtype=torch.float32, device=device)
|
||||
while time >= -dt / 2:
|
||||
expanded_time = time.expand(bsize)
|
||||
v_t = self.denoise_step(
|
||||
prefix_pad_masks,
|
||||
past_key_values,
|
||||
x_t,
|
||||
expanded_time,
|
||||
)
|
||||
# Euler step
|
||||
x_t += dt * v_t
|
||||
time += dt
|
||||
return x_t
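Inference integrates the learned velocity field from t=1 (noise) down to t=0 with fixed Euler steps; isolated from the model, the loop looks like this (the velocity below is a stand-in for `denoise_step`):
```python
import torch

num_steps = 10
dt = -1.0 / num_steps
x_t = torch.randn(1, 50, 6)  # start from pure noise at time t = 1
t = 1.0
while t >= -dt / 2:
    v_t = -x_t               # stand-in for the learned velocity field
    x_t = x_t + dt * v_t     # Euler step
    t += dt
```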
|
||||
|
||||
def denoise_step(
|
||||
self,
|
||||
prefix_pad_masks,
|
||||
past_key_values,
|
||||
x_t,
|
||||
timestep,
|
||||
):
|
||||
"""Apply one denoising step of the noise `x_t` at a given timestep."""
|
||||
suffix_embs, suffix_pad_masks, suffix_att_masks = self.embed_suffix(x_t, timestep)
|
||||
|
||||
suffix_len = suffix_pad_masks.shape[1]
|
||||
batch_size = prefix_pad_masks.shape[0]
|
||||
prefix_len = prefix_pad_masks.shape[1]
|
||||
prefix_pad_2d_masks = prefix_pad_masks[:, None, :].expand(batch_size, suffix_len, prefix_len)
|
||||
|
||||
suffix_att_2d_masks = make_att_2d_masks(suffix_pad_masks, suffix_att_masks)
|
||||
|
||||
full_att_2d_masks = torch.cat([prefix_pad_2d_masks, suffix_att_2d_masks], dim=2)
|
||||
prefix_offsets = torch.sum(prefix_pad_masks, dim=-1)[:, None]
|
||||
position_ids = prefix_offsets + torch.cumsum(suffix_pad_masks, dim=1) - 1
|
||||
|
||||
outputs_embeds, _ = self.vlm_with_expert.forward(
|
||||
attention_mask=full_att_2d_masks,
|
||||
position_ids=position_ids,
|
||||
past_key_values=past_key_values,
|
||||
inputs_embeds=[None, suffix_embs],
|
||||
use_cache=self.config.use_cache,
|
||||
fill_kv_cache=False,
|
||||
)
|
||||
suffix_out = outputs_embeds[1]
|
||||
suffix_out = suffix_out[:, -self.config.chunk_size :]
|
||||
suffix_out = suffix_out.to(dtype=torch.float32)
|
||||
v_t = self.action_out_proj(suffix_out)
|
||||
return v_t
|
||||
550
lerobot/common/policies/smolvla/smolvlm_with_expert.py
Normal file
@@ -0,0 +1,550 @@
|
||||
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import copy
|
||||
from typing import List, Optional
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
from transformers import (
|
||||
AutoConfig,
|
||||
AutoModel,
|
||||
AutoModelForImageTextToText,
|
||||
AutoProcessor,
|
||||
SmolVLMForConditionalGeneration,
|
||||
)
|
||||
|
||||
|
||||
def apply_rope(x, positions, max_wavelength=10_000):
|
||||
"""
|
||||
Applies RoPE positions [B, L] to x [B, L, H, D].
|
||||
"""
|
||||
d_half = x.shape[-1] // 2
|
||||
device = x.device
|
||||
dtype = x.dtype
|
||||
x = x.to(torch.float32)
|
||||
|
||||
freq_exponents = (2.0 / x.shape[-1]) * torch.arange(d_half, dtype=torch.float32, device=device)
|
||||
timescale = max_wavelength**freq_exponents
|
||||
radians = positions[..., None].to(torch.float32) / timescale[None, None, :].to(torch.float32)
|
||||
|
||||
radians = radians[..., None, :]
|
||||
|
||||
sin = torch.sin(radians) # .to(dtype=dtype)
|
||||
cos = torch.cos(radians) # .to(dtype=dtype)
|
||||
|
||||
x1, x2 = x.split(d_half, dim=-1)
|
||||
res = torch.empty_like(x)
|
||||
res[..., :d_half] = x1 * cos - x2 * sin
|
||||
res[..., d_half:] = x2 * cos + x1 * sin
|
||||
|
||||
return res.to(dtype)
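A quick shape and sanity check for this RoPE helper (assuming it is imported from this module):
```python
import torch

b, seq_len, heads, head_dim = 1, 4, 2, 8
x = torch.randn(b, seq_len, heads, head_dim)
positions = torch.arange(seq_len)[None, :]   # (B, L) token positions
x_rot = apply_rope(x, positions)
print(x_rot.shape)                           # torch.Size([1, 4, 2, 8])
print(torch.allclose(x_rot[:, 0], x[:, 0]))  # True: position 0 is unchanged (cos=1, sin=0)
```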
|
||||
|
||||
|
||||
def get_intermediate_size(hidden_dim, ffn_dim_multiplier=4, multiple_of=256):
|
||||
hidden_dim = int(2 * hidden_dim / 3)
|
||||
hidden_dim = int(ffn_dim_multiplier * hidden_dim)
|
||||
hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
|
||||
return hidden_dim
|
||||
|
||||
|
||||
class SmolVLMWithExpertModel(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
model_id: str = "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
|
||||
load_vlm_weights: bool = True,
|
||||
train_expert_only: bool = True,
|
||||
freeze_vision_encoder: bool = False,
|
||||
attention_mode: str = "self_attn",
|
||||
num_expert_layers: int = -1,
|
||||
num_vlm_layers: int = -1,
|
||||
self_attn_every_n_layers: int = -1,
|
||||
expert_width_multiplier: float = 0.5,
|
||||
):
|
||||
super().__init__()
|
||||
if load_vlm_weights:
|
||||
print(f"Loading {model_id} weights ...")
|
||||
self.vlm = AutoModelForImageTextToText.from_pretrained(
|
||||
model_id,
|
||||
device_map="auto",
|
||||
torch_dtype="bfloat16",
|
||||
low_cpu_mem_usage=True,
|
||||
)
|
||||
config = self.vlm.config
|
||||
else:
|
||||
config = AutoConfig.from_pretrained(model_id)
|
||||
self.vlm = SmolVLMForConditionalGeneration(config=config)
|
||||
self.processor = AutoProcessor.from_pretrained(model_id)
|
||||
if num_vlm_layers > 0:
|
||||
print(f"Reducing the number of VLM layers to {num_vlm_layers} ...")
|
||||
self.get_vlm_model().text_model.layers = self.get_vlm_model().text_model.layers[:num_vlm_layers]
|
||||
self.num_vlm_layers = len(self.get_vlm_model().text_model.layers)
|
||||
self.config = config
|
||||
# Smaller lm expert
|
||||
lm_expert_config = copy.deepcopy(config.text_config)
|
||||
hidden_size = lm_expert_config.hidden_size
|
||||
lm_expert_config.hidden_size = int(hidden_size * expert_width_multiplier) # hidden_size // 2
|
||||
lm_expert_config.intermediate_size = get_intermediate_size(int(hidden_size * expert_width_multiplier))
|
||||
lm_expert_config.num_hidden_layers = self.num_vlm_layers
|
||||
if num_expert_layers > 0:
|
||||
assert len(self.get_vlm_model().text_model.layers) % num_expert_layers == 0, (
|
||||
f"Number of layers in the VLM {len(self.get_vlm_model().text_model.layers)} are not multiple of num_expert_layers {num_expert_layers}"
|
||||
)
|
||||
lm_expert_config.num_hidden_layers = num_expert_layers
|
||||
self.lm_expert = AutoModel.from_config(lm_expert_config)
|
||||
|
||||
self.num_expert_layers = len(self.lm_expert.layers)
|
||||
self.self_attn_every_n_layers = self_attn_every_n_layers
|
||||
if "cross" in attention_mode:
|
||||
# Reshape qkv projections to have the same input dimension as the vlm
|
||||
for layer_idx in range(len(self.lm_expert.layers)):
|
||||
if self.self_attn_every_n_layers > 0 and layer_idx % self.self_attn_every_n_layers == 0:
|
||||
continue
|
||||
self.lm_expert.layers[layer_idx].self_attn.k_proj = nn.Linear(
|
||||
config.text_config.num_key_value_heads * config.text_config.head_dim,
|
||||
lm_expert_config.num_key_value_heads * lm_expert_config.head_dim,
|
||||
bias=lm_expert_config.attention_bias,
|
||||
)
|
||||
self.lm_expert.layers[layer_idx].self_attn.v_proj = nn.Linear(
|
||||
config.text_config.num_key_value_heads * config.text_config.head_dim,
|
||||
lm_expert_config.num_key_value_heads * lm_expert_config.head_dim,
|
||||
bias=lm_expert_config.attention_bias,
|
||||
)
|
||||
# Remove unused embed_tokens
|
||||
self.lm_expert.embed_tokens = None
|
||||
|
||||
self.num_attention_heads = self.config.text_config.num_attention_heads
|
||||
self.num_key_value_heads = self.config.text_config.num_key_value_heads
|
||||
|
||||
self.freeze_vision_encoder = freeze_vision_encoder
|
||||
self.train_expert_only = train_expert_only
|
||||
self.attention_mode = attention_mode
|
||||
self.expert_hidden_size = lm_expert_config.hidden_size
|
||||
self.set_requires_grad()
|
||||
|
||||
def get_vlm_model(self):
|
||||
return self.vlm.model
|
||||
|
||||
def set_requires_grad(self):
|
||||
if self.freeze_vision_encoder:
|
||||
self.get_vlm_model().vision_model.eval()
|
||||
for params in self.get_vlm_model().vision_model.parameters():
|
||||
params.requires_grad = False
|
||||
if self.train_expert_only:
|
||||
self.vlm.eval()
|
||||
for params in self.vlm.parameters():
|
||||
params.requires_grad = False
|
||||
else:
|
||||
# To avoid unused params issue with distributed training
|
||||
last_layers = [self.num_vlm_layers - 1]
|
||||
if (
|
||||
self.num_vlm_layers != self.num_expert_layers
|
||||
and self.num_vlm_layers % self.num_expert_layers == 0
|
||||
):
|
||||
last_layers.append(self.num_vlm_layers - 2)
|
||||
frozen_layers = [
|
||||
"lm_head",
|
||||
"text_model.model.norm.weight",
|
||||
]
|
||||
for layer in last_layers:
|
||||
frozen_layers.append(f"text_model.model.layers.{layer}.")
|
||||
|
||||
for name, params in self.vlm.named_parameters():
|
||||
if any(k in name for k in frozen_layers):
|
||||
params.requires_grad = False
|
||||
# To avoid unused params issue with distributed training
|
||||
for name, params in self.lm_expert.named_parameters():
|
||||
if "lm_head" in name:
|
||||
params.requires_grad = False
|
||||
|
||||
def train(self, mode: bool = True):
|
||||
super().train(mode)
|
||||
|
||||
if self.freeze_vision_encoder:
|
||||
self.get_vlm_model().vision_model.eval()
|
||||
|
||||
if self.train_expert_only:
|
||||
self.vlm.eval()
|
||||
|
||||
def embed_image(self, image: torch.Tensor):
|
||||
patch_attention_mask = None
|
||||
# Get sequence from the vision encoder
|
||||
image_hidden_states = (
|
||||
self.get_vlm_model()
|
||||
.vision_model(
|
||||
pixel_values=image.to(dtype=self.get_vlm_model().vision_model.dtype),
|
||||
patch_attention_mask=patch_attention_mask,
|
||||
)
|
||||
.last_hidden_state
|
||||
)
|
||||
# Modality projection & resampling
|
||||
image_hidden_states = self.get_vlm_model().connector(image_hidden_states)
|
||||
return image_hidden_states
|
||||
|
||||
def embed_language_tokens(self, tokens: torch.Tensor):
|
||||
return self.get_vlm_model().text_model.get_input_embeddings()(tokens)
|
||||
|
||||
def forward_attn_layer(
|
||||
self,
|
||||
model_layers,
|
||||
inputs_embeds,
|
||||
layer_idx,
|
||||
position_ids,
|
||||
attention_mask,
|
||||
batch_size,
|
||||
head_dim,
|
||||
use_cache: bool = True,
|
||||
fill_kv_cache: bool = True,
|
||||
past_key_values=None,
|
||||
) -> list[torch.Tensor]:
|
||||
query_states = []
|
||||
key_states = []
|
||||
value_states = []
|
||||
for i, hidden_states in enumerate(inputs_embeds):
|
||||
layer = model_layers[i][layer_idx]
|
||||
if hidden_states is None or layer is None:
|
||||
continue
|
||||
hidden_states = layer.input_layernorm(hidden_states)
|
||||
|
||||
input_shape = hidden_states.shape[:-1]
|
||||
hidden_shape = (*input_shape, -1, layer.self_attn.head_dim)
|
||||
|
||||
hidden_states = hidden_states.to(dtype=layer.self_attn.q_proj.weight.dtype)
|
||||
query_state = layer.self_attn.q_proj(hidden_states).view(hidden_shape)
|
||||
key_state = layer.self_attn.k_proj(hidden_states).view(hidden_shape)
|
||||
value_state = layer.self_attn.v_proj(hidden_states).view(hidden_shape)
|
||||
|
||||
query_states.append(query_state)
|
||||
key_states.append(key_state)
|
||||
value_states.append(value_state)
|
||||
|
||||
# B,L,H,D with L sequence length, H number of heads, D head dim
|
||||
# concatenate on the number of embeddings/tokens
|
||||
query_states = torch.cat(query_states, dim=1)
|
||||
key_states = torch.cat(key_states, dim=1)
|
||||
value_states = torch.cat(value_states, dim=1)
|
||||
seq_len = query_states.shape[1]
|
||||
if seq_len < position_ids.shape[1]:
|
||||
_position_ids = position_ids[:, :seq_len]
|
||||
_attention_mask = attention_mask[:, :seq_len, :seq_len]
|
||||
else:
|
||||
_position_ids = position_ids
|
||||
_attention_mask = attention_mask
|
||||
|
||||
attention_mask_ = _attention_mask
|
||||
position_ids_ = _position_ids
|
||||
|
||||
query_states = apply_rope(query_states, position_ids_)
|
||||
key_states = apply_rope(key_states, position_ids_)
|
||||
|
||||
if use_cache and past_key_values is None:
|
||||
past_key_values = {}
|
||||
|
||||
if use_cache:
|
||||
if fill_kv_cache:
|
||||
past_key_values[layer_idx] = {
|
||||
"key_states": key_states,
|
||||
"value_states": value_states,
|
||||
}
|
||||
else:
|
||||
# TODO here, some optimization can be done - similar to a `StaticCache` we can declare the `max_len` before.
|
||||
# so we create an empty cache, with just one cuda malloc, and if (in autoregressive case) we reach
|
||||
# the max len, then we (for instance) double the cache size. This implementation already exists
|
||||
# in `transformers`. (molbap)
|
||||
key_states = torch.cat([past_key_values[layer_idx]["key_states"], key_states], dim=1)
|
||||
value_states = torch.cat([past_key_values[layer_idx]["value_states"], value_states], dim=1)
|
||||
|
||||
attention_interface = self.get_attention_interface()
|
||||
|
||||
att_output = attention_interface(
|
||||
attention_mask_, batch_size, head_dim, query_states, key_states, value_states
|
||||
)
|
||||
return [att_output], past_key_values
|
||||
|
||||
def forward_cross_attn_layer(
|
||||
self,
|
||||
model_layers,
|
||||
inputs_embeds,
|
||||
layer_idx,
|
||||
position_ids,
|
||||
attention_mask,
|
||||
batch_size,
|
||||
head_dim,
|
||||
use_cache: bool = True,
|
||||
fill_kv_cache: bool = True,
|
||||
past_key_values=None,
|
||||
) -> list[torch.Tensor]:
|
||||
attention_interface = self.get_attention_interface()
|
||||
|
||||
att_outputs = []
|
||||
assert len(inputs_embeds) == 2 or (use_cache and past_key_values is not None and not fill_kv_cache), (
|
||||
f"Both len(inputs_embeds) == {len(inputs_embeds)} and past_key_values is {past_key_values}"
|
||||
)
|
||||
|
||||
if len(inputs_embeds) == 2 and not past_key_values:
|
||||
# Prefix attention
|
||||
seq_len = inputs_embeds[0].shape[1]
|
||||
position_id, expert_position_id = position_ids[:, :seq_len], position_ids[:, seq_len:]
|
||||
prefix_attention_mask = attention_mask[:, :seq_len, :seq_len]
|
||||
|
||||
layer = model_layers[0][layer_idx]
|
||||
|
||||
hidden_states = layer.input_layernorm(inputs_embeds[0])
|
||||
|
||||
input_shape = hidden_states.shape[:-1]
|
||||
hidden_shape = (*input_shape, -1, layer.self_attn.head_dim)
|
||||
|
||||
hidden_states = hidden_states.to(dtype=layer.self_attn.q_proj.weight.dtype)
|
||||
query_state = layer.self_attn.q_proj(hidden_states).view(hidden_shape)
|
||||
key_state = layer.self_attn.k_proj(hidden_states).view(hidden_shape)
|
||||
value_states = layer.self_attn.v_proj(hidden_states).view(hidden_shape)
|
||||
|
||||
# B,L,H,D with L sequence length, H number of heads, D head dim
|
||||
query_states = apply_rope(query_state, position_id)
|
||||
key_states = apply_rope(key_state, position_id)
|
||||
|
||||
att_output = attention_interface(
|
||||
prefix_attention_mask, batch_size, head_dim, query_states, key_states, value_states
|
||||
)
|
||||
att_outputs.append(att_output)
|
||||
else:
|
||||
expert_position_id = position_ids
|
||||
|
||||
if use_cache and past_key_values is None:
|
||||
past_key_values = {}
|
||||
|
||||
if use_cache:
|
||||
if fill_kv_cache:
|
||||
past_key_values[layer_idx] = {
|
||||
"key_states": key_states,
|
||||
"value_states": value_states,
|
||||
}
|
||||
else:
|
||||
# TODO here, some optimization can be done - similar to a `StaticCache` we can declare the `max_len` before.
|
||||
# so we create an empty cache, with just one cuda malloc, and if (in autoregressive case) we reach
|
||||
# the max len, then we (for instance) double the cache size. This implementation already exists
|
||||
# in `transformers`. (molbap)
|
||||
key_states = past_key_values[layer_idx]["key_states"]
|
||||
value_states = past_key_values[layer_idx]["value_states"]
|
||||
|
||||
# Expert
|
||||
expert_layer = model_layers[1][layer_idx]
|
||||
if expert_layer is not None:
|
||||
expert_hidden_states = expert_layer.input_layernorm(inputs_embeds[1])
|
||||
|
||||
expert_input_shape = expert_hidden_states.shape[:-1]
|
||||
expert_hidden_shape = (*expert_input_shape, -1, expert_layer.self_attn.head_dim)
|
||||
|
||||
expert_hidden_states = expert_hidden_states.to(dtype=expert_layer.self_attn.q_proj.weight.dtype)
|
||||
expert_query_state = expert_layer.self_attn.q_proj(expert_hidden_states).view(expert_hidden_shape)
|
||||
|
||||
_key_states = key_states.to(dtype=expert_layer.self_attn.k_proj.weight.dtype).view(
|
||||
*key_states.shape[:2], -1
|
||||
)
|
||||
expert_key_states = expert_layer.self_attn.k_proj(_key_states).view(
|
||||
*_key_states.shape[:-1], -1, expert_layer.self_attn.head_dim
|
||||
) # k_proj should have same dim as kv
|
||||
|
||||
_value_states = value_states.to(dtype=expert_layer.self_attn.v_proj.weight.dtype).view(
|
||||
*value_states.shape[:2], -1
|
||||
)
|
||||
expert_value_states = expert_layer.self_attn.v_proj(_value_states).view(
|
||||
*_value_states.shape[:-1], -1, expert_layer.self_attn.head_dim
|
||||
)
|
||||
|
||||
expert_position_id = (
|
||||
expert_position_id - torch.min(expert_position_id, dim=1, keepdim=True).values
|
||||
) # start from 0
|
||||
expert_attention_mask = attention_mask[
|
||||
:, -inputs_embeds[1].shape[1] :, : expert_key_states.shape[1] :
|
||||
] # take into account kv
|
||||
|
||||
expert_query_states = apply_rope(expert_query_state, expert_position_id)
|
||||
|
||||
att_output = attention_interface(
|
||||
expert_attention_mask,
|
||||
batch_size,
|
||||
head_dim,
|
||||
expert_query_states,
|
||||
expert_key_states,
|
||||
expert_value_states,
|
||||
)
|
||||
att_outputs.append(att_output)
|
||||
else:
|
||||
att_outputs.append(None)
|
||||
|
||||
# att_output = att_output.to(dtype=models[i].dtype)
|
||||
return att_outputs, past_key_values
|
||||
|
||||
def get_model_layers(self, models: list) -> list:
|
||||
vlm_layers = []
|
||||
expert_layers = []
|
||||
multiple_of = self.num_vlm_layers // self.num_expert_layers
|
||||
for i in range(self.num_vlm_layers):
|
||||
if multiple_of > 0 and i > 0 and i % multiple_of != 0:
|
||||
expert_layer = None
|
||||
else:
|
||||
expert_layer_index = i // multiple_of if multiple_of > 0 else i
|
||||
expert_layer = models[1].layers[expert_layer_index]
|
||||
vlm_layers.append(models[0].layers[i])
|
||||
expert_layers.append(expert_layer)
|
||||
return [vlm_layers, expert_layers]
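To see how expert layers are interleaved with the VLM layers, a toy illustration with hypothetical layer counts:
```python
num_vlm_layers, num_expert_layers = 16, 8
multiple_of = num_vlm_layers // num_expert_layers  # 2
pairs = [(i, i // multiple_of if i % multiple_of == 0 else None) for i in range(num_vlm_layers)]
print(pairs[:4])  # [(0, 0), (1, None), (2, 1), (3, None)] -> an expert layer every 2nd VLM layer
```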
|
||||
|
||||
def forward(
|
||||
self,
|
||||
attention_mask: Optional[torch.Tensor] = None,
|
||||
position_ids: Optional[torch.LongTensor] = None,
|
||||
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
||||
inputs_embeds: List[torch.FloatTensor] = None,
|
||||
use_cache: Optional[bool] = None,
|
||||
fill_kv_cache: Optional[bool] = None,
|
||||
):
|
||||
models = [self.get_vlm_model().text_model, self.lm_expert]
|
||||
model_layers = self.get_model_layers(models)
|
||||
for hidden_states in inputs_embeds:
|
||||
# TODO this is very inefficient
|
||||
# dtype is always the same, batch size too (if > 1 len)
|
||||
# device could be trickier in multi gpu edge cases but that's it
|
||||
if hidden_states is None:
|
||||
continue
|
||||
batch_size = hidden_states.shape[0]
|
||||
|
||||
# RMSNorm
|
||||
num_layers = self.num_vlm_layers
|
||||
head_dim = self.vlm.config.text_config.head_dim
|
||||
for layer_idx in range(num_layers):
|
||||
if (
|
||||
fill_kv_cache
|
||||
or "cross" not in self.attention_mode
|
||||
or (self.self_attn_every_n_layers > 0 and layer_idx % self.self_attn_every_n_layers == 0)
|
||||
):
|
||||
att_outputs, past_key_values = self.forward_attn_layer(
|
||||
model_layers,
|
||||
inputs_embeds,
|
||||
layer_idx,
|
||||
position_ids,
|
||||
attention_mask,
|
||||
batch_size,
|
||||
head_dim,
|
||||
use_cache=use_cache,
|
||||
fill_kv_cache=fill_kv_cache,
|
||||
past_key_values=past_key_values,
|
||||
)
|
||||
else:
|
||||
att_outputs, past_key_values = self.forward_cross_attn_layer(
|
||||
model_layers,
|
||||
inputs_embeds,
|
||||
layer_idx,
|
||||
position_ids,
|
||||
attention_mask,
|
||||
batch_size,
|
||||
head_dim,
|
||||
use_cache=use_cache,
|
||||
fill_kv_cache=fill_kv_cache,
|
||||
past_key_values=past_key_values,
|
||||
)
|
||||
outputs_embeds = []
|
||||
start = 0
|
||||
for i, hidden_states in enumerate(inputs_embeds):
|
||||
layer = model_layers[i][layer_idx]
|
||||
att_output = (
|
||||
att_outputs[i] if i < len(att_outputs) else att_outputs[0]
|
||||
) # in case of self_attn
|
||||
if hidden_states is not None:
|
||||
if layer is None:
|
||||
outputs_embeds.append(hidden_states)
|
||||
continue
|
||||
end = start + hidden_states.shape[1]
|
||||
|
||||
if att_output.dtype != layer.self_attn.o_proj.weight.dtype:
|
||||
att_output = att_output.to(layer.self_attn.o_proj.weight.dtype)
|
||||
att_out = att_output[:, start:end]
|
||||
out_emb = layer.self_attn.o_proj(att_out)
|
||||
|
||||
out_emb += hidden_states
|
||||
after_first_residual = out_emb.clone()
|
||||
|
||||
out_emb = layer.post_attention_layernorm(out_emb)
|
||||
out_emb = layer.mlp(out_emb)
|
||||
|
||||
out_emb += after_first_residual
|
||||
|
||||
outputs_embeds.append(out_emb)
|
||||
|
||||
start = end if len(att_outputs) == 1 else 0
|
||||
else:
|
||||
outputs_embeds.append(None)
|
||||
|
||||
inputs_embeds = outputs_embeds
|
||||
|
||||
# final norm
|
||||
outputs_embeds = []
|
||||
for i, hidden_states in enumerate(inputs_embeds):
|
||||
if hidden_states is not None:
|
||||
out_emb = models[i].norm(hidden_states)
|
||||
outputs_embeds.append(out_emb)
|
||||
else:
|
||||
outputs_embeds.append(None)
|
||||
return outputs_embeds, past_key_values
|
||||
|
||||
def get_attention_interface(self):
|
||||
attention_interface = self.eager_attention_forward
|
||||
return attention_interface
|
||||
|
||||
def eager_attention_forward(
|
||||
self, attention_mask, batch_size, head_dim, query_states, key_states, value_states
|
||||
):
|
||||
num_att_heads = self.num_attention_heads
|
||||
num_key_value_heads = self.num_key_value_heads
|
||||
num_key_value_groups = num_att_heads // num_key_value_heads
|
||||
|
||||
sequence_length = key_states.shape[1]
|
||||
|
||||
key_states = key_states[:, :, :, None, :].expand(
|
||||
batch_size, sequence_length, num_key_value_heads, num_key_value_groups, head_dim
|
||||
)
|
||||
key_states = key_states.reshape(
|
||||
batch_size, sequence_length, num_key_value_heads * num_key_value_groups, head_dim
|
||||
)
|
||||
|
||||
value_states = value_states[:, :, :, None, :].expand(
|
||||
batch_size, sequence_length, num_key_value_heads, num_key_value_groups, head_dim
|
||||
)
|
||||
value_states = value_states.reshape(
|
||||
batch_size, sequence_length, num_key_value_heads * num_key_value_groups, head_dim
|
||||
)
|
||||
|
||||
# Attention here is upcasted to float32 to match the original eager implementation.
|
||||
query_states = query_states.to(dtype=torch.float32)
|
||||
key_states = key_states.to(dtype=torch.float32)
|
||||
|
||||
query_states = query_states.transpose(1, 2)
|
||||
key_states = key_states.transpose(1, 2)
|
||||
|
||||
att_weights = torch.matmul(query_states, key_states.transpose(2, 3))
|
||||
att_weights *= head_dim**-0.5
|
||||
|
||||
att_weights = att_weights.to(dtype=torch.float32)
|
||||
big_neg = torch.finfo(att_weights.dtype).min # -2.3819763e38 # See gemma/modules.py
|
||||
masked_att_weights = torch.where(attention_mask[:, None, :, :], att_weights, big_neg)
|
||||
probs = nn.functional.softmax(masked_att_weights, dim=-1)
|
||||
probs = probs.to(dtype=value_states.dtype)
|
||||
|
||||
att_output = torch.matmul(probs, value_states.permute(0, 2, 1, 3))
|
||||
|
||||
att_output = att_output.permute(0, 2, 1, 3)
|
||||
# we use -1 because sequence length can change
|
||||
att_output = att_output.reshape(batch_size, -1, num_key_value_heads * num_key_value_groups * head_dim)
|
||||
|
||||
return att_output
|
||||
@@ -512,13 +512,13 @@ if __name__ == "__main__":
|
||||
)
|
||||
parser.add_argument(
|
||||
"--width",
|
||||
type=str,
|
||||
type=int,
|
||||
default=640,
|
||||
help="Set the width for all cameras. If not provided, use the default width of each camera.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--height",
|
||||
type=str,
|
||||
type=int,
|
||||
default=480,
|
||||
help="Set the height for all cameras. If not provided, use the default height of each camera.",
|
||||
)
|
||||
|
||||
@@ -492,13 +492,13 @@ if __name__ == "__main__":
|
||||
)
|
||||
parser.add_argument(
|
||||
"--width",
|
||||
type=str,
|
||||
type=int,
|
||||
default=None,
|
||||
help="Set the width for all cameras. If not provided, use the default width of each camera.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--height",
|
||||
type=str,
|
||||
type=int,
|
||||
default=None,
|
||||
help="Set the height for all cameras. If not provided, use the default height of each camera.",
|
||||
)
|
||||
|
||||
@@ -58,7 +58,7 @@ def log_control_info(robot: Robot, dt_s, episode_index=None, frame_index=None, f
|
||||
log_dt("dt", dt_s)
|
||||
|
||||
# TODO(aliberts): move robot-specific logs logic in robot.print_logs()
|
||||
if not robot.robot_type.startswith("stretch"):
|
||||
if not robot.robot_type.startswith(("stretch", "realman")):
|
||||
for name in robot.leader_arms:
|
||||
key = f"read_leader_{name}_pos_dt_s"
|
||||
if key in robot.logs:
|
||||
@@ -109,6 +109,10 @@ def predict_action(observation, policy, device, use_amp):
|
||||
):
|
||||
# Convert to pytorch format: channel first and float32 in [0,1] with batch dimension
|
||||
for name in observation:
|
||||
# Skip all observations that are not tensors (e.g. text)
|
||||
if not isinstance(observation[name], torch.Tensor):
|
||||
continue
|
||||
|
||||
if "image" in name:
|
||||
observation[name] = observation[name].type(torch.float32) / 255
|
||||
observation[name] = observation[name].permute(2, 0, 1).contiguous()
|
||||
@@ -243,6 +247,11 @@ def control_loop(
|
||||
|
||||
timestamp = 0
|
||||
start_episode_t = time.perf_counter()
|
||||
|
||||
# Control loop starts; if a policy is given, it needs to be reset first
|
||||
if policy is not None:
|
||||
policy.reset()
|
||||
|
||||
while timestamp < control_time_s:
|
||||
start_loop_t = time.perf_counter()
|
||||
|
||||
@@ -250,7 +259,9 @@ def control_loop(
|
||||
observation, action = robot.teleop_step(record_data=True)
|
||||
else:
|
||||
observation = robot.capture_observation()
|
||||
|
||||
action = None
|
||||
observation["task"] = [single_task]
|
||||
observation["robot_type"] = [policy.robot_type] if hasattr(policy, "robot_type") else [""]
|
||||
if policy is not None:
|
||||
pred_action = predict_action(
|
||||
observation, policy, get_safe_torch_device(policy.config.device), policy.config.use_amp
|
||||
@@ -261,14 +272,16 @@ def control_loop(
|
||||
action = {"action": action}
|
||||
|
||||
if dataset is not None:
|
||||
observation = {k: v for k, v in observation.items() if k not in ["task", "robot_type"]}
|
||||
frame = {**observation, **action, "task": single_task}
|
||||
dataset.add_frame(frame)
|
||||
|
||||
# TODO(Steven): This should be more general (for RemoteRobot instead of checking the name, but anyways it will change soon)
|
||||
if (display_data and not is_headless()) or (display_data and robot.robot_type.startswith("lekiwi")):
|
||||
for k, v in action.items():
|
||||
for i, vv in enumerate(v):
|
||||
rr.log(f"sent_{k}_{i}", rr.Scalar(vv.numpy()))
|
||||
if action is not None:
|
||||
for k, v in action.items():
|
||||
for i, vv in enumerate(v):
|
||||
rr.log(f"sent_{k}_{i}", rr.Scalar(vv.numpy()))
|
||||
|
||||
image_keys = [key for key in observation if "image" in key]
|
||||
for key in image_keys:
|
||||
|
||||
@@ -39,3 +39,24 @@ class FeetechMotorsBusConfig(MotorsBusConfig):
|
||||
port: str
|
||||
motors: dict[str, tuple[int, str]]
|
||||
mock: bool = False
|
||||
|
||||
|
||||
@MotorsBusConfig.register_subclass("realman")
|
||||
@dataclass
|
||||
class RealmanMotorsBusConfig(MotorsBusConfig):
|
||||
ip: str
|
||||
port: int
|
||||
motors: dict[str, tuple[int, str]]
|
||||
init_joint: dict[str, list]
|
||||
|
||||
|
||||
@MotorsBusConfig.register_subclass("realman_dual")
|
||||
@dataclass
|
||||
class RealmanDualMotorsBusConfig(MotorsBusConfig):
|
||||
left_ip: str
|
||||
right_ip: str
|
||||
left_port: int
|
||||
right_port: int
|
||||
motors: dict[str, tuple[int, str]]
|
||||
init_joint: dict[str, list]
|
||||
axis: dict[str, int]
|
||||
150
lerobot/common/robot_devices/motors/realman.py
Normal file
@@ -0,0 +1,150 @@
|
||||
import time
|
||||
from typing import Dict
|
||||
from lerobot.common.robot_devices.motors.configs import RealmanMotorsBusConfig
|
||||
from Robotic_Arm.rm_robot_interface import *
|
||||
|
||||
|
||||
class RealmanMotorsBus:
|
||||
"""
|
||||
A thin wrapper around the Realman SDK.
|
||||
"""
|
||||
def __init__(self,
|
||||
config: RealmanMotorsBusConfig):
|
||||
self.rmarm = RoboticArm(rm_thread_mode_e.RM_TRIPLE_MODE_E)
|
||||
self.handle = self.rmarm.rm_create_robot_arm(config.ip, config.port)
|
||||
self.motors = config.motors
|
||||
self.init_joint_position = config.init_joint['joint'] # [6 joints + 1 gripper]
|
||||
self.safe_disable_position = config.init_joint['joint']
|
||||
self.rmarm.rm_movej(self.init_joint_position[:-1], 5, 0, 0, 1)
|
||||
time.sleep(3)
|
||||
ret = self.rmarm.rm_get_current_arm_state()
|
||||
self.init_pose = ret[1]['pose']
|
||||
|
||||
@property
|
||||
def motor_names(self) -> list[str]:
|
||||
return list(self.motors.keys())
|
||||
|
||||
@property
|
||||
def motor_models(self) -> list[str]:
|
||||
return [model for _, model in self.motors.values()]
|
||||
|
||||
@property
|
||||
def motor_indices(self) -> list[int]:
|
||||
return [idx for idx, _ in self.motors.values()]
|
||||
|
||||
|
||||
def connect(self, enable=True) -> bool:
|
||||
'''
|
||||
使能机械臂并检测使能状态,尝试5s,如果使能超时则退出程序
|
||||
'''
|
||||
enable_flag = False
|
||||
loop_flag = False
|
||||
# 设置超时时间(秒)
|
||||
timeout = 5
|
||||
# 记录进入循环前的时间
|
||||
start_time = time.time()
|
||||
elapsed_time_flag = False
|
||||
|
||||
while not loop_flag:
|
||||
elapsed_time = time.time() - start_time
|
||||
print("--------------------")
|
||||
|
||||
if enable:
|
||||
# 获取机械臂状态
|
||||
ret = self.rmarm.rm_get_current_arm_state()
|
||||
if ret[0] == 0: # 成功获取状态
|
||||
enable_flag = True
|
||||
else:
|
||||
enable_flag = False
|
||||
# 断开所有连接,销毁线程
|
||||
RoboticArm.rm_destory()
|
||||
print("使能状态:", enable_flag)
|
||||
print("--------------------")
|
||||
if(enable_flag == enable):
|
||||
loop_flag = True
|
||||
enable_flag = True
|
||||
else:
|
||||
loop_flag = False
|
||||
enable_flag = False
|
||||
# 检查是否超过超时时间
|
||||
if elapsed_time > timeout:
|
||||
print("超时....")
|
||||
elapsed_time_flag = True
|
||||
enable_flag = True
|
||||
break
|
||||
time.sleep(1)
|
||||
|
||||
resp = enable_flag
|
||||
print(f"Returning response: {resp}")
|
||||
return resp
|
||||
|
||||
def motor_names(self):
|
||||
return
|
||||
|
||||
def set_calibration(self):
|
||||
return
|
||||
|
||||
def revert_calibration(self):
|
||||
return
|
||||
|
||||
def apply_calibration(self):
|
||||
"""
|
||||
移动到初始位置
|
||||
"""
|
||||
self.write(target_joint=self.init_joint_position)
|
||||
|
||||
def write(self, target_joint:list):
|
||||
# self.rmarm.rm_movej(target_joint[:-1], 50, 0, 0, 1)
|
||||
self.rmarm.rm_movej_follow(target_joint[:-1])
|
||||
self.rmarm.rm_set_gripper_position(target_joint[-1], block=False, timeout=2)
|
||||
|
||||
def write_endpose(self, target_endpose: list, gripper: int):
|
||||
self.rmarm.rm_movej_p(target_endpose, 50, 0, 0, 1)
|
||||
self.rmarm.rm_set_gripper_position(gripper, block=False, timeout=2)
|
||||
|
||||
def write_joint_slow(self, target_joint: list):
|
||||
self.rmarm.rm_movej(target_joint, 5, 0, 0, 0)
|
||||
|
||||
def write_joint_canfd(self, target_joint: list):
|
||||
self.rmarm.rm_movej_canfd(target_joint, False)
|
||||
|
||||
def write_endpose_canfd(self, target_pose: list):
|
||||
self.rmarm.rm_movep_canfd(target_pose, False)
|
||||
|
||||
def write_gripper(self, gripper: int):
|
||||
self.rmarm.rm_set_gripper_position(gripper, False, 2)
|
||||
|
||||
def read(self) -> Dict:
|
||||
"""
|
||||
- 机械臂关节消息,单位1度;[-1, 1]
|
||||
- 机械臂夹爪消息,[-1, 1]
|
||||
"""
|
||||
joint_msg = self.rmarm.rm_get_current_arm_state()[1]
|
||||
joint_state = joint_msg['joint']
|
||||
|
||||
gripper_msg = self.rmarm.rm_get_gripper_state()[1]
|
||||
gripper_state = gripper_msg['actpos']
|
||||
|
||||
return {
|
||||
"joint_1": joint_state[0]/180,
|
||||
"joint_2": joint_state[1]/180,
|
||||
"joint_3": joint_state[2]/180,
|
||||
"joint_4": joint_state[3]/180,
|
||||
"joint_5": joint_state[4]/180,
|
||||
"joint_6": joint_state[5]/180,
|
||||
"gripper": (gripper_state-500)/500
|
||||
}
|
||||
|
||||
def read_current_arm_joint_state(self):
|
||||
return self.rmarm.rm_get_current_arm_state()[1]['joint']
|
||||
|
||||
def read_current_arm_endpose_state(self):
|
||||
return self.rmarm.rm_get_current_arm_state()[1]['pose']
|
||||
|
||||
def safe_disconnect(self):
|
||||
"""
|
||||
Move to safe disconnect position
|
||||
"""
|
||||
self.write(target_joint=self.safe_disable_position)
|
||||
# 断开所有连接,销毁线程
|
||||
RoboticArm.rm_destory()
|
||||
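To orient the reader, here is a minimal usage sketch of the bus defined above. It assumes the `Robotic_Arm` SDK is installed and an arm is reachable at the placeholder address; the joint values are illustrative.

```python
# Minimal sketch, assuming a reachable arm at the placeholder IP below.
from lerobot.common.robot_devices.motors.configs import RealmanMotorsBusConfig
from lerobot.common.robot_devices.motors.realman import RealmanMotorsBus

motors = {f"joint_{i}": (i, "realman") for i in range(1, 7)}
motors["gripper"] = (7, "realman")

cfg = RealmanMotorsBusConfig(
    ip="192.168.3.18",
    port=8080,
    motors=motors,
    init_joint={"joint": [-90, 90, 90, 90, 90, -90, 10]},
)
bus = RealmanMotorsBus(cfg)        # moves to the init joint position on construction
if bus.connect(enable=True):       # polls the arm state for up to 5 s
    state = bus.read()             # normalized joints in [-1, 1] plus gripper
    print(state)
    bus.write(target_joint=[-90, 90, 90, 90, 90, -90, 500])  # joints in degrees + raw gripper position
    bus.safe_disconnect()
```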
351
lerobot/common/robot_devices/motors/realman_dual.py
Normal file
@@ -0,0 +1,351 @@
|
||||
import time
|
||||
import threading
|
||||
from typing import Dict
|
||||
from dataclasses import dataclass
|
||||
from contextlib import contextmanager
|
||||
from lerobot.common.robot_devices.motors.configs import RealmanDualMotorsBusConfig
|
||||
from Robotic_Arm.rm_robot_interface import *
|
||||
|
||||
|
||||
def compare_joint_difference(master_joints, follow_joints, threshold=30.0):
|
||||
"""
|
||||
比较主臂和从臂关节数据的差异
|
||||
|
||||
Args:
|
||||
master_joints (list): 主臂关节数据 [joint1, joint2, ..., joint6]
|
||||
follow_joints (list): 从臂关节数据 [joint1, joint2, ..., joint6]
|
||||
threshold (float): 差异阈值(度),默认5度
|
||||
|
||||
Returns:
|
||||
bool: True表示差异在阈值内,False表示超过阈值
|
||||
"""
|
||||
# 检查数据长度
|
||||
if len(master_joints) != len(follow_joints):
|
||||
return False
|
||||
|
||||
# 计算每个关节的绝对差异
|
||||
for i in range(len(master_joints)):
|
||||
diff = abs(master_joints[i] - follow_joints[i])
|
||||
if diff > threshold:
|
||||
return False
|
||||
|
||||
return True
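A quick worked example of the check above, with made-up joint values in degrees:

```python
master = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]
follow = [12.0, 18.0, 31.0, 40.0, 49.0, 61.0]
compare_joint_difference(master, follow)                 # True: every joint differs by less than 30 degrees
compare_joint_difference(master, follow, threshold=1.0)  # False: several joints differ by more than 1 degree
```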
|
||||
|
||||
|
||||
@dataclass
|
||||
class ArmState:
|
||||
"""机械臂状态数据类"""
|
||||
joint_positions: list
|
||||
gripper_position: int
|
||||
pose: list
|
||||
|
||||
|
||||
class RealmanDualMotorsBus:
|
||||
"""
|
||||
A thin wrapper around the Realman SDK (dual-arm version).
|
||||
"""
|
||||
def __init__(self, config: RealmanDualMotorsBusConfig):
|
||||
self.config = config
|
||||
# import pdb; pdb.set_trace()
|
||||
self._initialize_arms()
|
||||
self._initialize_parameters()
|
||||
self._initialize_positions()
|
||||
self._initialize_threading()
|
||||
|
||||
def _initialize_arms(self):
|
||||
"""初始化机械臂连接"""
|
||||
self.left_rmarm = RoboticArm(rm_thread_mode_e.RM_TRIPLE_MODE_E)
|
||||
self.right_rmarm = RoboticArm(rm_thread_mode_e.RM_TRIPLE_MODE_E)
|
||||
self.handle_left = self.left_rmarm.rm_create_robot_arm(
|
||||
self.config.left_ip, self.config.left_port
|
||||
)
|
||||
self.handle_right = self.right_rmarm.rm_create_robot_arm(
|
||||
self.config.right_ip, self.config.right_port
|
||||
)
|
||||
|
||||
def _initialize_parameters(self):
|
||||
"""初始化参数"""
|
||||
self.motors = self.config.motors
|
||||
self.axis = self.config.axis
|
||||
self.joint_count = sum(self.axis.values())
|
||||
self.left_offset = self.axis['left_joint']
|
||||
|
||||
def _initialize_positions(self):
|
||||
"""初始化位置"""
|
||||
self.init_joint_position = self.config.init_joint['joint']
|
||||
self.safe_disable_position = self.config.init_joint['joint']
|
||||
|
||||
# 移动到初始位置
|
||||
self._move_to_initial_position()
|
||||
|
||||
# 获取初始姿态
|
||||
time.sleep(3)
|
||||
self.init_pose = self._get_initial_pose()
|
||||
|
||||
def _initialize_threading(self):
|
||||
"""初始化线程控制"""
|
||||
self.left_slow_busy = False
|
||||
self.right_slow_busy = False
|
||||
self.gripper_busy = False
|
||||
self._thread_lock = threading.Lock()
|
||||
|
||||
# 添加读取相关的线程控制
|
||||
self._state_cache = {"joint": {}, "pose": {}}
|
||||
self._cache_lock = threading.Lock()
|
||||
self._keep_reading = True
|
||||
|
||||
# 启动后台读取线程
|
||||
self._start_background_readers()
|
||||
|
||||
def _start_background_readers(self):
|
||||
"""启动后台读取线程"""
|
||||
# 读取线程
|
||||
threading.Thread(
|
||||
target=self._read_task,
|
||||
daemon=True,
|
||||
name="arm_reader"
|
||||
).start()
|
||||
|
||||
@property
|
||||
def motor_names(self) -> list[str]:
|
||||
return list(self.motors.keys())
|
||||
|
||||
@property
|
||||
def motor_models(self) -> list[str]:
|
||||
return [model for _, model in self.motors.values()]
|
||||
|
||||
@property
|
||||
def motor_indices(self) -> list[int]:
|
||||
return [idx for idx, _ in self.motors.values()]
|
||||
|
||||
@contextmanager
|
||||
def _timeout_context(self, timeout: float = 5.0):
|
||||
"""超时上下文管理器"""
|
||||
start_time = time.time()
|
||||
try:
|
||||
yield lambda: time.time() - start_time < timeout
|
||||
except Exception as e:
|
||||
raise TimeoutError(f"操作超时: {e}")
|
||||
|
||||
def _read_task(self):
|
||||
"""左臂后台读取任务 - 模仿_left_slow_task的风格"""
|
||||
while self._keep_reading:
|
||||
try:
|
||||
left_state = self._read_arm_state(self.left_rmarm, "left")
|
||||
with self._cache_lock:
|
||||
self._state_cache["joint"].update(left_state["joint"])
|
||||
self._state_cache["pose"].update(left_state["pose"])
|
||||
except Exception as e:
|
||||
print(f"左臂读取失败: {e}")
|
||||
|
||||
try:
|
||||
right_state = self._read_arm_state(self.right_rmarm, "right")
|
||||
with self._cache_lock:
|
||||
self._state_cache["joint"].update(right_state["joint"])
|
||||
self._state_cache["pose"].update(right_state["pose"])
|
||||
except Exception as e:
|
||||
print(f"右臂读取失败: {e}")
|
||||
|
||||
def _read_arm_state(self, arm: RoboticArm, prefix: str) -> dict:
|
||||
"""读取单臂状态 - 保持原有逻辑"""
|
||||
joint_msg = arm.rm_get_current_arm_state()[1]
|
||||
gripper_msg = arm.rm_get_gripper_state()[1]
|
||||
|
||||
joint_state = joint_msg['joint']
|
||||
gripper_state = gripper_msg['actpos']
|
||||
pose_state = joint_msg['pose']
|
||||
|
||||
joint_state_dict = {}
|
||||
for i in range(len(joint_state)):
|
||||
joint_state_dict[f"{prefix}_joint_{i+1}"] = joint_state[i]
|
||||
joint_state_dict[f"{prefix}_gripper"] = gripper_state
|
||||
|
||||
pose_state_dict = {
|
||||
f"{prefix}_x": pose_state[0],
|
||||
f"{prefix}_y": pose_state[1],
|
||||
f"{prefix}_z": pose_state[2],
|
||||
f"{prefix}_rx": pose_state[3],
|
||||
f"{prefix}_ry": pose_state[4],
|
||||
f"{prefix}_rz": pose_state[5],
|
||||
}
|
||||
|
||||
return {"joint": joint_state_dict, 'pose': pose_state_dict}
|
||||
|
||||
def _move_to_initial_position(self):
|
||||
"""移动到初始位置"""
|
||||
left_joints = self.init_joint_position[:self.left_offset]
|
||||
right_joints = self.init_joint_position[self.left_offset+1:-1]
|
||||
|
||||
self.left_rmarm.rm_movej(left_joints, 5, 0, 0, 1)
|
||||
self.right_rmarm.rm_movej(right_joints, 5, 0, 0, 1)
|
||||
|
||||
def _get_initial_pose(self) -> list:
|
||||
"""获取初始姿态"""
|
||||
left_ret = self.left_rmarm.rm_get_current_arm_state()
|
||||
right_ret = self.right_rmarm.rm_get_current_arm_state()
|
||||
return left_ret[1]['pose'] + right_ret[1]['pose']
|
||||
|
||||
def _validate_joint_count(self, joints: list, expected_count: int):
|
||||
"""验证关节数量"""
|
||||
if len(joints) != expected_count:
|
||||
raise ValueError(f"关节数量不匹配: 期望 {expected_count}, 实际 {len(joints)}")
|
||||
|
||||
def _execute_slow_movement(self, arm: str, joint_data: list):
|
||||
"""执行慢速运动"""
|
||||
busy_flag = f"{arm}_slow_busy"
|
||||
|
||||
if not getattr(self, busy_flag):
|
||||
setattr(self, busy_flag, True)
|
||||
|
||||
target_method = getattr(self, f"_{arm}_slow_task")
|
||||
threading.Thread(
|
||||
target=target_method,
|
||||
args=(joint_data.copy(),),
|
||||
daemon=True
|
||||
).start()
|
||||
|
||||
def _left_slow_task(self, joint_data: list):
|
||||
"""左臂慢速任务"""
|
||||
try:
|
||||
self.write_left_joint_slow(joint_data)
|
||||
finally:
|
||||
self.left_slow_busy = False
|
||||
|
||||
def _right_slow_task(self, joint_data: list):
|
||||
"""右臂慢速任务"""
|
||||
try:
|
||||
self.write_right_joint_slow(joint_data)
|
||||
finally:
|
||||
self.right_slow_busy = False
|
||||
|
||||
def _execute_arm_action(self, arm: str, action: dict, master_joint: list, follow_joint: list):
|
||||
"""执行单臂动作"""
|
||||
controller_status = action['master_controller_status'][arm]
|
||||
|
||||
if controller_status['infrared'] == 1:
|
||||
if compare_joint_difference(master_joint, follow_joint):
|
||||
if arm == 'left':
|
||||
self.write_left_joint_canfd(master_joint)
|
||||
else:
|
||||
self.write_right_joint_canfd(master_joint)
|
||||
else:
|
||||
self._execute_slow_movement(arm, master_joint)
|
||||
|
||||
def write_endpose(self, target_endpose: list):
|
||||
assert len(target_endpose) == 12, "the length of target_endpose is not equal to 12"
|
||||
self.left_rmarm.rm_movej_p(target_endpose[:6], 50, 0, 0, 1)
|
||||
self.right_rmarm.rm_movej_p(target_endpose[6:], 50, 0, 0, 1)
|
||||
|
||||
def write_left_joint_slow(self, left_joint: list):
|
||||
assert len(left_joint) == self.left_offset, "len of left master joint is not equal the count of left joint"
|
||||
self.left_rmarm.rm_movej(left_joint, 5, 0, 0, 1)
|
||||
|
||||
def write_right_joint_slow(self, right_joint: list):
|
||||
assert len(right_joint) == self.left_offset, "len of right master joint is not equal the count of right joint"
|
||||
self.right_rmarm.rm_movej(right_joint, 5, 0, 0, 1)
|
||||
|
||||
def write_left_joint_canfd(self, left_joint: list):
|
||||
assert len(left_joint) == self.left_offset, "len of left master joint is not equal the count of left joint"
|
||||
self.left_rmarm.rm_movej_canfd(left_joint, False)
|
||||
|
||||
def write_right_joint_canfd(self, right_joint: list):
|
||||
assert len(right_joint) == self.left_offset, "len of right master joint is not equal the count of right joint"
|
||||
self.right_rmarm.rm_movej_canfd(right_joint, False)
|
||||
|
||||
def write_endpose_canfd(self, target_endpose: list):
|
||||
assert len(target_endpose) == 12, "the length of target pose is not equal 12"
|
||||
self.left_rmarm.rm_movep_canfd(target_endpose[:6], False)
|
||||
self.right_rmarm.rm_movep_canfd(target_endpose[6:], False)
|
||||
|
||||
def write_dual_gripper(self, left_gripper: int, right_gripper: int):
|
||||
try:
|
||||
self.left_rmarm.rm_set_gripper_position(left_gripper, False, 2)
|
||||
self.right_rmarm.rm_set_gripper_position(right_gripper, False, 2)
|
||||
finally:
|
||||
self.gripper_busy = False
|
||||
|
||||
def _execute_gripper_thread(self, left_gripper: int, right_gripper: int):
|
||||
if not getattr(self, 'gripper_busy'):
|
||||
setattr(self, 'gripper_busy', True)
|
||||
|
||||
threading.Thread(
|
||||
target=self.write_dual_gripper,
|
||||
args=(left_gripper, right_gripper),
|
||||
daemon=True
|
||||
).start()
|
||||
|
||||
def read_current_arm_joint_state(self):
|
||||
return self.left_rmarm.rm_get_current_arm_state()[1]['joint'] + self.right_rmarm.rm_get_current_arm_state()[1]['joint']
|
||||
|
||||
def read_current_arm_endpose_state(self):
|
||||
return self.left_rmarm.rm_get_current_arm_state()[1]['pose'] + self.right_rmarm.rm_get_current_arm_state()[1]['pose']
|
||||
|
||||
########################## lerobot function ##########################
|
||||
|
||||
def connect(self, enable: bool = True) -> bool:
|
||||
"""使能机械臂并检测使能状态"""
|
||||
with self._timeout_context() as is_timeout_valid:
|
||||
while is_timeout_valid():
|
||||
try:
|
||||
if enable:
|
||||
left_ret = self.left_rmarm.rm_get_current_arm_state()
|
||||
right_ret = self.right_rmarm.rm_get_current_arm_state()
|
||||
if left_ret[0] == 0 and right_ret[0] == 0:
|
||||
print("机械臂使能成功")
|
||||
return True
|
||||
else:
|
||||
RoboticArm.rm_destory()
|
||||
print("机械臂断开连接")
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"连接异常: {e}")
|
||||
time.sleep(1)
|
||||
print("连接超时")
|
||||
return False
|
||||
|
||||
def set_calibration(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def revert_calibration(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def apply_calibration(self):
|
||||
"""
|
||||
移动到初始位置
|
||||
"""
|
||||
self.write(target_joint=self.init_joint_position)
|
||||
|
||||
def write(self, target_joint: list):
|
||||
"""写入关节位置"""
|
||||
self._validate_joint_count(target_joint, self.joint_count)
|
||||
|
||||
left_joints = target_joint[:self.left_offset]
|
||||
left_gripper = target_joint[self.left_offset]
|
||||
right_joints = target_joint[self.left_offset+1:-1]
|
||||
right_gripper = target_joint[-1]
|
||||
|
||||
self.left_rmarm.rm_movej_canfd(left_joints, follow=False)
|
||||
# self.left_rmarm.rm_movej_follow(left_joints)
|
||||
# self.left_rmarm.rm_set_gripper_position(left_gripper, block=False, timeout=2)
|
||||
self.right_rmarm.rm_movej_canfd(right_joints, follow=False)
|
||||
# self.right_rmarm.rm_movej_follow(right_joints)
|
||||
# self.right_rmarm.rm_set_gripper_position(right_gripper, block=False, timeout=2)
|
||||
self._execute_gripper_thread(left_gripper, right_gripper)
|
||||
|
||||
|
||||
def read(self) -> Dict:
|
||||
"""读取机械臂状态 - 直接从缓存获取"""
|
||||
with self._cache_lock:
|
||||
return self._state_cache.copy()
|
||||
|
||||
def safe_disconnect(self):
|
||||
"""安全断开连接"""
|
||||
try:
|
||||
self.write(target_joint=self.safe_disable_position)
|
||||
time.sleep(2) # 等待移动完成
|
||||
except Exception as e:
|
||||
print(f"移动到安全位置失败: {e}")
|
||||
finally:
|
||||
RoboticArm.rm_destory()
|
||||
|
||||
########################## lerobot function ##########################
|
||||
@@ -44,6 +44,15 @@ def make_motors_buses_from_configs(motors_bus_configs: dict[str, MotorsBusConfig

            motors_buses[key] = FeetechMotorsBus(cfg)

        elif cfg.type == "realman":
            from lerobot.common.robot_devices.motors.realman import RealmanMotorsBus

            motors_buses[key] = RealmanMotorsBus(cfg)

        elif cfg.type == "realman_dual":
            from lerobot.common.robot_devices.motors.realman_dual import RealmanDualMotorsBus

            motors_buses[key] = RealmanDualMotorsBus(cfg)
        else:
            raise ValueError(f"The motor type '{cfg.type}' is not valid.")

@@ -65,3 +74,7 @@ def make_motors_bus(motor_type: str, **kwargs) -> MotorsBus:

    else:
        raise ValueError(f"The motor type '{motor_type}' is not valid.")


def get_motor_names(arm: dict[str, MotorsBus]) -> list:
    return [f"{arm}_{motor}" for arm, bus in arm.items() for motor in bus.motors]
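Putting the two registration points together, building a bus from a config dict would look roughly like this; a sketch, with placeholder address and motor entries.

```python
# Sketch: the `type` discriminator on each config selects the bus class.
from lerobot.common.robot_devices.motors.configs import RealmanMotorsBusConfig
from lerobot.common.robot_devices.motors.utils import make_motors_buses_from_configs

configs = {
    "main": RealmanMotorsBusConfig(
        ip="192.168.3.18",
        port=8080,
        motors={"joint_1": (1, "realman"), "gripper": (7, "realman")},
        init_joint={"joint": [-90, 90, 90, 90, 90, -90, 10]},
    ),
}
buses = make_motors_buses_from_configs(configs)  # {"main": RealmanMotorsBus(...)}
```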
@@ -27,6 +27,8 @@ from lerobot.common.robot_devices.motors.configs import (
|
||||
DynamixelMotorsBusConfig,
|
||||
FeetechMotorsBusConfig,
|
||||
MotorsBusConfig,
|
||||
RealmanMotorsBusConfig,
|
||||
RealmanDualMotorsBusConfig
|
||||
)
|
||||
|
||||
|
||||
@@ -431,6 +433,69 @@ class MossRobotConfig(ManipulatorRobotConfig):
|
||||
mock: bool = False
|
||||
|
||||
|
||||
@RobotConfig.register_subclass("so101")
|
||||
@dataclass
|
||||
class So101RobotConfig(ManipulatorRobotConfig):
|
||||
calibration_dir: str = ".cache/calibration/so101"
|
||||
# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
|
||||
# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
|
||||
# the number of motors in your follower arms.
|
||||
max_relative_target: int | None = None
|
||||
|
||||
leader_arms: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"main": FeetechMotorsBusConfig(
|
||||
port="/dev/tty.usbmodem58760431091",
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"shoulder_pan": [1, "sts3215"],
|
||||
"shoulder_lift": [2, "sts3215"],
|
||||
"elbow_flex": [3, "sts3215"],
|
||||
"wrist_flex": [4, "sts3215"],
|
||||
"wrist_roll": [5, "sts3215"],
|
||||
"gripper": [6, "sts3215"],
|
||||
},
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
follower_arms: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"main": FeetechMotorsBusConfig(
|
||||
port="/dev/tty.usbmodem585A0076891",
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"shoulder_pan": [1, "sts3215"],
|
||||
"shoulder_lift": [2, "sts3215"],
|
||||
"elbow_flex": [3, "sts3215"],
|
||||
"wrist_flex": [4, "sts3215"],
|
||||
"wrist_roll": [5, "sts3215"],
|
||||
"gripper": [6, "sts3215"],
|
||||
},
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
cameras: dict[str, CameraConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"laptop": OpenCVCameraConfig(
|
||||
camera_index=0,
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
),
|
||||
"phone": OpenCVCameraConfig(
|
||||
camera_index=1,
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
mock: bool = False
|
||||
|
||||
|
||||
@RobotConfig.register_subclass("so100")
|
||||
@dataclass
|
||||
class So100RobotConfig(ManipulatorRobotConfig):
|
||||
@@ -611,3 +676,147 @@ class LeKiwiRobotConfig(RobotConfig):
|
||||
)
|
||||
|
||||
mock: bool = False
|
||||
|
||||
|
||||
@RobotConfig.register_subclass("realman")
|
||||
@dataclass
|
||||
class RealmanRobotConfig(RobotConfig):
|
||||
inference_time: bool = False
|
||||
max_gripper: int = 990
|
||||
min_gripper: int = 10
|
||||
servo_config_file: str = "/home/maic/LYT/lerobot/lerobot/common/robot_devices/teleop/servo_arm.yaml"
|
||||
|
||||
|
||||
left_follower_arm: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"main": RealmanMotorsBusConfig(
|
||||
ip = "192.168.3.18",
|
||||
port = 8080,
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"joint_1": [1, "realman"],
|
||||
"joint_2": [2, "realman"],
|
||||
"joint_3": [3, "realman"],
|
||||
"joint_4": [4, "realman"],
|
||||
"joint_5": [5, "realman"],
|
||||
"joint_6": [6, "realman"],
|
||||
"gripper": [7, "realman"],
|
||||
},
|
||||
init_joint = {'joint': [-90, 90, 90, 90, 90, -90, 10]}
|
||||
)
|
||||
}
|
||||
)
|
||||
|
||||
cameras: dict[str, CameraConfig] = field(
|
||||
default_factory=lambda: {
|
||||
# "one": OpenCVCameraConfig(
|
||||
# camera_index=4,
|
||||
# fps=30,
|
||||
# width=640,
|
||||
# height=480,
|
||||
# ),
|
||||
"left": IntelRealSenseCameraConfig(
|
||||
serial_number="153122077516",
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
use_depth=False
|
||||
),
|
||||
# "right": IntelRealSenseCameraConfig(
|
||||
# serial_number="405622075165",
|
||||
# fps=30,
|
||||
# width=640,
|
||||
# height=480,
|
||||
# use_depth=False
|
||||
# ),
|
||||
"front": IntelRealSenseCameraConfig(
|
||||
serial_number="145422072751",
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
use_depth=False
|
||||
),
|
||||
"high": IntelRealSenseCameraConfig(
|
||||
serial_number="145422072193",
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
use_depth=False
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
|
||||
@RobotConfig.register_subclass("realman_dual")
|
||||
@dataclass
|
||||
class RealmanDualRobotConfig(RobotConfig):
|
||||
inference_time: bool = False
|
||||
max_gripper: int = 990
|
||||
min_gripper: int = 10
|
||||
servo_config_file: str = "/home/maic/LYT/lerobot/lerobot/common/robot_devices/teleop/servo_dual.yaml"
|
||||
left_end_control_guid: str = '0300b14bff1100003708000010010000'
|
||||
right_end_control_guid: str = '0300e639bc2000007f50000011010000'
|
||||
|
||||
follower_arm: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"main": RealmanDualMotorsBusConfig(
|
||||
axis= {'left_joint': 6, 'left_gripper': 1, 'right_joint': 6, 'right_gripper': 1},
|
||||
left_ip = "192.168.3.18",
|
||||
left_port = 8080,
|
||||
right_ip = "192.168.3.19",
|
||||
right_port = 8080,
|
||||
init_joint = {'joint': [-170, 90, 0, 90, 120, 0, 10, 170, 90, 0, -90, 120, 0, 10]},
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"left_joint_1": [1, "realman"],
|
||||
"left_joint_2": [2, "realman"],
|
||||
"left_joint_3": [3, "realman"],
|
||||
"left_joint_4": [4, "realman"],
|
||||
"left_joint_5": [5, "realman"],
|
||||
"left_joint_6": [6, "realman"],
|
||||
"left_gripper": [7, "realman"],
|
||||
"right_joint_1": [8, "realman"],
|
||||
"right_joint_2": [9, "realman"],
|
||||
"right_joint_3": [10, "realman"],
|
||||
"right_joint_4": [11, "realman"],
|
||||
"right_joint_5": [12, "realman"],
|
||||
"right_joint_6": [13, "realman"],
|
||||
"right_gripper": [14, "realman"]
|
||||
},
|
||||
)
|
||||
}
|
||||
)
|
||||
|
||||
cameras: dict[str, CameraConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"left": IntelRealSenseCameraConfig(
|
||||
serial_number="153122077516",
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
use_depth=False
|
||||
),
|
||||
"right": IntelRealSenseCameraConfig(
|
||||
serial_number="405622075165",
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
use_depth=False
|
||||
),
|
||||
"front": IntelRealSenseCameraConfig(
|
||||
serial_number="145422072751",
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
use_depth=False
|
||||
),
|
||||
"high": IntelRealSenseCameraConfig(
|
||||
serial_number="145422072193",
|
||||
fps=30,
|
||||
width=640,
|
||||
height=480,
|
||||
use_depth=False
|
||||
),
|
||||
}
|
||||
)
|
||||
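The `axis` mapping above encodes how the flat 14-element joint vector is split between the two arms. The following sketch mirrors the slicing done in `RealmanDualMotorsBus.write()` with `left_offset = axis['left_joint']`; the values are the defaults shown above.

```python
# Sketch of how the flat 14-element action maps onto the dual-arm layout above.
axis = {"left_joint": 6, "left_gripper": 1, "right_joint": 6, "right_gripper": 1}
left_offset = axis["left_joint"]                  # 6

target_joint = [-170, 90, 0, 90, 120, 0, 10,      # left joints (deg) + left gripper
                 170, 90, 0, -90, 120, 0, 10]     # right joints (deg) + right gripper

left_joints = target_joint[:left_offset]          # 6 left joint targets
left_gripper = target_joint[left_offset]          # left gripper position
right_joints = target_joint[left_offset + 1:-1]   # 6 right joint targets
right_gripper = target_joint[-1]                  # right gripper position
```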
@@ -36,6 +36,12 @@ ZERO_POSITION_DEGREE = 0
ROTATED_POSITION_DEGREE = 90


def reset_middle_positions(arm: MotorsBus):
    input("Please move the robot to the new middle position for calibration, then press Enter...")
    # Write 128 to Torque_Enable for all motors.
    arm.write("Torque_Enable", 128)


def assert_drive_mode(drive_mode):
    # `drive_mode` is in [0,1] with 0 means original rotation direction for the motor, and 1 means inverted.
    if not np.all(np.isin(drive_mode, [0, 1])):
@@ -439,6 +445,8 @@ def run_arm_manual_calibration(arm: MotorsBus, robot_type: str, arm_name: str, a

    print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")

    reset_middle_positions(arm)

    print("\nMove arm to zero position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="zero"))
    input("Press Enter to continue...")
25
lerobot/common/robot_devices/robots/llm_prompt.yaml
Normal file
@@ -0,0 +1,25 @@
system_prompt: |
  You are a smart dual-arm robot assistant:
  **Robot setup:**
  - Left arm: equipped with a gripper, can grasp objects
  - Right arm: no gripper, mainly used for auxiliary positioning and observation
  - Four cameras: high camera (top-down view), front camera (front view), left camera (left-arm view), right camera (right-arm view)
  **Task goal:** {query}
  **Workflow:**
  1. I show you the current frames from the 4 cameras
  2. You analyze the scene and give the next concrete operating instruction
  3. I execute your instruction and then show you updated frames
  4. Repeat this process until the task is complete
  **Key requirements:**
  - Give only the single most critical instruction (ONE STEP) each time
  - Instructions must be specific and executable (e.g. "move the left gripper to 5 cm above the test tube, ready to descend and grasp")
  - If a view is unclear, ask for the camera position to be adjusted
  - The left arm grasps; the right arm assists with observation and positioning
  - Instructions must be executable; avoid vague descriptions
  **Output format requirements:**
  - Output plain text only, without any Markdown formatting symbols
  - Do not use asterisks, hashes, underscores or other formatting symbols
  - Give a concise analysis and a concrete instruction directly
  - The output should be suitable for text-to-speech playback
  **Output format:**
  Concise analysis + concrete operating instruction
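For context, `ask_llm` in `robots/utils.py` (later in this diff) consumes this file roughly as follows; a trimmed sketch of that loading step.

```python
# Trimmed sketch of how ask_llm() loads this prompt (see robots/utils.py below).
import os
import yaml

prompt_path = os.path.join(os.path.dirname(__file__), "llm_prompt.yaml")
with open(prompt_path, "r", encoding="utf-8") as f:
    prompts = yaml.safe_load(f)

# {query} is substituted with the task description before the chat starts.
system_prompt = prompts["system_prompt"].replace("{query}", "Move the blue test tube into the sonicator")
history = [{"role": "system", "content": system_prompt}]
```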
@@ -243,7 +243,7 @@ class ManipulatorRobot:

        if self.robot_type in ["koch", "koch_bimanual", "aloha"]:
            from lerobot.common.robot_devices.motors.dynamixel import TorqueMode
        elif self.robot_type in ["so100", "moss", "lekiwi"]:
        elif self.robot_type in ["so100", "so101", "moss", "lekiwi"]:
            from lerobot.common.robot_devices.motors.feetech import TorqueMode

        # We assume that at connection time, arms are in a rest position, and torque can
@@ -260,7 +260,7 @@ class ManipulatorRobot:
            self.set_koch_robot_preset()
        elif self.robot_type == "aloha":
            self.set_aloha_robot_preset()
        elif self.robot_type in ["so100", "moss", "lekiwi"]:
        elif self.robot_type in ["so100", "so101", "moss", "lekiwi"]:
            self.set_so100_robot_preset()

        # Enable torque on all motors of the follower arms
@@ -313,7 +313,7 @@ class ManipulatorRobot:

            calibration = run_arm_calibration(arm, self.robot_type, name, arm_type)

        elif self.robot_type in ["so100", "moss", "lekiwi"]:
        elif self.robot_type in ["so100", "so101", "moss", "lekiwi"]:
            from lerobot.common.robot_devices.robots.feetech_calibration import (
                run_arm_manual_calibration,
            )
292
lerobot/common/robot_devices/robots/realman.py
Normal file
@@ -0,0 +1,292 @@
|
||||
"""
|
||||
Teleoperation of a Realman arm with a PS5 controller.
|
||||
"""
|
||||
|
||||
import time
|
||||
import torch
|
||||
import numpy as np
|
||||
from dataclasses import dataclass, field, replace
|
||||
from collections import deque
|
||||
from lerobot.common.robot_devices.teleop.realman_single import HybridController
|
||||
from lerobot.common.robot_devices.motors.utils import get_motor_names, make_motors_buses_from_configs
|
||||
from lerobot.common.robot_devices.cameras.utils import make_cameras_from_configs
|
||||
from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
|
||||
from lerobot.common.robot_devices.robots.configs import RealmanRobotConfig
|
||||
|
||||
|
||||
class RealmanRobot:
|
||||
def __init__(self, config: RealmanRobotConfig | None = None, **kwargs):
|
||||
if config is None:
|
||||
config = RealmanRobotConfig()
|
||||
# Overwrite config arguments using kwargs
|
||||
self.config = replace(config, **kwargs)
|
||||
self.robot_type = self.config.type
|
||||
self.inference_time = self.config.inference_time # if it is inference time
|
||||
|
||||
# build cameras
|
||||
self.cameras = make_cameras_from_configs(self.config.cameras)
|
||||
|
||||
# build realman motors
|
||||
self.piper_motors = make_motors_buses_from_configs(self.config.left_follower_arm)
|
||||
self.arm = self.piper_motors['main']
|
||||
|
||||
# build init teleop info
|
||||
self.init_info = {
|
||||
'init_joint': self.arm.init_joint_position,
|
||||
'init_pose': self.arm.init_pose,
|
||||
'max_gripper': config.max_gripper,
|
||||
'min_gripper': config.min_gripper,
|
||||
'servo_config_file': config.servo_config_file
|
||||
}
|
||||
|
||||
# build state-action cache
|
||||
self.joint_queue = deque(maxlen=2)
|
||||
self.last_endpose = self.arm.init_pose
|
||||
|
||||
# build gamepad teleop
|
||||
if not self.inference_time:
|
||||
self.teleop = HybridController(self.init_info)
|
||||
else:
|
||||
self.teleop = None
|
||||
|
||||
self.logs = {}
|
||||
self.is_connected = False
|
||||
|
||||
@property
|
||||
def camera_features(self) -> dict:
|
||||
cam_ft = {}
|
||||
for cam_key, cam in self.cameras.items():
|
||||
key = f"observation.images.{cam_key}"
|
||||
cam_ft[key] = {
|
||||
"shape": (cam.height, cam.width, cam.channels),
|
||||
"names": ["height", "width", "channels"],
|
||||
"info": None,
|
||||
}
|
||||
return cam_ft
|
||||
|
||||
|
||||
@property
|
||||
def motor_features(self) -> dict:
|
||||
action_names = get_motor_names(self.piper_motors)
|
||||
state_names = get_motor_names(self.piper_motors)
|
||||
return {
|
||||
"action": {
|
||||
"dtype": "float32",
|
||||
"shape": (len(action_names),),
|
||||
"names": action_names,
|
||||
},
|
||||
"observation.state": {
|
||||
"dtype": "float32",
|
||||
"shape": (len(state_names),),
|
||||
"names": state_names,
|
||||
},
|
||||
}
|
||||
|
||||
@property
|
||||
def has_camera(self):
|
||||
return len(self.cameras) > 0
|
||||
|
||||
@property
|
||||
def num_cameras(self):
|
||||
return len(self.cameras)
|
||||
|
||||
|
||||
def connect(self) -> None:
|
||||
"""Connect RealmanArm and cameras"""
|
||||
if self.is_connected:
|
||||
raise RobotDeviceAlreadyConnectedError(
|
||||
"RealmanArm is already connected. Do not run `robot.connect()` twice."
|
||||
)
|
||||
|
||||
# connect RealmanArm
|
||||
self.arm.connect(enable=True)
|
||||
print("RealmanArm conneted")
|
||||
|
||||
# connect cameras
|
||||
for name in self.cameras:
|
||||
self.cameras[name].connect()
|
||||
self.is_connected = self.is_connected and self.cameras[name].is_connected
|
||||
print(f"camera {name} conneted")
|
||||
|
||||
print("All connected")
|
||||
self.is_connected = True
|
||||
|
||||
self.run_calibration()
|
||||
|
||||
|
||||
def disconnect(self) -> None:
|
||||
"""move to home position, disenable piper and cameras"""
|
||||
# move piper to home position, disable
|
||||
if not self.inference_time:
|
||||
self.teleop.stop()
|
||||
|
||||
# disconnect piper
|
||||
self.arm.safe_disconnect()
|
||||
print("RealmanArm disable after 5 seconds")
|
||||
time.sleep(5)
|
||||
self.arm.connect(enable=False)
|
||||
|
||||
# disconnect cameras
|
||||
if len(self.cameras) > 0:
|
||||
for cam in self.cameras.values():
|
||||
cam.disconnect()
|
||||
|
||||
self.is_connected = False
|
||||
|
||||
|
||||
def run_calibration(self):
|
||||
"""move piper to the home position"""
|
||||
if not self.is_connected:
|
||||
raise ConnectionError()
|
||||
|
||||
self.arm.apply_calibration()
|
||||
if not self.inference_time:
|
||||
self.teleop.reset()
|
||||
|
||||
|
||||
def teleop_step(
|
||||
self, record_data=False
|
||||
) -> None | tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]:
|
||||
if not self.is_connected:
|
||||
raise ConnectionError()
|
||||
|
||||
if self.teleop is None and self.inference_time:
|
||||
self.teleop = HybridController(self.init_info)
|
||||
|
||||
# read target pose state as
|
||||
before_read_t = time.perf_counter()
|
||||
state = self.arm.read() # read current joint position from robot
|
||||
action = self.teleop.get_action() # target joint position and pose end pos from gamepad
|
||||
self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t
|
||||
|
||||
if action['control_mode'] == 'joint':
|
||||
# joint control mode (primary mode)
|
||||
current_pose = self.arm.read_current_arm_endpose_state()
|
||||
self.teleop.update_endpose_state(current_pose)
|
||||
|
||||
target_joints = action['joint_angles'][:-1]
|
||||
self.arm.write_gripper(action['gripper'])
|
||||
print(action['gripper'])
|
||||
if action['master_controller_status']['infrared'] == 1:
|
||||
if action['master_controller_status']['button'] == 1:
|
||||
self.arm.write_joint_canfd(target_joints)
|
||||
else:
|
||||
self.arm.write_joint_slow(target_joints)
|
||||
|
||||
# do action
|
||||
before_write_t = time.perf_counter()
|
||||
self.joint_queue.append(list(self.arm.read().values()))
|
||||
self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t
|
||||
|
||||
else:
|
||||
target_pose = list(action['end_pose'])
|
||||
# do action
|
||||
before_write_t = time.perf_counter()
|
||||
if self.last_endpose != target_pose:
|
||||
self.arm.write_endpose_canfd(target_pose)
|
||||
self.last_endpose = target_pose
|
||||
self.arm.write_gripper(action['gripper'])
|
||||
|
||||
target_joints = self.arm.read_current_arm_joint_state()
|
||||
self.joint_queue.append(list(self.arm.read().values()))
|
||||
self.teleop.update_joint_state(target_joints)
|
||||
self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t
|
||||
|
||||
if not record_data:
|
||||
return
|
||||
|
||||
state = torch.as_tensor(list(self.joint_queue[0]), dtype=torch.float32)
|
||||
action = torch.as_tensor(list(self.joint_queue[-1]), dtype=torch.float32)
|
||||
|
||||
# Capture images from cameras
|
||||
images = {}
|
||||
for name in self.cameras:
|
||||
before_camread_t = time.perf_counter()
|
||||
images[name] = self.cameras[name].async_read()
|
||||
images[name] = torch.from_numpy(images[name])
|
||||
self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
|
||||
self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t
|
||||
|
||||
# Populate output dictionaries
|
||||
obs_dict, action_dict = {}, {}
|
||||
obs_dict["observation.state"] = state
|
||||
action_dict["action"] = action
|
||||
for name in self.cameras:
|
||||
obs_dict[f"observation.images.{name}"] = images[name]
|
||||
|
||||
return obs_dict, action_dict
|
||||
|
||||
|
||||
|
||||
def send_action(self, action: torch.Tensor) -> torch.Tensor:
|
||||
"""Write the predicted actions from policy to the motors"""
|
||||
if not self.is_connected:
|
||||
raise RobotDeviceNotConnectedError(
|
||||
"Piper is not connected. You need to run `robot.connect()`."
|
||||
)
|
||||
|
||||
# send to motors, torch to list
|
||||
target_joints = action.tolist()
|
||||
len_joint = len(target_joints) - 1
|
||||
target_joints = [target_joints[i]*180 for i in range(len_joint)] + [target_joints[-1]]
|
||||
target_joints[-1] = int(target_joints[-1]*500 + 500)
|
||||
self.arm.write(target_joints)
|
||||
|
||||
return action
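The scaling in `send_action` mirrors the normalization done in `RealmanMotorsBus.read()` (joints divided by 180, gripper mapped through (x - 500) / 500). A small worked sketch with made-up values:

```python
import torch

# Policy output: 6 normalized joints in [-1, 1] plus a normalized gripper value.
action = torch.tensor([-0.5, 0.5, 0.5, -0.35, -0.52, 0.9, 0.48])

target_joints = action.tolist()
len_joint = len(target_joints) - 1
# Joints back to degrees, gripper back to the raw 0-1000 range used by the SDK.
target_joints = [target_joints[i] * 180 for i in range(len_joint)] + [target_joints[-1]]
target_joints[-1] = int(target_joints[-1] * 500 + 500)
# -> [-90.0, 90.0, 90.0, -63.0, -93.6, 162.0, 740]
```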
|
||||
|
||||
|
||||
|
||||
def capture_observation(self) -> dict:
|
||||
"""capture current images and joint positions"""
|
||||
if not self.is_connected:
|
||||
raise RobotDeviceNotConnectedError(
|
||||
"Piper is not connected. You need to run `robot.connect()`."
|
||||
)
|
||||
|
||||
# read current joint positions
|
||||
before_read_t = time.perf_counter()
|
||||
state = self.arm.read() # 6 joints + 1 gripper
|
||||
self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t
|
||||
|
||||
state = torch.as_tensor(list(state.values()), dtype=torch.float32)
|
||||
|
||||
# read images from cameras
|
||||
images = {}
|
||||
for name in self.cameras:
|
||||
before_camread_t = time.perf_counter()
|
||||
images[name] = self.cameras[name].async_read()
|
||||
images[name] = torch.from_numpy(images[name])
|
||||
self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
|
||||
self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t
|
||||
|
||||
# Populate output dictionaries and format to pytorch
|
||||
obs_dict = {}
|
||||
obs_dict["observation.state"] = state
|
||||
for name in self.cameras:
|
||||
obs_dict[f"observation.images.{name}"] = images[name]
|
||||
return obs_dict
|
||||
|
||||
def teleop_safety_stop(self):
|
||||
""" move to home position after record one episode """
|
||||
self.run_calibration()
|
||||
|
||||
|
||||
def __del__(self):
|
||||
if self.is_connected:
|
||||
self.disconnect()
|
||||
if not self.inference_time:
|
||||
self.teleop.stop()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
robot = RealmanRobot()
|
||||
robot.connect()
|
||||
# robot.run_calibration()
|
||||
while True:
|
||||
robot.teleop_step(record_data=True)
|
||||
|
||||
robot.capture_observation()
|
||||
dummy_action = torch.Tensor([-0.40586111280653214, 0.5522833506266276, 0.4998166826036241, -0.3539944542778863, -0.524433347913954, 0.9064999898274739, 0.482])
|
||||
robot.send_action(dummy_action)
|
||||
robot.disconnect()
|
||||
print('ok')
|
||||
469
lerobot/common/robot_devices/robots/realman_dual.py
Normal file
@@ -0,0 +1,469 @@
|
||||
|
||||
"""
|
||||
Teleoperation Realman with a PS5 controller and LLM interaction
|
||||
"""
|
||||
import time
|
||||
import torch
|
||||
import numpy as np
|
||||
import logging
|
||||
from typing import Optional, Tuple, Dict
|
||||
from dataclasses import dataclass, field, replace
|
||||
from collections import deque
|
||||
import signal
|
||||
import sys
|
||||
from lerobot.common.robot_devices.teleop.realman_aloha_dual import HybridController
|
||||
from lerobot.common.robot_devices.motors.utils import get_motor_names, make_motors_buses_from_configs
|
||||
from lerobot.common.robot_devices.cameras.utils import make_cameras_from_configs
|
||||
from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
|
||||
from lerobot.common.robot_devices.robots.configs import RealmanDualRobotConfig
|
||||
from lerobot.common.robot_devices.robots.utils import (
|
||||
ask_llm,
|
||||
extract_json_from_response,
|
||||
create_keyboard_listener,
|
||||
start_keyboard_listener,
|
||||
stop_keyboard_listener,
|
||||
speak_async,
|
||||
handle_llm_interaction_with_images
|
||||
)
|
||||
|
||||
|
||||
class RealmanDualRobot:
|
||||
"""RealmanDual机器人控制类,支持双臂操作和LLM交互"""
|
||||
|
||||
def __init__(self, config: RealmanDualRobotConfig | None = None, **kwargs):
|
||||
if config is None:
|
||||
config = RealmanDualRobotConfig()
|
||||
|
||||
# 配置初始化
|
||||
self.config = replace(config, **kwargs)
|
||||
self.robot_type = self.config.type
|
||||
self.inference_time = self.config.inference_time
|
||||
|
||||
# 硬件初始化
|
||||
self.cameras = make_cameras_from_configs(self.config.cameras)
|
||||
self.piper_motors = make_motors_buses_from_configs(self.config.follower_arm)
|
||||
self.arm = self.piper_motors['main']
|
||||
|
||||
# 控制系统初始化
|
||||
self._initialize_teleop()
|
||||
self._initialize_state()
|
||||
self._initialize_keyboard_interface()
|
||||
|
||||
# 状态标志
|
||||
self._shutdown_flag = False
|
||||
self._llm_triggered = False
|
||||
|
||||
|
||||
def _initialize_teleop(self):
|
||||
"""初始化遥操作控制器"""
|
||||
self.init_info = {
|
||||
'init_joint': self.arm.init_joint_position,
|
||||
'init_pose': self.arm.init_pose,
|
||||
'max_gripper': self.config.max_gripper,
|
||||
'min_gripper': self.config.min_gripper,
|
||||
'servo_config_file': self.config.servo_config_file,
|
||||
'end_control_info': {
|
||||
'left': self.config.left_end_control_guid,
|
||||
'right': self.config.right_end_control_guid
|
||||
}
|
||||
}
|
||||
|
||||
if not self.inference_time:
|
||||
self.teleop = HybridController(self.init_info)
|
||||
else:
|
||||
self.teleop = None
|
||||
|
||||
def _initialize_state(self):
|
||||
"""初始化状态管理"""
|
||||
self.joint_queue = deque(maxlen=2)
|
||||
self.last_endpose = self.arm.init_pose
|
||||
self.logs = {}
|
||||
self.is_connected = False
|
||||
|
||||
def _initialize_keyboard_interface(self):
|
||||
"""初始化键盘交互接口"""
|
||||
self.w_pressed = False # W键触发LLM
|
||||
self.q_pressed = False # Q键退出
|
||||
self.keyboard_listener = None
|
||||
self._start_keyboard_listener()
|
||||
|
||||
def _start_keyboard_listener(self):
|
||||
"""启动键盘监听器"""
|
||||
def on_key_press(key_char):
|
||||
if key_char == 'w':
|
||||
self.w_pressed = True
|
||||
print("检测到W键按下")
|
||||
elif key_char == 'q':
|
||||
self.q_pressed = True
|
||||
print("检测到Q键按下")
|
||||
self.keyboard_listener = create_keyboard_listener(on_key_press)
|
||||
success = start_keyboard_listener(self.keyboard_listener)
|
||||
if success:
|
||||
print("键盘监听器启动成功 (W键:调用LLM, Q键:退出)")
|
||||
else:
|
||||
print("键盘监听器启动失败")
|
||||
|
||||
def _read_robot_state(self) -> dict:
|
||||
"""读取机器人状态"""
|
||||
before_read_t = time.perf_counter()
|
||||
from copy import deepcopy
|
||||
state = deepcopy(self.arm.read())
|
||||
self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t
|
||||
return state
|
||||
|
||||
def _execute_action(self, action: dict, state: dict):
|
||||
"""执行机器人动作"""
|
||||
before_write_t = time.perf_counter()
|
||||
if action['control_mode'] == 'joint':
|
||||
pass
|
||||
else:
|
||||
if list(action['pose'].values()) != list(state['pose'].values()):
|
||||
pose = list(action['pose'].values())
|
||||
self.arm.write_endpose_canfd(pose)
|
||||
elif list(action['joint'].values()) != list(state['joint'].values()):
|
||||
target_joint = list(action['joint'].values())
|
||||
self.arm.write(target_joint)
|
||||
self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t
|
||||
|
||||
def _prepare_record_data(self) -> Tuple[Dict, Dict]:
|
||||
"""准备记录数据 - 保持原有逻辑"""
|
||||
if len(self.joint_queue) < 2:
|
||||
return {}, {}
|
||||
state = torch.as_tensor(list(self.joint_queue[0]), dtype=torch.float32)
|
||||
action = torch.as_tensor(list(self.joint_queue[-1]), dtype=torch.float32)
|
||||
# 捕获图像
|
||||
images = self._capture_images()
|
||||
# 构建输出字典
|
||||
obs_dict = {
|
||||
"observation.state": state,
|
||||
**{f"observation.images.{name}": img for name, img in images.items()}
|
||||
}
|
||||
action_dict = {"action": action}
|
||||
return obs_dict, action_dict
|
||||
|
||||
def _update_state_queue(self):
|
||||
"""更新状态队列"""
|
||||
current_state = self.arm.read()['joint']
|
||||
current_state_lst = []
|
||||
for data in current_state:
|
||||
if "joint" in data:
|
||||
current_state_lst.append(current_state[data] / 180)
|
||||
elif "gripper" in data:
|
||||
current_state_lst.append((current_state[data]-500)/500)
|
||||
self.joint_queue.append(current_state_lst)
|
||||
|
||||
def _capture_images(self) -> Dict[str, torch.Tensor]:
|
||||
"""捕获摄像头图像"""
|
||||
images = {}
|
||||
for name, camera in self.cameras.items():
|
||||
before_camread_t = time.perf_counter()
|
||||
image = camera.async_read()
|
||||
images[name] = torch.from_numpy(image)
|
||||
self.logs[f"read_camera_{name}_dt_s"] = camera.logs["delta_timestamp_s"]
|
||||
self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t
|
||||
return images
|
||||
|
||||
# def _handle_llm_interaction(self, obs_dict: Dict) -> bool:
|
||||
# """处理LLM交互逻辑"""
|
||||
# print("[W键已按下] 正在准备数据并调用LLM...")
|
||||
|
||||
# # 提取图像数据
|
||||
# camera_images = {}
|
||||
# for key, value in obs_dict.items():
|
||||
# if key.startswith("observation.images."):
|
||||
# camera_name = key.replace("observation.images.", "")
|
||||
# camera_images[camera_name] = value.cpu().numpy()
|
||||
|
||||
# # 使用utils中的函数处理LLM交互
|
||||
# success, response = handle_llm_interaction_with_images(
|
||||
# "将超声仪左下角试管架上的蓝色试管移动到超声仪中",
|
||||
# camera_images
|
||||
# )
|
||||
# return success
|
||||
|
||||
def connect(self) -> None:
|
||||
"""连接机器人和摄像头"""
|
||||
if self.is_connected:
|
||||
raise RobotDeviceAlreadyConnectedError(
|
||||
"RealmanArm is already connected. Do not run `robot.connect()` twice."
|
||||
)
|
||||
# 连接机械臂
|
||||
self.arm.connect(enable=True)
|
||||
print("RealmanArm conneted")
|
||||
|
||||
# 连接摄像头
|
||||
for name in self.cameras:
|
||||
self.cameras[name].connect()
|
||||
self.is_connected = self.is_connected and self.cameras[name].is_connected
|
||||
print(f"camera {name} conneted")
|
||||
|
||||
print("All connected")
|
||||
self.is_connected = True
|
||||
self.run_calibration()
|
||||
|
||||
def disconnect(self) -> None:
|
||||
"""断开连接"""
|
||||
if self._shutdown_flag:
|
||||
return
|
||||
self._shutdown_flag = True
|
||||
|
||||
try:
|
||||
# 停止键盘监听器
|
||||
stop_keyboard_listener(self.keyboard_listener)
|
||||
print("键盘监听器已停止")
|
||||
|
||||
# 停止遥操作控制器
|
||||
if hasattr(self, 'teleop') and self.teleop and not self.inference_time:
|
||||
self.teleop.stop()
|
||||
print("遥操作控制器已停止")
|
||||
|
||||
# 断开机械臂连接
|
||||
if hasattr(self, 'arm'):
|
||||
try:
|
||||
self.arm.safe_disconnect()
|
||||
print("RealmanArm 安全断开连接")
|
||||
time.sleep(2)
|
||||
self.arm.connect(enable=False)
|
||||
print("RealmanArm 已禁用")
|
||||
except Exception as e:
|
||||
print(f"断开机械臂连接时出错: {e}")
|
||||
|
||||
# 断开摄像头连接
|
||||
if len(self.cameras) > 0:
|
||||
for name, cam in self.cameras.items():
|
||||
try:
|
||||
cam.disconnect()
|
||||
print(f"Camera {name} 已断开连接")
|
||||
except Exception as e:
|
||||
print(f"断开相机 {name} 时出错: {e}")
|
||||
|
||||
self.is_connected = False
|
||||
print("所有设备已断开连接")
|
||||
except Exception as e:
|
||||
print(f"断开连接时发生错误: {e}")
|
||||
|
||||
def run_calibration(self):
|
||||
"""运行标定"""
|
||||
if not self.is_connected:
|
||||
raise ConnectionError()
|
||||
self.arm.apply_calibration()
|
||||
if not self.inference_time:
|
||||
self.teleop.reset()
|
||||
|
||||
def teleop_step(self, record_data=False) -> None | tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]:
|
||||
"""遥操作步骤 - 保持原有数据记录逻辑,添加LLM交互"""
|
||||
if not self.is_connected:
|
||||
raise ConnectionError()
|
||||
|
||||
if self.teleop is None and self.inference_time:
|
||||
self.teleop = HybridController(self.init_info)
|
||||
|
||||
try:
|
||||
# 检查退出条件
|
||||
if self.q_pressed:
|
||||
print("检测到Q键,任务终止...")
|
||||
speak_async("任务已终止")
|
||||
raise KeyboardInterrupt("用户请求退出")
|
||||
|
||||
# 执行基础遥操作
|
||||
state = self._read_robot_state()
|
||||
action = self.teleop.get_action(state)
|
||||
self._execute_action(action, state)
|
||||
|
||||
# 更新状态队列
|
||||
self._update_state_queue()
|
||||
time.sleep(0.019) # 50Hz
|
||||
|
||||
# 处理数据记录
|
||||
if record_data:
|
||||
data = self._prepare_record_data()
|
||||
|
||||
# 处理LLM交互请求(只有在有有效数据时才处理)
|
||||
if data[0] and self.w_pressed: # 如果有有效数据且W键被按下
|
||||
self.w_pressed = False
|
||||
camera_images = {
|
||||
name.replace("observation.images.", ""): img.cpu().numpy()
|
||||
for name, img in data[0].items()
|
||||
if name.startswith("observation.images.")
|
||||
}
|
||||
success, resp = handle_llm_interaction_with_images(
|
||||
"将超声仪左下角试管架上的蓝色试管移动到超声仪中",
|
||||
camera_images
|
||||
)
|
||||
# self.w_pressed = False # 重置标志位
|
||||
# self._llm_triggered = True
|
||||
# success = self._handle_llm_interaction(data[0])
|
||||
if not success:
|
||||
print("LLM交互处理失败")
|
||||
|
||||
# 如果没有有效数据,创建默认数据
|
||||
if not data[0]:
|
||||
# 创建默认的观测数据
|
||||
if len(self.joint_queue) > 0:
|
||||
state_tensor = torch.as_tensor(list(self.joint_queue[-1]), dtype=torch.float32)
|
||||
else:
|
||||
state_tensor = torch.zeros(14, dtype=torch.float32) # 根据你的机器人调整维度
|
||||
|
||||
# 捕获当前图像
|
||||
images = self._capture_images()
|
||||
obs_dict = {
|
||||
"observation.state": state_tensor,
|
||||
**{f"observation.images.{name}": img for name, img in images.items()}
|
||||
}
|
||||
action_dict = {"action": state_tensor} # 使用相同的状态作为动作
|
||||
data = (obs_dict, action_dict)
|
||||
return data
|
||||
return None
|
||||
|
||||
except KeyboardInterrupt:
|
||||
# 重新抛出键盘中断,让上层处理
|
||||
raise
|
||||
except Exception as e:
|
||||
logging.error(f"遥操作步骤失败: {e}")
|
||||
|
||||
# 即使出错,在record_data=True时也要返回有效数据
|
||||
if record_data:
|
||||
# 创建紧急默认数据
|
||||
state_tensor = torch.zeros(14, dtype=torch.float32) # 根据你的机器人调整维度
|
||||
images = {}
|
||||
try:
|
||||
images = self._capture_images()
|
||||
except:
|
||||
# 如果连图像都无法捕获,创建空图像
|
||||
for camera_name in self.cameras.keys():
|
||||
images[camera_name] = torch.zeros((480, 640, 3), dtype=torch.uint8)
|
||||
obs_dict = {
|
||||
"observation.state": state_tensor,
|
||||
**{f"observation.images.{name}": img for name, img in images.items()}
|
||||
}
|
||||
action_dict = {"action": state_tensor}
|
||||
return obs_dict, action_dict
|
||||
return None
|
||||
|
||||
def send_action(self, action: torch.Tensor) -> torch.Tensor:
|
||||
"""发送动作到机器人"""
|
||||
if not self.is_connected:
|
||||
raise RobotDeviceNotConnectedError(
|
||||
"Piper is not connected. You need to run `robot.connect()`."
|
||||
)
|
||||
|
||||
target_joints = action.tolist()
|
||||
len_joint = len(target_joints) - 1
|
||||
target_joints = [target_joints[i]*180 for i in range(len_joint)] + [target_joints[-1]]
|
||||
target_joints[-1] = int(target_joints[-1]*500 + 500)
|
||||
self.arm.write(target_joints)
|
||||
return action
|
||||
|
||||
def capture_observation(self) -> dict:
|
||||
"""捕获当前观测"""
|
||||
if not self.is_connected:
|
||||
raise RobotDeviceNotConnectedError(
|
||||
"Piper is not connected. You need to run `robot.connect()`."
|
||||
)
|
||||
before_read_t = time.perf_counter()
|
||||
state = self.arm.read()
|
||||
self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t
|
||||
|
||||
state = torch.as_tensor(list(state.values()), dtype=torch.float32)
|
||||
# 读取图像
|
||||
images = {}
|
||||
for name in self.cameras:
|
||||
before_camread_t = time.perf_counter()
|
||||
images[name] = self.cameras[name].async_read()
|
||||
images[name] = torch.from_numpy(images[name])
|
||||
self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
|
||||
self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t
|
||||
# 构建观测字典
|
||||
obs_dict = {}
|
||||
obs_dict["observation.state"] = state
|
||||
for name in self.cameras:
|
||||
obs_dict[f"observation.images.{name}"] = images[name]
|
||||
return obs_dict
|
||||
|
||||
def teleop_safety_stop(self):
|
||||
"""遥操作安全停止"""
|
||||
self.run_calibration()
|
||||
@property
|
||||
def camera_features(self) -> dict:
|
||||
"""获取摄像头特征"""
|
||||
cam_ft = {}
|
||||
for cam_key, cam in self.cameras.items():
|
||||
key = f"observation.images.{cam_key}"
|
||||
cam_ft[key] = {
|
||||
"shape": (cam.height, cam.width, cam.channels),
|
||||
"names": ["height", "width", "channels"],
|
||||
"info": None,
|
||||
}
|
||||
return cam_ft
|
||||
|
||||
@property
|
||||
def motor_features(self) -> dict:
|
||||
"""获取电机特征"""
|
||||
action_names = get_motor_names(self.piper_motors)
|
||||
state_names = get_motor_names(self.piper_motors)
|
||||
return {
|
||||
"action": {
|
||||
"dtype": "float32",
|
||||
"shape": (len(action_names),),
|
||||
"names": action_names,
|
||||
},
|
||||
"observation.state": {
|
||||
"dtype": "float32",
|
||||
"shape": (len(state_names),),
|
||||
"names": state_names,
|
||||
},
|
||||
}
|
||||
|
||||
@property
|
||||
def has_camera(self):
|
||||
return len(self.cameras) > 0
|
||||
|
||||
@property
|
||||
def num_cameras(self):
|
||||
return len(self.cameras)
|
||||
|
||||
def __del__(self):
|
||||
"""析构函数"""
|
||||
try:
|
||||
if not self._shutdown_flag:
|
||||
self.disconnect()
|
||||
except:
|
||||
pass
|
||||
|
||||
def signal_handler(signum, frame):
|
||||
"""信号处理器"""
|
||||
print("\n收到中断信号,正在安全退出...")
|
||||
sys.exit(0)
|
||||
|
||||
if __name__ == '__main__':
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
robot = None
|
||||
|
||||
try:
|
||||
robot = RealmanDualRobot()
|
||||
robot.connect()
|
||||
|
||||
print("RealmanDual 机器人控制已启动")
|
||||
print("操作说明:")
|
||||
print(" - 使用手柄进行遥操作控制")
|
||||
print(" - 按 W 键:调用LLM分析当前场景")
|
||||
print(" - 按 Q 键:安全退出程序")
|
||||
print(" - Ctrl+C:强制退出")
|
||||
print("\n等待操作...")
|
||||
|
||||
while True:
|
||||
result = robot.teleop_step(record_data=True)
|
||||
if result is None:
|
||||
continue
|
||||
# 这里可以处理记录的数据
|
||||
except KeyboardInterrupt:
|
||||
print("\n收到键盘中断信号")
|
||||
except Exception as e:
|
||||
print(f"程序运行出错: {e}")
|
||||
finally:
|
||||
if robot:
|
||||
try:
|
||||
robot.disconnect()
|
||||
except:
|
||||
pass
|
||||
print("程序已完全退出")
|
||||
@@ -11,9 +11,22 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from typing import Protocol
|
||||
|
||||
from typing import Protocol, Dict
|
||||
# Robot configuration imports
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import base64
|
||||
import yaml
|
||||
import threading
|
||||
from io import BytesIO
|
||||
from typing import Protocol, Dict
|
||||
import numpy as np
|
||||
from openai import OpenAI
|
||||
from PIL import Image
|
||||
from pynput import keyboard
|
||||
import torch
|
||||
# Robot config imports
|
||||
from lerobot.common.robot_devices.robots.configs import (
|
||||
AlohaRobotConfig,
|
||||
KochBimanualRobotConfig,
|
||||
@@ -23,30 +36,28 @@ from lerobot.common.robot_devices.robots.configs import (
|
||||
MossRobotConfig,
|
||||
RobotConfig,
|
||||
So100RobotConfig,
|
||||
So101RobotConfig,
|
||||
StretchRobotConfig,
|
||||
RealmanRobotConfig,
|
||||
RealmanDualRobotConfig
|
||||
)
|
||||
|
||||
|
||||
def get_arm_id(name, arm_type):
|
||||
"""Returns the string identifier of a robot arm. For instance, for a bimanual manipulator
|
||||
like Aloha, it could be left_follower, right_follower, left_leader, or right_leader.
|
||||
"""
|
||||
return f"{name}_{arm_type}"
|
||||
|
||||
|
||||
class Robot(Protocol):
|
||||
# TODO(rcadene, aliberts): Add unit test checking the protocol is implemented in the corresponding classes
|
||||
robot_type: str
|
||||
features: dict
|
||||
|
||||
cameras: Dict
|
||||
def connect(self): ...
|
||||
def run_calibration(self): ...
|
||||
def teleop_step(self, record_data=False): ...
|
||||
def capture_observation(self): ...
|
||||
def capture_observation(self) -> Dict: ...
|
||||
def send_action(self, action): ...
|
||||
def disconnect(self): ...
|
||||
|
||||
|
||||
def get_arm_id(name, arm_type) -> str:
|
||||
"""返回机器人手臂ID"""
|
||||
return f"{name}_{arm_type}"
|
||||
|
||||
def make_robot_config(robot_type: str, **kwargs) -> RobotConfig:
|
||||
if robot_type == "aloha":
|
||||
return AlohaRobotConfig(**kwargs)
|
||||
@@ -58,29 +69,147 @@ def make_robot_config(robot_type: str, **kwargs) -> RobotConfig:
|
||||
return MossRobotConfig(**kwargs)
|
||||
elif robot_type == "so100":
|
||||
return So100RobotConfig(**kwargs)
|
||||
elif robot_type == "so101":
|
||||
return So101RobotConfig(**kwargs)
|
||||
elif robot_type == "stretch":
|
||||
return StretchRobotConfig(**kwargs)
|
||||
elif robot_type == "lekiwi":
|
||||
return LeKiwiRobotConfig(**kwargs)
|
||||
elif robot_type == 'realman':
|
||||
return RealmanRobotConfig(**kwargs)
|
||||
elif robot_type == 'realman_dual':
|
||||
return RealmanDualRobotConfig(**kwargs)
|
||||
else:
|
||||
raise ValueError(f"Robot type '{robot_type}' is not available.")
|
||||
|
||||
|
||||
def make_robot_from_config(config: RobotConfig):
|
||||
"""根据配置生成机器人实例"""
|
||||
if isinstance(config, ManipulatorRobotConfig):
|
||||
from lerobot.common.robot_devices.robots.manipulator import ManipulatorRobot
|
||||
|
||||
return ManipulatorRobot(config)
|
||||
elif isinstance(config, LeKiwiRobotConfig):
|
||||
from lerobot.common.robot_devices.robots.mobile_manipulator import MobileManipulator
|
||||
|
||||
return MobileManipulator(config)
|
||||
elif isinstance(config, RealmanRobotConfig):
|
||||
from lerobot.common.robot_devices.robots.realman import RealmanRobot
|
||||
return RealmanRobot(config)
|
||||
elif isinstance(config, RealmanDualRobotConfig):
|
||||
from lerobot.common.robot_devices.robots.realman_dual import RealmanDualRobot
|
||||
return RealmanDualRobot(config)
|
||||
else:
|
||||
from lerobot.common.robot_devices.robots.stretch import StretchRobot
|
||||
|
||||
return StretchRobot(config)
|
||||
|
||||
|
||||
def make_robot(robot_type: str, **kwargs) -> Robot:
|
||||
"""指定类型构造机器人"""
|
||||
config = make_robot_config(robot_type, **kwargs)
|
||||
return make_robot_from_config(config)
|
||||
|
||||
def numpy_to_base64(img_array: np.ndarray) -> str:
|
||||
"""numpy数组转Base64 PNG图像"""
|
||||
if img_array.dtype != np.uint8:
|
||||
if img_array.max() <= 1.0:
|
||||
img_array = (img_array * 255).astype(np.uint8)
|
||||
else:
|
||||
img_array = img_array.astype(np.uint8)
|
||||
buffered = BytesIO()
|
||||
Image.fromarray(img_array).save(buffered, format="PNG")
|
||||
return base64.b64encode(buffered.getvalue()).decode('utf-8')
|
||||
|
||||
def extract_json_from_response(response: str) -> dict:
|
||||
"""解析LLM返回的结构化指令"""
|
||||
try:
|
||||
return json.loads(response)
|
||||
except Exception:
|
||||
return {
|
||||
"action": "move_arm",
|
||||
"description": response.strip(),
|
||||
"arm": "left",
|
||||
"target": "unknown",
|
||||
"status": "ready"
|
||||
}
|
||||
|
||||
def create_keyboard_listener(on_key_press_callback):
|
||||
"""创建键盘监听器"""
|
||||
def on_press(key):
|
||||
try:
|
||||
if hasattr(key, 'char') and key.char:
|
||||
on_key_press_callback(key.char.lower())
|
||||
except AttributeError:
|
||||
pass
|
||||
return keyboard.Listener(on_press=on_press)
|
||||
|
||||
def start_keyboard_listener(listener):
|
||||
try:
|
||||
listener.start()
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"键盘监听器启动失败: {e}")
|
||||
return False
|
||||
|
||||
def stop_keyboard_listener(listener):
|
||||
try:
|
||||
if listener and listener.is_alive():
|
||||
listener.stop()
|
||||
return True
|
||||
except Exception:
|
||||
pass
|
||||
return False
|
||||
|
||||
def speak_async(text: str):
|
||||
"""异步语音播报"""
|
||||
from lerobot.common.utils.utils import say
|
||||
def speak_thread():
|
||||
try:
|
||||
say(text, blocking=True)
|
||||
except Exception as e:
|
||||
print(f"语音播报失败: {e}")
|
||||
thread = threading.Thread(target=speak_thread, daemon=True)
|
||||
thread.start()
|
||||
|
||||
def ask_llm(query: str, state: dict, history=None):
|
||||
"""调用LLM进行场景分析"""
|
||||
|
||||
api_key = os.getenv("ASK_LLM_API_KEY")
|
||||
base_url = os.getenv("ASK_LLM_BASE_URL")
|
||||
model_name = os.getenv("ASK_LLM_MODEL")
|
||||
if not (api_key and base_url and model_name):
|
||||
raise EnvironmentError("请设置 ASK_LLM_API_KEY, ASK_LLM_BASE_URL, ASK_LLM_MODEL 环境变量")
|
||||
prompt_path = os.path.join(os.path.dirname(__file__), "llm_prompt.yaml")
|
||||
with open(prompt_path, "r", encoding="utf-8") as f:
|
||||
prompts = yaml.safe_load(f)
|
||||
system_prompt = prompts["system_prompt"].replace("{query}", query)
|
||||
if history is None:
|
||||
history = []
|
||||
if not history:
|
||||
history.append({"role": "system", "content": system_prompt})
|
||||
if len(history) > 3:
|
||||
history = [history[0]] + history[-2:] # keep the system prompt and the last round
|
||||
user_content = [{"type": "text", "text": "以下是当前4个摄像头画面,请分析并给出一步操作指令"}]
|
||||
for name in ["high", "front", "left", "right"]:
|
||||
if name in state:
|
||||
img_base64 = numpy_to_base64(state[name])
|
||||
user_content.append({
|
||||
"type": "image_url",
|
||||
"image_url": {"url": f"data:image/png;base64,{img_base64}", "detail": "high"}
|
||||
})
|
||||
history.append({"role": "user", "content": user_content})
|
||||
client = OpenAI(api_key=api_key, base_url=base_url)
|
||||
completion = client.chat.completions.create(model=model_name, messages=history, max_tokens=800, temperature=0.3)
|
||||
resp_msg = completion.choices[0].message
|
||||
history.append(resp_msg)
|
||||
return resp_msg.content
|
||||
|
||||
def handle_llm_interaction_with_images(query: str, camera_images: dict):
|
||||
"""处理LLM交互"""
|
||||
if len(camera_images) != 4:
|
||||
return False, "摄像头数量不足"
|
||||
try:
|
||||
response = ask_llm(query, camera_images)
|
||||
instruction = extract_json_from_response(response)
|
||||
if desc := instruction.get("description"):
|
||||
speak_async(f"下一步操作:{desc}")
|
||||
return True, response
|
||||
except Exception as e:
|
||||
speak_async("LLM交互失败")
|
||||
return False, str(e)
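# Illustrative usage of the helpers above (the frame variables are hypothetical, not part of the module):
# images = {"high": img_high, "front": img_front, "left": img_left, "right": img_right}
# ok, raw_response = handle_llm_interaction_with_images("pick up the bottle", images)
# if ok:
#     step = extract_json_from_response(raw_response)  # e.g. {"action": "move_arm", ...}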
|
||||
|
||||
18
lerobot/common/robot_devices/teleop/find_gamepad.py
Normal file
@@ -0,0 +1,18 @@
|
||||
import pygame
|
||||
|
||||
def find_controller_index():
|
||||
# Collect the index and device name of every pygame joystick, keyed by GUID
|
||||
pygame_joysticks = {}
|
||||
for i in range(pygame.joystick.get_count()):
|
||||
joystick = pygame.joystick.Joystick(i)
|
||||
joystick.init()
|
||||
pygame_joysticks[joystick.get_guid()] = {
|
||||
'index': i,
|
||||
'device_name': joystick.get_name()
|
||||
}
|
||||
|
||||
return pygame_joysticks
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
print(find_controller_index())
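# Note: the GUIDs printed above are what realman.md passes to control_robot.py via
# --robot.left_end_control_guid / --robot.right_end_control_guid.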
|
||||
421
lerobot/common/robot_devices/teleop/gamepad.py
Normal file
@@ -0,0 +1,421 @@
|
||||
import pygame
|
||||
import threading
|
||||
import time
|
||||
import logging
|
||||
from typing import Dict
|
||||
from dataclasses import dataclass
|
||||
from lerobot.common.robot_devices.teleop.find_gamepad import find_controller_index
|
||||
from lerobot.common.robot_devices.teleop.servo_server import ServoArmServer
|
||||
|
||||
|
||||
class RealmanAlohaMaster:
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self._initialize_master_arm()
|
||||
|
||||
def _initialize_master_arm(self):
|
||||
"""初始化主控臂"""
|
||||
try:
|
||||
self.master_dual_arm = ServoArmServer(self.config.config_file)
|
||||
except Exception as e:
|
||||
logging.error(f"初始化主控臂失败: {e}")
|
||||
raise
|
||||
|
||||
def get_action(self) -> Dict:
|
||||
"""获取控制动作"""
|
||||
try:
|
||||
master_joint_actions = self.master_dual_arm.get_joint_data()
|
||||
return self._format_action(master_joint_actions)
|
||||
except Exception as e:
|
||||
logging.error(f"获取动作失败: {e}")
|
||||
|
||||
def _format_action(self, master_joint_actions: dict) -> dict:
|
||||
"""格式化动作数据"""
|
||||
master_controller_status = {
|
||||
'left': master_joint_actions['left_controller_status'],
|
||||
'right': master_joint_actions['right_controller_status']
|
||||
}
|
||||
|
||||
return {
|
||||
'control_mode': 'joint',
|
||||
'master_joint_actions': master_joint_actions['dual_joint_actions'],
|
||||
'left_joint_actions': master_joint_actions['left_joint_actions'][:-1],
|
||||
'right_joint_actions': master_joint_actions['right_joint_actions'][:-1],
|
||||
'left_gripper_actions': master_joint_actions['left_joint_actions'][-1], # bug fix: the last element is the gripper
|
||||
'right_gripper_actions': master_joint_actions['right_joint_actions'][-1],
|
||||
'master_controller_status': master_controller_status
|
||||
}
|
||||
|
||||
def stop(self):
|
||||
"""停止控制器"""
|
||||
try:
|
||||
if hasattr(self, 'master_dual_arm') and self.master_dual_arm:
|
||||
self.master_dual_arm.shutdown()
|
||||
print("混合控制器已退出")
|
||||
except Exception as e:
|
||||
logging.error(f"停止控制器失败: {e}")
|
||||
|
||||
|
||||
|
||||
class DummyEndposeMaster:
|
||||
def __init__(self, config):
|
||||
# Initialize pygame
|
||||
pygame.init()
|
||||
pygame.joystick.init()
|
||||
# Gather info for all connected USB game controllers
|
||||
self.joysticks = find_controller_index()
|
||||
print(self.joysticks)
|
||||
self.control_info = config.end_control_info
|
||||
left_stick = self._init_stick('left')
|
||||
right_stick = self._init_stick('right')
|
||||
self.controllers = [left_stick, right_stick]
|
||||
|
||||
def _init_stick(self, arm_name:str):
|
||||
stick_info = {}
|
||||
stick_info['index'] = self.joysticks[self.control_info[arm_name]]['index']
|
||||
stick_info['guid'] = self.control_info[arm_name]
|
||||
stick_info['name'] = f'{arm_name}'
|
||||
device_name = self.joysticks[self.control_info[arm_name]]['device_name']
|
||||
stick = XboxStick(stick_info) if "Xbox" in device_name else FlightStick(stick_info)
|
||||
stick.start_polling()
|
||||
return stick
|
||||
|
||||
def get_action(self, state) -> Dict:
|
||||
from copy import deepcopy
|
||||
|
||||
new_state = deepcopy(state)
|
||||
gamepad_action = {}
|
||||
xyz = []
|
||||
rxryrz = []
|
||||
gripper = []
|
||||
"""获取控制动作"""
|
||||
try:
|
||||
for i, controller in enumerate(self.controllers):
|
||||
# states = controller.get_raw_states()
|
||||
gamepad_action.update(controller.get_control_signal(controller.name))
|
||||
xyz += [f"{controller.name}_x", f"{controller.name}_y", f"{controller.name}_z"]
|
||||
rxryrz += [f"{controller.name}_joint_4", f"{controller.name}_joint_5", f"{controller.name}_joint_6"]
|
||||
gripper += [f"{controller.name}_gripper"]
|
||||
|
||||
for name in xyz:
|
||||
new_state['pose'][name] += (gamepad_action[name] * gamepad_action['xyz_vel'] * gamepad_action[name.split('_')[0]+'_ratio'])
|
||||
|
||||
for name in gripper:
|
||||
new_state['joint'][name] += int(gamepad_action[name] * gamepad_action['gripper_vel'] * gamepad_action[name.split('_')[0]+'_ratio'])
|
||||
new_state['joint'][name] = min(990, max(0, new_state['joint'][name]))
|
||||
|
||||
for name in rxryrz:
|
||||
new_state['joint'][name] += (gamepad_action[name] * gamepad_action['rxyz_vel'] * gamepad_action[name.split('_')[0]+'_ratio'])
|
||||
|
||||
new_state['control_mode'] = 'endpose'
|
||||
return new_state
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"获取动作失败: {e}")
|
||||
|
||||
def stop(self):
|
||||
"""停止控制器"""
|
||||
try:
|
||||
# Stop the polling threads
|
||||
for controller in self.controllers:
|
||||
controller.stop_polling()
|
||||
except Exception as e:
|
||||
logging.error(f"停止控制器失败: {e}")
|
||||
|
||||
|
||||
|
||||
class ControllerBase:
|
||||
def __init__(self, joystick_info: dict):
|
||||
# Initialize the joystick object
|
||||
self.joystick = pygame.joystick.Joystick(joystick_info['index'])
|
||||
self.joystick.init()
|
||||
self.name = joystick_info['name']
|
||||
self.guid = joystick_info['guid']
|
||||
|
||||
# Dictionary holding the full controller state
|
||||
self.states = {
|
||||
'buttons': [False] * self.joystick.get_numbuttons(), # button states
|
||||
'axes': [0.0] * self.joystick.get_numaxes(), # stick/axis values
|
||||
'hats': [(0, 0)] * self.joystick.get_numhats() # hat (d-pad) states
|
||||
}
|
||||
|
||||
# deadzone
|
||||
self.deadzone = 0.15
|
||||
# validzone
|
||||
self.validzone = 0.05
|
||||
self.ratio = 1
|
||||
self.gripper_vel = 100
|
||||
self.rxyz_vel = 5
|
||||
self.xyz_vel = 0.02
|
||||
self.scale_up = 2
|
||||
self.scale_down = 10
|
||||
|
||||
# Thread control flag
|
||||
self.running = False
|
||||
|
||||
def start_polling(self):
|
||||
"""启动线程以轮询控制器状态"""
|
||||
if not self.running:
|
||||
self.running = True
|
||||
self.thread = threading.Thread(target=self._poll_controller)
|
||||
self.thread.start()
|
||||
|
||||
def stop_polling(self):
|
||||
"""停止线程"""
|
||||
if self.running:
|
||||
self.running = False
|
||||
self.thread.join()
|
||||
|
||||
def _poll_controller(self):
|
||||
"""后台线程函数,用于轮询控制器状态"""
|
||||
while self.running:
|
||||
# Process pygame events
|
||||
pygame.event.pump()
|
||||
|
||||
# Read button states
|
||||
for i in range(self.joystick.get_numbuttons()):
|
||||
self.states['buttons'][i] = self.joystick.get_button(i)
|
||||
|
||||
# Read stick/axis states (typically in the range -1.0 to 1.0)
|
||||
for i in range(self.joystick.get_numaxes()):
|
||||
self.states['axes'][i] = self.joystick.get_axis(i)
|
||||
|
||||
# Read hat states (each is a tuple (x, y) with values -1, 0, or 1)
|
||||
for i in range(self.joystick.get_numhats()):
|
||||
self.states['hats'][i] = self.joystick.get_hat(i)
|
||||
|
||||
# Limit the polling rate
|
||||
time.sleep(0.01)
|
||||
|
||||
def get_raw_states(self):
|
||||
"""获取当前控制器状态"""
|
||||
return self.states
|
||||
|
||||
class FlightStick(ControllerBase):
|
||||
def __init__(self, joystick_info):
|
||||
super().__init__(joystick_info)
|
||||
|
||||
def get_x_control_signal(self):
|
||||
x = 0
|
||||
if self.states['axes'][0] > self.validzone:
|
||||
x = 1
|
||||
elif self.states['axes'][0] < -self.validzone:
|
||||
x = -1
|
||||
return x
|
||||
|
||||
def get_y_control_signal(self):
|
||||
y = 0
|
||||
if self.states['axes'][1] > self.validzone:
|
||||
y = -1
|
||||
elif self.states['axes'][1] < -self.validzone:
|
||||
y = 1
|
||||
return y
|
||||
|
||||
def get_z_control_signal(self):
|
||||
z = 0
|
||||
if self.states['buttons'][0]:
|
||||
z = 1
|
||||
elif self.states['buttons'][1]:
|
||||
z = -1
|
||||
return z
|
||||
|
||||
def get_gripper_control_signal(self):
|
||||
gripper = 0
|
||||
if self.states['buttons'][2] == 1:
|
||||
gripper = 1
|
||||
elif self.states['buttons'][3] == 1:
|
||||
gripper = -1
|
||||
return gripper
|
||||
|
||||
def get_ratio_control_signal(self):
|
||||
ratio = self.ratio
|
||||
if self.states['axes'][2] > 0.8:
|
||||
ratio = self.ratio / self.scale_down
|
||||
elif self.states['axes'][2] < -0.8:
|
||||
ratio = self.ratio * self.scale_up
|
||||
return ratio
|
||||
|
||||
def get_rx_control_signal(self):
|
||||
rx = 0
|
||||
if self.states['hats'][0][0] == -1:
|
||||
rx = 1
|
||||
elif self.states['hats'][0][0] == 1:
|
||||
rx = -1
|
||||
else:
|
||||
rx = 0
|
||||
return rx
|
||||
|
||||
def get_ry_control_signal(self):
|
||||
ry = 0
|
||||
if self.states['hats'][0][1] == 1:
|
||||
ry = -1
|
||||
elif self.states['hats'][0][1] == -1:
|
||||
ry = 1
|
||||
else:
|
||||
ry = 0
|
||||
return ry
|
||||
|
||||
def get_rz_control_signal(self):
|
||||
rz = 0
|
||||
if self.states['axes'][3] < -self.validzone:
|
||||
rz = -1
|
||||
elif self.states['axes'][3] > self.validzone:
|
||||
rz = 1
|
||||
else:
|
||||
rz = 0
|
||||
return rz
|
||||
|
||||
def get_control_signal(self, prefix: str = ""):
|
||||
"""获取所有控制信号"""
|
||||
return {
|
||||
f'{prefix}_x': self.get_x_control_signal(),
|
||||
f'{prefix}_y': self.get_y_control_signal(),
|
||||
f'{prefix}_z': self.get_z_control_signal(),
|
||||
f'{prefix}_joint_4': self.get_rx_control_signal(),
|
||||
f'{prefix}_joint_5': self.get_ry_control_signal(),
|
||||
f'{prefix}_joint_6': self.get_rz_control_signal(),
|
||||
f'{prefix}_gripper': self.get_gripper_control_signal(),
|
||||
f'{prefix}_ratio': self.get_ratio_control_signal(),
|
||||
'gripper_vel': self.gripper_vel,
|
||||
'rxyz_vel': self.rxyz_vel,
|
||||
'xyz_vel': self.xyz_vel
|
||||
}
|
||||
|
||||
|
||||
|
||||
class XboxStick(ControllerBase):
|
||||
def __init__(self, joystick_info: dict):
|
||||
super().__init__(joystick_info)
|
||||
|
||||
def get_x_control_signal(self):
|
||||
"""获取 X 轴控制信号"""
|
||||
x = 0
|
||||
if self.states['hats'][0][0] == -1:
|
||||
x = 1
|
||||
elif self.states['hats'][0][0] == 1:
|
||||
x = -1
|
||||
return x
|
||||
|
||||
def get_y_control_signal(self):
|
||||
"""获取 Y 轴控制信号"""
|
||||
y = 0
|
||||
if self.states['hats'][0][1] == 1:
|
||||
y = -1
|
||||
elif self.states['hats'][0][1] == -1:
|
||||
y = 1
|
||||
return y
|
||||
|
||||
def get_z_control_signal(self):
|
||||
"""获取 Z 轴控制信号"""
|
||||
z = 0
|
||||
if self.states['axes'][4] > self.deadzone: # A 按钮
|
||||
z = -1
|
||||
elif self.states['axes'][4] < -self.deadzone: # B 按钮
|
||||
z = 1
|
||||
return z
|
||||
|
||||
def get_ratio_control_signal(self):
|
||||
"""获取速度控制信号"""
|
||||
ratio = self.ratio
|
||||
if self.states['axes'][2] > 0.8: # LT trigger
|
||||
ratio = self.ratio * self.scale_up
|
||||
elif self.states['axes'][5] > 0.8: # RT trigger
|
||||
ratio = self.ratio / self.scale_down
|
||||
return ratio
|
||||
|
||||
def get_gripper_control_signal(self):
|
||||
gripper = 0
|
||||
if self.states['buttons'][0] == 1:
|
||||
gripper = 1
|
||||
elif self.states['buttons'][1] == 1:
|
||||
gripper = -1
|
||||
return gripper
|
||||
|
||||
def get_rx_control_signal(self):
|
||||
"""获取 RX 轴控制信号"""
|
||||
rx = 0
|
||||
if self.states['axes'][0] > self.deadzone: # 左舵
|
||||
rx = -1
|
||||
elif self.states['axes'][0] < -self.deadzone: # 右舵
|
||||
rx = 1
|
||||
return rx
|
||||
|
||||
def get_ry_control_signal(self):
|
||||
"""获取 RY 轴控制信号"""
|
||||
ry = 0
|
||||
if self.states['axes'][1] > self.deadzone: # 上舵
|
||||
ry = 1
|
||||
elif self.states['axes'][1] < -self.deadzone: # 下舵
|
||||
ry = -1
|
||||
return ry
|
||||
|
||||
def get_rz_control_signal(self):
|
||||
"""获取 RZ 轴控制信号"""
|
||||
rz = 0
|
||||
if self.states['buttons'][4] == 1: # 左摇杆
|
||||
rz = 1
|
||||
elif self.states['buttons'][5] == 1: # 右摇杆
|
||||
rz = -1
|
||||
return rz
|
||||
|
||||
def get_control_signal(self, prefix: str = ""):
|
||||
"""获取所有控制信号"""
|
||||
return {
|
||||
f'{prefix}_x': self.get_x_control_signal(),
|
||||
f'{prefix}_y': self.get_y_control_signal(),
|
||||
f'{prefix}_z': self.get_z_control_signal(),
|
||||
f'{prefix}_joint_4': self.get_rx_control_signal(),
|
||||
f'{prefix}_joint_5': self.get_ry_control_signal(),
|
||||
f'{prefix}_joint_6': self.get_rz_control_signal(),
|
||||
f'{prefix}_gripper': self.get_gripper_control_signal(),
|
||||
f'{prefix}_ratio': self.get_ratio_control_signal(),
|
||||
'gripper_vel': self.gripper_vel,
|
||||
'rxyz_vel': self.rxyz_vel,
|
||||
'xyz_vel': self.xyz_vel
|
||||
}
|
||||
|
||||
|
||||
@dataclass
|
||||
class ControllerConfig:
|
||||
"""控制器配置"""
|
||||
init_joint: list
|
||||
init_pose: list
|
||||
max_gripper: int
|
||||
min_gripper: int
|
||||
config_file: str
|
||||
end_control_info: dict
|
||||
|
||||
|
||||
def parse_init_info(init_info: dict) -> ControllerConfig:
|
||||
"""解析初始化信息"""
|
||||
return ControllerConfig(
|
||||
init_joint=init_info['init_joint'],
|
||||
init_pose=init_info.get('init_pose', [0]*12),
|
||||
max_gripper=init_info['max_gripper'],
|
||||
min_gripper=init_info['min_gripper'],
|
||||
config_file=init_info['servo_config_file'],
|
||||
end_control_info=init_info['end_control_info']
|
||||
)
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
config = {
|
||||
'init_joint': {'joint': [-170, 90, 0, 90, 120, 0, 10, 170, 90, 0, -90, 120, 0, 10]},
|
||||
'init_pose': {},
|
||||
'max_gripper': {},
|
||||
'min_gripper': {},
|
||||
'servo_config_file': {},
|
||||
'end_control_info': {'left': "0300b14bff1100003708000010010000" , 'right': '0300e639bc2000007f50000011010000'}
|
||||
}
|
||||
config = parse_init_info(config)
|
||||
endpose_arm = DummyEndposeMaster(config)
|
||||
while True:
|
||||
gamepad_action = {}
|
||||
xyz = []
|
||||
for i, controller in enumerate(endpose_arm.controllers):
|
||||
# states = controller.get_raw_states()
|
||||
gamepad_action.update(controller.get_control_signal(controller.name))
|
||||
xyz += [f"{controller.name}_x", f"{controller.name}_y", f"{controller.name}_z"]
|
||||
time.sleep(1)
|
||||
print(gamepad_action)
|
||||
76
lerobot/common/robot_devices/teleop/realman_aloha_dual.py
Normal file
@@ -0,0 +1,76 @@
|
||||
import time
|
||||
import logging
|
||||
from typing import Dict
|
||||
from dataclasses import dataclass
|
||||
from lerobot.common.robot_devices.teleop.gamepad import RealmanAlohaMaster, DummyEndposeMaster
|
||||
|
||||
|
||||
@dataclass
|
||||
class ControllerConfig:
|
||||
"""控制器配置"""
|
||||
init_joint: list
|
||||
init_pose: list
|
||||
max_gripper: int
|
||||
min_gripper: int
|
||||
config_file: str
|
||||
end_control_info: dict
|
||||
|
||||
|
||||
class HybridController:
|
||||
def __init__(self, init_info):
|
||||
self.config = self._parse_init_info(init_info)
|
||||
self.joint = self.config.init_joint.copy()
|
||||
self.pose = self.config.init_pose.copy()
|
||||
|
||||
self.joint_arm = RealmanAlohaMaster(self.config)
|
||||
self.endpose_arm = DummyEndposeMaster(self.config)
|
||||
|
||||
def _parse_init_info(self, init_info: dict) -> ControllerConfig:
|
||||
"""解析初始化信息"""
|
||||
return ControllerConfig(
|
||||
init_joint=init_info['init_joint'],
|
||||
init_pose=init_info.get('init_pose', [0]*12),
|
||||
max_gripper=init_info['max_gripper'],
|
||||
min_gripper=init_info['min_gripper'],
|
||||
config_file=init_info['servo_config_file'],
|
||||
end_control_info=init_info['end_control_info']
|
||||
)
|
||||
|
||||
def get_action(self, state) -> Dict:
|
||||
"""获取控制动作"""
|
||||
try:
|
||||
endpose_action = self.endpose_arm.get_action(state)
|
||||
return endpose_action
|
||||
# return self.joint_arm.get_action()
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"获取动作失败: {e}")
|
||||
|
||||
def stop(self):
|
||||
self.joint_arm.stop()
|
||||
|
||||
def reset(self):
|
||||
"""重置控制器"""
|
||||
self.joint = self.config.init_joint.copy()
|
||||
self.pose = self.config.init_pose.copy()
|
||||
self.joint_control_mode = True
|
||||
|
||||
|
||||
# Usage example
|
||||
if __name__ == "__main__":
|
||||
init_info = {
|
||||
'init_joint': [-175, 90, 90, 45, 90, -90, 10, 175, 90, 90, -45, 90, 90, 10],
|
||||
'init_pose': [[-0.0305, 0.125938, 0.13153, 3.141, 0.698, -1.57, -0.030486, -0.11487, 0.144707, 3.141, 0.698, 1.57]],
|
||||
'max_gripper': 990,
|
||||
'min_gripper': 10,
|
||||
'servo_config_file': '/home/maic/LYT/lerobot/lerobot/common/robot_devices/teleop/servo_dual.yaml',
|
||||
'end_control_info': {'left': '0300b14bff1100003708000010010000', 'right': '030003f05e0400008e02000010010000'}
|
||||
}
|
||||
arm_controller = HybridController(init_info)
|
||||
time.sleep(1)
|
||||
try:
|
||||
while True:
|
||||
print(arm_controller.get_action())
|
||||
time.sleep(1)
|
||||
except KeyboardInterrupt:
|
||||
arm_controller.stop()
|
||||
4
lerobot/common/robot_devices/teleop/realman_mix.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
init_joint: [-90, 90, 90, -90, -90, 90]
|
||||
max_gripper: 990
|
||||
min_gripper: 10
|
||||
servo_config_file: "/home/maic/LYT/lerobot/lerobot/common/robot_devices/teleop/servo_arm.yaml"
|
||||
466
lerobot/common/robot_devices/teleop/realman_single.py
Normal file
@@ -0,0 +1,466 @@
|
||||
import pygame
|
||||
import threading
|
||||
import time
|
||||
import serial
|
||||
import binascii
|
||||
import logging
|
||||
import yaml
|
||||
from typing import Dict
|
||||
from Robotic_Arm.rm_robot_interface import *
|
||||
|
||||
|
||||
|
||||
class ServoArm:
|
||||
def __init__(self, config_file="config.yaml"):
|
||||
"""初始化机械臂的串口连接并发送初始数据。
|
||||
|
||||
Args:
|
||||
config_file (str): 配置文件的路径。
|
||||
"""
|
||||
self.config = self._load_config(config_file)
|
||||
self.port = self.config["port"]
|
||||
self.baudrate = self.config["baudrate"]
|
||||
self.joint_hex_data = self.config["joint_hex_data"]
|
||||
self.control_hex_data = self.config["control_hex_data"]
|
||||
self.arm_axis = self.config.get("arm_axis", 7)
|
||||
|
||||
try:
|
||||
self.serial_conn = serial.Serial(self.port, self.baudrate, timeout=0)
|
||||
self.bytes_to_send = binascii.unhexlify(self.joint_hex_data.replace(" ", ""))
|
||||
self.serial_conn.write(self.bytes_to_send)
|
||||
time.sleep(1)
|
||||
self.connected = True
|
||||
logging.info(f"串口连接成功: {self.port}")
|
||||
except Exception as e:
|
||||
logging.error(f"串口连接失败: {e}")
|
||||
self.connected = False
|
||||
|
||||
def _load_config(self, config_file):
|
||||
"""加载配置文件。
|
||||
|
||||
Args:
|
||||
config_file (str): 配置文件的路径。
|
||||
|
||||
Returns:
|
||||
dict: 配置文件内容。
|
||||
"""
|
||||
try:
|
||||
with open(config_file, "r") as file:
|
||||
config = yaml.safe_load(file)
|
||||
return config
|
||||
except Exception as e:
|
||||
logging.error(f"配置文件加载失败: {e}")
|
||||
# Fall back to the default configuration
|
||||
return {
|
||||
"port": "/dev/ttyUSB0",
|
||||
"baudrate": 460800,
|
||||
"joint_hex_data": "55 AA 02 00 00 67",
|
||||
"control_hex_data": "55 AA 08 00 00 B9",
|
||||
"arm_axis": 6
|
||||
}
|
||||
|
||||
def _bytes_to_signed_int(self, byte_data):
|
||||
"""将字节数据转换为有符号整数。
|
||||
|
||||
Args:
|
||||
byte_data (bytes): 字节数据。
|
||||
|
||||
Returns:
|
||||
int: 有符号整数。
|
||||
"""
|
||||
return int.from_bytes(byte_data, byteorder="little", signed=True)
|
||||
|
||||
def _parse_joint_data(self, hex_received):
|
||||
"""解析接收到的十六进制数据并提取关节数据。
|
||||
|
||||
Args:
|
||||
hex_received (str): 接收到的十六进制字符串数据。
|
||||
|
||||
Returns:
|
||||
dict: 解析后的关节数据。
|
||||
"""
|
||||
logging.debug(f"hex_received: {hex_received}")
|
||||
joints = {}
|
||||
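# Frame layout assumed by the offsets below: a 14-character hex header, then one
# 10-character field per joint whose first 8 characters encode a little-endian signed
# value in units of 1/10000 degree, followed by the gripper field.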
for i in range(self.arm_axis):
|
||||
start = 14 + i * 10
|
||||
end = start + 8
|
||||
joint_hex = hex_received[start:end]
|
||||
joint_byte_data = bytearray.fromhex(joint_hex)
|
||||
joint_value = self._bytes_to_signed_int(joint_byte_data) / 10000.0
|
||||
joints[f"joint_{i+1}"] = joint_value
|
||||
grasp_start = 14 + self.arm_axis*10
|
||||
grasp_hex = hex_received[grasp_start:grasp_start+8]
|
||||
grasp_byte_data = bytearray.fromhex(grasp_hex)
|
||||
# Normalize the gripper value
|
||||
grasp_value = self._bytes_to_signed_int(grasp_byte_data)/1000
|
||||
|
||||
joints["grasp"] = grasp_value
|
||||
return joints
|
||||
|
||||
def _parse_controller_data(self, hex_received):
|
||||
status = {
|
||||
'infrared': 0,
|
||||
'button': 0
|
||||
}
|
||||
if len(hex_received) == 18:
|
||||
status['infrared'] = self._bytes_to_signed_int(bytearray.fromhex(hex_received[12:14]))
|
||||
status['button'] = self._bytes_to_signed_int(bytearray.fromhex(hex_received[14:16]))
|
||||
# print(infrared)
|
||||
return status
|
||||
|
||||
def get_joint_actions(self):
|
||||
"""从串口读取数据并解析关节动作。
|
||||
|
||||
Returns:
|
||||
dict: 包含关节数据的字典。
|
||||
"""
|
||||
if not self.connected:
|
||||
return {}
|
||||
|
||||
try:
|
||||
self.serial_conn.write(self.bytes_to_send)
|
||||
time.sleep(0.02)
|
||||
bytes_received = self.serial_conn.read(self.serial_conn.inWaiting())
|
||||
if len(bytes_received) == 0:
|
||||
return {}
|
||||
|
||||
hex_received = binascii.hexlify(bytes_received).decode("utf-8").upper()
|
||||
actions = self._parse_joint_data(hex_received)
|
||||
return actions
|
||||
except Exception as e:
|
||||
logging.error(f"读取串口数据错误: {e}")
|
||||
return {}
|
||||
|
||||
def get_controller_status(self):
|
||||
bytes_to_send = binascii.unhexlify(self.control_hex_data.replace(" ", ""))
|
||||
self.serial_conn.write(bytes_to_send)
|
||||
time.sleep(0.02)
|
||||
bytes_received = self.serial_conn.read(self.serial_conn.inWaiting())
|
||||
hex_received = binascii.hexlify(bytes_received).decode("utf-8").upper()
|
||||
# print("control status:", hex_received)
|
||||
status = self._parse_controller_data(hex_received)
|
||||
return status
|
||||
|
||||
def close(self):
|
||||
"""关闭串口连接"""
|
||||
if self.connected and hasattr(self, 'serial_conn'):
|
||||
self.serial_conn.close()
|
||||
self.connected = False
|
||||
logging.info("串口连接已关闭")
|
||||
|
||||
|
||||
class HybridController:
|
||||
def __init__(self, init_info):
|
||||
# Initialize pygame and the joystick
|
||||
pygame.init()
|
||||
pygame.joystick.init()
|
||||
|
||||
# Check that a gamepad is connected
|
||||
if pygame.joystick.get_count() == 0:
|
||||
raise Exception("未检测到手柄")
|
||||
|
||||
# 初始化手柄
|
||||
self.joystick = pygame.joystick.Joystick(0)
|
||||
self.joystick.init()
|
||||
# Stick deadzone
|
||||
self.deadzone = 0.15
|
||||
# Control mode: True = joint control (primary), False = end-effector control
|
||||
self.joint_control_mode = True
|
||||
# Fine-control mode
|
||||
self.fine_control_mode = False
|
||||
|
||||
# Initialize the end-effector pose and joint angles
|
||||
self.init_joint = init_info['init_joint']
|
||||
self.init_pose = init_info.get('init_pose', [0]*6)
|
||||
self.max_gripper = init_info['max_gripper']
|
||||
self.min_gripper = init_info['min_gripper']
|
||||
servo_config_file = init_info['servo_config_file']
|
||||
self.joint = self.init_joint.copy()
|
||||
self.pose = self.init_pose.copy()
|
||||
self.pose_speeds = [0.0] * 6
|
||||
self.joint_speeds = [0.0] * 6
|
||||
self.tozero = False
|
||||
|
||||
# Master-arm joint state
|
||||
self.master_joint_actions = {}
|
||||
self.master_controller_status = {}
|
||||
self.use_master_arm = False
|
||||
|
||||
# End-effector pose limits
|
||||
self.pose_limits = [
|
||||
(-0.800, 0.800), # X (m)
|
||||
(-0.800, 0.800), # Y (m)
|
||||
(-0.800, 0.800), # Z (m)
|
||||
(-3.14, 3.14), # RX (rad)
|
||||
(-3.14, 3.14), # RY (rad)
|
||||
(-3.14, 3.14) # RZ (rad)
|
||||
]
|
||||
|
||||
# Joint angle limits (degrees)
|
||||
self.joint_limits = [
|
||||
(-180, 180), # joint 1
|
||||
(-180, 180), # joint 2
|
||||
(-180, 180), # joint 3
|
||||
(-180, 180), # joint 4
|
||||
(-180, 180), # joint 5
|
||||
(-180, 180) # joint 6
|
||||
]
|
||||
|
||||
# 控制参数
|
||||
self.linear_step = 0.002 # linear step size (m)
|
||||
self.angular_step = 0.01 # angular step size (rad)
|
||||
|
||||
# 夹爪状态和速度
|
||||
self.gripper_speed = 10
|
||||
self.gripper = self.min_gripper
|
||||
|
||||
# Initialize serial communication (reads the master-arm joint state)
|
||||
self.servo_arm = None
|
||||
if servo_config_file:
|
||||
try:
|
||||
self.servo_arm = ServoArm(servo_config_file)
|
||||
self.use_master_arm = True
|
||||
logging.info("串口主臂连接成功,启用主从控制模式")
|
||||
except Exception as e:
|
||||
logging.error(f"串口主臂连接失败: {e}")
|
||||
self.use_master_arm = False
|
||||
|
||||
# 启动更新线程
|
||||
self.running = True
|
||||
self.thread = threading.Thread(target=self.update_controller)
|
||||
self.thread.start()
|
||||
|
||||
print("混合控制器已启动")
|
||||
print("主控制模式: 关节控制")
|
||||
if self.use_master_arm:
|
||||
print("主从控制: 启用")
|
||||
print("Back按钮: 切换控制模式(关节/末端)")
|
||||
print("L3按钮: 切换精细控制模式")
|
||||
print("Start按钮: 重置到初始位置")
|
||||
|
||||
def _apply_nonlinear_mapping(self, value):
|
||||
"""应用非线性映射以提高控制精度"""
|
||||
sign = 1 if value >= 0 else -1
|
||||
return sign * (abs(value) ** 2)
|
||||
|
||||
def _normalize_angle(self, angle):
|
||||
"""将角度归一化到[-π, π]范围内"""
|
||||
import math
|
||||
while angle > math.pi:
|
||||
angle -= 2 * math.pi
|
||||
while angle < -math.pi:
|
||||
angle += 2 * math.pi
|
||||
return angle
|
||||
|
||||
def update_controller(self):
|
||||
while self.running:
|
||||
try:
|
||||
pygame.event.pump()
|
||||
except Exception as e:
|
||||
print(f"控制器错误: {e}")
|
||||
self.stop()
|
||||
continue
|
||||
|
||||
# 检查控制模式切换 (Back按钮)
|
||||
if self.joystick.get_button(6): # Back按钮
|
||||
self.joint_control_mode = not self.joint_control_mode
|
||||
mode_str = "关节控制" if self.joint_control_mode else "末端位姿控制"
|
||||
print(f"切换到{mode_str}模式")
|
||||
time.sleep(0.3) # 防止多次触发
|
||||
|
||||
# 检查精细控制模式切换 (L3按钮)
|
||||
if self.joystick.get_button(10): # L3按钮
|
||||
self.fine_control_mode = not self.fine_control_mode
|
||||
print(f"切换到{'精细' if self.fine_control_mode else '普通'}控制模式")
|
||||
time.sleep(0.3) # 防止多次触发
|
||||
|
||||
# 检查重置按钮 (Start按钮)
|
||||
if self.joystick.get_button(7): # Start按钮
|
||||
print("重置机械臂到初始位置...")
|
||||
# print("init_joint", self.init_joint.copy())
|
||||
self.tozero = True
|
||||
self.joint = self.init_joint.copy()
|
||||
self.pose = self.init_pose.copy()
|
||||
self.pose_speeds = [0.0] * 6
|
||||
self.joint_speeds = [0.0] * 6
|
||||
self.gripper_speed = 10
|
||||
self.gripper = self.min_gripper
|
||||
print("机械臂已重置到初始位置")
|
||||
time.sleep(0.3) # 防止多次触发
|
||||
|
||||
# 从串口获取主臂关节状态
|
||||
if self.servo_arm and self.servo_arm.connected:
|
||||
try:
|
||||
self.master_joint_actions = self.servo_arm.get_joint_actions()
|
||||
self.master_controller_status = self.servo_arm.get_controller_status()
|
||||
if self.master_joint_actions:
|
||||
logging.debug(f"主臂关节状态: {self.master_joint_actions}")
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"获取主臂状态错误: {e}")
|
||||
self.master_joint_actions = {}
|
||||
# print(self.master_joint_actions)
|
||||
|
||||
# 根据控制模式更新相应的控制逻辑
|
||||
if self.joint_control_mode:
|
||||
# 关节控制模式下,优先使用主臂数据,Xbox作为辅助
|
||||
self.update_joint_control()
|
||||
else:
|
||||
# 末端控制模式,使用Xbox控制
|
||||
self.update_end_pose()
|
||||
time.sleep(0.02)
|
||||
# print('gripper:', self.gripper)
|
||||
|
||||
def update_joint_control(self):
|
||||
"""更新关节角度控制 - 优先使用主臂数据"""
|
||||
if self.use_master_arm and self.master_joint_actions:
|
||||
# 主从控制模式:直接使用主臂的关节角度
|
||||
try:
|
||||
# 将主臂关节角度映射到从臂
|
||||
for i in range(6): # 假设只有6个关节需要控制
|
||||
joint_key = f"joint_{i+1}"
|
||||
if joint_key in self.master_joint_actions:
|
||||
# 直接使用主臂的关节角度(已经是度数)
|
||||
self.joint[i] = self.master_joint_actions[joint_key]
|
||||
|
||||
# 应用关节限制
|
||||
min_val, max_val = self.joint_limits[i]
|
||||
self.joint[i] = max(min_val, min(max_val, self.joint[i]))
|
||||
|
||||
# print(self.joint)
|
||||
logging.debug(f"主臂关节映射到从臂: {self.joint[:6]}")
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"主臂数据映射错误: {e}")
|
||||
|
||||
# 如果有主臂夹爪数据,使用主臂夹爪状态
|
||||
if self.use_master_arm and "grasp" in self.master_joint_actions:
|
||||
self.gripper = self.master_joint_actions["grasp"] * 1000
|
||||
self.joint[-1] = self.gripper
|
||||
|
||||
|
||||
def update_end_pose(self):
|
||||
"""更新末端位姿控制"""
|
||||
# 根据控制模式调整步长
|
||||
current_linear_step = self.linear_step * (0.1 if self.fine_control_mode else 1.0)
|
||||
current_angular_step = self.angular_step * (0.1 if self.fine_control_mode else 1.0)
|
||||
|
||||
# 方向键控制XY
|
||||
hat = self.joystick.get_hat(0)
|
||||
hat_up = hat[1] == 1 # Y+
|
||||
hat_down = hat[1] == -1 # Y-
|
||||
hat_left = hat[0] == -1 # X-
|
||||
hat_right = hat[0] == 1 # X+
|
||||
|
||||
# 右摇杆控制Z
|
||||
right_y_raw = -self.joystick.get_axis(4)
|
||||
# 左摇杆控制RZ
|
||||
left_y_raw = -self.joystick.get_axis(1)
|
||||
|
||||
# 应用死区
|
||||
right_y = 0.0 if abs(right_y_raw) < self.deadzone else right_y_raw
|
||||
left_y = 0.0 if abs(left_y_raw) < self.deadzone else left_y_raw
|
||||
|
||||
# 计算各轴速度
|
||||
self.pose_speeds[1] = current_linear_step if hat_up else (-current_linear_step if hat_down else 0.0) # Y
|
||||
self.pose_speeds[0] = -current_linear_step if hat_left else (current_linear_step if hat_right else 0.0) # X
|
||||
|
||||
# 设置Z速度(右摇杆Y轴控制)
|
||||
z_mapping = self._apply_nonlinear_mapping(right_y)
|
||||
self.pose_speeds[2] = z_mapping * current_linear_step # Z
|
||||
|
||||
# L1/R1控制RX旋转
|
||||
LB = self.joystick.get_button(4) # RX-
|
||||
RB = self.joystick.get_button(5) # RX+
|
||||
self.pose_speeds[3] = (-current_angular_step if LB else (current_angular_step if RB else 0.0))
|
||||
|
||||
# △/□控制RY旋转
|
||||
triangle = self.joystick.get_button(2) # RY+
|
||||
square = self.joystick.get_button(3) # RY-
|
||||
self.pose_speeds[4] = (current_angular_step if triangle else (-current_angular_step if square else 0.0))
|
||||
|
||||
# 左摇杆Y轴控制RZ旋转
|
||||
rz_mapping = self._apply_nonlinear_mapping(left_y)
|
||||
self.pose_speeds[5] = rz_mapping * current_angular_step * 2 # RZ
|
||||
|
||||
# 夹爪控制(圈/叉)
|
||||
circle = self.joystick.get_button(1) # 夹爪开
|
||||
cross = self.joystick.get_button(0) # 夹爪关
|
||||
if circle:
|
||||
self.gripper = min(self.max_gripper, self.gripper + self.gripper_speed)
|
||||
elif cross:
|
||||
self.gripper = max(self.min_gripper, self.gripper - self.gripper_speed)
|
||||
|
||||
# 更新末端位姿
|
||||
for i in range(6):
|
||||
self.pose[i] += self.pose_speeds[i]
|
||||
|
||||
# 角度归一化处理
|
||||
for i in range(3, 6):
|
||||
self.pose[i] = self._normalize_angle(self.pose[i])
|
||||
|
||||
def update_joint_state(self, joint):
|
||||
self.joint = joint
|
||||
# self.tozero = False
|
||||
|
||||
def update_endpose_state(self, end_pose):
|
||||
self.pose = end_pose
|
||||
# self.tozero = False
|
||||
|
||||
def update_tozero_state(self, tozero):
|
||||
self.tozero = tozero
|
||||
|
||||
|
||||
def get_action(self) -> Dict:
|
||||
"""获取当前控制命令"""
|
||||
return {
|
||||
'control_mode': 'joint' if self.joint_control_mode else 'end_pose',
|
||||
'use_master_arm': self.use_master_arm,
|
||||
'master_joint_actions': self.master_joint_actions,
|
||||
'master_controller_status': self.master_controller_status,
|
||||
'end_pose': self.pose,
|
||||
'joint_angles': self.joint,
|
||||
'gripper': int(self.gripper),
|
||||
'tozero': self.tozero
|
||||
}
|
||||
|
||||
def stop(self):
|
||||
"""停止控制器"""
|
||||
self.running = False
|
||||
if self.thread.is_alive():
|
||||
self.thread.join()
|
||||
if self.servo_arm:
|
||||
self.servo_arm.close()
|
||||
pygame.quit()
|
||||
print("混合控制器已退出")
|
||||
|
||||
def reset(self):
|
||||
"""重置到初始状态"""
|
||||
self.joint = self.init_joint.copy()
|
||||
self.pose = self.init_pose.copy()
|
||||
self.pose_speeds = [0.0] * 6
|
||||
self.joint_speeds = [0.0] * 6
|
||||
self.gripper_speed = 10
|
||||
self.gripper = self.min_gripper
|
||||
print("已重置到初始状态")
|
||||
|
||||
|
||||
# Usage example
|
||||
if __name__ == "__main__":
|
||||
# Initialize the Realman robotic arm
|
||||
arm = RoboticArm(rm_thread_mode_e.RM_TRIPLE_MODE_E)
|
||||
# Create the arm connection
|
||||
handle = arm.rm_create_robot_arm("192.168.3.18", 8080)
|
||||
print(f"机械臂连接ID: {handle.id}")
|
||||
init_pose = arm.rm_get_current_arm_state()[1]['pose']
|
||||
|
||||
with open('/home/maic/LYT/lerobot/lerobot/common/robot_devices/teleop/realman_mix.yaml', "r") as file:
|
||||
config = yaml.safe_load(file)
|
||||
config['init_pose'] = init_pose
|
||||
arm_controller = HybridController(config)
|
||||
try:
|
||||
while True:
|
||||
print(arm_controller.get_action())
|
||||
time.sleep(0.1)
|
||||
except KeyboardInterrupt:
|
||||
arm_controller.stop()
|
||||
6
lerobot/common/robot_devices/teleop/servo_arm.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
port: /dev/ttyUSB0
|
||||
right_port: /dev/ttyUSB1
|
||||
baudrate: 460800
|
||||
joint_hex_data: "55 AA 02 00 00 67"
|
||||
control_hex_data: "55 AA 08 00 00 B9"
|
||||
arm_axis: 6
|
||||
6
lerobot/common/robot_devices/teleop/servo_dual.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
left_port: /dev/ttyUSB0
|
||||
right_port: /dev/ttyUSB1
|
||||
baudrate: 460800
|
||||
joint_hex_data: "55 AA 02 00 00 67"
|
||||
control_hex_data: "55 AA 08 00 00 B9"
|
||||
arm_axis: 6
|
||||
321
lerobot/common/robot_devices/teleop/servo_server.py
Normal file
@@ -0,0 +1,321 @@
|
||||
import threading
|
||||
import time
|
||||
import serial
|
||||
import binascii
|
||||
import logging
|
||||
import yaml
|
||||
from typing import Dict
|
||||
|
||||
# logging.basicConfig(
|
||||
# level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
|
||||
# )
|
||||
|
||||
|
||||
class ServoArmServer:
|
||||
def __init__(self, config_file="servo_dual.yaml"):
|
||||
"""初始化服务器,创建左右机械臂实例"""
|
||||
self.config_file = config_file
|
||||
self.left_servo_arm = None
|
||||
self.right_servo_arm = None
|
||||
self.running = False
|
||||
self.data_lock = threading.Lock()
|
||||
self.latest_data = {
|
||||
'left_joint_actions': {},
|
||||
'right_joint_actions': {},
|
||||
'left_controller_status': {},
|
||||
'right_controller_status': {},
|
||||
'timestamp': time.time()
|
||||
}
|
||||
|
||||
# 初始化机械臂
|
||||
self._initialize_arms()
|
||||
# 启动数据采集线程
|
||||
self._start_data_collection()
|
||||
|
||||
|
||||
def _initialize_arms(self):
|
||||
"""初始化左右机械臂"""
|
||||
try:
|
||||
self.left_servo_arm = ServoArm(self.config_file, "left_port")
|
||||
logging.info("左master机械臂初始化成功")
|
||||
except Exception as e:
|
||||
logging.error(f"左master机械臂初始化失败: {e}")
|
||||
|
||||
try:
|
||||
self.right_servo_arm = ServoArm(self.config_file, "right_port")
|
||||
logging.info("右master机械臂初始化成功")
|
||||
except Exception as e:
|
||||
logging.error(f"右master机械臂初始化失败: {e}")
|
||||
|
||||
def _start_data_collection(self):
|
||||
"""启动数据采集线程"""
|
||||
self.running = True
|
||||
|
||||
# 创建左臂数据采集线程
|
||||
self.left_data_thread = threading.Thread(target=self._left_arm_data_loop)
|
||||
self.left_data_thread.daemon = True
|
||||
self.left_data_thread.start()
|
||||
|
||||
# 创建右臂数据采集线程
|
||||
self.right_data_thread = threading.Thread(target=self._right_arm_data_loop)
|
||||
self.right_data_thread.daemon = True
|
||||
self.right_data_thread.start()
|
||||
|
||||
logging.info("左右机械臂数据采集线程已启动")
|
||||
|
||||
def _left_arm_data_loop(self):
|
||||
"""左机械臂数据采集循环"""
|
||||
while self.running:
|
||||
try:
|
||||
left_actions = {}
|
||||
left_controller_status = {}
|
||||
|
||||
# 获取左机械臂数据
|
||||
if self.left_servo_arm and self.left_servo_arm.connected:
|
||||
left_actions = self.left_servo_arm.get_joint_actions()
|
||||
left_controller_status = self.left_servo_arm.get_controller_status()
|
||||
|
||||
if self._check_val_safety(left_actions) == False:
|
||||
time.sleep(0.02)
|
||||
continue
|
||||
# 更新左机械臂数据
|
||||
with self.data_lock:
|
||||
self.latest_data['left_joint_actions'] = [left_actions[k] for k in left_actions]
|
||||
self.latest_data['left_controller_status'] = left_controller_status
|
||||
# 更新dual_joint_actions
|
||||
if self.latest_data['right_joint_actions']:
|
||||
self.latest_data['dual_joint_actions'] = self.latest_data['left_joint_actions'] + self.latest_data['right_joint_actions']
|
||||
self.latest_data['timestamp'] = time.time()
|
||||
|
||||
time.sleep(0.02) # 50Hz采集频率
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"左机械臂数据采集错误: {e}")
|
||||
time.sleep(0.1)
|
||||
|
||||
def _right_arm_data_loop(self):
|
||||
"""右机械臂数据采集循环"""
|
||||
while self.running:
|
||||
try:
|
||||
right_actions = {}
|
||||
right_controller_status = {}
|
||||
|
||||
# 获取右机械臂数据
|
||||
if self.right_servo_arm and self.right_servo_arm.connected:
|
||||
right_actions = self.right_servo_arm.get_joint_actions()
|
||||
right_controller_status = self.right_servo_arm.get_controller_status()
|
||||
|
||||
if self._check_val_safety(right_actions) == False:
|
||||
time.sleep(0.02)
|
||||
continue
|
||||
# 更新右机械臂数据
|
||||
with self.data_lock:
|
||||
self.latest_data['right_joint_actions'] = [right_actions[k] for k in right_actions]
|
||||
self.latest_data['right_controller_status'] = right_controller_status
|
||||
# 更新dual_joint_actions
|
||||
if self.latest_data['left_joint_actions']:
|
||||
self.latest_data['dual_joint_actions'] = self.latest_data['left_joint_actions'] + self.latest_data['right_joint_actions']
|
||||
self.latest_data['timestamp'] = time.time()
|
||||
|
||||
time.sleep(0.02) # 50Hz采集频率
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"右机械臂数据采集错误: {e}")
|
||||
time.sleep(0.1)
|
||||
|
||||
def _check_val_safety(self, data: dict):
|
||||
data = [data[k] for k in data]
|
||||
ret = True
|
||||
if len(data) != self.left_servo_arm.arm_axis + 1:
|
||||
ret = False
|
||||
for v in data:
|
||||
if v > 180 or v < -180:
|
||||
ret = False
|
||||
return ret
|
||||
|
||||
# ZeroRPC service methods
|
||||
def get_joint_data(self):
|
||||
"""获取最新的关节数据"""
|
||||
with self.data_lock:
|
||||
return self.latest_data.copy()
|
||||
|
||||
def get_left_joint_actions(self):
|
||||
"""获取左机械臂关节数据和控制器状态"""
|
||||
with self.data_lock:
|
||||
return {
|
||||
'data': self.latest_data['left_joint_actions'],
|
||||
'controller_status': self.latest_data['left_controller_status'],
|
||||
'timestamp': self.latest_data['timestamp']
|
||||
}
|
||||
|
||||
def get_right_joint_actions(self):
|
||||
"""获取右机械臂关节数据和控制器状态"""
|
||||
with self.data_lock:
|
||||
return {
|
||||
'data': self.latest_data['right_joint_actions'],
|
||||
'controller_status': self.latest_data['right_controller_status'],
|
||||
'timestamp': self.latest_data['timestamp']
|
||||
}
|
||||
|
||||
def get_connection_status(self):
|
||||
"""获取连接状态"""
|
||||
return {
|
||||
'left_connected': self.left_servo_arm.connected if self.left_servo_arm else False,
|
||||
'right_connected': self.right_servo_arm.connected if self.right_servo_arm else False,
|
||||
'server_running': self.running
|
||||
}
|
||||
|
||||
def ping(self):
|
||||
"""测试连接"""
|
||||
return "pong"
|
||||
|
||||
def shutdown(self):
|
||||
"""关闭服务器"""
|
||||
logging.info("正在关闭服务器...")
|
||||
self.running = False
|
||||
|
||||
if self.left_servo_arm:
|
||||
self.left_servo_arm.close()
|
||||
if self.right_servo_arm:
|
||||
self.right_servo_arm.close()
|
||||
|
||||
return "Server shutdown"
|
||||
|
||||
|
||||
class ServoArm:
|
||||
def __init__(self, config_file="config.yaml", port_name="left_port"):
|
||||
"""初始化机械臂的串口连接并发送初始数据。
|
||||
|
||||
Args:
|
||||
config_file (str): 配置文件的路径。
|
||||
"""
|
||||
self.config = self._load_config(config_file)
|
||||
self.port = self.config[port_name]
|
||||
self.baudrate = self.config["baudrate"]
|
||||
self.joint_hex_data = self.config["joint_hex_data"]
|
||||
self.control_hex_data = self.config["control_hex_data"]
|
||||
self.arm_axis = self.config.get("arm_axis", 7)
|
||||
|
||||
try:
|
||||
self.serial_conn = serial.Serial(self.port, self.baudrate, timeout=0)
|
||||
self.bytes_to_send = binascii.unhexlify(self.joint_hex_data.replace(" ", ""))
|
||||
self.serial_conn.write(self.bytes_to_send)
|
||||
time.sleep(1)
|
||||
self.connected = True
|
||||
logging.info(f"串口连接成功: {self.port}")
|
||||
except Exception as e:
|
||||
logging.error(f"串口连接失败: {e}")
|
||||
self.connected = False
|
||||
|
||||
def _load_config(self, config_file):
|
||||
"""加载配置文件。
|
||||
|
||||
Args:
|
||||
config_file (str): 配置文件的路径。
|
||||
|
||||
Returns:
|
||||
dict: 配置文件内容。
|
||||
"""
|
||||
try:
|
||||
with open(config_file, "r") as file:
|
||||
config = yaml.safe_load(file)
|
||||
return config
|
||||
except Exception as e:
|
||||
logging.error(f"配置文件加载失败: {e}")
|
||||
# Fall back to the default configuration
|
||||
return {
|
||||
"port": "/dev/ttyUSB0",
|
||||
"baudrate": 460800,
|
||||
"joint_hex_data": "55 AA 02 00 00 67",
|
||||
"control_hex_data": "55 AA 08 00 00 B9",
|
||||
"arm_axis": 6
|
||||
}
|
||||
|
||||
def _bytes_to_signed_int(self, byte_data):
|
||||
"""将字节数据转换为有符号整数。
|
||||
|
||||
Args:
|
||||
byte_data (bytes): 字节数据。
|
||||
|
||||
Returns:
|
||||
int: 有符号整数。
|
||||
"""
|
||||
return int.from_bytes(byte_data, byteorder="little", signed=True)
|
||||
|
||||
def _parse_joint_data(self, hex_received):
|
||||
"""解析接收到的十六进制数据并提取关节数据。
|
||||
|
||||
Args:
|
||||
hex_received (str): 接收到的十六进制字符串数据。
|
||||
|
||||
Returns:
|
||||
dict: 解析后的关节数据。
|
||||
"""
|
||||
logging.debug(f"hex_received: {hex_received}")
|
||||
joints = {}
|
||||
for i in range(self.arm_axis):
|
||||
start = 14 + i * 10
|
||||
end = start + 8
|
||||
joint_hex = hex_received[start:end]
|
||||
joint_byte_data = bytearray.fromhex(joint_hex)
|
||||
joint_value = self._bytes_to_signed_int(joint_byte_data) / 10000.0
|
||||
joints[f"joint_{i+1}"] = joint_value #/ 180
|
||||
grasp_start = 14 + self.arm_axis*10
|
||||
grasp_hex = hex_received[grasp_start:grasp_start+8]
|
||||
grasp_byte_data = bytearray.fromhex(grasp_hex)
|
||||
# Normalize the gripper value
|
||||
grasp_value = self._bytes_to_signed_int(grasp_byte_data)/1000
|
||||
|
||||
joints["grasp"] = grasp_value
|
||||
return joints
|
||||
|
||||
def _parse_controller_data(self, hex_received):
|
||||
status = {
|
||||
'infrared': 0,
|
||||
'button': 0
|
||||
}
|
||||
if len(hex_received) == 18:
|
||||
status['infrared'] = self._bytes_to_signed_int(bytearray.fromhex(hex_received[12:14]))
|
||||
status['button'] = self._bytes_to_signed_int(bytearray.fromhex(hex_received[14:16]))
|
||||
# print(infrared)
|
||||
return status
|
||||
|
||||
def get_joint_actions(self):
|
||||
"""从串口读取数据并解析关节动作。
|
||||
|
||||
Returns:
|
||||
dict: 包含关节数据的字典。
|
||||
"""
|
||||
if not self.connected:
|
||||
return {}
|
||||
|
||||
try:
|
||||
self.serial_conn.write(self.bytes_to_send)
|
||||
time.sleep(0.025)
|
||||
bytes_received = self.serial_conn.read(self.serial_conn.inWaiting())
|
||||
if len(bytes_received) == 0:
|
||||
return {}
|
||||
|
||||
hex_received = binascii.hexlify(bytes_received).decode("utf-8").upper()
|
||||
actions = self._parse_joint_data(hex_received)
|
||||
return actions
|
||||
except Exception as e:
|
||||
logging.error(f"读取串口数据错误: {e}")
|
||||
return {}
|
||||
|
||||
def get_controller_status(self):
|
||||
bytes_to_send = binascii.unhexlify(self.control_hex_data.replace(" ", ""))
|
||||
self.serial_conn.write(bytes_to_send)
|
||||
time.sleep(0.025)
|
||||
bytes_received = self.serial_conn.read(self.serial_conn.inWaiting())
|
||||
hex_received = binascii.hexlify(bytes_received).decode("utf-8").upper()
|
||||
# print("control status:", hex_received)
|
||||
status = self._parse_controller_data(hex_received)
|
||||
return status
|
||||
|
||||
def close(self):
|
||||
"""关闭串口连接"""
|
||||
if self.connected and hasattr(self, 'serial_conn'):
|
||||
self.serial_conn.close()
|
||||
self.connected = False
|
||||
logging.info("串口连接已关闭")
|
||||
@@ -175,9 +175,11 @@ def say(text, blocking=False):
|
||||
cmd = ["say", text]
|
||||
|
||||
elif system == "Linux":
|
||||
cmd = ["spd-say", text]
|
||||
if blocking:
|
||||
cmd.append("--wait")
|
||||
# cmd = ["spd-say", text]
|
||||
cmd = ["edge-playback", "-t", text]
|
||||
################## Change: this part is commented out ###########################
|
||||
# if blocking:
|
||||
# cmd.append("--wait")
|
||||
|
||||
elif system == "Windows":
|
||||
cmd = [
|
||||
|
||||
@@ -273,7 +273,6 @@ def record(
|
||||
|
||||
# Load pretrained policy
|
||||
policy = None if cfg.policy is None else make_policy(cfg.policy, ds_meta=dataset.meta)
|
||||
|
||||
if not robot.is_connected:
|
||||
robot.connect()
|
||||
|
||||
@@ -290,6 +289,9 @@ def record(
|
||||
if has_method(robot, "teleop_safety_stop"):
|
||||
robot.teleop_safety_stop()
|
||||
|
||||
# import pdb
|
||||
# pdb.set_trace()
|
||||
|
||||
recorded_episodes = 0
|
||||
while True:
|
||||
if recorded_episodes >= cfg.num_episodes:
|
||||
|
||||
@@ -94,8 +94,8 @@ def rollout(
|
||||
data will probably need to be discarded (for environments that aren't the first one to be done).
|
||||
|
||||
The return dictionary contains:
|
||||
(optional) "observation": A a dictionary of (batch, sequence + 1, *) tensors mapped to observation
|
||||
keys. NOTE the that this has an extra sequence element relative to the other keys in the
|
||||
(optional) "observation": A dictionary of (batch, sequence + 1, *) tensors mapped to observation
|
||||
keys. NOTE that this has an extra sequence element relative to the other keys in the
|
||||
dictionary. This is because an extra observation is included for after the environment is
|
||||
terminated or truncated.
|
||||
"action": A (batch, sequence, action_dim) tensor of actions applied based on the observations (not
|
||||
|
||||
@@ -174,7 +174,10 @@ def run_server(
|
||||
dataset.meta.get_video_file_path(episode_id, key) for key in dataset.meta.video_keys
|
||||
]
|
||||
videos_info = [
|
||||
{"url": url_for("static", filename=video_path), "filename": video_path.parent.name}
|
||||
{
|
||||
"url": url_for("static", filename=str(video_path).replace("\\", "/")),
|
||||
"filename": video_path.parent.name,
|
||||
}
|
||||
for video_path in video_paths
|
||||
]
|
||||
tasks = dataset.meta.episodes[episode_id]["tasks"]
|
||||
@@ -381,7 +384,7 @@ def visualize_dataset_html(
|
||||
if isinstance(dataset, LeRobotDataset):
|
||||
ln_videos_dir = static_dir / "videos"
|
||||
if not ln_videos_dir.exists():
|
||||
ln_videos_dir.symlink_to((dataset.root / "videos").resolve())
|
||||
ln_videos_dir.symlink_to((dataset.root / "videos").resolve().as_posix())
|
||||
|
||||
if serve:
|
||||
run_server(dataset, episodes, host, port, static_dir, template_dir)
|
||||
|
||||
BIN
media/so101/follower_middle.webp
Normal file
|
After Width: | Height: | Size: 64 KiB |
BIN
media/so101/follower_rest.webp
Normal file
|
After Width: | Height: | Size: 41 KiB |
BIN
media/so101/follower_rotated.webp
Normal file
|
After Width: | Height: | Size: 45 KiB |
BIN
media/so101/follower_zero.webp
Normal file
|
After Width: | Height: | Size: 64 KiB |
BIN
media/so101/leader_middle.webp
Normal file
|
After Width: | Height: | Size: 35 KiB |
BIN
media/so101/leader_rest.webp
Normal file
|
After Width: | Height: | Size: 39 KiB |
BIN
media/so101/leader_rotated.webp
Normal file
|
After Width: | Height: | Size: 38 KiB |
BIN
media/so101/leader_zero.webp
Normal file
|
After Width: | Height: | Size: 30 KiB |
BIN
media/so101/so101-leader.webp
Normal file
|
After Width: | Height: | Size: 151 KiB |
BIN
media/so101/so101.webp
Normal file
|
After Width: | Height: | Size: 130 KiB |
@@ -49,7 +49,7 @@ dependencies = [
|
||||
"datasets>=2.19.0",
|
||||
"deepdiff>=7.0.1",
|
||||
"diffusers>=0.27.2",
|
||||
"draccus>=0.10.0",
|
||||
"draccus==0.10.0",
|
||||
"einops>=0.8.0",
|
||||
"flask>=3.0.3",
|
||||
"gdown>=5.1.0",
|
||||
@@ -62,8 +62,8 @@ dependencies = [
|
||||
"omegaconf>=2.3.0",
|
||||
"opencv-python-headless>=4.9.0",
|
||||
"packaging>=24.2",
|
||||
"av>=12.0.5",
|
||||
"pymunk>=6.6.0",
|
||||
"av>=14.2.0",
|
||||
"pymunk>=6.6.0,<7.0.0",
|
||||
"pynput>=1.7.7",
|
||||
"pyzmq>=26.2.1",
|
||||
"rerun-sdk>=0.21.0",
|
||||
@@ -77,6 +77,7 @@ dependencies = [
|
||||
|
||||
[project.optional-dependencies]
|
||||
aloha = ["gym-aloha>=0.1.1 ; python_version < '4.0'"]
|
||||
docs = ["hf-doc-builder @ git+https://github.com/huggingface/doc-builder.git@main", "watchdog >= 6.0.0"]
|
||||
dev = ["pre-commit>=3.7.0", "debugpy>=1.8.1"]
|
||||
dora = [
|
||||
"gym-dora @ git+https://github.com/dora-rs/dora-lerobot.git#subdirectory=gym_dora ; python_version < '4.0'",
|
||||
@@ -85,6 +86,7 @@ dynamixel = ["dynamixel-sdk>=3.7.31", "pynput>=1.7.7"]
|
||||
feetech = ["feetech-servo-sdk>=1.0.0", "pynput>=1.7.7"]
|
||||
intelrealsense = ["pyrealsense2>=2.55.1.6486 ; sys_platform != 'darwin'"]
|
||||
pi0 = ["transformers>=4.48.0"]
|
||||
smolvla = ["transformers>=4.50.3", "num2words>=0.5.14", "accelerate>=1.7.0"]
|
||||
pusht = ["gym-pusht>=0.1.5 ; python_version < '4.0'"]
|
||||
stretch = [
|
||||
"hello-robot-stretch-body>=0.7.27 ; python_version < '4.0' and sys_platform == 'linux'",
|
||||
|
||||
168
realman.md
Normal file
@@ -0,0 +1,168 @@
|
||||
# Install
|
||||
Create a virtual environment with Python 3.10 and activate it, e.g. with [`miniconda`](https://docs.anaconda.com/free/miniconda/index.html):
|
||||
```bash
|
||||
conda create -y -n lerobot python=3.10
|
||||
conda activate lerobot
|
||||
```
|
||||
|
||||
Install 🤗 LeRobot:
|
||||
```bash
|
||||
pip install -e . -i https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
pip install edge-tts
|
||||
sudo apt install mpv -y
|
||||
|
||||
# pip uninstall numpy
|
||||
# pip install numpy==1.26.0
|
||||
# pip install pynput
|
||||
```
|
||||
|
||||
/!\ For Linux only, ffmpeg and opencv require a conda install for now. Run this exact sequence of commands:
|
||||
```bash
|
||||
conda install ffmpeg=7.1.1 -c conda-forge
|
||||
# pip uninstall opencv-python
|
||||
# conda install "opencv>=4.10.0"
|
||||
```
|
||||
|
||||
Install Realman SDK:
|
||||
```bash
|
||||
pip install Robotic_Arm==1.0.4.1
|
||||
pip install pygame
|
||||
```
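
A quick connectivity check after installing the SDK (a minimal sketch; the IP 192.168.3.18 matches the example scripts under realman_src, adjust it to your arm):
```python
from Robotic_Arm.rm_robot_interface import RoboticArm, rm_thread_mode_e

# Connect to the arm controller and print the connection id (same calls as realman_src/movep_canfd.py)
arm = RoboticArm(rm_thread_mode_e.RM_TRIPLE_MODE_E)
handle = arm.rm_create_robot_arm("192.168.3.18", 8080)
print("connection id:", handle.id)
print(arm.rm_get_current_arm_state())  # current joint/pose state
arm.rm_delete_robot_arm()              # release the connection
```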
|
||||
|
||||
# Integrating Piper with LeRobot
|
||||
See lerobot_piper_tutorial/1. 🤗 LeRobot:新增机械臂的一般流程.pdf.
|
||||
|
||||
# Teleoperate
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=realman_dual \
|
||||
--robot.inference_time=false \
|
||||
--control.type=teleoperate \
|
||||
--control.display_data=true
|
||||
```
|
||||
Setting `--control.display_data=true` turns on the rerun.io visualization; set it to false to turn it off.
|
||||
|
||||
# Record
|
||||
Set dataset root path
|
||||
```bash
|
||||
HF_USER=$PWD/data
|
||||
echo $HF_USER
|
||||
```
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=realman \
|
||||
--robot.inference_time=false \
|
||||
--control.type=record \
|
||||
--control.fps=15 \
|
||||
--control.single_task="move" \
|
||||
--control.repo_id=maic/test \
|
||||
--control.num_episodes=2 \
|
||||
--control.warmup_time_s=2 \
|
||||
--control.episode_time_s=10 \
|
||||
--control.reset_time_s=10 \
|
||||
--control.play_sounds=true \
|
||||
--control.push_to_hub=false \
|
||||
--control.display_data=true
|
||||
```
|
||||
|
||||
Press the right arrow (->) at any time during episode recording to stop early and move on to resetting; press it again during resetting to stop early and move on to the next episode recording.
Press the left arrow (<-) at any time during episode recording or resetting to stop early, cancel the current episode, and re-record it.
Press escape (ESC) at any time during episode recording to end the session early and go straight to video encoding and dataset uploading.
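
A minimal sketch of how such key handling can be wired up with `pynput` (the real bindings live in lerobot's control loop; the `events` dict here is illustrative):
```python
from pynput import keyboard

events = {"exit_early": False, "rerecord_episode": False, "stop_recording": False}

def on_press(key):
    if key == keyboard.Key.right:
        events["exit_early"] = True        # finish the current phase early
    elif key == keyboard.Key.left:
        events["rerecord_episode"] = True  # cancel and re-record the episode
    elif key == keyboard.Key.esc:
        events["stop_recording"] = True    # end the whole session

listener = keyboard.Listener(on_press=on_press)
listener.start()
```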
|
||||
|
||||
# Visualize
|
||||
```bash
|
||||
python lerobot/scripts/visualize_dataset.py \
|
||||
--repo-id ${HF_USER}/test \
|
||||
--episode-index 0
|
||||
```
|
||||
|
||||
# Replay
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=piper \
|
||||
--robot.inference_time=false \
|
||||
--control.type=replay \
|
||||
--control.fps=30 \
|
||||
--control.repo_id=${HF_USER}/test \
|
||||
--control.episode=0
|
||||
```
|
||||
|
||||
# Caution
|
||||
|
||||
1. In lerobot/common/datasets/video_utils.py, the vcodec is set to **libopenh264**; check which codecs your FFmpeg build provides with **ffmpeg -codecs** and adjust if needed (a quick PyAV-based check is sketched below).
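
A quick way to list the H.264 encoders your FFmpeg/PyAV build actually provides (a sketch using the `av` package, which is already a project dependency):
```python
import av

# Print the codec names containing "264" (e.g. libopenh264, libx264, h264_nvenc);
# pick one that exists in your build for video_utils.
print(sorted(name for name in av.codecs_available if "264" in name))
```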
|
||||
|
||||
|
||||
# Train
|
||||
For the detailed training workflow, see lerobot_piper_tutorial/2. 🤗 AutoDL训练.pdf.
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
--dataset.repo_id=${HF_USER}/jack \
|
||||
--policy.type=act \
|
||||
--output_dir=outputs/train/act_jack \
|
||||
--job_name=act_jack \
|
||||
--device=cuda \
|
||||
--wandb.enable=true
|
||||
```
|
||||
|
||||
# Fine-tune SmolVLA
```bash
python lerobot/scripts/train.py \
--dataset.repo_id=maic/move_the_bottle_into_ultrasonic_device_with_realman_single \
--policy.path=lerobot/smolvla_base \
--output_dir=outputs/train/smolvla_move_the_bottle_into_ultrasonic_device_with_realman_single \
--job_name=smolvla_move_the_bottle_into_ultrasonic_device_with_realman_single \
--policy.device=cuda \
--wandb.enable=false \
--steps=200000 \
--batch_size=16
```
|
||||
|
||||
|
||||
# Inference
|
||||
Inference reuses the record loop in control_robot.py; set **--robot.inference_time=true** to take the gamepad out of the loop.
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=realman \
|
||||
--robot.inference_time=true \
|
||||
--control.type=record \
|
||||
--control.fps=30 \
|
||||
--control.single_task="move the bottle into ultrasonic device with realman single" \
|
||||
--control.repo_id=maic/move_the_bottle_into_ultrasonic_device_with_realman_single \
|
||||
--control.num_episodes=1 \
|
||||
--control.warmup_time_s=2 \
|
||||
--control.episode_time_s=30 \
|
||||
--control.reset_time_s=10 \
|
||||
--control.push_to_hub=false \
|
||||
--control.policy.path=outputs/train/act_move_the_bottle_into_ultrasonic_device_with_realman_single/checkpoints/100000/pretrained_model
|
||||
```
|
||||
|
||||
|
||||
```bash
python lerobot/scripts/control_robot.py \
    --robot.type=realman \
    --robot.inference_time=true \
    --control.type=record \
    --control.fps=30 \
    --control.single_task="move the bottle into ultrasonic device with realman single" \
    --control.repo_id=maic/eval_smolvla_move_the_bottle_into_ultrasonic_device_with_realman_single \
    --control.num_episodes=1 \
    --control.warmup_time_s=2 \
    --control.episode_time_s=60 \
    --control.reset_time_s=10 \
    --control.push_to_hub=false \
    --control.policy.path=outputs/train/smolvla_move_the_bottle_into_ultrasonic_device_with_realman_single/checkpoints/160000/pretrained_model \
    --control.display_data=true
```

```bash
export ASK_LLM_BASE_URL=https://api.apiyi.com/v1
export ASK_LLM_API_KEY=sk-H5FY8bQn6ZwBCja56280C3A4C8824017A4CdB683Dc990e35
export ASK_LLM_MODEL=claude-sonnet-4-20250514

python lerobot/scripts/control_robot.py \
    --robot.type=realman_dual \
    --robot.inference_time=false \
    --control.type=teleoperate \
    --control.display_data=true \
    --robot.left_end_control_guid="0300b14bff1100003708000010010000" \
    --robot.right_end_control_guid="0300e639bc2000007f50000011010000"
```
31
realman_src/dual_arm_connect_test.py
Normal file
@@ -0,0 +1,31 @@
from Robotic_Arm.rm_robot_interface import *


armleft = RoboticArm(rm_thread_mode_e.RM_TRIPLE_MODE_E)
armright = RoboticArm()


lefthandle = armleft.rm_create_robot_arm("169.254.128.18", 8080)
print("Arm ID:", lefthandle.id)
righthandle = armright.rm_create_robot_arm("169.254.128.19", 8080)
print("Arm ID:", righthandle.id)

# software_info = armleft.rm_get_arm_software_info()
# if software_info[0] == 0:
#     print("\n================== Arm Software Information ==================")
#     print("Arm Model: ", software_info[1]['product_version'])
#     print("Algorithm Library Version: ", software_info[1]['algorithm_info']['version'])
#     print("Control Layer Software Version: ", software_info[1]['ctrl_info']['version'])
#     print("Dynamics Version: ", software_info[1]['dynamic_info']['model_version'])
#     print("Planning Layer Software Version: ", software_info[1]['plan_info']['version'])
#     print("==============================================================\n")
# else:
#     print("\nFailed to get arm software information, Error code: ", software_info[0], "\n")

print("Left: ", armleft.rm_get_current_arm_state())
print("Left: ", armleft.rm_get_arm_all_state())
# NOTE: rm_movej_p() needs a target pose argument; supply one before enabling this call.
# armleft.rm_movej_p()
# print("Right: ", armright.rm_get_current_arm_state())


# Disconnect all connections and destroy the SDK thread
RoboticArm.rm_destory()
15
realman_src/movep_canfd.py
Normal file
@@ -0,0 +1,15 @@
from Robotic_Arm.rm_robot_interface import *
import time

# Instantiate the RoboticArm class
arm = RoboticArm(rm_thread_mode_e.RM_TRIPLE_MODE_E)
# Create the arm connection and print the connection id
handle = arm.rm_create_robot_arm("192.168.3.18", 8080)
print(handle.id)

print(arm.rm_movep_follow([-0.330512, 0.255993, -0.161205, 3.141, 0.0, -1.57]))
time.sleep(2)
# print(arm.rm_movep_follow([0.3, 0, 0.3, 3.14, 0, 0]))
# time.sleep(2)

arm.rm_delete_robot_arm()
0
realman_src/realman_aloha/__init__.py
Normal file
4
realman_src/realman_aloha/shadow_camera/.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
__pycache__/
*.pyc
*.pyo
*.pt
0
realman_src/realman_aloha/shadow_camera/README.md
Normal file
0
realman_src/realman_aloha/shadow_camera/__init__.py
Normal file
33
realman_src/realman_aloha/shadow_camera/pyproject.toml
Normal file
@@ -0,0 +1,33 @@
[tool.poetry]
name = "shadow_camera"
version = "0.1.0"
description = "camera class, currently includes realsense"
readme = "README.md"
authors = ["Shadow <qiuchengzhan@gmail.com>"]
license = "MIT"
#include = ["realman_vision/pytransform/_pytransform.so",]
classifiers = [
    "Operating System :: POSIX :: Linux amd64",
    "Programming Language :: Python :: 3.10",
]

[tool.poetry.dependencies]
python = ">=3.9"
numpy = ">=2.0.1"
opencv-python = ">=4.10.0.84"
pyrealsense2 = ">=2.55.1.6486"

[tool.poetry.dev-dependencies] # dependencies needed during development, e.g. testing and documentation tools
pytest = ">=8.3"
black = ">=24.10.0"

[tool.poetry.plugins."scripts"] # command-line scripts that let users run specified functions from the terminal


[tool.poetry.group.dev.dependencies]


[build-system]
requires = ["poetry-core>=1.8.4"]
build-backend = "poetry.core.masonry.api"
@@ -0,0 +1 @@
__version__ = '0.1.0'
@@ -0,0 +1,38 @@
from abc import ABCMeta, abstractmethod


class BaseCamera(metaclass=ABCMeta):
    """Camera base class."""

    def __init__(self):
        pass

    @abstractmethod
    def start_camera(self):
        """Start the camera."""
        pass

    @abstractmethod
    def stop_camera(self):
        """Stop the camera."""
        pass

    @abstractmethod
    def set_resolution(self, resolution_width, resolution_height):
        """Set the camera's color image resolution."""
        pass

    @abstractmethod
    def set_frame_rate(self, fps):
        """Set the camera's color image frame rate."""
        pass

    @abstractmethod
    def read_frame(self):
        """Read one color frame and one depth frame."""
        pass

    @abstractmethod
    def get_camera_intrinsics(self):
        """Get the intrinsics of the color and depth images."""
        pass
@@ -0,0 +1,38 @@
from shadow_camera import base_camera
import cv2


class OpenCVCamera(base_camera.BaseCamera):
    """Camera class based on OpenCV."""

    def __init__(self, device_id=0):
        """Initialize the video capture.

        Args:
            device_id: camera device ID
        """
        self.cap = cv2.VideoCapture(device_id)

    def get_frame(self):
        """Get the current frame.

        Returns:
            frame: image data of the current frame, or None if no frame could be read
        """
        ret, frame = self.cap.read()
        return frame if ret else None

    def get_frame_info(self):
        """Get information about the current frame.

        Returns:
            dict: frame info dictionary
        """
        width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # OpenCV exposes no CAP_PROP_FRAME_CHANNELS property, so infer the channel count from a frame.
        frame = self.get_frame()
        channels = frame.shape[2] if frame is not None and frame.ndim == 3 else 0

        return {
            'width': width,
            'height': height,
            'channels': channels
        }
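
Note that `BaseCamera` declares abstract methods that `OpenCVCamera` does not override, so instantiating it as written raises `TypeError`. The sketch below is not part of the repository: it assumes the class is importable as `shadow_camera.opencv_camera` and shows the minimal placeholder overrides needed to grab a frame from a webcam at index 0.

```python
# Minimal sketch (assumptions: module name shadow_camera.opencv_camera, webcam at index 0).
import cv2

from shadow_camera import opencv_camera


class UsableOpenCVCamera(opencv_camera.OpenCVCamera):
    def start_camera(self):
        pass  # cv2.VideoCapture is already opened in __init__

    def stop_camera(self):
        self.cap.release()

    def set_resolution(self, resolution_width, resolution_height):
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, resolution_width)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution_height)

    def set_frame_rate(self, fps):
        self.cap.set(cv2.CAP_PROP_FPS, fps)

    def read_frame(self):
        return self.get_frame(), None  # a plain webcam has no depth stream

    def get_camera_intrinsics(self):
        return None, None  # unknown for a generic UVC camera


if __name__ == "__main__":
    cam = UsableOpenCVCamera(device_id=0)
    frame = cam.get_frame()
    print(None if frame is None else frame.shape)
    cam.stop_camera()
```
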
@@ -0,0 +1,280 @@
import time
import logging
import numpy as np
import pyrealsense2 as rs
from shadow_camera import base_camera

# Logging configuration
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)


class RealSenseCamera(base_camera.BaseCamera):
    """Intel RealSense camera class."""

    def __init__(self, serial_num=None, is_depth_frame=False):
        """
        Initialize the camera object.
        :param serial_num: camera serial number, defaults to None
        """
        super().__init__()
        self._color_resolution = [640, 480]
        self._depth_resolution = [640, 480]
        self._color_frames_rate = 30
        self._depth_frames_rate = 15
        self.timestamp = 0
        self.color_timestamp = 0
        self.depth_timestamp = 0
        self._colorizer = rs.colorizer()
        self._config = rs.config()
        self.is_depth_frame = is_depth_frame
        self.camera_on = False
        self.serial_num = serial_num

    def get_serial_num(self):
        serial_num = {}
        context = rs.context()
        devices = context.query_devices()  # all connected devices
        if len(context.devices) > 0:
            for i, device in enumerate(devices):
                serial_num[i] = device.get_info(rs.camera_info.serial_number)

        logging.info(f"Detected serial numbers: {serial_num}")
        return serial_num

    def _set_config(self):
        if self.serial_num is not None:
            logging.info(f"Setting device with serial number: {self.serial_num}")
            self._config.enable_device(self.serial_num)

        self._config.enable_stream(
            rs.stream.color,
            self._color_resolution[0],
            self._color_resolution[1],
            rs.format.rgb8,
            self._color_frames_rate,
        )
        if self.is_depth_frame:
            self._config.enable_stream(
                rs.stream.depth,
                self._depth_resolution[0],
                self._depth_resolution[1],
                rs.format.z16,
                self._depth_frames_rate,
            )

    def start_camera(self):
        """
        Start the camera and fetch the intrinsics; if frame alignment is used later, both intrinsics are the color intrinsics.
        """
        self._pipeline = rs.pipeline()
        if self.is_depth_frame:
            self.point_cloud = rs.pointcloud()
            self._align = rs.align(rs.stream.color)
        self._set_config()

        self.profile = self._pipeline.start(self._config)

        if self.is_depth_frame:
            self._depth_intrinsics = (
                self.profile.get_stream(rs.stream.depth)
                .as_video_stream_profile()
                .get_intrinsics()
            )

        self._color_intrinsics = (
            self.profile.get_stream(rs.stream.color)
            .as_video_stream_profile()
            .get_intrinsics()
        )
        self.camera_on = True
        logging.info("Camera started successfully")
        logging.info(
            f"Camera started with color resolution: {self._color_resolution}, depth resolution: {self._depth_resolution}"
        )
        logging.info(
            f"Color FPS: {self._color_frames_rate}, Depth FPS: {self._depth_frames_rate}"
        )

    def stop_camera(self):
        """
        Stop the camera.
        """
        self._pipeline.stop()
        self.camera_on = False
        logging.info("Camera stopped")

    def set_resolution(self, color_resolution, depth_resolution):
        self._color_resolution = color_resolution
        self._depth_resolution = depth_resolution
        logging.info(
            "Optional color resolution:"
            "[320, 180] [320, 240] [424, 240] [640, 360] [640, 480]"
            "[848, 480] [960, 540] [1280, 720] [1920, 1080]"
        )
        logging.info(
            "Optional depth resolution:"
            "[256, 144] [424, 240] [480, 270] [640, 360] [640, 400]"
            "[640, 480] [848, 100] [848, 480] [1280, 720] [1280, 800]"
        )
        logging.info(f"Set color resolution to: {color_resolution}")
        logging.info(f"Set depth resolution to: {depth_resolution}")

    def set_frame_rate(self, color_fps, depth_fps):
        self._color_frames_rate = color_fps
        self._depth_frames_rate = depth_fps
        logging.info("Optional color fps: 6 15 30 60 ")
        logging.info("Optional depth fps: 6 15 30 60 90 100 300")
        logging.info(f"Set color FPS to: {color_fps}")
        logging.info(f"Set depth FPS to: {depth_fps}")

    # TODO: compensate by adjusting the white balance
    # def set_exposure(self, exposure):

    def read_frame(self, is_color=True, is_depth=True, is_colorized_depth=False, is_point_cloud=False):
        """
        Read one color frame and one depth frame.
        :return: NumPy arrays of the color image and depth image
        """
        while not self.camera_on:
            time.sleep(0.5)
        color_image = None
        depth_image = None
        colorized_depth = None
        point_cloud = None
        try:
            frames = self._pipeline.wait_for_frames()
            if is_color:
                color_frame = frames.get_color_frame()
                color_image = np.asanyarray(color_frame.get_data())
            else:
                color_image = None

            if is_depth:
                depth_frame = frames.get_depth_frame()
                depth_image = np.asanyarray(depth_frame.get_data())
            else:
                depth_image = None

            colorized_depth = (
                np.asanyarray(self._colorizer.colorize(depth_frame).get_data())
                if is_colorized_depth
                else None
            )
            point_cloud = (
                np.asanyarray(self.point_cloud.calculate(depth_frame).get_vertices())
                if is_point_cloud
                else None
            )
            # Timestamps are in ms; after alignment the color timestamp > depth = aligned, so use the color one
            self.color_timestamp = color_frame.get_timestamp()
            if self.is_depth_frame:
                self.depth_timestamp = depth_frame.get_timestamp()

        except Exception as e:
            logging.warning(e)
            if "Frame didn't arrive within 5000" in str(e):
                logging.warning("Frame didn't arrive within 5000ms, resetting device")
                self.stop_camera()
                self.start_camera()

        return color_image, depth_image, colorized_depth, point_cloud

    def read_align_frame(self, is_color=True, is_depth=True, is_colorized_depth=False, is_point_cloud=False):
        """
        Read one aligned color frame and depth frame.
        :return: NumPy arrays of the color image and depth image
        """
        while not self.camera_on:
            time.sleep(0.5)
        try:
            frames = self._pipeline.wait_for_frames()
            aligned_frames = self._align.process(frames)
            aligned_color_frame = aligned_frames.get_color_frame()
            self._aligned_depth_frame = aligned_frames.get_depth_frame()

            color_image = np.asanyarray(aligned_color_frame.get_data())
            depth_image = np.asanyarray(self._aligned_depth_frame.get_data())
            colorized_depth = (
                np.asanyarray(
                    self._colorizer.colorize(self._aligned_depth_frame).get_data()
                )
                if is_colorized_depth
                else None
            )

            if is_point_cloud:
                points = self.point_cloud.calculate(self._aligned_depth_frame)
                # Convert the tuple data to a NumPy array
                point_cloud = np.array(
                    [[point[0], point[1], point[2]] for point in points.get_vertices()]
                )
            else:
                point_cloud = None

            # Timestamps are in ms; after alignment the color timestamp > depth = aligned, so use the color one
            self.timestamp = aligned_color_frame.get_timestamp()

            return color_image, depth_image, colorized_depth, point_cloud

        except Exception as e:
            if "Frame didn't arrive within 5000" in str(e):
                logging.warning("Frame didn't arrive within 5000ms, resetting device")
                self.stop_camera()
                self.start_camera()
            # device = self.profile.get_device()
            # device.hardware_reset()

    def get_camera_intrinsics(self):
        """
        Get the intrinsics of the color image and depth image.
        :return: intrinsics of the color image and depth image
        """
        # Width/height: .width, .height; focal length: .fx, .fy; principal point: .ppx, .ppy; distortion: .coeffs
        logging.info("Getting camera intrinsics")
        logging.info(
            "Width and height: .width, .height; Focal length: .fx, .fy; Pixel coordinates: .ppx, .ppy; Distortion coefficient: .coeffs"
        )
        return self._color_intrinsics, self._depth_intrinsics

    def get_3d_camera_coordinate(self, depth_pixel, align=False):
        """
        Get the 3D coordinate in the depth camera frame.
        :param depth_pixel: depth pixel coordinate
        :param align: whether the frames were aligned

        :return: depth value and camera coordinate
        """
        if not hasattr(self, "_aligned_depth_frame"):
            raise AttributeError(
                "Aligned depth frame not set. Call read_align_frame() first."
            )

        distance = self._aligned_depth_frame.get_distance(
            depth_pixel[0], depth_pixel[1]
        )
        intrinsics = self._color_intrinsics if align else self._depth_intrinsics
        camera_coordinate = rs.rs2_deproject_pixel_to_point(
            intrinsics, depth_pixel, distance
        )
        return distance, camera_coordinate


if __name__ == "__main__":

    camera = RealSenseCamera(is_depth_frame=False)
    camera.get_serial_num()
    camera.start_camera()
    # camera.set_frame_rate(60, 60)
    # the depth stream is not enabled when is_depth_frame=False, so skip the depth read
    color_image, depth_image, colorized_depth, point_cloud = camera.read_frame(is_depth=False)
    camera.stop_camera()
    logging.info(f"Color image shape: {color_image.shape}")
    # logging.info(f"Depth image shape: {depth_image.shape}")
    # logging.info(f"Colorized depth image shape: {colorized_depth.shape}")
    # logging.info(f"Point cloud shape: {point_cloud.shape}")
    logging.info(f"Color timestamp: {camera.timestamp}")
    # logging.info(f"Depth timestamp: {camera.depth_timestamp}")
    logging.info(f"Color timestamp: {camera.color_timestamp}")
    # logging.info(f"Depth timestamp: {camera.depth_timestamp}")
    logging.info("Test passed")
@@ -0,0 +1,101 @@
import pyrealsense2 as rs
import numpy as np
import h5py
import time
import threading
import keyboard  # used to listen for keyboard input

# Global state
is_recording = False  # flag controlling the recording state
color_images = []  # stored color images
depth_images = []  # stored depth images
timestamps = []  # stored timestamps

# Configure the D435 camera
def configure_camera():
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)  # color stream
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)  # depth stream
    pipeline.start(config)
    return pipeline

# Listen for keyboard input to control the recording state
def listen_for_keyboard():
    global is_recording
    while True:
        if keyboard.is_pressed('s'):  # press 's' to start recording
            is_recording = True
            print("Recording started.")
            time.sleep(0.5)  # avoid repeated triggering
        elif keyboard.is_pressed('q'):  # press 'q' to stop recording
            is_recording = False
            print("Recording stopped.")
            time.sleep(0.5)  # avoid repeated triggering
        elif keyboard.is_pressed('e'):  # press 'e' to exit the program
            print("Exiting program.")
            exit()
        time.sleep(0.1)

# Capture image data
def capture_frames(pipeline):
    global is_recording, color_images, depth_images, timestamps
    try:
        while True:
            if is_recording:
                frames = pipeline.wait_for_frames()
                color_frame = frames.get_color_frame()
                depth_frame = frames.get_depth_frame()

                if not color_frame or not depth_frame:
                    continue

                # Current timestamp
                timestamp = time.time()

                # Convert the images to numpy arrays
                color_image = np.asanyarray(color_frame.get_data())
                depth_image = np.asanyarray(depth_frame.get_data())

                # Store the data
                color_images.append(color_image)
                depth_images.append(depth_image)
                timestamps.append(timestamp)

                print(f"Captured frame at {timestamp}")

            else:
                time.sleep(0.1)  # wait while not recording

    finally:
        pipeline.stop()

# Save to an HDF5 file
def save_to_hdf5(color_images, depth_images, timestamps, filename="output.h5"):
    with h5py.File(filename, "w") as f:
        f.create_dataset("color_images", data=np.array(color_images), compression="gzip")
        f.create_dataset("depth_images", data=np.array(depth_images), compression="gzip")
        f.create_dataset("timestamps", data=np.array(timestamps), compression="gzip")
    print(f"Data saved to {filename}")

# Main function
def main():
    global is_recording, color_images, depth_images, timestamps

    # Start the keyboard listener thread
    keyboard_thread = threading.Thread(target=listen_for_keyboard)
    keyboard_thread.daemon = True
    keyboard_thread.start()

    # Configure the camera
    pipeline = configure_camera()

    # Start capturing images
    capture_frames(pipeline)

    # Save the data after recording ends
    if color_images and depth_images and timestamps:
        save_to_hdf5(color_images, depth_images, timestamps, "mobile_aloha_data.h5")

if __name__ == "__main__":
    main()
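
For reference, a minimal sketch of reading such a recording back. The dataset names match `save_to_hdf5()` above and the filename matches the default passed in `main()`; the shapes in the comments follow the 640x480 stream configuration and are otherwise an assumption.

```python
import h5py
import numpy as np

# Read back the file written by save_to_hdf5() above.
with h5py.File("mobile_aloha_data.h5", "r") as f:
    color = f["color_images"][:]   # (N, 480, 640, 3) uint8 BGR frames
    depth = f["depth_images"][:]   # (N, 480, 640) uint16 depth in sensor units
    stamps = f["timestamps"][:]    # (N,) float seconds since the epoch

print(color.shape, depth.shape)
if len(stamps) > 1:
    print("mean frame interval:", np.diff(stamps).mean(), "s")
```
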
152
realman_src/realman_aloha/shadow_camera/test/test_camera.py
Normal file
@@ -0,0 +1,152 @@
import os
import cv2
import time
import numpy as np
from os import path
import pyrealsense2 as rs
from shadow_camera import realsense
import logging


def test_camera():
    camera = realsense.RealSenseCamera('241122071186')
    camera.start_camera()

    while True:
        # result = camera.read_align_frame()
        # if result is None:
        #     print('is None')
        #     continue
        # start_time = time.time()
        color_image, depth_image, colorized_depth, vtx = camera.read_frame()
        color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)

        print(f"color_image: {color_image.shape}")
        # print(f"Time: {end_time - start_time}")
        cv2.imshow("bgr_image", color_image)

        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    camera.stop_camera()


def test_get_serial_num():
    camera = realsense.RealSenseCamera()
    device = camera.get_serial_num()


class CameraCapture:
    def __init__(self, camera_serial_num=None, save_dir="./save"):
        self._camera_serial_num = camera_serial_num
        self._color_save_dir = path.join(save_dir, "color")
        self._depth_save_dir = path.join(save_dir, "depth")
        os.makedirs(save_dir, exist_ok=True)
        os.makedirs(self._color_save_dir, exist_ok=True)
        os.makedirs(self._depth_save_dir, exist_ok=True)

    def get_serial_num(self):
        self._camera_serial_num = {}
        camera_names = ["left", "right", "head", "table"]
        context = rs.context()
        devices = context.query_devices()  # all connected devices
        if len(context.devices) > 0:
            for i, device in enumerate(devices):
                self._camera_serial_num[camera_names[i]] = device.get_info(
                    rs.camera_info.serial_number
                )
        print(self._camera_serial_num)

        return self._camera_serial_num

    def start_camera(self):
        if self._camera_serial_num is None:
            self.get_serial_num()
        self._camera_left = realsense.RealSenseCamera(self._camera_serial_num["left"])
        self._camera_right = realsense.RealSenseCamera(self._camera_serial_num["right"])
        self._camera_head = realsense.RealSenseCamera(self._camera_serial_num["head"])

        self._camera_left.start_camera()
        self._camera_right.start_camera()
        self._camera_head.start_camera()

    def stop_camera(self):
        self._camera_left.stop_camera()
        self._camera_right.stop_camera()
        self._camera_head.stop_camera()

    def _save_datas(self, timestamp, color_image, depth_image, camera_name):
        color_filename = path.join(
            self._color_save_dir, f"{timestamp}" + camera_name + ".jpg"
        )
        depth_filename = path.join(
            self._depth_save_dir, f"{timestamp}" + camera_name + ".png"
        )
        cv2.imwrite(color_filename, color_image)
        cv2.imwrite(depth_filename, depth_image)

    def capture_images(self):
        while True:
            (
                color_image_left,
                depth_image_left,
                _,
                _,
            ) = self._camera_left.read_align_frame()
            (
                color_image_right,
                depth_image_right,
                _,
                _,
            ) = self._camera_right.read_align_frame()
            (
                color_image_head,
                depth_image_head,
                _,
                point_cloud3,
            ) = self._camera_head.read_align_frame()

            bgr_color_image_left = cv2.cvtColor(color_image_left, cv2.COLOR_RGB2BGR)
            bgr_color_image_right = cv2.cvtColor(color_image_right, cv2.COLOR_RGB2BGR)
            bgr_color_image_head = cv2.cvtColor(color_image_head, cv2.COLOR_RGB2BGR)

            timestamp = time.time() * 1000

            cv2.imshow("Camera left", bgr_color_image_left)
            cv2.imshow("Camera right", bgr_color_image_right)
            cv2.imshow("Camera head", bgr_color_image_head)

            # self._save_datas(
            #     timestamp, bgr_color_image_left, depth_image_left, "left"
            # )
            # self._save_datas(
            #     timestamp, bgr_color_image_right, depth_image_right, "right"
            # )
            # self._save_datas(
            #     timestamp, bgr_color_image_head, depth_image_head, "head"
            # )

            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        cv2.destroyAllWindows()


if __name__ == "__main__":
    # test_camera()
    test_get_serial_num()
    """
    Pass camera serial numbers to assign the left/right cameras:
    dict: {'left': '241222075132', 'right': '242322076532', 'head': '242322076532'}
    Save path:
    str: ./save
    If the input is empty, serial numbers are assigned automatically (left/right/head not specified) and the save path is ./save
    """

    # capture = CameraCapture()
    # capture.get_serial_num()
    # test_get_serial_num()

    # capture.start_camera()
    # capture.capture_images()
    # capture.stop_camera()
@@ -0,0 +1,71 @@
import pytest
import pyrealsense2 as rs
from shadow_camera.realsense import RealSenseCamera


class TestRealSenseCamera:
    @pytest.fixture(autouse=True)
    def setup_camera(self):
        self.camera = RealSenseCamera()

    def test_get_serial_num(self):
        serial_nums = self.camera.get_serial_num()
        assert isinstance(serial_nums, dict)
        assert len(serial_nums) > 0

    def test_start_stop_camera(self):
        self.camera.start_camera()
        assert self.camera.camera_on is True
        self.camera.stop_camera()
        assert self.camera.camera_on is False

    def test_set_resolution(self):
        color_resolution = [1280, 720]
        depth_resolution = [1280, 720]
        self.camera.set_resolution(color_resolution, depth_resolution)
        assert self.camera._color_resolution == color_resolution
        assert self.camera._depth_resolution == depth_resolution

    def test_set_frame_rate(self):
        color_fps = 60
        depth_fps = 60
        self.camera.set_frame_rate(color_fps, depth_fps)
        assert self.camera._color_frames_rate == color_fps
        assert self.camera._depth_frames_rate == depth_fps

    def test_read_frame(self):
        self.camera.start_camera()
        color_image, depth_image, colorized_depth, point_cloud = (
            self.camera.read_frame()
        )
        assert color_image is not None
        assert depth_image is not None
        self.camera.stop_camera()

    def test_read_align_frame(self):
        self.camera.start_camera()
        color_image, depth_image, colorized_depth, point_cloud = (
            self.camera.read_align_frame()
        )
        assert color_image is not None
        assert depth_image is not None
        self.camera.stop_camera()

    def test_get_camera_intrinsics(self):
        self.camera.start_camera()
        color_intrinsics, depth_intrinsics = self.camera.get_camera_intrinsics()
        assert color_intrinsics is not None
        assert depth_intrinsics is not None
        self.camera.stop_camera()

    def test_get_3d_camera_coordinate(self):
        self.camera.start_camera()
        # Call read_align_frame first so that _aligned_depth_frame is set
        self.camera.read_align_frame()
        depth_pixel = [320, 240]
        distance, camera_coordinate = self.camera.get_3d_camera_coordinate(
            depth_pixel, align=True
        )
        assert distance > 0
        assert len(camera_coordinate) == 3
        self.camera.stop_camera()