Compare commits


16 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| nepyope | a72db65f62 | working trained policy | 2025-04-15 15:41:24 +02:00 |
| nepyope | 2488efa8bd | add hopejr env | 2025-04-13 14:30:29 +02:00 |
| nepyope | 5d9b4fcae9 | hopejr_real | 2025-04-13 14:28:05 +02:00 |
| nepyope | c70054c0b9 | added hopejr yaml | 2025-04-13 14:10:27 +02:00 |
| nepyope | 62b1896304 | rip this branch | 2025-04-12 10:35:00 +02:00 |
| Sebastian-Contrari | 0306e18640 | fix teleop | 2025-04-11 16:28:06 +02:00 |
| Sebastian-Contrari | 54b685053e | test scs | 2025-04-10 17:14:40 +02:00 |
| Sebastian-Contrari | 5b7e25ed18 | push new exoskeleton code | 2025-04-10 16:21:34 +02:00 |
| Sebastian-Contrari | f6e862d421 | hopejr teleop code | 2025-04-02 16:19:15 +02:00 |
| Pepijn | 91f549b2ce | use float32 instead of int | 2025-03-18 16:23:33 +01:00 |
| nepyope | f945641de9 | added SM8512BL | 2025-01-14 14:19:38 +01:00 |
| nepyope | 5bd41a3dca | added feetech failsafe comment | 2025-01-09 18:14:01 +01:00 |
| Remi Cadene | f996a13f70 | Add test3 test4 | 2024-12-29 13:30:17 +01:00 |
| Remi Cadene | 743ebfa7c1 | Cremaillaire HF | 2024-12-24 11:33:55 +01:00 |
| Remi Cadene | 2c45660d77 | WIP | 2024-11-27 13:59:37 +01:00 |
| Remi Cadene | 9dd4414c6e | fix autocalib moss | 2024-10-26 10:43:52 +02:00 |
182 changed files with 13589 additions and 10997 deletions

View File

@@ -21,7 +21,7 @@ Provide a simple way for the reviewer to try out your changes.
Examples:
```bash
pytest -sx tests/test_stuff.py::test_something
DATA_DIR=tests/data pytest -sx tests/test_stuff.py::test_something
```
```bash
python lerobot/scripts/train.py --some.option=true

View File

@@ -7,8 +7,10 @@ on:
schedule:
- cron: "0 2 * * *"
# env:
env:
DATA_DIR: tests/data
# SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}
jobs:
run_all_tests_cpu:
name: CPU
@@ -28,9 +30,13 @@ jobs:
working-directory: /lerobot
steps:
- name: Tests
env:
DATA_DIR: tests/data
run: pytest -v --cov=./lerobot --disable-warnings tests
- name: Tests end-to-end
env:
DATA_DIR: tests/data
run: make test-end-to-end

View File

@@ -50,7 +50,7 @@ jobs:
uses: actions/checkout@v3
- name: Install poetry
run: pipx install "poetry<2.0.0"
run: pipx install poetry
- name: Poetry check
run: poetry check
@@ -64,7 +64,7 @@ jobs:
uses: actions/checkout@v3
- name: Install poetry
run: pipx install "poetry<2.0.0"
run: pipx install poetry
- name: Install poetry-relax
run: poetry self add poetry-relax
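Main pins `"poetry<2.0.0"` in these jobs, while the branch side installs whatever Poetry is latest. A minimal sketch for checking which major version actually got installed before relying on 1.x behaviour; it assumes `poetry` is on `PATH` and prints its version in the usual `Poetry (version X.Y.Z)` form:

```python
# Sketch: detect the installed Poetry major version, e.g. to reproduce why
# main pins "poetry<2.0.0". Assumes `poetry` is on PATH.
import subprocess

out = subprocess.run(["poetry", "--version"], capture_output=True, text=True, check=True).stdout
version = out.strip().rstrip(")").split("version ")[-1]  # "Poetry (version 1.8.3)" -> "1.8.3"
if int(version.split(".")[0]) >= 2:
    print(f"Poetry {version} may break workflows written against 1.x")
```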

View File

@@ -29,6 +29,7 @@ jobs:
name: Pytest
runs-on: ubuntu-latest
env:
DATA_DIR: tests/data
MUJOCO_GL: egl
steps:
- uses: actions/checkout@v4
@@ -69,6 +70,7 @@ jobs:
name: Pytest (minimal install)
runs-on: ubuntu-latest
env:
DATA_DIR: tests/data
MUJOCO_GL: egl
steps:
- uses: actions/checkout@v4
@@ -101,39 +103,40 @@ jobs:
-W ignore::UserWarning:gymnasium.utils.env_checker:247 \
&& rm -rf tests/outputs outputs
# TODO(aliberts, rcadene): redesign after v2 migration / removing hydra
# end-to-end:
# name: End-to-end
# runs-on: ubuntu-latest
# env:
# MUJOCO_GL: egl
# steps:
# - uses: actions/checkout@v4
# with:
# lfs: true # Ensure LFS files are pulled
# - name: Install apt dependencies
# # portaudio19-dev is needed to install pyaudio
# run: |
# sudo apt-get update && \
# sudo apt-get install -y libegl1-mesa-dev portaudio19-dev
end-to-end:
name: End-to-end
runs-on: ubuntu-latest
env:
DATA_DIR: tests/data
MUJOCO_GL: egl
steps:
- uses: actions/checkout@v4
with:
lfs: true # Ensure LFS files are pulled
# - name: Install poetry
# run: |
# pipx install poetry && poetry config virtualenvs.in-project true
# echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH
- name: Install apt dependencies
# portaudio19-dev is needed to install pyaudio
run: |
sudo apt-get update && \
sudo apt-get install -y libegl1-mesa-dev portaudio19-dev
# - name: Set up Python 3.10
# uses: actions/setup-python@v5
# with:
# python-version: "3.10"
# cache: "poetry"
- name: Install poetry
run: |
pipx install poetry && poetry config virtualenvs.in-project true
echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH
# - name: Install poetry dependencies
# run: |
# poetry install --all-extras
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"
cache: "poetry"
# - name: Test end-to-end
# run: |
# make test-end-to-end \
# && rm -rf outputs
- name: Install poetry dependencies
run: |
poetry install --all-extras
- name: Test end-to-end
run: |
make test-end-to-end \
&& rm -rf outputs

.gitignore (vendored), 1 line changed
View File

@@ -125,7 +125,6 @@ celerybeat.pid
# Environments
.env
.venv
env/
venv/
env.bak/
venv.bak/

View File

@@ -3,7 +3,7 @@ default_language_version:
python: python3.10
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
rev: v4.6.0
hooks:
- id: check-added-large-files
- id: debug-statements
@@ -14,11 +14,11 @@ repos:
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/asottile/pyupgrade
rev: v3.19.0
rev: v3.16.0
hooks:
- id: pyupgrade
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.8.2
rev: v0.5.2
hooks:
- id: ruff
args: [--fix]
@@ -32,6 +32,6 @@ repos:
- "--check"
- "--no-update"
- repo: https://github.com/gitleaks/gitleaks
rev: v8.21.2
rev: v8.18.4
hooks:
- id: gitleaks

View File

@@ -267,7 +267,7 @@ We use `pytest` in order to run the tests. From the root of the
repository, here's how to run tests with `pytest` for the library:
```bash
python -m pytest -sv ./tests
DATA_DIR="tests/data" python -m pytest -sv ./tests
```
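For context on the two invocations above, here is a hedged sketch of driving them from Python, e.g. in a helper script. Only the pytest command and the `DATA_DIR` variable come from the text; the wrapper itself is hypothetical:

```python
# Sketch: run the test suite twice, without and with DATA_DIR, mirroring
# the two commands documented above.
import os
import subprocess

cmd = ["python", "-m", "pytest", "-sv", "./tests"]

# Against data fetched from the hub:
subprocess.run(cmd, check=False)

# Against the small artifacts committed under tests/data:
subprocess.run(cmd, env={**os.environ, "DATA_DIR": "tests/data"}, check=False)
```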

View File

@@ -55,9 +55,9 @@
<table>
<tr>
<td><img src="media/gym/aloha_act.gif" width="100%" alt="ACT policy on ALOHA env"/></td>
<td><img src="media/gym/simxarm_tdmpc.gif" width="100%" alt="TDMPC policy on SimXArm env"/></td>
<td><img src="media/gym/pusht_diffusion.gif" width="100%" alt="Diffusion policy on PushT env"/></td>
<td><img src="http://remicadene.com/assets/gif/aloha_act.gif" width="100%" alt="ACT policy on ALOHA env"/></td>
<td><img src="http://remicadene.com/assets/gif/simxarm_tdmpc.gif" width="100%" alt="TDMPC policy on SimXArm env"/></td>
<td><img src="http://remicadene.com/assets/gif/pusht_diffusion.gif" width="100%" alt="Diffusion policy on PushT env"/></td>
</tr>
<tr>
<td align="center">ACT policy on ALOHA env</td>
@@ -68,7 +68,7 @@
### Acknowledgment
- Thanks to Tony Zhao, Zipeng Fu and colleagues for open sourcing ACT policy, ALOHA environments and datasets. Ours are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha) and [Mobile ALOHA](https://mobile-aloha.github.io).
- Thanks to Tony Zaho, Zipeng Fu and colleagues for open sourcing ACT policy, ALOHA environments and datasets. Ours are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha) and [Mobile ALOHA](https://mobile-aloha.github.io).
- Thanks to Cheng Chi, Zhenjia Xu and colleagues for open sourcing Diffusion policy, Pusht environment and datasets, as well as UMI datasets. Ours are adapted from [Diffusion Policy](https://diffusion-policy.cs.columbia.edu) and [UMI Gripper](https://umi-gripper.github.io).
- Thanks to Nicklas Hansen, Yunhai Feng and colleagues for open sourcing TDMPC policy, Simxarm environments and datasets. Ours are adapted from [TDMPC](https://github.com/nicklashansen/tdmpc) and [FOWM](https://www.yunhaifeng.com/FOWM).
- Thanks to Antonio Loquercio and Ashish Kumar for their early support.
@@ -144,7 +144,7 @@ wandb login
### Visualize datasets
Check out [example 1](./examples/1_load_lerobot_dataset.py) that illustrates how to use our dataset class which automatically downloads data from the Hugging Face hub.
Check out [example 1](./examples/1_load_lerobot_dataset.py) that illustrates how to use our dataset class which automatically download data from the Hugging Face hub.
You can also locally visualize episodes from a dataset on the hub by executing our script from the command line:
```bash
@@ -153,12 +153,10 @@ python lerobot/scripts/visualize_dataset.py \
--episode-index 0
```
or from a dataset in a local folder with the `root` option and the `--local-files-only` (in the following case the dataset will be searched for in `./my_local_data_dir/lerobot/pusht`)
or from a dataset in a local folder with the root `DATA_DIR` environment variable (in the following case the dataset will be searched for in `./my_local_data_dir/lerobot/pusht`)
```bash
python lerobot/scripts/visualize_dataset.py \
DATA_DIR='./my_local_data_dir' python lerobot/scripts/visualize_dataset.py \
--repo-id lerobot/pusht \
--root ./my_local_data_dir \
--local-files-only 1 \
--episode-index 0
```
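The two command variants above map to two loading styles in Python. A sketch of both, using the `LeRobotDataset` import path that appears elsewhere in this compare; the argument names follow the flags in the diff and may differ in practice:

```python
import os

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

# Newer style (main): explicit arguments.
dataset = LeRobotDataset("lerobot/pusht", root="./my_local_data_dir", local_files_only=True)

# Older style (this branch): DATA_DIR environment variable.
os.environ["DATA_DIR"] = "./my_local_data_dir"
dataset = LeRobotDataset("lerobot/pusht")
```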
@@ -210,10 +208,12 @@ dataset attributes:
A `LeRobotDataset` is serialised using several widespread file formats for each of its parts, namely:
- hf_dataset stored using Hugging Face datasets library serialization to parquet
- videos are stored in mp4 format to save space
- metadata are stored in plain json/jsonl files
- videos are stored in mp4 format to save space or png files
- episode_data_index saved using `safetensor` tensor serialization format
- stats saved using `safetensor` tensor serialization format
- info are saved using JSON
Dataset can be uploaded/downloaded from the HuggingFace hub seamlessly. To work on a local dataset, you can use the `local_files_only` argument and specify its location with the `root` argument if it's not in the default `~/.cache/huggingface/lerobot` location.
Dataset can be uploaded/downloaded from the HuggingFace hub seamlessly. To work on a local dataset, you can set the `DATA_DIR` environment variable to your root dataset folder as illustrated in the above section on dataset visualization.
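A rough sketch of what those serialized parts look like on disk for a cached dataset. The directory layout (the `meta_data/` file names in particular) is an assumption inferred from the formats listed above, not taken from the diff:

```python
import json
from pathlib import Path

from safetensors.torch import load_file

root = Path.home() / ".cache" / "huggingface" / "lerobot" / "lerobot" / "pusht"
info = json.loads((root / "meta_data" / "info.json").read_text())  # info -> JSON
stats = load_file(root / "meta_data" / "stats.safetensors")        # stats -> safetensors
print(info.get("fps"), list(stats)[:3])
```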
### Evaluate a pretrained policy
@@ -280,7 +280,7 @@ To use wandb for logging training and evaluation curves, make sure you've run `w
wandb.enable=true
```
A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](https://github.com/huggingface/lerobot/blob/main/examples/4_train_policy_with_script.md#typical-logs-and-metrics) for the explanation of some commonly used metrics in logs.
A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](https://github.com/huggingface/lerobot/blob/main/examples/4_train_policy_with_script.md#typical-logs-and-metrics) for the explaination of some commonly used metrics in logs.
![](media/wandb.png)

View File

@@ -21,7 +21,7 @@ How to decode videos?
## Variables
**Image content & size**
We don't expect the same optimal settings for a dataset of images from a simulation, or from real-world in an apartment, or in a factory, or outdoor, or with lots of moving objects in the scene, etc. Similarly, loading times might not vary linearly with the image size (resolution).
We don't expect the same optimal settings for a dataset of images from a simulation, or from real-world in an appartment, or in a factory, or outdoor, or with lots of moving objects in the scene, etc. Similarly, loading times might not vary linearly with the image size (resolution).
For these reasons, we run this benchmark on four representative datasets:
- `lerobot/pusht_image`: (96 x 96 pixels) simulation with simple geometric shapes, fixed camera.
- `aliberts/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera.
@@ -63,7 +63,7 @@ This of course is affected by the `-g` parameter during encoding, which specifie
Note that this differs significantly from a typical use case like watching a movie, in which every frame is loaded sequentially from the beginning to the end and it's acceptable to have big values for `-g`.
Additionally, because some policies might request single timestamps that are a few frames apart, we also have the following scenario:
Additionally, because some policies might request single timestamps that are a few frames appart, we also have the following scenario:
- `2_frames_4_space`: 2 frames with 4 consecutive frames of spacing in between (e.g `[t, t + 5 / fps]`),
However, due to how video decoding is implemented with `pyav`, we don't have access to an accurate seek so in practice this scenario is essentially the same as `6_frames` since all 6 frames between `t` and `t + 5 / fps` will be decoded.
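The scenarios translate directly into timestamp offsets relative to a query time `t`. A small sketch, where only `2_frames_4_space` is spelled out in the text and the other entries are inferred from their names:

```python
fps = 30
scenarios = {
    "1_frame": [0.0],
    "2_frames": [0.0, 1 / fps],
    "2_frames_4_space": [0.0, 5 / fps],  # 4 consecutive frames of spacing in between
    "6_frames": [i / fps for i in range(6)],
}
for name, offsets in scenarios.items():
    print(name, [round(t, 4) for t in offsets])
```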
@@ -85,8 +85,8 @@ However, due to how video decoding is implemented with `pyav`, we don't have acc
**Average Structural Similarity Index Measure (higher is better)**
`avg_ssim` evaluates the perceived quality of images by comparing luminance, contrast, and structure. SSIM values range from -1 to 1, where 1 indicates perfect similarity.
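For reference, SSIM as used for `avg_ssim` can be computed with scikit-image. A sketch assuming channel-last uint8 frames; this is not the benchmark's actual code:

```python
import numpy as np
from skimage.metrics import structural_similarity

original = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
decoded = original.copy()  # stand-in for a round-tripped frame
score = structural_similarity(original, decoded, channel_axis=2)
print(f"SSIM: {score:.4f}")  # 1.0 for identical images
```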
One aspect that can't be measured here with those metrics is the compatibility of the encoding across platforms, in particular on web browser, for visualization purposes.
h264, h265 and AV1 are all commonly used codecs and should not pose an issue. However, the chroma subsampling (`pix_fmt`) format might affect compatibility:
One aspect that can't be measured here with those metrics is the compatibility of the encoding accross platforms, in particular on web browser, for visualization purposes.
h264, h265 and AV1 are all commonly used codecs and should not be pose an issue. However, the chroma subsampling (`pix_fmt`) format might affect compatibility:
- `yuv420p` is more widely supported across various platforms, including web browsers.
- `yuv444p` offers higher color fidelity but might not be supported as broadly.
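To see where `pix_fmt` enters an encode, here is a hedged ffmpeg invocation from Python; the flags are standard ffmpeg options and the input/output paths are placeholders:

```python
import subprocess

subprocess.run(
    [
        "ffmpeg", "-y",
        "-framerate", "30",
        "-i", "frame_%06d.png",
        "-c:v", "libx264",
        "-pix_fmt", "yuv420p",  # widest platform support; yuv444p = higher color fidelity
        "-g", "2",              # small GOP for fast random access, as discussed above
        "episode.mp4",
    ],
    check=True,
)
```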
@@ -116,7 +116,7 @@ Additional encoding parameters exist that are not included in this benchmark. In
- `-preset` which allows for selecting encoding presets. This represents a collection of options that will provide a certain encoding speed to compression ratio. By leaving this parameter unspecified, it is considered to be `medium` for libx264 and libx265 and `8` for libsvtav1.
- `-tune` which allows optimizing the encoding for certain aspects (e.g. film quality, fast decoding, etc.).
See the documentation mentioned above for more detailed info on these settings and for a more comprehensive list of other parameters.
See the documentation mentioned above for more detailled info on these settings and for a more comprehensive list of other parameters.
Similarly on the decoding side, other decoders exist but are not implemented in our current benchmark. To name a few:
- `torchaudio`

View File

@@ -266,7 +266,7 @@ def benchmark_encoding_decoding(
)
ep_num_images = dataset.episode_data_index["to"][0].item()
width, height = tuple(dataset[0][dataset.meta.camera_keys[0]].shape[-2:])
width, height = tuple(dataset[0][dataset.camera_keys[0]].shape[-2:])
num_pixels = width * height
video_size_bytes = video_path.stat().st_size
images_size_bytes = get_directory_size(imgs_dir)
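The hunk above reflects an attribute move: `dataset.camera_keys` on this branch versus `dataset.meta.camera_keys` on main. A small shim like the following sketch keeps a script working across both layouts:

```python
def get_camera_keys(dataset):
    # Prefer the newer dataset.meta.camera_keys, fall back to the old attribute.
    meta = getattr(dataset, "meta", None)
    if meta is not None and hasattr(meta, "camera_keys"):
        return meta.camera_keys
    return dataset.camera_keys
```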

examples.zip (new binary file)

Binary file not shown.

View File

@@ -1,31 +1,25 @@
# Using the [SO-100](https://github.com/TheRobotStudio/SO-ARM100) with LeRobot
This tutorial explains how to use [SO-100](https://github.com/TheRobotStudio/SO-ARM100) with LeRobot.
## A. Source the parts
## Source the parts
Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with links to source the parts, as well as the instructions to 3D print the parts, and advice if it's your first time printing or if you don't own a 3D printer already.
**Important**: Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.
## B. Install LeRobot
## Install LeRobot
On your computer:
1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
```bash
mkdir -p ~/miniconda3
# Linux:
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
# Mac M-series:
# curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o ~/miniconda3/miniconda.sh
# Mac Intel:
# curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -o ~/miniconda3/miniconda.sh
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
rm ~/miniconda3/miniconda.sh
~/miniconda3/bin/conda init bash
```
2. Restart shell or `source ~/.bashrc` (*Mac*: `source ~/.bash_profile`) or `source ~/.zshrc` if you're using zshell
2. Restart shell or `source ~/.bashrc`
3. Create and activate a fresh conda environment for lerobot
```bash
@@ -42,30 +36,23 @@ git clone https://github.com/huggingface/lerobot.git ~/lerobot
cd ~/lerobot && pip install -e ".[feetech]"
```
*For Linux only (not Mac)*: install extra dependencies for recording datasets:
For Linux only (not Mac), install extra dependencies for recording datasets:
```bash
conda install -y -c conda-forge ffmpeg
pip uninstall -y opencv-python
conda install -y -c conda-forge "opencv>=4.10.0"
```
## C. Configure the motors
## Configure the motors
### 1. Find the USB ports associated to each arm
Follow steps 1 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I) which illustrates the use of our scripts below.
Designate one bus servo adapter and 6 motors for your leader arm, and similarly the other bus servo adapter and 6 motors for the follower arm.
#### a. Run the script to find ports
Follow Step 1 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I), which illustrates the use of our scripts below.
To find the port for each bus servo adapter, run the utility script:
**Find USB ports associated to your arms**
To find the correct ports for each arm, run the utility script twice:
```bash
python lerobot/scripts/find_motors_bus_port.py
```
#### b. Example outputs
Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
```
Finding all available ports for the MotorBus.
@@ -77,6 +64,7 @@ Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
Reconnect the usb cable.
```
Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
```
Finding all available ports for the MotorBus.
@@ -89,20 +77,13 @@ The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
Reconnect the usb cable.
```
#### c. Troubleshooting
On Linux, you might need to give access to the USB ports by running:
Troubleshooting: On Linux, you might need to give access to the USB ports by running:
```bash
sudo chmod 666 /dev/ttyACM0
sudo chmod 666 /dev/ttyACM1
```
#### d. Update YAML file
Now that you have the ports, modify the *port* sections in `so100.yaml`
### 2. Configure the motors
#### a. Set IDs for all 12 motors
**Configure your motors**
Plug your first motor and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate:
```bash
python lerobot/scripts/configure_motor.py \
@@ -113,7 +94,7 @@ python lerobot/scripts/configure_motor.py \
--ID 1
```
*Note: These motors are currently limited. They can take values between 0 and 4096 only, which corresponds to a full turn. They can't turn more than that. 2048 is at the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) and reach the maximum range, or take +2048 steps (180 degrees clockwise) and reach the maximum range. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift up to ± 2048 steps (± 180 degrees).*
Note: These motors are currently limited. They can take values between 0 and 4096 only, which corresponds to a full turn. They can't turn more than that. 2048 is at the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) and reach the maximum range, or take +2048 steps (180 degrees clockwise) and reach the maximum range. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift up to ± 2048 steps (± 180 degrees).
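The step/degree arithmetic in that note, as a quick sketch (4096 steps per turn, centered at 2048):

```python
STEPS_PER_TURN = 4096
CENTER = 2048

def steps_to_degrees(steps: int) -> float:
    """Signed offset from the centered position, in degrees."""
    return (steps - CENTER) * 360 / STEPS_PER_TURN

print(steps_to_degrees(0))     # -180.0
print(steps_to_degrees(2048))  # 0.0
print(steps_to_degrees(4096))  # 180.0
```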
Then unplug your motor and plug the second motor and set its ID to 2.
```bash
@@ -127,41 +108,39 @@ python lerobot/scripts/configure_motor.py \
Redo the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm.
**Remove the gears of the 6 leader motors**
Follow step 2 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). You need to remove the gear for the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm.
#### b. Remove the gears of the 6 leader motors
Follow step 2 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=248). You need to remove the gear for the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm.
#### c. Add motor horn to all 12 motors
Follow step 3 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=569). For SO-100, you need to align the holes on the motor horn to the motor spline to be approximately 1:30, 4:30, 7:30 and 10:30.
**Add motor horn to the motors**
Follow step 3 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). For SO-100, you need to align the holes on the motor horn to the motor spline to be approximately 1:30, 4:30, 7:30 and 10:30.
Try to avoid rotating the motor while doing so to keep position 2048 set during configuration. It is especially tricky for the leader motors as they are more sensitive without the gears, but it's ok if it's a bit rotated.
## D. Assemble the arms
## Assemble the arms
Follow step 4 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=610). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can do it in under 1 hour for the second arm.
Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can do it in under 1 hour for the second arm.
## E. Calibrate
## Calibrate
Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one SO-100 robot to work on another.
#### a. Manual calibration of follower arm
/!\ Contrarily to step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724) which illustrates the auto calibration, we will actually do manual calibration of follower for now.
**Auto-calibration of follower arm**
Follow step 5 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I) which illustrates the auto-calibration of the follower arm. You first need to manually move your follower arm to this initial position:
You will need to move the follower arm to these positions sequentially:
<div style="text-align:center;">
<img src="../media/so100/follower_initial.webp?raw=true" alt="SO-100 follower arm initial position" title="SO-100 follower arm initial position" width="50%">
</div>
| 1. Zero position | 2. Rotated position | 3. Rest position |
|---|---|---|
| <img src="../media/so100/follower_zero.webp?raw=true" alt="SO-100 follower arm zero position" title="SO-100 follower arm zero position" style="width:100%;"> | <img src="../media/so100/follower_rotated.webp?raw=true" alt="SO-100 follower arm rotated position" title="SO-100 follower arm rotated position" style="width:100%;"> | <img src="../media/so100/follower_rest.webp?raw=true" alt="SO-100 follower arm rest position" title="SO-100 follower arm rest position" style="width:100%;"> |
Make sure both arms are connected and run this script to launch manual calibration:
Then run this script to launch auto-calibration:
```bash
python lerobot/scripts/control_robot.py calibrate \
--robot-path lerobot/configs/robot/so100.yaml \
--robot-overrides '~cameras' --arms main_follower
```
#### b. Manual calibration of leader arm
Follow step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
Note: You can't run auto-calibration for the leader arm, since we removed the gears. Thus, you will need to manually calibrate the leader arm. It's less precise than auto-calibration, but precision is not as critical for the leader arm.
**Manual calibration of leader arm**
Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
| 1. Zero position | 2. Rotated position | 3. Rest position |
|---|---|---|
@@ -174,7 +153,7 @@ python lerobot/scripts/control_robot.py calibrate \
--robot-overrides '~cameras' --arms main_leader
```
## F. Teleoperate
## Teleoperate
**Simple teleop**
Then you are ready to teleoperate your robot! Run this simple script (it won't connect and display the cameras):
@@ -186,14 +165,14 @@ python lerobot/scripts/control_robot.py teleoperate \
```
#### a. Teleop with displaying cameras
**Teleop with displaying cameras**
Follow [this guide to setup your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset.
```bash
python lerobot/scripts/control_robot.py teleoperate \
--robot-path lerobot/configs/robot/so100.yaml
```
## G. Record a dataset
## Record a dataset
Once you're familiar with teleoperation, you can record your first dataset with SO-100.
@@ -213,6 +192,7 @@ Record 2 episodes and upload your dataset to the hub:
python lerobot/scripts/control_robot.py record \
--robot-path lerobot/configs/robot/so100.yaml \
--fps 30 \
--root data \
--repo-id ${HF_USER}/so100_test \
--tags so100 tutorial \
--warmup-time-s 5 \
@@ -222,7 +202,7 @@ python lerobot/scripts/control_robot.py record \
--push-to-hub 1
```
## H. Visualize a dataset
## Visualize a dataset
If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
```bash
@@ -232,25 +212,27 @@ echo ${HF_USER}/so100_test
If you didn't upload it (i.e. used `--push-to-hub 0`), you can also visualize it locally with:
```bash
python lerobot/scripts/visualize_dataset_html.py \
--root data \
--repo-id ${HF_USER}/so100_test
```
## I. Replay an episode
## Replay an episode
Now try to replay the first episode on your robot:
```bash
python lerobot/scripts/control_robot.py replay \
DATA_DIR=data python lerobot/scripts/control_robot.py replay \
--robot-path lerobot/configs/robot/so100.yaml \
--fps 30 \
--root data \
--repo-id ${HF_USER}/so100_test \
--episode 0
```
## J. Train a policy
## Train a policy
To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
python lerobot/scripts/train.py \
DATA_DIR=data python lerobot/scripts/train.py \
dataset_repo_id=${HF_USER}/so100_test \
policy=act_so100_real \
env=so100_real \
@@ -266,16 +248,18 @@ Let's explain it:
3. We provided an environment as argument with `env=so100_real`. This loads configurations from [`lerobot/configs/env/so100_real.yaml`](../lerobot/configs/env/so100_real.yaml).
4. We provided `device=cuda` since we are training on a Nvidia GPU, but you can also use `device=mps` if you are using a Mac with Apple silicon, or `device=cpu` otherwise.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you don't provide `DATA_DIR`, your dataset will be downloaded from the Hugging Face hub to your cache folder `$HOME/.cache/huggingface`. In future versions of `lerobot`, both directories will be in sync.
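A sketch of the lookup order point 6 describes; the helper name and exact fallback path are illustrative, not the training script's actual code:

```python
import os
from pathlib import Path

def resolve_data_root() -> Path:
    data_dir = os.environ.get("DATA_DIR")
    if data_dir:
        return Path(data_dir)  # e.g. DATA_DIR=data -> ./data
    return Path.home() / ".cache" / "huggingface"  # hub cache fallback
```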
Training should take several hours. You will find checkpoints in `outputs/train/act_so100_test/checkpoints`.
## K. Evaluate your policy
## Evaluate your policy
You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
python lerobot/scripts/control_robot.py record \
--robot-path lerobot/configs/robot/so100.yaml \
--fps 30 \
--root data \
--repo-id ${HF_USER}/eval_act_so100_test \
--tags so100 tutorial eval \
--warmup-time-s 5 \
@@ -289,7 +273,7 @@ As you can see, it's almost the same command as previously used to record your t
1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/eval_so100_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_so100_test`).
2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_so100_test`).
## L. More Information
## More
Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth tutorial on controlling real robots with LeRobot.

View File

@@ -123,22 +123,22 @@ Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMi
Next, you'll need to calibrate your Moss v1 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one Moss v1 robot to work on another.
**Manual calibration of follower arm**
/!\ Contrarily to step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the auto calibration, we will actually do manual calibration of follower for now.
**Auto-calibration of follower arm**
Follow step 5 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the auto-calibration of the follower arm. You first need to manually move your follower arm to this initial position:
You will need to move the follower arm to these positions sequentially:
<div style="text-align:center;">
<img src="../media/moss/follower_initial.webp?raw=true" alt="Moss v1 follower arm initial position" title="Moss v1 follower arm initial position" width="50%">
</div>
| 1. Zero position | 2. Rotated position | 3. Rest position |
|---|---|---|
| <img src="../media/moss/follower_zero.webp?raw=true" alt="Moss v1 follower arm zero position" title="Moss v1 follower arm zero position" style="width:100%;"> | <img src="../media/moss/follower_rotated.webp?raw=true" alt="Moss v1 follower arm rotated position" title="Moss v1 follower arm rotated position" style="width:100%;"> | <img src="../media/moss/follower_rest.webp?raw=true" alt="Moss v1 follower arm rest position" title="Moss v1 follower arm rest position" style="width:100%;"> |
Make sure both arms are connected and run this script to launch manual calibration:
Then run this script to launch auto-calibration:
```bash
python lerobot/scripts/control_robot.py calibrate \
--robot-path lerobot/configs/robot/moss.yaml \
--robot-overrides '~cameras' --arms main_follower
```
Note: You can't run auto-calibration for the leader arm, since we removed the gears. Thus, you will need to manually calibrate the leader arm. It's less precise than auto-calibration, but precision is not as critical for the leader arm.
**Manual calibration of leader arm**
Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
@@ -192,6 +192,7 @@ Record 2 episodes and upload your dataset to the hub:
python lerobot/scripts/control_robot.py record \
--robot-path lerobot/configs/robot/moss.yaml \
--fps 30 \
--root data \
--repo-id ${HF_USER}/moss_test \
--tags moss tutorial \
--warmup-time-s 5 \
@@ -211,6 +212,7 @@ echo ${HF_USER}/moss_test
If you didn't upload it (i.e. used `--push-to-hub 0`), you can also visualize it locally with:
```bash
python lerobot/scripts/visualize_dataset_html.py \
--root data \
--repo-id ${HF_USER}/moss_test
```
@@ -218,9 +220,10 @@ python lerobot/scripts/visualize_dataset_html.py \
Now try to replay the first episode on your robot:
```bash
python lerobot/scripts/control_robot.py replay \
DATA_DIR=data python lerobot/scripts/control_robot.py replay \
--robot-path lerobot/configs/robot/moss.yaml \
--fps 30 \
--root data \
--repo-id ${HF_USER}/moss_test \
--episode 0
```
@@ -229,7 +232,7 @@ python lerobot/scripts/control_robot.py replay \
To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
python lerobot/scripts/train.py \
DATA_DIR=data python lerobot/scripts/train.py \
dataset_repo_id=${HF_USER}/moss_test \
policy=act_moss_real \
env=moss_real \
@@ -245,6 +248,7 @@ Let's explain it:
3. We provided an environment as argument with `env=moss_real`. This loads configurations from [`lerobot/configs/env/moss_real.yaml`](../lerobot/configs/env/moss_real.yaml).
4. We provided `device=cuda` since we are training on a Nvidia GPU, but you can also use `device=mps` if you are using a Mac with Apple silicon, or `device=cpu` otherwise.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you don't provide `DATA_DIR`, your dataset will be downloaded from the Hugging Face hub to your cache folder `$HOME/.cache/huggingface`. In future versions of `lerobot`, both directories will be in sync.
Training should take several hours. You will find checkpoints in `outputs/train/act_moss_test/checkpoints`.
@@ -255,6 +259,7 @@ You can use the `record` function from [`lerobot/scripts/control_robot.py`](../l
python lerobot/scripts/control_robot.py record \
--robot-path lerobot/configs/robot/moss.yaml \
--fps 30 \
--root data \
--repo-id ${HF_USER}/eval_act_moss_test \
--tags moss tutorial eval \
--warmup-time-s 5 \

View File

@@ -1,83 +0,0 @@
# Training a HIL-SERL Reward Classifier with LeRobot
This tutorial provides step-by-step instructions for training a reward classifier using LeRobot.
---
## Training Script Overview
LeRobot includes a ready-to-use training script located at [`lerobot/scripts/train_hilserl_classifier.py`](../../lerobot/scripts/train_hilserl_classifier.py). Here's an outline of its workflow:
1. **Configuration Loading**
The script uses Hydra to load a configuration file for subsequent steps. (Details on Hydra follow below.)
2. **Dataset Initialization**
It loads a `LeRobotDataset` containing images and rewards. To optimize performance, a weighted random sampler is used to balance class sampling.
3. **Classifier Initialization**
A lightweight classification head is built on top of a frozen, pretrained image encoder from HuggingFace. The classifier outputs either:
- A single probability (binary classification), or
- Logits (multi-class classification).
4. **Training Loop Execution**
The script performs:
- Forward and backward passes,
- Optimization steps,
- Periodic logging, evaluation, and checkpoint saving.
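As a companion to step 3, a minimal sketch of a classification head on a frozen encoder; the encoder choice and feature dimension are assumptions, and the real implementation lives in `lerobot/scripts/train_hilserl_classifier.py`:

```python
import torch
import torch.nn as nn


class RewardClassifier(nn.Module):
    def __init__(self, encoder: nn.Module, feature_dim: int, num_classes: int = 1):
        super().__init__()
        self.encoder = encoder
        for p in self.encoder.parameters():  # freeze the pretrained backbone
            p.requires_grad = False
        self.head = nn.Linear(feature_dim, num_classes)

    def forward(self, images: torch.Tensor) -> torch.Tensor:
        with torch.no_grad():
            feats = self.encoder(images)
        return self.head(feats)  # 1 logit -> binary, N logits -> multi-class
```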
---
## Configuring with Hydra
For detailed information about Hydra usage, refer to [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md). However, note that training the reward classifier differs slightly and requires a separate configuration file.
### Config File Setup
The default `default.yaml` cannot launch the reward classifier training directly. Instead, you need a configuration file like [`lerobot/configs/policy/hilserl_classifier.yaml`](../../lerobot/configs/policy/hilserl_classifier.yaml), with the following adjustment:
Replace the `dataset_repo_id` field with the identifier for your dataset, which contains images and sparse rewards:
```yaml
# Example: lerobot/configs/policy/reward_classifier.yaml
dataset_repo_id: "my_dataset_repo_id"
## Typical logs and metrics
```
When you start the training process, you will first see your full configuration printed in the terminal. You can check it to make sure that you configured it correctly and that your config is not overridden by other files. The final configuration will also be saved with the checkpoint.
After that, you will see a training log like this one:
```
[2024-11-29 18:26:36,999][root][INFO] -
Epoch 5/5
Training: 82%|██████████████████████████████████████████████████████████████████████████████▋ | 91/111 [00:50<00:09, 2.04it/s, loss=0.2999, acc=69.99%]
```
or evaluation log like:
```
Validation: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 28/28 [00:20<00:00, 1.37it/s]
```
### Metrics Tracking with Weights & Biases (WandB)
If `wandb.enable` is set to `true`, the training and evaluation logs will also be saved in WandB. This allows you to track key metrics in real-time, including:
- **Training Metrics**:
- `train/accuracy`
- `train/loss`
- `train/dataloading_s`
- **Evaluation Metrics**:
- `eval/accuracy`
- `eval/loss`
- `eval/eval_s`
#### Additional Features
You can also log sample predictions during evaluation. Each logged sample will include:
- The **input image**.
- The **predicted label**.
- The **true label**.
- The **classifier's "confidence" (logits/probability)**.
These logs can be useful for diagnosing and debugging performance issues.

View File

@@ -3,120 +3,78 @@ This script demonstrates the use of `LeRobotDataset` class for handling and proc
It illustrates how to load datasets, manipulate them, and apply transformations suitable for machine learning tasks in PyTorch.
Features included in this script:
- Viewing a dataset's metadata and exploring its properties.
- Loading an existing dataset from the hub or a subset of it.
- Accessing frames by episode number.
- Loading a dataset and accessing its properties.
- Filtering data by episode number.
- Converting tensor data for visualization.
- Saving video files from dataset frames.
- Using advanced dataset features like timestamp-based frame selection.
- Demonstrating compatibility with PyTorch DataLoader for batch processing.
The script ends with examples of how to batch process data using PyTorch's DataLoader.
"""
from pathlib import Path
from pprint import pprint
import imageio
import torch
from huggingface_hub import HfApi
import lerobot
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
# We ported a number of existing datasets ourselves, use this to see the list:
print("List of available datasets:")
pprint(lerobot.available_datasets)
# You can also browse through the datasets created/ported by the community on the hub using the hub api:
hub_api = HfApi()
repo_ids = [info.id for info in hub_api.list_datasets(task_categories="robotics", tags=["LeRobot"])]
pprint(repo_ids)
# Let's take one for this example
repo_id = "lerobot/pusht"
# Or simply explore them in your web browser directly at:
# https://huggingface.co/datasets?other=LeRobot
# Let's take this one for this example
repo_id = "lerobot/aloha_mobile_cabinet"
# We can have a look and fetch its metadata to know more about it:
ds_meta = LeRobotDatasetMetadata(repo_id)
# By instantiating just this class, you can quickly access useful information about the content and the
# structure of the dataset without downloading the actual data yet (only metadata files — which are
# lightweight).
print(f"Total number of episodes: {ds_meta.total_episodes}")
print(f"Average number of frames per episode: {ds_meta.total_frames / ds_meta.total_episodes:.3f}")
print(f"Frames per second used during data collection: {ds_meta.fps}")
print(f"Robot type: {ds_meta.robot_type}")
print(f"keys to access images from cameras: {ds_meta.camera_keys=}\n")
print("Tasks:")
print(ds_meta.tasks)
print("Features:")
pprint(ds_meta.features)
# You can also get a short summary by simply printing the object:
print(ds_meta)
# You can then load the actual dataset from the hub.
# Either load any subset of episodes:
dataset = LeRobotDataset(repo_id, episodes=[0, 10, 11, 23])
# And see how many frames you have:
print(f"Selected episodes: {dataset.episodes}")
print(f"Number of episodes selected: {dataset.num_episodes}")
print(f"Number of frames selected: {dataset.num_frames}")
# Or simply load the entire dataset:
# You can easily load a dataset from a Hugging Face repository
dataset = LeRobotDataset(repo_id)
print(f"Number of episodes selected: {dataset.num_episodes}")
print(f"Number of frames selected: {dataset.num_frames}")
# The previous metadata class is contained in the 'meta' attribute of the dataset:
print(dataset.meta)
# LeRobotDataset actually wraps an underlying Hugging Face dataset
# (see https://huggingface.co/docs/datasets for more information).
# LeRobotDataset is actually a thin wrapper around an underlying Hugging Face dataset
# (see https://huggingface.co/docs/datasets/index for more information).
print(dataset)
print(dataset.hf_dataset)
# LeRobot datasets also subclasses PyTorch datasets so you can do everything you know and love from working
# with the latter, like iterating through the dataset.
# The __getitem__ iterates over the frames of the dataset. Since our datasets are also structured by
# episodes, you can access the frame indices of any episode using the episode_data_index. Here, we access
# frame indices associated to the first episode:
# And provides additional utilities for robotics and compatibility with Pytorch
print(f"\naverage number of frames per episode: {dataset.num_samples / dataset.num_episodes:.3f}")
print(f"frames per second used during data collection: {dataset.fps=}")
print(f"keys to access images from cameras: {dataset.camera_keys=}\n")
# Access frame indexes associated to first episode
episode_index = 0
from_idx = dataset.episode_data_index["from"][episode_index].item()
to_idx = dataset.episode_data_index["to"][episode_index].item()
# Then we grab all the image frames from the first camera:
camera_key = dataset.meta.camera_keys[0]
frames = [dataset[idx][camera_key] for idx in range(from_idx, to_idx)]
# LeRobot datasets actually subclass PyTorch datasets so you can do everything you know and love from working
# with the latter, like iterating through the dataset. Here we grab all the image frames.
frames = [dataset[idx]["observation.image"] for idx in range(from_idx, to_idx)]
# The objects returned by the dataset are all torch.Tensors
print(type(frames[0]))
print(frames[0].shape)
# Video frames are now float32 in range [0,1] channel first (c,h,w) to follow pytorch convention. To visualize
# them, we convert to uint8 in range [0,255]
frames = [(frame * 255).type(torch.uint8) for frame in frames]
# and to channel last (h,w,c).
frames = [frame.permute((1, 2, 0)).numpy() for frame in frames]
# Since we're using pytorch, the shape is in pytorch, channel-first convention (c, h, w).
# We can compare this shape with the information available for that feature
pprint(dataset.features[camera_key])
# In particular:
print(dataset.features[camera_key]["shape"])
# The shape is in (h, w, c) which is a more universal format.
# Finally, we save the frames to a mp4 video for visualization.
Path("outputs/examples/1_load_lerobot_dataset").mkdir(parents=True, exist_ok=True)
imageio.mimsave("outputs/examples/1_load_lerobot_dataset/episode_0.mp4", frames, fps=dataset.fps)
# For many machine learning applications we need to load the history of past observations or trajectories of
# future actions. Our datasets can load previous and future frames for each key/modality, using timestamps
# differences with the current loaded frame. For instance:
delta_timestamps = {
# loads 4 images: 1 second before current frame, 500 ms before, 200 ms before, and current frame
camera_key: [-1, -0.5, -0.20, 0],
# loads 8 state vectors: 1.5 seconds before, 1 second before, ... 200 ms, 100 ms, and current frame
"observation.state": [-1.5, -1, -0.5, -0.20, -0.10, 0],
"observation.image": [-1, -0.5, -0.20, 0],
# loads 8 state vectors: 1.5 seconds before, 1 second before, ... 20 ms, 10 ms, and current frame
"observation.state": [-1.5, -1, -0.5, -0.20, -0.10, -0.02, -0.01, 0],
# loads 64 action vectors: current frame, 1 frame in the future, 2 frames, ... 63 frames in the future
"action": [t / dataset.fps for t in range(64)],
}
# Note that in any case, these delta_timestamps values need to be multiples of (1/fps) so that added to any
# timestamp, you still get a valid timestamp.
dataset = LeRobotDataset(repo_id, delta_timestamps=delta_timestamps)
print(f"\n{dataset[0][camera_key].shape=}") # (4, c, h, w)
print(f"{dataset[0]['observation.state'].shape=}") # (6, c)
print(f"{dataset[0]['action'].shape=}\n") # (64, c)
print(f"\n{dataset[0]['observation.image'].shape=}") # (4,c,h,w)
print(f"{dataset[0]['observation.state'].shape=}") # (8,c)
print(f"{dataset[0]['action'].shape=}\n") # (64,c)
# Finally, our datasets are fully compatible with PyTorch dataloaders and samplers because they are just
# PyTorch datasets.
@@ -126,9 +84,8 @@ dataloader = torch.utils.data.DataLoader(
batch_size=32,
shuffle=True,
)
for batch in dataloader:
print(f"{batch[camera_key].shape=}") # (32, 4, c, h, w)
print(f"{batch['observation.state'].shape=}") # (32, 5, c)
print(f"{batch['action'].shape=}") # (32, 64, c)
print(f"{batch['observation.image'].shape=}") # (32,4,c,h,w)
print(f"{batch['observation.state'].shape=}") # (32,8,c)
print(f"{batch['action'].shape=}") # (32,64,c)
break

View File

@@ -40,7 +40,7 @@ dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps)
# For this example, no arguments need to be passed because the defaults are set up for PushT.
# If you're doing something different, you will likely need to change at least some of the defaults.
cfg = DiffusionConfig()
policy = DiffusionPolicy(cfg, dataset_stats=dataset.meta.stats)
policy = DiffusionPolicy(cfg, dataset_stats=dataset.stats)
policy.train()
policy.to(device)

View File

@@ -1,7 +1,7 @@
"""
This script demonstrates how to use torchvision's image transformation with LeRobotDataset for data
augmentation purposes. The transformations are passed to the dataset as an argument upon creation, and
transforms are applied to the observation images before they are returned in the dataset's __getitem__.
transforms are applied to the observation images before they are returned in the dataset's __get_item__.
"""
from pathlib import Path
@@ -10,17 +10,17 @@ from torchvision.transforms import ToPILImage, v2
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
dataset_repo_id = "lerobot/aloha_static_screw_driver"
dataset_repo_id = "lerobot/aloha_static_tape"
# Create a LeRobotDataset with no transformations
dataset = LeRobotDataset(dataset_repo_id, episodes=[0])
dataset = LeRobotDataset(dataset_repo_id)
# This is equivalent to `dataset = LeRobotDataset(dataset_repo_id, image_transforms=None)`
# Get the index of the first observation in the first episode
first_idx = dataset.episode_data_index["from"][0].item()
# Get the frame corresponding to the first camera
frame = dataset[first_idx][dataset.meta.camera_keys[0]]
frame = dataset[first_idx][dataset.camera_keys[0]]
# Define the transformations
@@ -28,16 +28,15 @@ transforms = v2.Compose(
[
v2.ColorJitter(brightness=(0.5, 1.5)),
v2.ColorJitter(contrast=(0.5, 1.5)),
v2.ColorJitter(hue=(-0.1, 0.1)),
v2.RandomAdjustSharpness(sharpness_factor=2, p=1),
]
)
# Create another LeRobotDataset with the defined transformations
transformed_dataset = LeRobotDataset(dataset_repo_id, episodes=[0], image_transforms=transforms)
transformed_dataset = LeRobotDataset(dataset_repo_id, image_transforms=transforms)
# Get a frame from the transformed dataset
transformed_frame = transformed_dataset[first_idx][transformed_dataset.meta.camera_keys[0]]
transformed_frame = transformed_dataset[first_idx][transformed_dataset.camera_keys[0]]
# Create a directory to store output images
output_dir = Path("outputs/image_transforms")

View File

@@ -29,7 +29,7 @@ For a visual walkthrough of the assembly process, you can refer to [this video t
## 2. Configure motors, calibrate arms, teleoperate your Koch v1.1
First, install the additional dependencies required for robots built with dynamixel motors like Koch v1.1 by running one of the following commands (make sure gcc is installed).
First, install the additional dependencies required for robots built with dynamixel motors like Koch v1.1 by running one of the following commands.
Using `pip`:
```bash
@@ -778,6 +778,7 @@ Now run this to record 2 episodes:
python lerobot/scripts/control_robot.py record \
--robot-path lerobot/configs/robot/koch.yaml \
--fps 30 \
--root data \
--repo-id ${HF_USER}/koch_test \
--tags tutorial \
--warmup-time-s 5 \
@@ -786,7 +787,7 @@ python lerobot/scripts/control_robot.py record \
--num-episodes 2
```
This will write your dataset locally to `~/.cache/huggingface/lerobot/{repo-id}` (e.g. `data/cadene/koch_test`) and push it on the hub at `https://huggingface.co/datasets/{HF_USER}/{repo-id}`. Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).
This will write your dataset locally to `{root}/{repo-id}` (e.g. `data/cadene/koch_test`) and push it on the hub at `https://huggingface.co/datasets/{HF_USER}/{repo-id}`. Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).
You can look for other LeRobot datasets on the hub by searching for `LeRobot` tags: https://huggingface.co/datasets?other=LeRobot
@@ -839,6 +840,7 @@ In the coming months, we plan to release a foundational model for robotics. We a
You can visualize your dataset by running:
```bash
python lerobot/scripts/visualize_dataset_html.py \
--root data \
--repo-id ${HF_USER}/koch_test
```
@@ -856,6 +858,7 @@ To replay the first episode of the dataset you just recorded, run the following
python lerobot/scripts/control_robot.py replay \
--robot-path lerobot/configs/robot/koch.yaml \
--fps 30 \
--root data \
--repo-id ${HF_USER}/koch_test \
--episode 0
```
@@ -868,7 +871,7 @@ Your robot should replicate movements similar to those you recorded. For example
To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
python lerobot/scripts/train.py \
DATA_DIR=data python lerobot/scripts/train.py \
dataset_repo_id=${HF_USER}/koch_test \
policy=act_koch_real \
env=koch_real \
@@ -915,6 +918,7 @@ env:
It should match your dataset (e.g. `fps: 30`) and your robot (e.g. `state_dim: 6` and `action_dim: 6`). We are still working on simplifying this in future versions of `lerobot`.
4. We provided `device=cuda` since we are training on a Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you don't provide `DATA_DIR`, your dataset will be downloaded from the Hugging Face hub to your cache folder `$HOME/.cache/huggingface`. In future versions of `lerobot`, both directories will be in sync.
For more information on the `train` script see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md)
@@ -987,6 +991,7 @@ To this end, you can use the `record` function from [`lerobot/scripts/control_ro
python lerobot/scripts/control_robot.py record \
--robot-path lerobot/configs/robot/koch.yaml \
--fps 30 \
--root data \
--repo-id ${HF_USER}/eval_koch_test \
--tags tutorial eval \
--warmup-time-s 5 \
@@ -1005,6 +1010,7 @@ As you can see, it's almost the same command as previously used to record your t
You can then visualize your evaluation dataset by running the same command as before but with the new inference dataset as argument:
```bash
python lerobot/scripts/visualize_dataset.py \
--root data \
--repo-id ${HF_USER}/eval_koch_test
```

View File

@@ -128,6 +128,7 @@ Record one episode:
python lerobot/scripts/control_robot.py record \
--robot-path lerobot/configs/robot/stretch.yaml \
--fps 20 \
--root data \
--repo-id ${HF_USER}/stretch_test \
--tags stretch tutorial \
--warmup-time-s 3 \
@@ -145,6 +146,7 @@ Now try to replay this episode (make sure the robot's initial position is the sa
python lerobot/scripts/control_robot.py replay \
--robot-path lerobot/configs/robot/stretch.yaml \
--fps 20 \
--root data \
--repo-id ${HF_USER}/stretch_test \
--episode 0
```

View File

@@ -56,7 +56,7 @@ python lerobot/scripts/control_robot.py teleoperate \
--robot-overrides max_relative_target=5
```
By adding `--robot-overrides max_relative_target=5`, we override the default value for `max_relative_target` defined in `lerobot/configs/robot/aloha.yaml`. It is expected to be `5` to limit the magnitude of the movement for more safety, but the teleoperation won't be smooth. When you feel confident, you can disable this limit by adding `--robot-overrides max_relative_target=null` to the command line:
By adding `--robot-overrides max_relative_target=5`, we override the default value for `max_relative_target` defined in `lerobot/configs/robot/aloha.yaml`. It is expected to be `5` to limit the magnitude of the movement for more safety, but the teloperation won't be smooth. When you feel confident, you can disable this limit by adding `--robot-overrides max_relative_target=null` to the command line:
```bash
python lerobot/scripts/control_robot.py teleoperate \
--robot-path lerobot/configs/robot/aloha.yaml \
@@ -84,6 +84,7 @@ python lerobot/scripts/control_robot.py record \
--robot-path lerobot/configs/robot/aloha.yaml \
--robot-overrides max_relative_target=null \
--fps 30 \
--root data \
--repo-id ${HF_USER}/aloha_test \
--tags aloha tutorial \
--warmup-time-s 5 \
@@ -103,6 +104,7 @@ echo ${HF_USER}/aloha_test
If you didn't upload it (i.e. used `--push-to-hub 0`), you can also visualize it locally with:
```bash
python lerobot/scripts/visualize_dataset_html.py \
--root data \
--repo-id ${HF_USER}/aloha_test
```
@@ -117,6 +119,7 @@ python lerobot/scripts/control_robot.py replay \
--robot-path lerobot/configs/robot/aloha.yaml \
--robot-overrides max_relative_target=null \
--fps 30 \
--root data \
--repo-id ${HF_USER}/aloha_test \
--episode 0
```
@@ -125,7 +128,7 @@ python lerobot/scripts/control_robot.py replay \
To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
python lerobot/scripts/train.py \
DATA_DIR=data python lerobot/scripts/train.py \
dataset_repo_id=${HF_USER}/aloha_test \
policy=act_aloha_real \
env=aloha_real \
@@ -141,6 +144,7 @@ Let's explain it:
3. We provided an environment as argument with `env=aloha_real`. This loads configurations from [`lerobot/configs/env/aloha_real.yaml`](../lerobot/configs/env/aloha_real.yaml). Note: this yaml defines 18 dimensions for the `state_dim` and `action_dim`, corresponding to 18 motors, not 14 motors as used in previous Aloha work. This is because we include the `shoulder_shadow` and `elbow_shadow` motors for simplicity.
4. We provided `device=cuda` since we are training on a Nvidia GPU.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you don't provide `DATA_DIR`, your dataset will be downloaded from the Hugging Face hub to your cache folder `$HOME/.cache/huggingface`. In future versions of `lerobot`, both directories will be in sync.
Training should take several hours. You will find checkpoints in `outputs/train/act_aloha_test/checkpoints`.
@@ -152,6 +156,7 @@ python lerobot/scripts/control_robot.py record \
--robot-path lerobot/configs/robot/aloha.yaml \
--robot-overrides max_relative_target=null \
--fps 30 \
--root data \
--repo-id ${HF_USER}/eval_act_aloha_test \
--tags aloha tutorial eval \
--warmup-time-s 5 \


@@ -14,7 +14,7 @@ from pathlib import Path
import torch
from huggingface_hub import snapshot_download
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
device = torch.device("cuda")
@@ -41,20 +41,26 @@ delta_timestamps = {
}
# Load the last 10% of episodes of the dataset as a validation set.
# - Load dataset metadata
dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht")
# - Calculate train and val episodes
total_episodes = dataset_metadata.total_episodes
episodes = list(range(dataset_metadata.total_episodes))
num_train_episodes = math.floor(total_episodes * 90 / 100)
train_episodes = episodes[:num_train_episodes]
val_episodes = episodes[num_train_episodes:]
print(f"Number of episodes in full dataset: {total_episodes}")
print(f"Number of episodes in training dataset (90% subset): {len(train_episodes)}")
print(f"Number of episodes in validation dataset (10% subset): {len(val_episodes)}")
# - Load train and val datasets
train_dataset = LeRobotDataset("lerobot/pusht", episodes=train_episodes, delta_timestamps=delta_timestamps)
val_dataset = LeRobotDataset("lerobot/pusht", episodes=val_episodes, delta_timestamps=delta_timestamps)
# - Load full dataset
full_dataset = LeRobotDataset("lerobot/pusht", split="train")
# - Calculate train and val subsets
num_train_episodes = math.floor(full_dataset.num_episodes * 90 / 100)
num_val_episodes = full_dataset.num_episodes - num_train_episodes
print(f"Number of episodes in full dataset: {full_dataset.num_episodes}")
print(f"Number of episodes in training dataset (90% subset): {num_train_episodes}")
print(f"Number of episodes in validation dataset (10% subset): {num_val_episodes}")
# - Get first frame index of the validation set
first_val_frame_index = full_dataset.episode_data_index["from"][num_train_episodes].item()
# - Load frames subset belonging to validation set using the `split` argument.
# It utilizes the `datasets` library's syntax for slicing datasets.
# For more information on the Slice API, please see:
# https://huggingface.co/docs/datasets/v2.19.0/loading#slice-splits
train_dataset = LeRobotDataset(
"lerobot/pusht", split=f"train[:{first_val_frame_index}]", delta_timestamps=delta_timestamps
)
val_dataset = LeRobotDataset(
"lerobot/pusht", split=f"train[{first_val_frame_index}:]", delta_timestamps=delta_timestamps
)
print(f"Number of frames in training dataset (90% subset): {len(train_dataset)}")
print(f"Number of frames in validation dataset (10% subset): {len(val_dataset)}")

examples/hopejr/BOM Normal file

@@ -0,0 +1 @@
Bearings
- https://amzn.eu/d/8Xz7m4C
- https://amzn.eu/d/1xOo8re
- https://amzn.eu/d/9LXO205 (17x)
- https://amzn.eu/d/eKGj9gf (2x)

Bike Components
- https://amzn.eu/d/cNiQi0O (1x)

Accessories
- https://amzn.eu/d/ipjCq1R (1x)
- https://amzn.eu/d/0ZMzC3G (1x)

Screws
- https://amzn.eu/d/dzNhSkJ
- https://amzn.eu/d/41AhVIU
- https://amzn.eu/d/8G91txy
- https://amzn.eu/d/9xu0pLa
- https://amzn.eu/d/c5xaClV
- https://amzn.eu/d/7kudpAo
- https://amzn.eu/d/2BEgJFc
- https://amzn.eu/d/4q9RNby
- https://amzn.eu/d/4RE2lPV
- https://amzn.eu/d/63YU0l1

Inserts
- https://amzn.eu/d/7fjOtOC

examples/hopejr/README.md Normal file

@@ -0,0 +1,624 @@
# Using the [SO-100](https://github.com/TheRobotStudio/SO-ARM100) with LeRobot
## Table of Contents
- [A. Source the parts](#a-source-the-parts)
- [B. Install LeRobot](#b-install-lerobot)
- [C. Configure the Motors](#c-configure-the-motors)
- [D. Step-by-Step Assembly Instructions](#d-step-by-step-assembly-instructions)
- [E. Calibrate](#e-calibrate)
- [F. Teleoperate](#f-teleoperate)
- [G. Record a dataset](#g-record-a-dataset)
- [H. Visualize a dataset](#h-visualize-a-dataset)
- [I. Replay an episode](#i-replay-an-episode)
- [J. Train a policy](#j-train-a-policy)
- [K. Evaluate your policy](#k-evaluate-your-policy)
- [L. More Information](#l-more-information)
## A. Source the parts
Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with a link to source the parts, as well as the instructions to 3D print the parts,
and advice if it's your first time printing or if you don't own a 3D printer.
Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.
## B. Install LeRobot
> [!TIP]
> We use the Command Prompt (cmd) quite a lot. If you are not comfortable using the cmd or want to brush up using the command line you can have a look here: [Command line crash course](https://developer.mozilla.org/en-US/docs/Learn_web_development/Getting_started/Environment_setup/Command_line)
On your computer:
#### 1. [Install Miniconda](https://docs.anaconda.com/miniconda/install/#quick-command-line-install):
#### 2. Restart shell
Copy-paste into your shell: `source ~/.bashrc`, or for Mac: `source ~/.bash_profile`, or `source ~/.zshrc` if you're using zsh.
#### 3. Create and activate a fresh conda environment for lerobot
<details>
<summary><strong>Video install instructions</strong></summary>
<video src="https://github.com/user-attachments/assets/17172d3b-3b64-4b80-9cf1-b2b7c5cbd236"></video>
</details>
```bash
conda create -y -n lerobot python=3.10
```
Then activate your conda environment (do this each time you open a shell to use lerobot!):
```bash
conda activate lerobot
```
#### 4. Clone LeRobot:
```bash
git clone https://github.com/huggingface/lerobot.git ~/lerobot
```
#### 5. Install ffmpeg in your environment:
When using `miniconda`, install `ffmpeg` in your environment:
```bash
conda install ffmpeg -c conda-forge
```
#### 6. Install LeRobot with dependencies for the feetech motors:
```bash
cd ~/lerobot && pip install -e ".[feetech]"
```
Great :hugs:! You are now done installing LeRobot and we can begin assembling the SO100 arms :robot:.
Every time you want to use LeRobot, you can go to the `~/lerobot` folder where we installed it and run one of the commands.
## C. Configure the motors
> [!NOTE]
> Throughout this tutorial you will find videos on how to do the steps, the full video tutorial can be found here: [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I).
### 1. Find the USB ports associated to each arm
Designate one bus servo adapter and 6 motors for your leader arm, and similarly the other bus servo adapter and 6 motors for the follower arm. It's convenient to label them and write on each motor whether it's for the follower (`F`) or the leader (`L`), along with its ID from 1 to 6 (F1...F6 and L1...L6).
#### a. Run the script to find port
<details>
<summary><strong>Video finding port</strong></summary>
<video src="https://github.com/user-attachments/assets/4a21a14d-2046-4805-93c4-ee97a30ba33f"></video>
<video src="https://github.com/user-attachments/assets/1cc3aecf-c16d-4ff9-aec7-8c175afbbce2"></video>
</details>
To find the port for each bus servo adapter, run the utility script:
```bash
python lerobot/scripts/find_motors_bus_port.py
```
#### b. Example outputs
Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your MotorsBus and press Enter when done.
[...Disconnect leader arm and press Enter...]
The port of this MotorsBus is /dev/tty.usbmodem575E0031751
Reconnect the usb cable.
```
Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your MotorsBus and press Enter when done.
[...Disconnect follower arm and press Enter...]
The port of this MotorsBus is /dev/tty.usbmodem575E0032081
Reconnect the usb cable.
```
#### c. Troubleshooting
On Linux, you might need to give access to the USB ports by running:
```bash
sudo chmod 666 /dev/ttyACM0
sudo chmod 666 /dev/ttyACM1
```
#### d. Update config file
IMPORTANT: Now that you have your ports, update the **port** default values of [`So100RobotConfig`](../lerobot/common/robot_devices/robots/configs.py). You will find something like:
```python
@RobotConfig.register_subclass("so100")
@dataclass
class So100RobotConfig(ManipulatorRobotConfig):
calibration_dir: str = ".cache/calibration/so100"
# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
# the number of motors in your follower arms.
max_relative_target: int | None = None
leader_arms: dict[str, MotorsBusConfig] = field(
default_factory=lambda: {
"main": FeetechMotorsBusConfig(
port="/dev/tty.usbmodem58760431091", <-- UPDATE HERE
motors={
# name: (index, model)
"shoulder_pan": [1, "sts3215"],
"shoulder_lift": [2, "sts3215"],
"elbow_flex": [3, "sts3215"],
"wrist_flex": [4, "sts3215"],
"wrist_roll": [5, "sts3215"],
"gripper": [6, "sts3215"],
},
),
}
)
follower_arms: dict[str, MotorsBusConfig] = field(
default_factory=lambda: {
"main": FeetechMotorsBusConfig(
port="/dev/tty.usbmodem585A0076891", <-- UPDATE HERE
motors={
# name: (index, model)
"shoulder_pan": [1, "sts3215"],
"shoulder_lift": [2, "sts3215"],
"elbow_flex": [3, "sts3215"],
"wrist_flex": [4, "sts3215"],
"wrist_roll": [5, "sts3215"],
"gripper": [6, "sts3215"],
},
),
}
)
```
### 2. Assembling the Base
Let's begin by assembling the follower arm base.
#### a. Set IDs for all 12 motors
<details>
<summary><strong>Video configuring motor</strong></summary>
<video src="https://github.com/user-attachments/assets/ef9b3317-2e11-4858-b9d3-f0a02fb48ecf"></video>
<video src="https://github.com/user-attachments/assets/f36b5ed5-c803-4ebe-8947-b39278776a0d"></video>
</details>
Plug your first motor F1 and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate. Replace the text after `--port` with the corresponding follower control board port and run this command in cmd:
```bash
python lerobot/scripts/configure_motor.py \
--port /dev/tty.usbmodem58760432961 \
--brand feetech \
--model sts3215 \
--baudrate 1000000 \
--ID 1
```
> [!NOTE]
> These motors are currently limited. They can only take values between 0 and 4096, which corresponds to a full turn; they can't rotate beyond that. 2048 is the middle of this range, so we can move -2048 steps (180 degrees anticlockwise) or +2048 steps (180 degrees clockwise) before reaching either end of the range. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift of up to ±2048 steps (±180 degrees).
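To make the step arithmetic concrete, here is a small sketch (not part of the lerobot scripts) converting between raw steps and degrees relative to the 2048 midpoint:
```python
STEPS_PER_TURN = 4096  # raw positions live in [0, 4096), one full turn

def steps_to_degrees(steps: int) -> float:
    # Offset from the 2048 midpoint; positive = clockwise.
    return (steps - 2048) * 360.0 / STEPS_PER_TURN

def degrees_to_steps(degrees: float) -> int:
    return 2048 + int(round(degrees * STEPS_PER_TURN / 360.0))

print(steps_to_degrees(4096))   # 180.0 (end of the range)
print(degrees_to_steps(-90.0))  # 1024
```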
Then unplug your motor and plug the second motor and set its ID to 2.
```bash
python lerobot/scripts/configure_motor.py \
--port /dev/tty.usbmodem58760432961 \
--brand feetech \
--model sts3215 \
--baudrate 1000000 \
--ID 2
```
Redo the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm.
#### b. Remove the gears of the 6 leader motors
<details>
<summary><strong>Video removing gears</strong></summary>
<video src="https://github.com/user-attachments/assets/0c95b88c-5b85-413d-ba19-aee2f864f2a7"></video>
</details>
Follow the video for removing gears. You need to remove the gear for the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm.
## D. Step-by-Step Assembly Instructions
**Step 1: Clean Parts**
- Remove all support material from the 3D-printed parts.
---
### Additional Guidance
<details>
<summary><strong>Video assembling arms</strong></summary>
<video src="https://github.com/user-attachments/assets/488a39de-0189-4461-9de3-05b015f90cca"></video>
</details>
**Note:**
This video provides visual guidance for assembling the arms, but it doesn't specify when or how to do the wiring. Inserting the cables beforehand is much easier than doing it afterward. The first arm may take a bit more than 1 hour to assemble, but once you get used to it, you can assemble the second arm in under 1 hour.
---
### First Motor
**Step 2: Insert Wires**
- Insert two wires into the first motor.
<img src="../media/tutorial/img1.jpg" style="height:300px;">
**Step 3: Install in Base**
- Place the first motor into the base.
<img src="../media/tutorial/img2.jpg" style="height:300px;">
**Step 4: Secure Motor**
- Fasten the motor with 4 screws, two from the bottom and two from the top.
**Step 5: Attach Motor Holder**
- Slide over the first motor holder and fasten it using two screws (one on each side).
<img src="../media/tutorial/img4.jpg" style="height:300px;">
**Step 6: Attach Motor Horns**
- Install both motor horns, securing the top horn with a screw. Try not to move the motor position when attaching the motor horn, especially for the leader arms, where we removed the gears.
<img src="../media/tutorial/img5.jpg" style="height:300px;">
<details>
<summary><strong>Video adding motor horn</strong></summary>
<video src="https://github.com/user-attachments/assets/ef3391a4-ad05-4100-b2bd-1699bf86c969"></video>
</details>
**Step 7: Attach Shoulder Part**
- Route one wire to the back of the robot and the other to the left (towards you in the photo).
- Attach the shoulder part.
<img src="../media/tutorial/img6.jpg" style="height:300px;">
**Step 8: Secure Shoulder**
- Tighten the shoulder part with 4 screws on top and 4 on the bottom
*(access bottom holes by turning the shoulder).*
---
### Second Motor Assembly
**Step 9: Install Motor 2**
- Slide the second motor in from the top and link the wire from motor 1 to motor 2.
<img src="../media/tutorial/img8.jpg" style="height:300px;">
**Step 10: Attach Shoulder Holder**
- Add the shoulder motor holder.
- Ensure the wire from motor 1 to motor 2 goes behind the holder while the other wire is routed upward (see photo).
- This part can be tight to assemble; you can use a workbench, as in the image, or a similar setup to push the part around the motor.
<div style="display: flex;">
<img src="../media/tutorial/img9.jpg" style="height:250px;">
<img src="../media/tutorial/img10.jpg" style="height:250px;">
<img src="../media/tutorial/img12.jpg" style="height:250px;">
</div>
**Step 11: Secure Motor 2**
- Fasten the second motor with 4 screws.
**Step 12: Attach Motor Horn**
- Attach both motor horns to motor 2, again using the horn screw.
**Step 13: Attach Base**
- Install the base attachment using 2 screws.
<img src="../media/tutorial/img11.jpg" style="height:300px;">
**Step 14: Attach Upper Arm**
- Attach the upper arm with 4 screws on each side.
<img src="../media/tutorial/img13.jpg" style="height:300px;">
---
### Third Motor Assembly
**Step 15: Install Motor 3**
- Route the motor cable from motor 2 through the cable holder to motor 3, then secure motor 3 with 4 screws.
**Step 16: Attach Motor Horn**
- Attach both motor horns to motor 3 and secure one again with a horn screw.
<img src="../media/tutorial/img14.jpg" style="height:300px;">
**Step 17: Attach Forearm**
- Connect the forearm to motor 3 using 4 screws on each side.
<img src="../media/tutorial/img15.jpg" style="height:300px;">
---
### Fourth Motor Assembly
**Step 18: Install Motor 4**
- Slide in motor 4, attach the cable from motor 3, and secure the cable in its holder with a screw.
<div style="display: flex;">
<img src="../media/tutorial/img16.jpg" style="height:300px;">
<img src="../media/tutorial/img19.jpg" style="height:300px;">
</div>
**Step 19: Attach Motor Holder 4**
- Install the fourth motor holder (a tight fit). Ensure one wire is routed upward and the wire from motor 3 is routed downward (see photo).
<img src="../media/tutorial/img17.jpg" style="height:300px;">
**Step 20: Secure Motor 4 & Attach Horn**
- Fasten motor 4 with 4 screws and attach its motor horns, using a horn screw for one of them.
<img src="../media/tutorial/img18.jpg" style="height:300px;">
---
### Wrist Assembly
**Step 21: Install Motor 5**
- Insert motor 5 into the wrist holder and secure it with 2 front screws.
<img src="../media/tutorial/img20.jpg" style="height:300px;">
**Step 22: Attach Wrist**
- Connect the wire from motor 4 to motor 5, and already insert the other wire for the gripper.
- Secure the wrist to motor 4 using 4 screws on both sides.
<img src="../media/tutorial/img22.jpg" style="height:300px;">
**Step 23: Attach Wrist Horn**
- Install only one motor horn on the wrist motor and secure it with a horn screw.
<img src="../media/tutorial/img23.jpg" style="height:300px;">
---
### Follower Configuration
**Step 24: Attach Gripper**
- Attach the gripper to motor 5.
<img src="../media/tutorial/img24.jpg" style="height:300px;">
**Step 25: Install Gripper Motor**
- Insert the gripper motor, connect the motor wire from motor 5 to motor 6, and secure it with 3 screws on each side.
<img src="../media/tutorial/img25.jpg" style="height:300px;">
**Step 26: Attach Gripper Horn & Claw**
- Attach the motor horns and again use a horn screw.
- Install the gripper claw and secure it with 4 screws on both sides.
<img src="../media/tutorial/img26.jpg" style="height:300px;">
**Step 27: Mount Controller**
- Attach the motor controller on the back.
<div style="display: flex;">
<img src="../media/tutorial/img27.jpg" style="height:300px;">
<img src="../media/tutorial/img28.jpg" style="height:300px;">
</div>
*Assembly complete; proceed to Leader arm assembly.*
---
### Leader Configuration
For the leader configuration, perform **Steps 1 to 23**. Make sure that you removed the motor gears from the motors.
**Step 24: Attach Leader Holder**
- Mount the leader holder onto the wrist and secure it with a screw.
<img src="../media/tutorial/img29.jpg" style="height:300px;">
**Step 25: Attach Handle**
- Attach the handle to motor 5 using 4 screws.
<img src="../media/tutorial/img30.jpg" style="height:300px;">
**Step 26: Install Gripper Motor**
- Insert the gripper motor, secure it with 3 screws on each side, attach a motor horn using a horn screw, and connect the motor wire.
<img src="../media/tutorial/img31.jpg" style="height:300px;">
**Step 27: Attach Trigger**
- Attach the follower trigger with 4 screws.
<img src="../media/tutorial/img32.jpg" style="height:300px;">
**Step 28: Mount Controller**
- Attach the motor controller on the back.
<div style="display: flex;">
<img src="../media/tutorial/img27.jpg" style="height:300px;">
<img src="../media/tutorial/img28.jpg" style="height:300px;">
</div>
*Assembly complete; proceed to calibration.*
## E. Calibrate
Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one SO-100 robot to work on another.
#### a. Manual calibration of follower arm
> [!IMPORTANT]
> Contrary to step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724), which illustrates auto calibration, we will do manual calibration of the follower for now.
You will need to move the follower arm to these positions sequentially:
| 1. Zero position | 2. Rotated position | 3. Rest position |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| <img src="../media/so100/follower_zero.webp?raw=true" alt="SO-100 follower arm zero position" title="SO-100 follower arm zero position" style="width:100%;"> | <img src="../media/so100/follower_rotated.webp?raw=true" alt="SO-100 follower arm rotated position" title="SO-100 follower arm rotated position" style="width:100%;"> | <img src="../media/so100/follower_rest.webp?raw=true" alt="SO-100 follower arm rest position" title="SO-100 follower arm rest position" style="width:100%;"> |
Make sure both arms are connected and run this script to launch manual calibration:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=so100 \
--robot.cameras='{}' \
--control.type=calibrate \
--control.arms='["main_follower"]'
```
#### b. Manual calibration of leader arm
Follow step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
| 1. Zero position | 2. Rotated position | 3. Rest position |
| ------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| <img src="../media/so100/leader_zero.webp?raw=true" alt="SO-100 leader arm zero position" title="SO-100 leader arm zero position" style="width:100%;"> | <img src="../media/so100/leader_rotated.webp?raw=true" alt="SO-100 leader arm rotated position" title="SO-100 leader arm rotated position" style="width:100%;"> | <img src="../media/so100/leader_rest.webp?raw=true" alt="SO-100 leader arm rest position" title="SO-100 leader arm rest position" style="width:100%;"> |
Run this script to launch manual calibration:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=so100 \
--robot.cameras='{}' \
--control.type=calibrate \
--control.arms='["main_leader"]'
```
## F. Teleoperate
**Simple teleop**
Then you are ready to teleoperate your robot! Run this simple script (it won't connect to and display the cameras):
```bash
python lerobot/scripts/control_robot.py \
--robot.type=so100 \
--robot.cameras='{}' \
--control.type=teleoperate
```
#### a. Teleop with displaying cameras
Follow [this guide to setup your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset.
> **NOTE:** To visualize the data, enable `--control.display_data=true`. This streams the data using `rerun`.
```bash
python lerobot/scripts/control_robot.py \
--robot.type=so100 \
--control.type=teleoperate
```
## G. Record a dataset
Once you're familiar with teleoperation, you can record your first dataset with SO-100.
If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```
Store your Hugging Face repository name in a variable to run these commands:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```
Record 2 episodes and upload your dataset to the hub:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=so100 \
--control.type=record \
--control.fps=30 \
--control.single_task="Grasp a lego block and put it in the bin." \
--control.repo_id=${HF_USER}/so100_test \
--control.tags='["so100","tutorial"]' \
--control.warmup_time_s=5 \
--control.episode_time_s=30 \
--control.reset_time_s=30 \
--control.num_episodes=2 \
--control.push_to_hub=true
```
Note: You can resume recording by adding `--control.resume=true`.
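For instance, to append more episodes to the same dataset, rerun the same record command with the resume switch (a sketch; keep the task and timing flags from your original run):
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --control.type=record \
  --control.fps=30 \
  --control.single_task="Grasp a lego block and put it in the bin." \
  --control.repo_id=${HF_USER}/so100_test \
  --control.num_episodes=2 \
  --control.resume=true
```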
## H. Visualize a dataset
If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
```bash
echo ${HF_USER}/so100_test
```
If you didn't upload (i.e. you used `--control.push_to_hub=false`), you can also visualize the dataset locally (a window opens in the browser at `http://127.0.0.1:9090` with the visualization tool):
```bash
python lerobot/scripts/visualize_dataset_html.py \
--repo-id ${HF_USER}/so100_test \
--local-files-only 1
```
## I. Replay an episode
Now try to replay the first episode on your robot:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=so100 \
--control.type=replay \
--control.fps=30 \
--control.repo_id=${HF_USER}/so100_test \
--control.episode=0
```
## J. Train a policy
To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
python lerobot/scripts/train.py \
--dataset.repo_id=${HF_USER}/so100_test \
--policy.type=act \
--output_dir=outputs/train/act_so100_test \
--job_name=act_so100_test \
--policy.device=cuda \
--wandb.enable=true
```
Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so100_test`.
2. We provided the policy with `--policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
3. We provided `--policy.device=cuda` since we are training on a Nvidia GPU, but you could use `--policy.device=mps` to train on Apple silicon.
4. We provided `--wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
Training should take several hours. You will find checkpoints in `outputs/train/act_so100_test/checkpoints`.
To resume training from a checkpoint, below is an example command to resume from the `last` checkpoint of the `act_so100_test` policy:
```bash
python lerobot/scripts/train.py \
--config_path=outputs/train/act_so100_test/checkpoints/last/pretrained_model/train_config.json \
--resume=true
```
## K. Evaluate your policy
You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=so100 \
--control.type=record \
--control.fps=30 \
--control.single_task="Grasp a lego block and put it in the bin." \
--control.repo_id=${HF_USER}/eval_act_so100_test \
--control.tags='["tutorial"]' \
--control.warmup_time_s=5 \
--control.episode_time_s=30 \
--control.reset_time_s=30 \
--control.num_episodes=10 \
--control.push_to_hub=true \
--control.policy.path=outputs/train/act_so100_test/checkpoints/last/pretrained_model
```
As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/act_so100_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so100_test`).
2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so100_test`).
## L. More Information
See this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth guide on controlling real robots with LeRobot.
> [!TIP]
> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb) in the channel [`#so100-arm`](https://discord.com/channels/1216765309076115607/1237741463832363039).

examples/hopejr/agugu.py Normal file

@@ -0,0 +1,45 @@
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus

def degps_to_raw(degps: float) -> int:
    """Convert a signed speed in degrees/s to the raw sign-magnitude register format."""
    steps_per_deg = 4096.0 / 360.0
    speed_in_steps = abs(degps) * steps_per_deg
    speed_int = int(round(speed_in_steps))
    # Clamp the magnitude to 15 bits; bit 15 encodes the sign.
    if speed_int > 0x7FFF:
        speed_int = 0x7FFF
    if degps < 0:
        return speed_int | 0x8000
    return speed_int & 0x7FFF

def raw_to_degps(raw_speed: int) -> float:
    """Convert a raw sign-magnitude register value back to a signed speed in degrees/s."""
    steps_per_deg = 4096.0 / 360.0
    magnitude = raw_speed & 0x7FFF
    degps = magnitude / steps_per_deg
    if raw_speed & 0x8000:
        degps = -degps
    return degps

def main():
    # Instantiate the bus for a single motor on port /dev/ttyACM0.
    arm_bus = FeetechMotorsBus(
        port="/dev/ttyACM0",
        motors={"wrist_pitch": [1, "scs0009"]},
        protocol_version=1,
        group_sync_read=False,  # using individual read calls
    )
    arm_bus.connect()
    # Read the current raw motor position ("Present_Position" is in raw units).
    current_raw = arm_bus.read("Present_Position", ["wrist_pitch"])[0]
    print("Current raw position:", current_raw)
    arm_bus.write("Goal_Position", 1000)
    arm_bus.disconnect()

if __name__ == "__main__":
    main()
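# Quick sanity check of the sign-magnitude encoding above (bit 15 = sign,
# bits 0-14 = magnitude in steps/s):
#   degps_to_raw(360.0)  -> 0x1000 (4096 steps/s)
#   degps_to_raw(-360.0) -> 0x9000 (same magnitude, sign bit set)
#   raw_to_degps(0x9000) -> -360.0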

examples/hopejr/asd.png Normal file (binary image, 148 KiB, not shown)


@@ -0,0 +1,46 @@
import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from collections import deque
# Adjust this to match your actual serial port and baud rate
SERIAL_PORT = '/dev/ttyACM0' # or COM3 on Windows
BAUD_RATE = 115200
# Set up serial connection
ser = serial.Serial(SERIAL_PORT, BAUD_RATE)
# Buffers for real-time plot
buffer_len = 200
val1_buffer = deque([0]*buffer_len, maxlen=buffer_len)
val2_buffer = deque([0]*buffer_len, maxlen=buffer_len)
# Setup the plot
fig, ax = plt.subplots()
line1, = ax.plot([], [], label='Sensor 0')
line2, = ax.plot([], [], label='Sensor 1')
ax.set_ylim(0, 4096)
ax.set_xlim(0, buffer_len)
ax.legend()
def update(frame):
while ser.in_waiting:
line = ser.readline().decode('utf-8').strip()
parts = line.split()
if len(parts) >= 2:
try:
val1 = int(parts[0])
val2 = int(parts[1])
val1_buffer.append(val1)
val2_buffer.append(val2)
except ValueError:
pass # skip malformed lines
line1.set_ydata(val1_buffer)
line1.set_xdata(range(len(val1_buffer)))
line2.set_ydata(val2_buffer)
line2.set_xdata(range(len(val2_buffer)))
return line1, line2
ani = animation.FuncAnimation(fig, update, interval=50)
plt.show()


@@ -0,0 +1,64 @@
import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from collections import deque
# Config
SERIAL_PORT = '/dev/ttyACM1' # Change as needed
BAUD_RATE = 115200
BUFFER_LEN = 200
# Sensor names in order
sensor_names = [
"wrist_roll",
"wrist_pitch",
"wrist_yaw",
"elbow_flex",
"shoulder_roll",
"shoulder_yaw",
"shoulder_pitch"
]
# Initialize buffers
sensor_data = {
name: deque([0]*BUFFER_LEN, maxlen=BUFFER_LEN)
for name in sensor_names
}
# Setup plot
fig, axes = plt.subplots(len(sensor_names), 1, figsize=(8, 12), sharex=True)
fig.tight_layout(pad=3.0)
lines = {}
for i, name in enumerate(sensor_names):
axes[i].set_title(name)
axes[i].set_xlim(0, BUFFER_LEN)
axes[i].set_ylim(0, 4096)
line, = axes[i].plot([], [], label=name)
axes[i].legend()
lines[name] = line
# Connect to serial
ser = serial.Serial(SERIAL_PORT, BAUD_RATE)
# Update function
def update(frame):
while ser.in_waiting:
line = ser.readline().decode().strip()
parts = line.split()
if len(parts) != 7:
continue
try:
values = list(map(int, parts))
except ValueError:
continue
for i, name in enumerate(sensor_names):
sensor_data[name].append(values[i])
for name in sensor_names:
x = range(len(sensor_data[name]))
lines[name].set_data(x, sensor_data[name])
return lines.values()
# Animate
ani = animation.FuncAnimation(fig, update, interval=50, blit=False)
plt.show()


@@ -0,0 +1,161 @@
import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from collections import deque
# Adjust this to match your actual serial port and baud rate
SERIAL_PORT = '/dev/ttyACM0' # or COM3 on Windows
BAUD_RATE = 115200
# Set up serial connection
ser = serial.Serial(SERIAL_PORT, BAUD_RATE)
# How many data points to keep in the scrolling buffer
buffer_len = 200
# -------------------------------------------------------------------
# 1) Sensor buffers for existing sensors + new wrist_pitch, wrist_yaw
# -------------------------------------------------------------------
sensor_buffers = {
'wrist_roll': {
'val1': deque([0]*buffer_len, maxlen=buffer_len),
'val2': deque([0]*buffer_len, maxlen=buffer_len)
},
'elbow_pitch': {
'val1': deque([0]*buffer_len, maxlen=buffer_len),
'val2': deque([0]*buffer_len, maxlen=buffer_len)
},
'shoulder_pitch': {
'val1': deque([0]*buffer_len, maxlen=buffer_len),
'val2': deque([0]*buffer_len, maxlen=buffer_len)
},
'shoulder_yaw': {
'val1': deque([0]*buffer_len, maxlen=buffer_len),
'val2': deque([0]*buffer_len, maxlen=buffer_len)
},
'shoulder_roll': {
'val1': deque([0]*buffer_len, maxlen=buffer_len),
'val2': deque([0]*buffer_len, maxlen=buffer_len)
},
# --- New single-valued sensors ---
'wrist_pitch': {
'val1': deque([0]*buffer_len, maxlen=buffer_len) # Only one line
},
'wrist_yaw': {
'val1': deque([0]*buffer_len, maxlen=buffer_len) # Only one line
},
}
# -------------------------------------------------------------------
# 2) Figure with 7 subplots (was 5). We keep the original 5 + 2 new.
# -------------------------------------------------------------------
fig, axes = plt.subplots(7, 1, figsize=(8, 14), sharex=True)
fig.tight_layout(pad=3.0)
# We'll store line references in a dict so we can update them in update().
lines = {}
# -------------------------------------------------------------------
# 3) Define each subplot, including new ones at the end.
# -------------------------------------------------------------------
subplot_info = [
('wrist_roll', 'Wrist Roll (2,3)', axes[0]),
('elbow_pitch', 'Elbow Pitch (0,1)', axes[1]),
('shoulder_pitch', 'Shoulder Pitch (10,11)', axes[2]),
('shoulder_yaw', 'Shoulder Yaw (12,13)', axes[3]),
('shoulder_roll', 'Shoulder Roll (14,15)', axes[4]),
('wrist_pitch', 'Wrist Pitch (0)', axes[5]), # new
('wrist_yaw', 'Wrist Yaw (1)', axes[6]), # new
]
# Set up each subplot
for (sensor_name, label, ax) in subplot_info:
ax.set_title(label)
ax.set_xlim(0, buffer_len)
ax.set_ylim(0, 4096) # adjust if needed
# For existing sensors, plot 2 lines (val1, val2)
# For the new single-line sensors, plot just 1 line
if sensor_name in ['wrist_pitch', 'wrist_yaw']:
# Single-valued
line, = ax.plot([], [], label=f"{sensor_name}")
lines[sensor_name] = line
else:
# Pair of values
line1, = ax.plot([], [], label=f"{sensor_name} - val1")
line2, = ax.plot([], [], label=f"{sensor_name} - val2")
lines[sensor_name] = [line1, line2]
ax.legend()
def update(frame):
# Read all available lines from the serial buffer
while ser.in_waiting:
raw_line = ser.readline().decode('utf-8').strip()
parts = raw_line.split()
        # We expect at least 16 values if all sensors are present
        if len(parts) < 16:
            continue
try:
values = list(map(int, parts))
except ValueError:
# If there's a parsing error, skip this line
continue
# Original code: extract the relevant values and append to the correct buffer
sensor_buffers['elbow_pitch']['val1'].append(values[13])
sensor_buffers['elbow_pitch']['val2'].append(values[13])
sensor_buffers['wrist_roll']['val1'].append(values[3])
sensor_buffers['wrist_roll']['val2'].append(values[3])
sensor_buffers['shoulder_pitch']['val1'].append(values[14])
sensor_buffers['shoulder_pitch']['val2'].append(values[14])
sensor_buffers['shoulder_yaw']['val1'].append(values[8])
sensor_buffers['shoulder_yaw']['val2'].append(values[8])
sensor_buffers['shoulder_roll']['val1'].append(values[10])
sensor_buffers['shoulder_roll']['val2'].append(values[10])
# -------------------------------------------------------------------
# 4) New code: also read wrist_yaw (index 0) and wrist_pitch (index 1)
# -------------------------------------------------------------------
sensor_buffers['wrist_yaw']['val1'].append(values[0])
sensor_buffers['wrist_pitch']['val1'].append(values[1])
# Update each line's data in each subplot
all_lines = []
for (sensor_name, _, ax) in subplot_info:
# x-values are just the index range of the buffer for val1
x_data = range(len(sensor_buffers[sensor_name]['val1']))
# If this sensor has two lines
if isinstance(lines[sensor_name], list):
# First line
lines[sensor_name][0].set_data(
x_data,
sensor_buffers[sensor_name]['val1']
)
# Second line
lines[sensor_name][1].set_data(
x_data,
sensor_buffers[sensor_name]['val2']
)
all_lines.extend(lines[sensor_name])
else:
# Single line only (wrist_pitch, wrist_yaw)
lines[sensor_name].set_data(
x_data,
sensor_buffers[sensor_name]['val1']
)
all_lines.append(lines[sensor_name])
return all_lines
# Create the animation
ani = animation.FuncAnimation(fig, update, interval=50, blit=False)
plt.show()

examples/hopejr/follower.py Normal file

@@ -0,0 +1,186 @@
from lerobot.common.robot_devices.motors.feetech import (
CalibrationMode,
FeetechMotorsBus,
)
import yaml
class HopeJuniorRobot:
def __init__(self):
self.arm_port = "/dev/tty.usbserial-140"
self.hand_port = "/dev/tty.usbmodem58760436961"
self.arm_bus = FeetechMotorsBus(
port = self.arm_port,
motors={
# "motor1": (1, "sts3250"),
# "motor2": (2, "sts3250"),
# "motor3": (3, "sts3250"),
#"shoulder_pitch": [1, "sts3215"],
"shoulder_pitch": [1, "sm8512bl"],
"shoulder_yaw": [2, "sts3250"], # TODO: sts3250
"shoulder_roll": [3, "sts3250"], # TODO: sts3250
"elbow_flex": [4, "sts3250"],
"wrist_roll": [5, "sts3215"],
"wrist_yaw": [6, "sts3215"],
"wrist_pitch": [7, "sts3215"],
},
protocol_version=0,
)
self.hand_bus = FeetechMotorsBus(
port=self.hand_port,
motors = {
# Thumb
"thumb_basel_rotation": [1, "scs0009"],
"thumb_mcp": [3, "scs0009"],
"thumb_pip": [4, "scs0009"],
"thumb_dip": [13, "scs0009"],
# Index
"index_thumb_side": [5, "scs0009"],
"index_pinky_side": [6, "scs0009"],
"index_flexor": [16, "scs0009"],
# Middle
"middle_thumb_side": [8, "scs0009"],
"middle_pinky_side": [9, "scs0009"],
"middle_flexor": [2, "scs0009"],
# Ring
"ring_thumb_side": [11, "scs0009"],
"ring_pinky_side": [12, "scs0009"],
"ring_flexor": [7, "scs0009"],
# Pinky
"pinky_thumb_side": [14, "scs0009"],
"pinky_pinky_side": [15, "scs0009"],
"pinky_flexor": [10, "scs0009"],
},
            protocol_version=1,
group_sync_read=False,
)
self.arm_calib_dict = self.get_arm_calibration()
self.hand_calib_dict = self.get_hand_calibration()
def apply_arm_config(self, config_file):
with open(config_file, "r") as file:
config = yaml.safe_load(file)
for param, value in config.get("robot", {}).get("arm_bus", {}).items():
self.arm_bus.write(param, value)
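    # Expected YAML layout for apply_arm_config (the register name and value here
    # are illustrative, e.g. "Acceleration" as used in leader.py; adjust to your
    # tuned settings):
    #
    # robot:
    #   arm_bus:
    #     Acceleration: 50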
    def apply_hand_config(self, config_file):
        with open(config_file, "r") as file:
            config = yaml.safe_load(file)
        for param, value in config.get("robot", {}).get("hand_bus", {}).items():
            self.hand_bus.write(param, value)
def get_hand_calibration(self):
homing_offset = [0] * len(self.hand_bus.motor_names)
drive_mode = [0] * len(self.hand_bus.motor_names)
start_pos = [
750, # thumb_basel_rotation
100, # thumb_mcp
700, # thumb_pip
100, # thumb_dip
800, # index_thumb_side
950, # index_pinky_side
0, # index_flexor
250, # middle_thumb_side
850, # middle_pinky_side
0, # middle_flexor
850, # ring_thumb_side
900, # ring_pinky_side
0, # ring_flexor
            0,   # pinky_thumb_side
950, # pinky_pinky_side
0, # pinky_flexor
]
end_pos = [
start_pos[0] - 550, # thumb_basel_rotation
start_pos[1] + 400, # thumb_mcp
start_pos[2] + 300, # thumb_pip
start_pos[3] + 200, # thumb_dip
start_pos[4] - 700, # index_thumb_side
start_pos[5] - 300, # index_pinky_side
start_pos[6] + 600, # index_flexor
start_pos[7] + 700, # middle_thumb_side
start_pos[8] - 400, # middle_pinky_side
start_pos[9] + 600, # middle_flexor
start_pos[10] - 600, # ring_thumb_side
start_pos[11] - 400, # ring_pinky_side
start_pos[12] + 600, # ring_flexor
start_pos[13] + 400, # pinky_thumb_side
start_pos[14] - 450, # pinky_pinky_side
start_pos[15] + 600, # pinky_flexor
]
calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)
calib_dict = {
"homing_offset": homing_offset,
"drive_mode": drive_mode,
"start_pos": start_pos,
"end_pos": end_pos,
"calib_mode": calib_modes,
"motor_names": self.hand_bus.motor_names,
}
return calib_dict
def get_arm_calibration(self):
homing_offset = [0] * len(self.arm_bus.motor_names)
drive_mode = [0] * len(self.arm_bus.motor_names)
start_pos = [
1800, # shoulder_up
2800, # shoulder_forward
1800, # shoulder_roll
1200, # bend_elbow
700, # wrist_roll
1850, # wrist_yaw
1700, # wrist_pitch
]
end_pos = [
2800, # shoulder_up
3150, # shoulder_forward
400, #shoulder_roll
2300, # bend_elbow
2300, # wrist_roll
2150, # wrist_yaw
2300, # wrist_pitch
]
calib_modes = [CalibrationMode.LINEAR.name] * len(self.arm_bus.motor_names)
calib_dict = {
"homing_offset": homing_offset,
"drive_mode": drive_mode,
"start_pos": start_pos,
"end_pos": end_pos,
"calib_mode": calib_modes,
"motor_names": self.arm_bus.motor_names,
}
return calib_dict
def connect_arm(self):
self.arm_bus.connect()
def connect_hand(self):
self.hand_bus.connect()
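    # Usage sketch (ports are hard-coded in __init__; adjust for your setup):
    #   robot = HopeJuniorRobot()
    #   robot.connect_arm()
    #   robot.connect_hand()
    #   robot.arm_bus.write("Goal_Position", [1500], ["elbow_flex"])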

examples/hopejr/leader.py Normal file

@@ -0,0 +1,730 @@
from lerobot.common.robot_devices.motors.feetech import (
CalibrationMode,
FeetechMotorsBus,
)
import serial
import threading
import time
from typing import Callable
import pickle
import cv2
import numpy as np
from collections import deque
import json
import os
LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200
class HomonculusArm:
def __init__(self, serial_port: str = "/dev/ttyACM1", baud_rate: int = 115200):
self.serial_port = serial_port
        self.baud_rate = baud_rate
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
# Number of past values to keep in memory
self.buffer_size = 10
# Initialize a buffer (deque) for each joint
self.joint_buffer = {
"wrist_roll": deque(maxlen=self.buffer_size),
"wrist_pitch": deque(maxlen=self.buffer_size),
"wrist_yaw": deque(maxlen=self.buffer_size),
"elbow_flex": deque(maxlen=self.buffer_size),
"shoulder_roll": deque(maxlen=self.buffer_size),
"shoulder_yaw": deque(maxlen=self.buffer_size),
"shoulder_pitch": deque(maxlen=self.buffer_size),
}
# Start the reading thread
self.thread = threading.Thread(target=self.async_read, daemon=True)
self.thread.start()
# Last read dictionary
self.last_d = {
"wrist_roll": 100,
"wrist_pitch": 100,
"wrist_yaw": 100,
"elbow_flex": 100,
"shoulder_roll": 100,
"shoulder_yaw": 100,
"shoulder_pitch": 100,
}
self.calibration = None
# For adaptive EMA, we store a "previous smoothed" state per joint
self.adaptive_ema_state = {
"wrist_roll": None,
"wrist_pitch": None,
"wrist_yaw": None,
"elbow_flex": None,
"shoulder_roll": None,
"shoulder_yaw": None,
"shoulder_pitch": None,
}
self.kalman_state = {
joint: {"x": None, "P": None} for joint in self.joint_buffer.keys()
}
@property
def joint_names(self):
return list(self.last_d.keys())
def read(self, motor_names: list[str] | None = None):
"""
Return the most recent (single) values from self.last_d,
optionally applying calibration.
"""
if motor_names is None:
motor_names = self.joint_names
# Get raw (last) values
values = np.array([self.last_d[k] for k in motor_names])
#print(motor_names)
print(values)
# Apply calibration if available
if self.calibration is not None:
values = self.apply_calibration(values, motor_names)
print(values)
return values
def read_running_average(self, motor_names: list[str] | None = None, linearize=False):
"""
Return the AVERAGE of the most recent self.buffer_size (or fewer, if not enough data) readings
for each joint, optionally applying calibration.
"""
if motor_names is None:
motor_names = self.joint_names
# Gather averaged readings from buffers
smoothed_vals = []
for name in motor_names:
buf = self.joint_buffer[name]
if len(buf) == 0:
# If no data has been read yet, fall back to last_d
smoothed_vals.append(self.last_d[name])
else:
# Otherwise, average over the existing buffer
smoothed_vals.append(np.mean(buf))
smoothed_vals = np.array(smoothed_vals, dtype=np.float32)
# Apply calibration if available
        if self.calibration is not None:
            if linearize:
                # Experimental arcsin linearization, currently only applied to elbow_flex.
                for i, joint_name in enumerate(motor_names):
                    if joint_name != "elbow_flex":
                        continue
                    # Re-use the same raw min / max from the calibration
                    calib_idx = self.calibration["motor_names"].index(joint_name)
                    min_reading = self.calibration["start_pos"][calib_idx]
                    max_reading = self.calibration["end_pos"][calib_idx]
                    b_value = smoothed_vals[i]
                    try:
                        smoothed_vals[i] = int(
                            min_reading
                            + (max_reading - min_reading)
                            * np.arcsin((b_value - min_reading) / (max_reading - min_reading))
                            / (np.pi / 2)
                        )
                    except (ValueError, OverflowError):
                        # arcsin argument out of [-1, 1] (reading outside the calibration bounds)
                        print(f"arcsin linearization failed for {joint_name}: {b_value}")
            smoothed_vals = self.apply_calibration(smoothed_vals, motor_names)
return smoothed_vals
def read_kalman_filter(
self,
Q: float = 1.0,
R: float = 100.0,
motor_names: list[str] | None = None
) -> np.ndarray:
"""
Return a Kalman-filtered reading for each requested joint.
We store a separate Kalman filter (x, P) per joint. For each new measurement Z:
1) Predict:
x_pred = x (assuming no motion model)
P_pred = P + Q
2) Update:
K = P_pred / (P_pred + R)
x = x_pred + K * (Z - x_pred)
P = (1 - K) * P_pred
:param Q: Process noise. Larger Q means the estimate can change more freely.
:param R: Measurement noise. Larger R means we trust our sensor less.
:param motor_names: If not specified, all joints are filtered.
:return: Kalman-filtered positions as a numpy array.
"""
if motor_names is None:
motor_names = self.joint_names
current_vals = np.array([self.last_d[name] for name in motor_names], dtype=np.float32)
filtered_vals = np.zeros_like(current_vals)
for i, name in enumerate(motor_names):
# Retrieve the filter state for this joint
x = self.kalman_state[name]["x"]
P = self.kalman_state[name]["P"]
Z = current_vals[i]
# If this is the first reading, initialize
if x is None or P is None:
x = Z
P = 1.0 # or some large initial uncertainty
# 1) Predict step
x_pred = x # no velocity model, so x_pred = x
P_pred = P + Q
# 2) Update step
K = P_pred / (P_pred + R) # Kalman gain
x_new = x_pred + K * (Z - x_pred) # new state estimate
P_new = (1 - K) * P_pred # new covariance
# Save back
self.kalman_state[name]["x"] = x_new
self.kalman_state[name]["P"] = P_new
filtered_vals[i] = x_new
if self.calibration is not None:
filtered_vals = self.apply_calibration(filtered_vals, motor_names)
return filtered_vals
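    # Numeric sketch of the scalar update above: with Q=1, R=100 and a constant
    # measurement Z=10, starting from x=0, P=1:
    #   step 1: P_pred=2.000, K≈0.0196, x≈0.196, P≈1.961
    #   step 2: P_pred≈2.961, K≈0.0288, x≈0.478
    # The small gain K means the filter trusts each noisy reading only a little,
    # so the estimate moves slowly but smoothly.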
def async_read(self):
"""
Continuously read from the serial buffer in its own thread,
store into `self.last_d` and also append to the rolling buffer (joint_buffer).
"""
while True:
if self.serial.in_waiting > 0:
self.serial.flush()
vals = self.serial.readline().decode("utf-8").strip()
vals = vals.split(" ")
                if len(vals) != 7:
                    continue
                try:
                    vals = [int(val) for val in vals]
                except ValueError:
                    # Malformed line (e.g. a partial read): skip it
                    continue
d = {
"wrist_roll": vals[0],
"wrist_yaw": vals[1],
"wrist_pitch": vals[2],
"elbow_flex": vals[3],
"shoulder_roll": vals[4],
"shoulder_yaw": vals[5],
"shoulder_pitch": vals[6],
}
# Update the last_d dictionary
self.last_d = d
# Also push these new values into the rolling buffers
for joint_name, joint_val in d.items():
self.joint_buffer[joint_name].append(joint_val)
# Optional: short sleep to avoid busy-loop
# time.sleep(0.001)
def run_calibration(self, robot):
robot.arm_bus.write("Acceleration", 50)
n_joints = len(self.joint_names)
max_open_all = np.zeros(n_joints, dtype=np.float32)
min_open_all = np.zeros(n_joints, dtype=np.float32)
max_closed_all = np.zeros(n_joints, dtype=np.float32)
min_closed_all = np.zeros(n_joints, dtype=np.float32)
for i, jname in enumerate(self.joint_names):
print(f"\n--- Calibrating joint '{jname}' ---")
joint_idx = robot.arm_calib_dict["motor_names"].index(jname)
open_val = robot.arm_calib_dict["start_pos"][joint_idx]
print(f"Commanding {jname} to OPEN position {open_val}...")
robot.arm_bus.write("Goal_Position", [open_val], [jname])
input("Physically verify or adjust the joint. Press Enter when ready to capture...")
open_pos_list = []
for _ in range(100):
all_joints_vals = self.read() # read entire arm
open_pos_list.append(all_joints_vals[i]) # store only this joint
time.sleep(0.01)
# Convert to numpy and track min/max
open_array = np.array(open_pos_list, dtype=np.float32)
max_open_all[i] = open_array.max()
min_open_all[i] = open_array.min()
            closed_val = robot.arm_calib_dict["end_pos"][joint_idx]
print(f"Commanding {jname} to CLOSED position {closed_val}...")
robot.arm_bus.write("Goal_Position", [closed_val], [jname])
input("Physically verify or adjust the joint. Press Enter when ready to capture...")
closed_pos_list = []
for _ in range(100):
all_joints_vals = self.read()
closed_pos_list.append(all_joints_vals[i])
time.sleep(0.01)
closed_array = np.array(closed_pos_list, dtype=np.float32)
# Some thresholding for closed positions
#closed_array[closed_array < 1000] = 60000
max_closed_all[i] = closed_array.max()
min_closed_all[i] = closed_array.min()
robot.arm_bus.write("Goal_Position", [int((closed_val+open_val)/2)], [jname])
open_pos = np.maximum(max_open_all, max_closed_all)
closed_pos = np.minimum(min_open_all, min_closed_all)
for i, jname in enumerate(self.joint_names):
if jname not in ["wrist_pitch", "shoulder_pitch"]:
                # Swap open/closed for all joints except wrist_pitch and shoulder_pitch
tmp_pos = open_pos[i]
open_pos[i] = closed_pos[i]
closed_pos[i] = tmp_pos
# Debug prints
print("\nFinal open/closed arrays after any swaps/inversions:")
print(f"open_pos={open_pos}")
print(f"closed_pos={closed_pos}")
homing_offset = [0] * n_joints
drive_mode = [0] * n_joints
calib_modes = [CalibrationMode.LINEAR.name] * n_joints
calib_dict = {
"homing_offset": homing_offset,
"drive_mode": drive_mode,
"start_pos": open_pos,
"end_pos": closed_pos,
"calib_mode": calib_modes,
"motor_names": self.joint_names,
}
file_path = "examples/hopejr/settings/arm_calib.pkl"
if not os.path.exists(file_path):
with open(file_path, "wb") as f:
pickle.dump(calib_dict, f)
print(f"Dictionary saved to {file_path}")
self.set_calibration(calib_dict)
def set_calibration(self, calibration: dict[str, list]):
self.calibration = calibration
def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
"""
Example calibration that linearly maps [start_pos, end_pos] to [0,100].
Extend or modify for your needs.
"""
if motor_names is None:
motor_names = self.joint_names
values = values.astype(np.float32)
for i, name in enumerate(motor_names):
calib_idx = self.calibration["motor_names"].index(name)
calib_mode = self.calibration["calib_mode"][calib_idx]
if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
start_pos = self.calibration["start_pos"][calib_idx]
end_pos = self.calibration["end_pos"][calib_idx]
# Rescale the present position to [0, 100]
values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
# Check boundaries
if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
# If you want to handle out-of-range differently:
# raise JointOutOfRangeError(msg)
msg = (
f"Wrong motor position range detected for {name}. "
f"Value = {values[i]} %, expected within [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}]"
)
print(msg)
return values
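    # Worked example of the LINEAR rescale above: with start_pos=1000 and
    # end_pos=3000, a raw reading of 1500 maps to
    # (1500 - 1000) / (3000 - 1000) * 100 = 25.0 %.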
class HomonculusGlove:
def __init__(self, serial_port: str = "/dev/ttyACM1", baud_rate: int = 115200):
self.serial_port = serial_port
self.baud_rate = baud_rate
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
# Number of past values to keep in memory
self.buffer_size = 10
# Initialize a buffer (deque) for each joint
self.joint_buffer = {
"thumb_0": deque(maxlen=self.buffer_size),
"thumb_1": deque(maxlen=self.buffer_size),
"thumb_2": deque(maxlen=self.buffer_size),
"thumb_3": deque(maxlen=self.buffer_size),
"index_0": deque(maxlen=self.buffer_size),
"index_1": deque(maxlen=self.buffer_size),
"index_2": deque(maxlen=self.buffer_size),
"middle_0": deque(maxlen=self.buffer_size),
"middle_1": deque(maxlen=self.buffer_size),
"middle_2": deque(maxlen=self.buffer_size),
"ring_0": deque(maxlen=self.buffer_size),
"ring_1": deque(maxlen=self.buffer_size),
"ring_2": deque(maxlen=self.buffer_size),
"pinky_0": deque(maxlen=self.buffer_size),
"pinky_1": deque(maxlen=self.buffer_size),
"pinky_2": deque(maxlen=self.buffer_size),
"battery_voltage": deque(maxlen=self.buffer_size),
}
# Start the reading thread
self.thread = threading.Thread(target=self.async_read, daemon=True)
self.thread.start()
# Last read dictionary
self.last_d = {
"thumb_0": 100,
"thumb_1": 100,
"thumb_2": 100,
"thumb_3": 100,
"index_0": 100,
"index_1": 100,
"index_2": 100,
"middle_0": 100,
"middle_1": 100,
"middle_2": 100,
"ring_0": 100,
"ring_1": 100,
"ring_2": 100,
"pinky_0": 100,
"pinky_1": 100,
"pinky_2": 100,
"battery_voltage": 100,
}
self.calibration = None
@property
def joint_names(self):
return list(self.last_d.keys())
def read(self, motor_names: list[str] | None = None):
"""
Return the most recent (single) values from self.last_d,
optionally applying calibration.
"""
if motor_names is None:
motor_names = self.joint_names
# Get raw (last) values
values = np.array([self.last_d[k] for k in motor_names])
print(values)
# Apply calibration if available
if self.calibration is not None:
values = self.apply_calibration(values, motor_names)
print(values)
return values
def read_running_average(self, motor_names: list[str] | None = None, linearize=False):
"""
Return the AVERAGE of the most recent self.buffer_size (or fewer, if not enough data) readings
for each joint, optionally applying calibration.
"""
if motor_names is None:
motor_names = self.joint_names
# Gather averaged readings from buffers
smoothed_vals = []
for name in motor_names:
buf = self.joint_buffer[name]
if len(buf) == 0:
# If no data has been read yet, fall back to last_d
smoothed_vals.append(self.last_d[name])
else:
# Otherwise, average over the existing buffer
smoothed_vals.append(np.mean(buf))
smoothed_vals = np.array(smoothed_vals, dtype=np.float32)
# Apply calibration if available
if self.calibration is not None:
smoothed_vals = self.apply_calibration(smoothed_vals, motor_names)
return smoothed_vals
def async_read(self):
"""
Continuously read from the serial buffer in its own thread,
store into `self.last_d` and also append to the rolling buffer (joint_buffer).
"""
while True:
if self.serial.in_waiting > 0:
self.serial.flush()
vals = self.serial.readline().decode("utf-8").strip()
vals = vals.split(" ")
if len(vals) != 17:
continue
vals = [int(val) for val in vals]
d = {
"thumb_0": vals[0],
"thumb_1": vals[1],
"thumb_2": vals[2],
"thumb_3": vals[3],
"index_0": vals[4],
"index_1": vals[5],
"index_2": vals[6],
"middle_0": vals[7],
"middle_1": vals[8],
"middle_2": vals[9],
"ring_0": vals[10],
"ring_1": vals[11],
"ring_2": vals[12],
"pinky_0": vals[13],
"pinky_1": vals[14],
"pinky_2": vals[15],
"battery_voltage": vals[16],
}
# Update the last_d dictionary
self.last_d = d
# Also push these new values into the rolling buffers
for joint_name, joint_val in d.items():
self.joint_buffer[joint_name].append(joint_val)
def run_calibration(self):
print("\nMove arm to open position")
input("Press Enter to continue...")
open_pos_list = []
for _ in range(100):
open_pos = self.read()
open_pos_list.append(open_pos)
time.sleep(0.01)
open_pos = np.array(open_pos_list)
max_open_pos = open_pos.max(axis=0)
min_open_pos = open_pos.min(axis=0)
print(f"{max_open_pos=}")
print(f"{min_open_pos=}")
print("\nMove arm to closed position")
input("Press Enter to continue...")
closed_pos_list = []
for _ in range(100):
closed_pos = self.read()
closed_pos_list.append(closed_pos)
time.sleep(0.01)
closed_pos = np.array(closed_pos_list)
max_closed_pos = closed_pos.max(axis=0)
closed_pos[closed_pos < 1000] = 60000
min_closed_pos = closed_pos.min(axis=0)
print(f"{max_closed_pos=}")
print(f"{min_closed_pos=}")
open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)
# INVERSION: these joints read backwards (raw value decreases as the
# joint closes), so swap their open/closed endpoints.
for i, jname in enumerate(self.joint_names):
if jname in [
"thumb_0",
"thumb_3",
"index_2",
"middle_2",
"ring_2",
"pinky_2",
"index_0",
]:
tmp_pos = open_pos[i]
open_pos[i] = closed_pos[i]
closed_pos[i] = tmp_pos
print()
print(f"{open_pos=}")
print(f"{closed_pos=}")
homing_offset = [0] * len(self.joint_names)
drive_mode = [0] * len(self.joint_names)
calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)
calib_dict = {
"homing_offset": homing_offset,
"drive_mode": drive_mode,
"start_pos": open_pos,
"end_pos": closed_pos,
"calib_mode": calib_modes,
"motor_names": self.joint_names,
}
file_path = "examples/hopejr/settings/hand_calib.pkl"
if not os.path.exists(file_path):
with open(file_path, "wb") as f:
pickle.dump(calib_dict, f)
print(f"Dictionary saved to {file_path}")
# return calib_dict
self.set_calibration(calib_dict)
def set_calibration(self, calibration: dict[str, list]):
self.calibration = calibration
def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
"""Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
a "zero position" at 0 degree.
Note: We say "nominal degree range" since the motors can take values outside this range. For instance, 190 degrees, if the motor
rotate more than a half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.
Joints values are original in [0, 2**32[ (unsigned int32). Each motor are expected to complete a full rotation
when given a goal position that is + or - their resolution. For instance, feetech xl330-m077 have a resolution of 4096, and
at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between each motor.
To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
in the centered nominal degree range ]-180, 180[.
"""
if motor_names is None:
motor_names = self.motor_names
# Convert from unsigned int32 original range [0, 2**32] to signed float32 range
values = values.astype(np.float32)
for i, name in enumerate(motor_names):
calib_idx = self.calibration["motor_names"].index(name)
calib_mode = self.calibration["calib_mode"][calib_idx]
if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
start_pos = self.calibration["start_pos"][calib_idx]
end_pos = self.calibration["end_pos"][calib_idx]
# Rescale the present position to a nominal range [0, 100] %,
# useful for joints with linear motions like Aloha gripper
values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
values[i] = end_pos
else:
msg = (
f"Wrong motor position range detected for {name}. "
f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
f"but present value is {values[i]} %. "
"This might be due to a cable connection issue creating an artificial jump in motor values. "
"You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
)
print(msg)
# raise JointOutOfRangeError(msg)
return values
# def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
# """Inverse of `apply_calibration`."""
# if motor_names is None:
# motor_names = self.motor_names
# for i, name in enumerate(motor_names):
# calib_idx = self.calibration["motor_names"].index(name)
# calib_mode = self.calibration["calib_mode"][calib_idx]
# if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
# start_pos = self.calibration["start_pos"][calib_idx]
# end_pos = self.calibration["end_pos"][calib_idx]
# # Convert from nominal linear range of [0, 100] % to
# # actual motor range of values which can be arbitrary.
# values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
# values = np.round(values).astype(np.int32)
# return values
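For reference, the LINEAR calibration used above reduces to a single affine map between the recorded open/closed endpoints and a [0, 100] % range; the commented-out `revert_calibration` is its inverse. A standalone sketch of both directions (the endpoint numbers are made up for illustration):

```python
import numpy as np

def raw_to_percent(raw, start_pos, end_pos):
    # Affine rescale: start_pos -> 0 %, end_pos -> 100 %
    return (raw - start_pos) / (end_pos - start_pos) * 100

def percent_to_raw(pct, start_pos, end_pos):
    # Inverse map, rounded back to integer motor units
    return np.round(pct / 100 * (end_pos - start_pos) + start_pos).astype(np.int32)

start = np.array([1500.0, 2100.0])  # hypothetical open endpoints
end = np.array([2500.0, 1100.0])    # hypothetical closed endpoints (second joint inverted)
print(raw_to_percent(np.array([2000.0, 1600.0]), start, end))  # -> [50. 50.]
print(percent_to_raw(np.array([50.0, 50.0]), start, end))      # -> [2000 1600]
```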
class EncoderReader:
def __init__(self, serial_port="/dev/ttyUSB1", baud_rate=115200):
self.serial_port = serial_port
self.baud_rate = baud_rate
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
# Store the latest encoder reading (set before the reader thread starts
# so read() never races the initialization)
self.last_d = {"encoder": 500}
# Start a background thread to continuously read from the serial port
self.thread = threading.Thread(target=self.async_read, daemon=True)
self.thread.start()
def async_read(self):
while True:
# Read one line from serial
line = self.serial.readline().decode("utf-8").strip()
if line:
try:
val = int(line) # Parse the incoming line as integer
self.last_d["encoder"] = val
except ValueError:
# If we couldn't parse it as an integer, just skip
pass
def read(self):
"""
Returns the last encoder value that was read.
"""
return self.last_d["encoder"]
class Tac_Man:
def __init__(self, serial_port="/dev/ttyUSB1", baud_rate=115200):
self.serial_port = serial_port
self.baud_rate = baud_rate
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
# Store the latest readings (set before the reader thread starts)
self.last_d = [0, 0, 0]  # Default values for three readings
# Start a background thread to continuously read from the serial port
self.thread = threading.Thread(target=self.async_read, daemon=True)
self.thread.start()
def async_read(self):
while True:
# Read one line from serial
line = self.serial.readline().decode("utf-8").strip()
if line:
try:
# Parse the incoming line as three comma-separated integers
values = [int(val) for val in line.split(",")]
if len(values) == 3: # Ensure we have exactly three values
self.last_d = values
except ValueError:
# If parsing fails, skip this line
pass
def read(self):
"""
Returns the last encoder values that were read as a list of three integers.
"""
return self.last_d
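A minimal usage sketch for these two reader classes (the port names are assumptions; adjust to whatever your boards enumerate as):

```python
import time

encoder = EncoderReader(serial_port="/dev/ttyUSB0", baud_rate=115200)
tactile = Tac_Man(serial_port="/dev/ttyUSB1", baud_rate=115200)
for _ in range(100):
    # Both read() calls return instantly from the background threads' last values
    print("encoder:", encoder.read(), "| tactile:", tactile.read())
    time.sleep(0.05)
```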

111
examples/hopejr/notes Normal file
View File

@@ -0,0 +1,111 @@
test and test4
installed serial and opencv
after pip install -e .
pip install -e ".[feetech]"
robot.hand_bus.read("Present_Position")
array([ 349, 799, 1000, 1004, 508, 503, 673, 608, 791, 390, 552,
506, 600, 565, 428, 379], dtype=int32)
robot.hand_bus.write("Goal_Position",[349,799,500,500,508,503,673,608,791,390,552,506,600,565,428,379])
robot.arm_bus.write("Goal_Position", [1825, 2045, 2010, 2035, 1414, 1800, 1615])
robot.arm_bus.read("Present_Position")
robot.arm_bus.write("Goal_Position", [1500], ["elbow_flex"])
robot.arm_bus.write("Goal_Position", [2000], ["wrist_yaw"])
ranges: [600-2300, 1500-2300, 1300-2800, 1000-2500, 600-2800, 400-1700, 1300-2300]
shoulder_up,
shoulder forward,
shoulder yaw,
elbow_flex
wrist_yaw,
wrist_pitch,
wrist_roll
COM18
C:/Users/Lenovo/AppData/Local/Programs/Python/Python310/python.exe c:/Users/Lenovo/Documents/HuggingFace/lerobot/examples/test4.py
wrist pitch is fucked
so the wrist motor was fucked
and we didn't know which one it was, because
if the chain has an issue we don't know how to locate which motor is at fault (cables are hard to remove)
to calibrate:
python lerobot/scripts/configure_motor.py \
--port /dev/ttyACM1 \
--brand feetech \
--model sts3250 \
--baudrate 1000000 \
--ID 2
python lerobot/scripts/configure_motor.py \
--port /dev/ttyACM0 \
--brand feetech \
--model sm8512bl \
--baudrate 115200 \
--ID 1
python lerobot/scripts/configure_motor.py \
--port /dev/ttyACM1 \
--brand feetech \
--model scs0009 \
--baudrate 1000000 \
--ID 30
why are the motors beeping?
#interpolate between start and end pos
robot.arm_bus.write("Goal_Position", [int((i*interp+j*(1-interp))) for i, j in zip(arm_calibration["start_pos"], arm_calibration["end_pos"])])
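A sketch of sweeping that interpolation over time, so the arm glides from start_pos to end_pos instead of jumping (step count and delay are made up; assumes the robot and arm_calibration from above):

```python
import time

steps = 50
for k in range(steps + 1):
    interp = 1.0 - k / steps  # 1.0 -> start_pos, 0.0 -> end_pos
    goal = [int(i * interp + j * (1 - interp))
            for i, j in zip(arm_calibration["start_pos"], arm_calibration["end_pos"])]
    robot.arm_bus.write("Goal_Position", goal)
    time.sleep(0.05)  # tune for your motors
```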
Ctrl+Shift+M to search for stuff
set_calibration is useless
move the joints to that position too
/home/nepyope/Desktop/HuggingFace/lerobot/lerobot/common/robot_devices/motors/feetech.py
there's clearly some lag, and it's probably because of an out-of-range issue
# hand_calibration = robot.get_hand_calibration()
# joint = input("Enter joint name: ")
# j1 = f"{joint}_pinky_side"
# j2 = f"{joint}_thumb_side"
# encoder = EncoderReader("/dev/ttyUSB0", 115200)
# start_angle1 = hand_calibration['start_pos'][hand_calibration['motor_names'].index(j1)]
# end_angle1 = hand_calibration['end_pos'][hand_calibration['motor_names'].index(j1)]
# start_angle2 = hand_calibration['start_pos'][hand_calibration['motor_names'].index(j2)]
# end_angle2 = hand_calibration['end_pos'][hand_calibration['motor_names'].index(j2)]
# # start_angle = shoulder_calibration['start_pos'][shoulder_calibration['motor_names'].index(joint)]
# # end_angle = shoulder_calibration['end_pos'][shoulder_calibration['motor_names'].index(joint)]
# while True:
# angle1 = int(start_angle1+(end_angle1-start_angle1)*encoder.read()/1000)
# angle2 = int(start_angle2+(end_angle2-start_angle2)*encoder.read()/1000)
# robot.hand_bus.write("Goal_Position",angle1, [j1])
# robot.hand_bus.write("Goal_Position",angle2, [j2])
# print(angle1, angle2)
# time.sleep(0.1)
# print(robot.hand_bus.find_motor_indices())
# exit()
maybe divide the 3.3 V rail by 2 and use that as a reference
https://jlcpcb.com/partdetail/23831236-OPA340UA_UMW/C22365307
-90 is good for the op amp

52
examples/hopejr/read.cs Normal file
View File

@@ -0,0 +1,52 @@
#include <Arduino.h>
// Define multiplexer input pins
#define S0 5
#define S1 6
#define S2 8
#define S3 7
#define SENSOR_INPUT 4
#define SENSOR_COUNT 16
int rawVals[SENSOR_COUNT];
void measureRawValues() {
for (uint8_t i = 0; i < SENSOR_COUNT; i++) {
// Select multiplexer channel i; S0-S2 are driven inverted (note the XOR)
digitalWrite(S0, (i & 0b1) ^ 0b1);
digitalWrite(S1, (i >> 1 & 0b1) ^ 0b1);
digitalWrite(S2, (i >> 2 & 0b1) ^ 0b1);
digitalWrite(S3, i >> 3 & 0b1);
delay(1); // let the analog line settle before sampling
rawVals[i] = analogRead(SENSOR_INPUT);
}
}
void printRawValues() {
for (uint8_t i = 0; i < SENSOR_COUNT; i++) {
Serial.print(rawVals[i]);
if (i < SENSOR_COUNT - 1) Serial.print(" ");
}
Serial.println();
}
void setup() {
Serial.begin(115200);
pinMode(S0, OUTPUT);
pinMode(S1, OUTPUT);
pinMode(S2, OUTPUT);
pinMode(S3, OUTPUT);
digitalWrite(S0, LOW);
digitalWrite(S1, LOW);
digitalWrite(S2, LOW);
digitalWrite(S3, LOW);
}
void loop() {
measureRawValues();
printRawValues();
delay(1);
}
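On the host side, this stream can be consumed with a few lines of Python (a sketch assuming the 16 space-separated integers per line printed above; the port name is hypothetical):

```python
import serial

EXPECTED = 16  # one value per multiplexer channel

ser = serial.Serial("/dev/ttyACM0", 115200, timeout=1)  # hypothetical port
while True:
    parts = ser.readline().decode("utf-8").strip().split()
    if len(parts) != EXPECTED:
        continue  # skip partial or garbled lines
    raw_vals = [int(p) for p in parts]
    print(raw_vals)
```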


Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,74 @@
import numpy as np
from PIL import Image, ImageSequence
def coalesce_gif(im):
"""
Attempt to coalesce frames so each one is a full image.
This handles many (though not all) partial-frame GIFs.
"""
# Convert mode to RGBA
im = im.convert("RGBA")
# Prepare an accumulator the same size as the base frame
base = Image.new("RGBA", im.size)
frames = []
# Go through each frame
for frame in ImageSequence.Iterator(im):
base.alpha_composite(frame.convert("RGBA"))
frames.append(base.copy())
return frames
def remove_white_make_black(arr, threshold=250):
"""
For each pixel in arr (H,W,3), if R,G,B >= threshold, set to black (0,0,0).
This effectively 'removes' white so it won't affect the sum.
"""
mask = (arr[..., 0] >= threshold) & \
(arr[..., 1] >= threshold) & \
(arr[..., 2] >= threshold)
arr[mask] = 0 # set to black
def main():
# Load the animated GIF
gif = Image.open("input.gif")
# Coalesce frames so each is full-size
frames = coalesce_gif(gif)
if not frames:
print("No frames found!")
return
# Convert first frame to RGB array, initialize sum array
w, h = frames[0].size
sum_array = np.zeros((h, w, 3), dtype=np.uint16) # 16-bit to avoid overflow
# For each frame:
for f in frames:
# Convert to RGB
rgb = f.convert("RGB")
arr = np.array(rgb, dtype=np.uint16) # shape (H, W, 3)
# Remove near-white by setting it to black
remove_white_make_black(arr, threshold=250)
# Add to sum_array, then clamp to 255
sum_array += arr
np.clip(sum_array, 0, 255, out=sum_array)
# Convert sum_array back to 8-bit
sum_array = sum_array.astype(np.uint8)
# Finally, any pixel that stayed black is presumably "empty," so we set it to white
black_mask = (sum_array[..., 0] == 0) & \
(sum_array[..., 1] == 0) & \
(sum_array[..., 2] == 0)
sum_array[black_mask] = [255, 255, 255]
# Create final Pillow image
final_img = Image.fromarray(sum_array, mode="RGB")
final_img.save("result.png")
print("Done! Wrote result.png.")
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,31 @@
import time
# Import the motor bus (adjust the import path as needed)
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus
def main():
bus = FeetechMotorsBus(
port="/dev/ttyACM0",
motors={
"leader": [1, "scs0009"],
"follower": [2, "scs0009"]
},
protocol_version=1,
group_sync_read=False
)
bus.connect()
print(bus.read("Present_Position", "leader"))
bus.write("Torque_Enable", 0, ["leader"])
bus.write("Torque_Enable", 1, ["follower"])
for i in range(10000000):
time.sleep(0.01)
pos = bus.read("Present_Position", "leader")
if 1 < pos[0] < 1022: # ignore readings pinned at the ends of the raw range
bus.write("Goal_Position", pos, ["follower"])
if __name__ == "__main__":
main()

Binary file not shown.

View File

@@ -0,0 +1,15 @@
robot:
arm_bus:
Lock: 0
Torque_Limit: 1000
Protection_Current: 500
Over_Current_Protection_Time: 10
Max_Torque_Limit: 1000
Overload_Torque: 40 # Play around with this
Protection_Time: 1000 # When does it kick in?
Protective_Torque: 1
Maximum_Acceleration: 100
Torque_Enable: 1
Acceleration: 30
hand_bus:
Acceleration: 100
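`apply_arm_config` (used in teleop.py further down) presumably walks this mapping and writes each register to the corresponding bus. A sketch of that idea, not the actual implementation:

```python
import yaml

def apply_arm_config(path, robot):
    with open(path) as f:
        config = yaml.safe_load(f)
    # For each bus listed under "robot" (arm_bus, hand_bus), write every register
    for bus_name, registers in config["robot"].items():
        bus = getattr(robot, bus_name)
        for register, value in registers.items():
            bus.write(register, value)
```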

Binary file not shown.

View File

@@ -0,0 +1,61 @@
import time
import numpy as np
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus
def main():
# Instantiate the bus for a single motor on port /dev/ttyACM0.
arm_bus = FeetechMotorsBus(
port="/dev/ttyACM0",
motors={"wrist_pitch": [1, "scs0009"]},
protocol_version=1,
group_sync_read=False, # using individual read calls
)
arm_bus.connect()
# Configure continuous rotation mode.
arm_bus.write("Min_Angle_Limit", 0)
arm_bus.write("Max_Angle_Limit", 1024)
# For model "scs0009", the raw reading runs from 0 to ~1022.
resolution_max = 1022 # use 1022 as the effective maximum raw value
# Read initial raw motor position.
prev_raw = arm_bus.read("Present_Position", ["wrist_pitch"])[0]
print("Initial raw position:", prev_raw)
# Command continuous rotation.
arm_bus.write("Goal_Position", 1024)
# Initialize loop counter.
loops_count = 0
target_effective = 1780
tolerance = 50 # stop when effective position is within ±50 of target
while True:
current_raw = arm_bus.read("Present_Position", ["wrist_pitch"])[0]
# Detect wrap-around: if the previous reading was near the top (>= 1020)
# and current reading is low (< 100), count that as one full loop.
if prev_raw >= 1020 and current_raw < 100:
loops_count += 1
print(f"Wrap detected! loops_count increased to {loops_count}")
# Compute the effective position.
effective_position = loops_count * resolution_max + current_raw
print(f"Raw position: {current_raw} | loops_count: {loops_count} | Effective position: {effective_position}")
# Check if effective position is within tolerance of the target.
if abs(effective_position - target_effective) <= tolerance:
# Command motor to stop by setting the current raw position as goal.
arm_bus.write("Goal_Position", current_raw)
print(f"Target reached (effective position: {effective_position}). Stopping motor at raw position {current_raw}.")
break
prev_raw = current_raw
time.sleep(0.01) # 10 ms delay
time.sleep(1)
arm_bus.disconnect()
if __name__ == "__main__":
main()
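To make the wrap-around bookkeeping concrete: with resolution_max = 1022, a raw reading of 758 after one detected wrap gives an effective position of 1 * 1022 + 758 = 1780, exactly target_effective above, so the loop stops within the ±50 tolerance.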

226
examples/hopejr/teleop.py Normal file
View File

@@ -0,0 +1,226 @@
from follower import HopeJuniorRobot
from leader import (
HomonculusArm,
HomonculusGlove,
EncoderReader
)
from visualizer import value_to_color
import time
import numpy as np
import pickle
import pygame
import typer
def main(
calibrate_glove: bool = typer.Option(False, "--calibrate-glove", help="Calibrate the glove"),
calibrate_exoskeleton: bool = typer.Option(False, "--calibrate-exoskeleton", help="Calibrate the exoskeleton"),
freeze_fingers: bool = typer.Option(False, "--freeze-fingers", help="Freeze the fingers"),
freeze_arm: bool = typer.Option(False, "--freeze-arm", help="Freeze the arm"),
show_loads: bool = typer.Option(False, "--show-loads", help="Show the loads in a GUI"),
):
robot = HopeJuniorRobot()
robot.connect_hand()
robot.connect_arm()
#read pos
print(robot.hand_bus.read("Present_Position"))
print(robot.arm_bus.read("Present_Position", "shoulder_pitch"))
print(robot.arm_bus.read("Present_Position",["shoulder_yaw","shoulder_roll","elbow_flex","wrist_roll","wrist_yaw","wrist_pitch"]))
#robot.arm_bus.write("Goal_Position", robot.arm_calib_dict["start_pos"][0]*1 +robot.arm_calib_dict["end_pos"][0]*0, ["wrist_roll"])
for i in range(10):
time.sleep(0.1)
robot.apply_arm_config('examples/hopejr/settings/config.yaml')
# #calibrate arm
arm_calibration = robot.get_arm_calibration()
exoskeleton = HomonculusArm(serial_port="/dev/tty.usbmodem1201")
if calibrate_exoskeleton:
exoskeleton.run_calibration(robot)
file_path = "examples/hopejr/settings/arm_calib.pkl"
with open(file_path, "rb") as f:
calib_dict = pickle.load(f)
print("Loaded dictionary:", calib_dict)
exoskeleton.set_calibration(calib_dict)
#calibrate hand
hand_calibration = robot.get_hand_calibration()
glove = HomonculusGlove(serial_port = "/dev/tty.usbmodem1101")
if calibrate_glove:
glove.run_calibration()
file_path = "examples/hopejr/settings/hand_calib.pkl"
with open(file_path, "rb") as f:
calib_dict = pickle.load(f)
print("Loaded dictionary:", calib_dict)
glove.set_calibration(calib_dict)
robot.hand_bus.set_calibration(hand_calibration)
robot.arm_bus.set_calibration(arm_calibration)
# Initialize Pygame
# pygame.init()
# # Set up the display
# screen = pygame.display.set_mode((800, 600))
# pygame.display.set_caption("Robot Hand Visualization")
# # Create hand structure with 16 squares and initial values
# hand_components = []
# # Add thumb (4 squares in diamond shape)
# thumb_positions = [
# (150, 300), (125, 350),
# (175, 350), (150, 400)
# ]
# for pos in thumb_positions:
# hand_components.append({"pos": pos, "value": 0})
# # Add fingers (4 fingers with 3 squares each in vertical lines)
# finger_positions = [
# (200, 100), # Index
# (250, 100), # Middle
# (300, 100), # Ring
# (350, 100) # Pinky
# ]
# for x, y in finger_positions:
# for i in range(3):
# hand_components.append({"pos": (x, y + i * 50), "value": 0})
for i in range(1000000000000000): # effectively: run forever
robot.apply_arm_config('examples/hopejr/settings/config.yaml')
#robot.arm_bus.write("Acceleration", 50, "shoulder_yaw")
joint_names = ["shoulder_pitch", "shoulder_yaw", "shoulder_roll", "elbow_flex", "wrist_roll", "wrist_yaw", "wrist_pitch"]
#only wrist roll
#joint_names = ["shoulder_pitch"]
joint_values = exoskeleton.read(motor_names=joint_names)
#joint_values = joint_values.round().astype(int)
joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}
motor_values = []
motor_names = []
motor_names += ["shoulder_pitch", "shoulder_yaw", "shoulder_roll", "elbow_flex", "wrist_roll", "wrist_yaw", "wrist_pitch"]
#motor_names += ["shoulder_pitch"]
motor_values += [joint_dict[name] for name in motor_names]
#remove 50 from shoulder_roll
#motor_values += [joint_dict[name] for name in motor_names]
motor_values = np.array(motor_values)
motor_values = np.clip(motor_values, 0, 100)
print(motor_names, motor_values)
if not freeze_arm:
robot.arm_bus.write("Goal_Position", motor_values, motor_names)
if not freeze_fingers:#include hand
hand_joint_names = []
hand_joint_names += ["thumb_3", "thumb_2", "thumb_1", "thumb_0"]#, "thumb_3"]
hand_joint_names += ["index_0", "index_1", "index_2"]
hand_joint_names += ["middle_0", "middle_1", "middle_2"]
hand_joint_names += ["ring_0", "ring_1", "ring_2"]
hand_joint_names += ["pinky_0", "pinky_1", "pinky_2"]
hand_joint_values = glove.read(hand_joint_names)
hand_joint_values = hand_joint_values.round().astype(int)
hand_joint_dict = {k: v for k, v in zip(hand_joint_names, hand_joint_values, strict=False)}
hand_motor_values = []
hand_motor_names = []
# Thumb
hand_motor_names += ["thumb_basel_rotation", "thumb_mcp", "thumb_pip", "thumb_dip"]#, "thumb_MCP"]
hand_motor_values += [
hand_joint_dict["thumb_3"],
hand_joint_dict["thumb_2"],
hand_joint_dict["thumb_1"],
hand_joint_dict["thumb_0"]
]
# # Index finger
index_splay = 0.1
hand_motor_names += ["index_flexor", "index_pinky_side", "index_thumb_side"]
hand_motor_values += [
hand_joint_dict["index_2"],
(100 - hand_joint_dict["index_0"]) * index_splay + hand_joint_dict["index_1"] * (1 - index_splay),
hand_joint_dict["index_0"] * index_splay + hand_joint_dict["index_1"] * (1 - index_splay),
]
# Middle finger
middle_splay = 0.1
hand_motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
hand_motor_values += [
hand_joint_dict["middle_2"],
hand_joint_dict["middle_0"] * middle_splay + hand_joint_dict["middle_1"] * (1 - middle_splay),
(100 - hand_joint_dict["middle_0"]) * middle_splay + hand_joint_dict["middle_1"] * (1 - middle_splay),
]
# # Ring finger
ring_splay = 0.1
hand_motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
hand_motor_values += [
hand_joint_dict["ring_2"],
(100 - hand_joint_dict["ring_0"]) * ring_splay + hand_joint_dict["ring_1"] * (1 - ring_splay),
hand_joint_dict["ring_0"] * ring_splay + hand_joint_dict["ring_1"] * (1 - ring_splay),
]
# # Pinky finger
pinky_splay = -0.1
hand_motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
hand_motor_values += [
hand_joint_dict["pinky_2"],
hand_joint_dict["pinky_0"] * pinky_splay + hand_joint_dict["pinky_1"] * (1 - pinky_splay),
(100 - hand_joint_dict["pinky_0"]) * pinky_splay + hand_joint_dict["pinky_1"] * (1 - pinky_splay),
]
hand_motor_values = np.array(hand_motor_values)
hand_motor_values = np.clip(hand_motor_values, 0, 100)
robot.hand_bus.write("Acceleration", 255, hand_motor_names)
robot.hand_bus.write("Goal_Position", hand_motor_values, hand_motor_names)
# if i%20==0 and i > 100:
# try:
# loads = robot.hand_bus.read("Present_Load")
# for i, comp in enumerate(hand_components):
# # Wave oscillates between 0 and 2024:
# # Center (1012) +/- 1012 * sin(...)
# comp["value"] = loads[i]
# except:
# pass
time.sleep(0.01)
# for event in pygame.event.get():
# if event.type == pygame.QUIT:
# robot.hand_bus.disconnect()
# robot.arm_bus.disconnect()
# exit()
# # Check for user pressing 'q' to quit
# if event.type == pygame.KEYDOWN:
# if event.key == pygame.K_q:
# robot.hand_bus.disconnect()
# robot.arm_bus.disconnect()
# exit()
# # Draw background
# screen.fill((0, 0, 0)) # Black background
# # Draw hand components
# for comp in hand_components:
# x, y = comp["pos"]
# color = value_to_color(comp["value"])
# pygame.draw.rect(screen, color, (x, y, 30, 30))
# pygame.display.flip()
if __name__ == "__main__":
typer.run(main)
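The finger mapping above blends each finger's splay (joint 0) and bend (joint 1) signals into the two side tendon motors; factored out, the pattern looks like the sketch below (sign conventions differ per finger in the loop above; this is the index-finger variant, with made-up inputs):

```python
def splay_blend(splay_pct, bend_pct, splay_gain=0.1):
    """Blend splay (side-to-side) and bend into pinky-side and thumb-side
    tendon targets, all in [0, 100] %."""
    pinky_side = (100 - splay_pct) * splay_gain + bend_pct * (1 - splay_gain)
    thumb_side = splay_pct * splay_gain + bend_pct * (1 - splay_gain)
    return pinky_side, thumb_side

print(splay_blend(50.0, 80.0))  # -> (77.0, 77.0): centered splay leaves pure bend
```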

View File

@@ -0,0 +1,135 @@
import serial
import threading
import time
import numpy as np
import matplotlib.pyplot as plt
# Thread function to read from a serial port continuously until stop_event is set.
def read_serial(port, baudrate, stop_event, data_list):
try:
ser = serial.Serial(port, baudrate, timeout=1)
except Exception as e:
print(f"Error opening {port}: {e}")
return
while not stop_event.is_set():
try:
line = ser.readline().decode('utf-8').strip()
except Exception as e:
print(f"Decode error on {port}: {e}")
continue
if line:
try:
# Split the line into integer values.
values = [int(x) for x in line.split()]
# For ACM1, ignore the extra value if present.
if len(values) >= 16:
if len(values) > 16:
values = values[:16]
# Save the timestamp (relative to start) with the sensor readings.
timestamp = time.time()
data_list.append((timestamp, values))
except Exception as e:
print(f"Error parsing line from {port}: '{line}' -> {e}")
ser.close()
def main():
# --- Configuration ---
# Set your serial port names here (adjust for your system)
acm0_port = "/dev/ttyACM0" # Example for Linux (or "COM3" on Windows)
acm1_port = "/dev/ttyACM1" # Example for Linux (or "COM4" on Windows)
baudrate = 115200
# Data storage for each device:
data_acm0 = [] # Will hold tuples of (timestamp, [16 sensor values])
data_acm1 = []
# Event to signal threads to stop reading.
stop_event = threading.Event()
# Create and start reader threads.
thread_acm0 = threading.Thread(target=read_serial, args=(acm0_port, baudrate, stop_event, data_acm0))
thread_acm1 = threading.Thread(target=read_serial, args=(acm1_port, baudrate, stop_event, data_acm1))
thread_acm0.start()
thread_acm1.start()
# Record data for 10 seconds.
record_duration = 10 # seconds
start_time = time.time()
time.sleep(record_duration)
stop_event.set() # signal threads to stop
# Wait for both threads to finish.
thread_acm0.join()
thread_acm1.join()
print("Finished recording.")
# --- Process the Data ---
# Convert lists of (timestamp, values) to numpy arrays.
# Compute time relative to the start of the recording.
times_acm0 = np.array([t - start_time for t, _ in data_acm0])
sensor_acm0 = np.array([vals for _, vals in data_acm0]) # shape (N0, 16)
times_acm1 = np.array([t - start_time for t, _ in data_acm1])
sensor_acm1 = np.array([vals for _, vals in data_acm1]) # shape (N1, 16)
# --- Plot 1: Overlapping Time Series ---
plt.figure(figsize=(12, 8))
# Plot each sensor from ACM0 in red.
for i in range(16):
plt.plot(times_acm0, sensor_acm0[:, i], color='red', alpha=0.7,
label='ACM0 Sensor 1' if i == 0 else None)
# Plot each sensor from ACM1 in blue.
for i in range(16):
plt.plot(times_acm1, sensor_acm1[:, i], color='blue', alpha=0.7,
label='ACM1 Sensor 1' if i == 0 else None)
plt.xlabel("Time (s)")
plt.ylabel("Sensor Reading")
plt.title("Overlapping Sensor Readings (ACM0 in Red, ACM1 in Blue)")
plt.legend()
plt.tight_layout()
plt.savefig("overlapping_sensor_readings.png", dpi=300)
plt.close()
print("Saved overlapping_sensor_readings.png")
# --- Plot 2: Variance of Noise for Each Sensor ---
# Compute variance (over time) for each sensor channel.
variance_acm0 = np.var(sensor_acm0, axis=0)
variance_acm1 = np.var(sensor_acm1, axis=0)
sensor_numbers = np.arange(1, 17)
bar_width = 0.35
plt.figure(figsize=(12, 6))
plt.bar(sensor_numbers - bar_width/2, variance_acm0, bar_width, color='red', label='ACM0')
plt.bar(sensor_numbers + bar_width/2, variance_acm1, bar_width, color='blue', label='ACM1')
plt.xlabel("Sensor Number")
plt.ylabel("Variance")
plt.title("Noise Variance per Sensor")
plt.xticks(sensor_numbers)
plt.legend()
plt.tight_layout()
plt.savefig("sensor_variance.png", dpi=300)
plt.close()
print("Saved sensor_variance.png")
# --- Plot 3: Difference Between ACM0 and ACM1 Readings ---
# Since the two devices may not sample at exactly the same time,
# we interpolate ACM1's data onto ACM0's time base for each sensor.
plt.figure(figsize=(12, 8))
for i in range(16):
if len(times_acm1) > 1 and len(times_acm0) > 1:
interp_acm1 = np.interp(times_acm0, times_acm1, sensor_acm1[:, i])
diff = sensor_acm0[:, i] - interp_acm1
plt.plot(times_acm0, diff, label=f"Sensor {i+1}")
plt.xlabel("Time (s)")
plt.ylabel("Difference (ACM0 - ACM1)")
plt.title("Difference in Sensor Readings")
plt.legend(fontsize='small', ncol=2)
plt.tight_layout()
plt.savefig("sensor_differences.png", dpi=300)
plt.close()
print("Saved sensor_differences.png")
if __name__ == "__main__":
main()

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,84 @@
import time
import serial
import numpy as np
import matplotlib.pyplot as plt
# Import the motor bus (adjust the import path as needed)
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus
def main():
# -------------------------------
# Setup the motor bus (ACM0)
# -------------------------------
arm_bus = FeetechMotorsBus(
port="/dev/ttyACM0",
motors={
"wrist_pitch": [7, "sts3215"],
},
protocol_version=0,
)
arm_bus.connect()
# -------------------------------
# Setup the serial connection for sensor (ACM1)
# -------------------------------
try:
ser = serial.Serial("/dev/ttyACM1", 115200, timeout=1)
except Exception as e:
print(f"Error opening serial port /dev/ttyACM1: {e}")
return
# Lists to store the motor positions and sensor values.
positions = []
sensor_values = []
# -------------------------------
# Loop: move motor and collect sensor data
# -------------------------------
# Sweep downward from 2800; range(2800, 1500, -10) yields 2800 down to 1510.
for pos in range(2800, 1500, -10):
# Command the motor to go to position 'pos'
arm_bus.write("Goal_Position", pos, ["wrist_pitch"])
# Wait a short period for the motor to move and the sensor to update.
time.sleep(0.01)
# Read one line from the sensor device.
sensor_val = np.nan # default if reading fails
try:
line = ser.readline().decode('utf-8').strip()
if line:
# Split the line into parts and convert each part to int.
parts = line.split()
# Ensure there are enough values (we expect at least 15 values)
if len(parts) >= 15:
values = [int(x) for x in parts]
# Use the 15th value (index 14)
sensor_val = values[14]
except Exception as e:
print(f"Error parsing sensor data: {e}")
positions.append(pos)
sensor_values.append(sensor_val)
print(f"Motor pos: {pos} | Sensor 15th value: {sensor_val}")
# Move the motor back to the starting position.
arm_bus.write("Goal_Position", 2800, ["wrist_pitch"])
# -------------------------------
# Plot the data: Motor Angle vs. Sensor 15th Value
# -------------------------------
plt.figure(figsize=(10, 6))
plt.plot(positions, sensor_values, marker='o', linestyle='-')
plt.xlabel("Motor Angle")
plt.ylabel("Sensor 15th Value")
plt.title("Motor Angle vs Sensor 15th Value")
plt.grid(True)
plt.savefig("asd.png", dpi=300)
plt.close()
print("Plot saved as asd.png")
# Close the serial connection.
ser.close()
if __name__ == "__main__":
main()

682
examples/hopejr/test.py Normal file
View File

@@ -0,0 +1,682 @@
import threading
import time
from typing import Callable
import cv2
import numpy as np
# from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp
# from qai_hub_models.models.mediapipe_hand.model import (
# MediaPipeHand,
# )
# from qai_hub_models.utils.image_processing import (
# app_to_net_image_inputs,
# )
from lerobot.common.robot_devices.motors.feetech import (
CalibrationMode,
FeetechMotorsBus,
)
LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200
import serial
class HomonculusGlove:
def __init__(self):
self.serial_port = "COM10"
self.baud_rate = 115200
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
self.last_d = {
"thumb_0": 100,
"thumb_1": 100,
"thumb_2": 100,
"thumb_3": 100,
"index_0": 100,
"index_1": 100,
"index_2": 100,
"middle_0": 100,
"middle_1": 100,
"middle_2": 100,
"ring_0": 100,
"ring_1": 100,
"ring_2": 100,
"pinky_0": 100,
"pinky_1": 100,
"pinky_2": 100,
"battery_voltage": 100,
}
self.calibration = None
# Start the reader thread only after state is initialized; daemon so it
# doesn't block interpreter exit
self.thread = threading.Thread(target=self.async_read, daemon=True)
self.thread.start()
@property
def joint_names(self):
return list(self.last_d.keys())
def read(self, motor_names: list[str] | None = None):
if motor_names is None:
motor_names = self.joint_names
values = np.array([self.last_d[k] for k in motor_names])
print(motor_names)
print(values)
if self.calibration is not None:
values = self.apply_calibration(values, motor_names)
print(values)
return values
def async_read(self):
while True:
if self.serial.in_waiting > 0:
self.serial.flush()
vals = self.serial.readline().decode("utf-8").strip()
vals = vals.split(" ")
if len(vals) != 17:
continue
vals = [int(val) for val in vals]
d = {
"thumb_0": vals[0],
"thumb_1": vals[1],
"thumb_2": vals[2],
"thumb_3": vals[3],
"index_0": vals[4],
"index_1": vals[5],
"index_2": vals[6],
"middle_0": vals[7],
"middle_1": vals[8],
"middle_2": vals[9],
"ring_0": vals[10],
"ring_1": vals[11],
"ring_2": vals[12],
"pinky_0": vals[13],
"pinky_1": vals[14],
"pinky_2": vals[15],
"battery_voltage": vals[16],
}
self.last_d = d
# print(d.values())
def run_calibration(self):
print("\nMove arm to open position")
input("Press Enter to continue...")
open_pos_list = []
for _ in range(300):
open_pos = self.read()
open_pos_list.append(open_pos)
time.sleep(0.01)
open_pos = np.array(open_pos_list)
max_open_pos = open_pos.max(axis=0)
min_open_pos = open_pos.min(axis=0)
print(f"{max_open_pos=}")
print(f"{min_open_pos=}")
print("\nMove arm to closed position")
input("Press Enter to continue...")
closed_pos_list = []
for _ in range(300):
closed_pos = self.read()
closed_pos_list.append(closed_pos)
time.sleep(0.01)
closed_pos = np.array(closed_pos_list)
max_closed_pos = closed_pos.max(axis=0)
# Readings below 1000 are treated as sensor dropouts; push them high so
# they cannot skew the per-joint minimum computed next.
closed_pos[closed_pos < 1000] = 60000
min_closed_pos = closed_pos.min(axis=0)
print(f"{max_closed_pos=}")
print(f"{min_closed_pos=}")
open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)
# INVERSION: these joints read backwards (raw value decreases as the
# joint closes), so swap their open/closed endpoints.
for i, jname in enumerate(self.joint_names):
if jname in ["thumb_0", "thumb_3", "index_2", "middle_2", "ring_2", "pinky_0", "pinky_2"]:
tmp_pos = open_pos[i]
open_pos[i] = closed_pos[i]
closed_pos[i] = tmp_pos
print()
print(f"{open_pos=}")
print(f"{closed_pos=}")
homing_offset = [0] * len(self.joint_names)
drive_mode = [0] * len(self.joint_names)
calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)
calib_dict = {
"homing_offset": homing_offset,
"drive_mode": drive_mode,
"start_pos": open_pos,
"end_pos": closed_pos,
"calib_mode": calib_modes,
"motor_names": self.joint_names,
}
# return calib_dict
self.set_calibration(calib_dict)
def set_calibration(self, calibration: dict[str, list]):
self.calibration = calibration
def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
"""Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
a "zero position" at 0 degree.
Note: We say "nominal degree range" since the motors can take values outside this range. For instance, 190 degrees, if the motor
rotate more than a half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.
Joints values are original in [0, 2**32[ (unsigned int32). Each motor are expected to complete a full rotation
when given a goal position that is + or - their resolution. For instance, feetech xl330-m077 have a resolution of 4096, and
at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between each motor.
To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
in the centered nominal degree range ]-180, 180[.
"""
if motor_names is None:
motor_names = self.motor_names
# Convert from unsigned int32 original range [0, 2**32] to signed float32 range
values = values.astype(np.float32)
for i, name in enumerate(motor_names):
calib_idx = self.calibration["motor_names"].index(name)
calib_mode = self.calibration["calib_mode"][calib_idx]
if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
start_pos = self.calibration["start_pos"][calib_idx]
end_pos = self.calibration["end_pos"][calib_idx]
# Rescale the present position to a nominal range [0, 100] %,
# useful for joints with linear motions like Aloha gripper
values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
values[i] = end_pos
else:
msg = (
f"Wrong motor position range detected for {name}. "
f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
f"but present value is {values[i]} %. "
"This might be due to a cable connection issue creating an artificial jump in motor values. "
"You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
)
print(msg)
# raise JointOutOfRangeError(msg)
return values
# def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
# """Inverse of `apply_calibration`."""
# if motor_names is None:
# motor_names = self.motor_names
# for i, name in enumerate(motor_names):
# calib_idx = self.calibration["motor_names"].index(name)
# calib_mode = self.calibration["calib_mode"][calib_idx]
# if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
# start_pos = self.calibration["start_pos"][calib_idx]
# end_pos = self.calibration["end_pos"][calib_idx]
# # Convert from nominal linear range of [0, 100] % to
# # actual motor range of values which can be arbitrary.
# values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
# values = np.round(values).astype(np.int32)
# return values
class HopeJuniorRobot:
def __init__(self):
self.arm_bus = FeetechMotorsBus(
port="COM14",
motors={
# "motor1": (2, "sts3250"),
# "motor2": (1, "scs0009"),
"shoulder_pitch": [1, "sts3250"],
"shoulder_yaw": [2, "sts3215"], # TODO: sts3250
"shoulder_roll": [3, "sts3215"], # TODO: sts3250
"elbow_flex": [4, "sts3250"],
"wrist_roll": [5, "sts3215"],
"wrist_yaw": [6, "sts3215"],
"wrist_pitch": [7, "sts3215"],
},
protocol_version=0,
)
self.hand_bus = FeetechMotorsBus(
port="COM15",
motors={
"thumb_basel_rotation": [30, "scs0009"],
"thumb_flexor": [27, "scs0009"],
"thumb_pinky_side": [26, "scs0009"],
"thumb_thumb_side": [28, "scs0009"],
"index_flexor": [25, "scs0009"],
"index_pinky_side": [31, "scs0009"],
"index_thumb_side": [32, "scs0009"],
"middle_flexor": [24, "scs0009"],
"middle_pinky_side": [33, "scs0009"],
"middle_thumb_side": [34, "scs0009"],
"ring_flexor": [21, "scs0009"],
"ring_pinky_side": [36, "scs0009"],
"ring_thumb_side": [35, "scs0009"],
"pinky_flexor": [23, "scs0009"],
"pinky_pinky_side": [38, "scs0009"],
"pinky_thumb_side": [37, "scs0009"],
},
protocol_version=1,
group_sync_read=False,
)
def get_hand_calibration(self):
homing_offset = [0] * len(self.hand_bus.motor_names)
drive_mode = [0] * len(self.hand_bus.motor_names)
start_pos = [
500,
900,
1000,
0,
100,
250,
750,
100,
400,
150,
100,
120,
980,
100,
950,
750,
]
end_pos = [
500 - 250,
900 - 300,
1000 - 550,
0 + 550,
1000,
250 + 700,
750 - 700,
1000,
400 + 700,
150 + 700,
1000,
120 + 700,
980 - 700,
1000,
950 - 700,
750 - 700,
]
calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)
calib_dict = {
"homing_offset": homing_offset,
"drive_mode": drive_mode,
"start_pos": start_pos,
"end_pos": end_pos,
"calib_mode": calib_modes,
"motor_names": self.hand_bus.motor_names,
}
return calib_dict
def connect(self):
self.arm_bus.connect()
#self.hand_bus.connect()
ESCAPE_KEY_ID = 27
def capture_and_display_processed_frames(
frame_processor: Callable[[np.ndarray], np.ndarray],
window_display_name: str,
cap_device: int = 0,
) -> None:
"""
Capture frames from the given input camera device, run them through
the frame processor, and display the outputs in a window with the given name.
User should press Esc to exit.
Inputs:
frame_processor: Callable[[np.ndarray], np.ndarray]
Processes frames.
Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte.
window_display_name: str
Name of the window used to display frames.
cap_device: int
Identifier for the camera to use to capture frames.
"""
cv2.namedWindow(window_display_name)
capture = cv2.VideoCapture(cap_device)
if not capture.isOpened():
raise ValueError("Unable to open video capture.")
frame_count = 0
has_frame, frame = capture.read()
while has_frame:
assert isinstance(frame, np.ndarray)
frame_count = frame_count + 1
# mirror frame
frame = np.ascontiguousarray(frame[:, ::-1, ::-1])
# process & show frame
processed_frame = frame_processor(frame)
cv2.imshow(window_display_name, processed_frame[:, :, ::-1])
has_frame, frame = capture.read()
key = cv2.waitKey(1)
if key == ESCAPE_KEY_ID:
break
capture.release()
def main():
robot = HopeJuniorRobot()
robot.connect()
# robot.hand_bus.calibration = None
# breakpoint()
# print(robot.arm_bus.read("Present_Position"))
robot.arm_bus.write("Torque_Enable", 1)
robot.arm_bus.write("Acceleration", 20)
robot.arm_bus.read("Acceleration")
robot.arm_bus.write("Goal_Position", calibration["start_pos"])
exit()
calibration = robot.get_hand_calibration()
robot.arm_bus.write("Goal_Position", calibration["start_pos"])
# robot.hand_bus.write("Goal_Position", calibration["end_pos"][:4], robot.hand_bus.motor_names[:4])
robot.hand_bus.set_calibration(calibration)
lol = 1
# # print(motors_bus.write("Goal_Position", 500))
# print(robot.hand_bus.read("Present_Position"))
# # pos = hand_bus.read("Present_Position")
# # hand_bus.write("Goal_Position", pos[0]+20, hand_bus.motor_names[0])
# # hand_bus.write("Goal_Position", pos[i]+delta, hand_bus.motor_names[i])
# robot.hand_bus.read("Acceleration")
# robot.hand_bus.write("Acceleration", 10)
# sleep = 1
# # robot.hand_bus.write(
# # "Goal_Position", [glove.last_d['index_2']-1500,300,300], ["index_pinky_side", "index_flexor", "index_thumb_side"]
# # )
# #time.sleep(sleep)
# time.sleep(sleep)
# robot.hand_bus.write(
# "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
# )
# time.sleep(sleep)
# robot.hand_bus.write(
# "Goal_Position", [100, 0, 0], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
# )
# time.sleep(sleep)
# robot.hand_bus.write(
# "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
# )
# time.sleep(sleep)
# robot.hand_bus.write(
# "Goal_Position", [200, 100, 600], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
# )
# time.sleep(sleep)
# breakpoint()
glove = HomonculusGlove()
glove.run_calibration()
# while True:
# joint_names = ["index_1", "index_2"]
# joint_values = glove.read(joint_names)
# print(joint_values)
input()
while True:
joint_names = []
joint_names += ["thumb_0", "thumb_2", "thumb_3"]
joint_names += ["index_1", "index_2"]
joint_names += ["middle_1", "middle_2"]
joint_names += ["ring_1", "ring_2"]
joint_names += ["pinky_1", "pinky_2"]
joint_values = glove.read(joint_names)
joint_values = joint_values.round().astype(int)
joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}
motor_values = []
motor_names = []
motor_names += ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"]
motor_values += [
joint_dict["thumb_3"],
joint_dict["thumb_0"],
joint_dict["thumb_2"],
joint_dict["thumb_2"],
]
motor_names += ["index_flexor", "index_pinky_side", "index_thumb_side"]
motor_values += [joint_dict["index_2"], joint_dict["index_1"], joint_dict["index_1"]]
motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
motor_values += [joint_dict["middle_2"], joint_dict["middle_1"], joint_dict["middle_1"]]
motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
motor_values += [joint_dict["ring_2"], joint_dict["ring_1"], joint_dict["ring_1"]]
motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
motor_values += [joint_dict["pinky_2"], joint_dict["pinky_1"], joint_dict["pinky_1"]]
motor_values = np.array(motor_values)
motor_values = np.clip(motor_values, 0, 100)
robot.hand_bus.write("Goal_Position", motor_values, motor_names)
time.sleep(0.02)
while True:
# print(glove.read()['index_2']-1500)
glove_index_flexor = glove.read()["index_2"] - 1500
glove_index_subflexor = glove.read()["index_1"] - 1500
glove_index_side = glove.read()["index_0"] - 2100
vals = [glove_index_flexor, 1000 - (glove_index_subflexor), glove_index_subflexor]
keys = ["index_flexor", "index_pinky_side", "index_thumb_side"]
glove_middle_flexor = glove.read()["middle_2"] - 1500
glove_middle_subflexor = 1000 - (glove.read()["middle_1"] - 1700)
vals += [glove_middle_flexor, glove_middle_subflexor, glove_middle_subflexor - 200]
keys += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
glove_ring_flexor = glove.read()["ring_2"] - 1300
print(glove_ring_flexor)
glove_ring_subflexor = glove.read()["ring_1"] - 1100
vals += [glove_ring_flexor, 1000 - glove_ring_subflexor, glove_ring_subflexor]
keys += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
glove_pinky_flexor = glove.read()["pinky_2"] - 1500
glove_pinky_subflexor = glove.read()["pinky_1"] - 1300
vals += [300 + glove_pinky_flexor, max(1000 - glove_pinky_subflexor - 100, 0), glove_pinky_subflexor]
keys += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
robot.hand_bus.write("Goal_Position", vals, keys)
time.sleep(0.1)
time.sleep(3)
def move_arm(loop=10):
sleep = 1
for i in range(loop):
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1195])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 2195])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1457, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 2357, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 974, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 2674, 1957, 1695])
time.sleep(sleep + 2)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 1632, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 1369, 1632, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 1330, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [2381, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1681, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
def move_hand(loop=10):
sleep = 0.5
for i in range(loop):
robot.hand_bus.write(
"Goal_Position",
[500, 1000, 0, 1000],
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position", [100, 1000, 150], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position", [200, 100, 700], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[500, 1000 - 250, 0 + 300, 1000 - 200],
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[100 + 450, 100 + 400, 100 + 400],
["index_flexor", "index_pinky_side", "index_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[100 + 350, 1000 - 450, 150 + 450],
["middle_flexor", "middle_pinky_side", "middle_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[200 + 650, 200 + 350, 0 + 350],
["ring_flexor", "ring_pinky_side", "ring_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[200 + 450, 100 + 400, 700 - 400],
["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"],
)
time.sleep(sleep)
move_hand(3)
move_arm(1)
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor() as executor:
executor.submit(move_arm)
executor.submit(move_hand)
# initial position
for i in range(3):
robot.hand_bus.write(
"Goal_Position", [500, 1000, 0, 1000, 100, 950, 100, 100, 1000, 150, 200, 200, 0, 200, 100, 700]
)
time.sleep(1)
# for i in range(3):
# robot.hand_bus.write("Goal_Position", [500, 1000-150, 0+250, 1000-150,
# 100+300, 950-250, 100+250,
# 100+200, 1000-300, 150+300,
# 200+500, 200+200, 0+200,
# 200+300, 100+200, 700-200])
# time.sleep(1)
# camera = 0
# score_threshold = 0.95
# iou_threshold = 0.3
# app = MediaPipeHandApp(MediaPipeHand.from_pretrained(), score_threshold, iou_threshold)
# def frame_processor(frame: np.ndarray) -> np.ndarray:
# # Input Prep
# NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs(frame)
# # Run Bounding Box & Keypoint Detector
# batched_selected_boxes, batched_selected_keypoints = app._run_box_detector(NCHW_fp32_torch_frames)
# # The region of interest ( bounding box of 4 (x, y) corners).
# # list[torch.Tensor(shape=[Num Boxes, 4, 2])],
# # where 2 == (x, y)
# #
# # A list element will be None if there is no selected ROI.
# batched_roi_4corners = app._compute_object_roi(batched_selected_boxes, batched_selected_keypoints)
# # selected landmarks for the ROI (if any)
# # list[torch.Tensor(shape=[Num Selected Landmarks, K, 3])],
# # where K == number of landmark keypoints, 3 == (x, y, confidence)
# #
# # A list element will be None if there is no ROI.
# landmarks_out = app._run_landmark_detector(NHWC_int_numpy_frames, batched_roi_4corners)
# app._draw_predictions(
# NHWC_int_numpy_frames,
# batched_selected_boxes,
# batched_selected_keypoints,
# batched_roi_4corners,
# *landmarks_out,
# )
# return NHWC_int_numpy_frames[0]
# capture_and_display_processed_frames(frame_processor, "QAIHM Mediapipe Hand Demo", camera)
if __name__ == "__main__":
main()

231
examples/hopejr/utils.py Normal file
View File

@@ -0,0 +1,231 @@
#robot.arm_bus.write("Acceleration", [20], ["shoulder_pitch"])
####DEBUGGER####################
# joint = input("Enter joint name: ")
# encoder = EncoderReader("/dev/ttyUSB1", 115200)
# start_angle = arm_calibration['start_pos'][arm_calibration['motor_names'].index(joint)]
# end_angle = arm_calibration['end_pos'][arm_calibration['motor_names'].index(joint)]
# # start_angle = shoulder_calibration['start_pos'][shoulder_calibration['motor_names'].index(joint)]
# # end_angle = shoulder_calibration['end_pos'][shoulder_calibration['motor_names'].index(joint)]
# while True:
# angle = int(start_angle+(end_angle-start_angle)*encoder.read()/1000)
# # robot.shoulder_bus.set_bus_baudrate(115200)
# # robot.shoulder_bus.write("Goal_Position",angle, [joint])
# robot.shoulder_bus.set_bus_baudrate(1000000)
# robot.arm_bus.write("Goal_Position",angle, [joint])
# print(angle)
# time.sleep(0.1)
#####SAFETY CHECKS EXPLAINED#####
# There are two built-in safety checks: one based on current and one based on load.
# Current: if Present_Current exceeds Protection_Current for Over_Current_Protection_Time (in ms),
# Torque_Enable is set to 0.
# Load: if Present_Load exceeds Max_Torque_Limit * Overload_Torque (a percentage) for Protection_Time
# (in ms), Max_Torque_Limit is set to Protective_Torque. The "method 1"/"method 2" blocks below set
# these registers.
# Though we can specify Min/Max_Angle_Limit, Max_Temperature_Limit and Min/Max_Voltage_Limit, no
# safety checks are implemented for those values.
#robot.arm_bus.set_calibration(arm_calibration)
#method 1
# robot.arm_bus.write("Overload_Torque", 80)
# robot.arm_bus.write("Protection_Time", 10)
# robot.arm_bus.write("Protective_Torque", 1)
# robot.arm_bus.write("Protection_Current", 200,["shoulder_pitch"])
# robot.arm_bus.write("Over_Current_Protection_Time", 10)
#method 2
# robot.arm_bus.write("Protection_Current", 500,["shoulder_pitch"])
# robot.arm_bus.write("Over_Current_Protection_Time", 10)
# robot.arm_bus.write("Max_Torque_Limit", 1000)
# robot.arm_bus.write("Overload_Torque", 40)
# robot.arm_bus.write("Protection_Time", 10)
# robot.arm_bus.write("Protective_Torque", 1)
# robot.shoulder_bus.set_bus_baudrate(115200)
# robot.shoulder_bus.write("Goal_Position",2500)
# exit()
######LOGGER####################
# from test_torque.log_and_plot_feetech import log_and_plot_params
# params_to_log = [
# "Protection_Current",
# "Present_Current",
# "Max_Torque_Limit",
# "Protection_Time",
# "Overload_Torque",
# "Present_Load",
# "Present_Position",
# ]
# servo_names = ["shoulder_pitch"]
# servo_data, timestamps = log_and_plot_params(robot.shoulder_bus, params_to_log, servo_names, test_id="shoulder_pitch")
# exit()
#robot.arm_bus.write("Goal_Position",2300, ["shoulder_pitch"])
# dt = 2
# steps = 4
# max_pos = 1500
# min_pos = 2300
# increment = (max_pos - min_pos) / steps
# # Move from min_pos to max_pos in steps
# for i in range(steps + 1): # Include the last step
# current_pos = min_pos + int(i * increment)
# robot.arm_bus.write("Goal_Position", [current_pos], ["shoulder_pitch"])
# time.sleep(dt)
# # Move back from max_pos to min_pos in steps
# for i in range(steps + 1): # Include the last step
# current_pos = max_pos - int(i * increment)
# robot.arm_bus.write("Goal_Position", [current_pos], ["shoulder_pitch"])
# time.sleep(dt)
#demo to show how sending a lot of values makes the robt shake
# # Step increment
#
# # Move from min_pos to max_pos in steps
# for i in range(steps + 1): # Include the last step
# current_pos = min_pos + int(i * increment)
# robot.arm_bus.write("Goal_Position", [current_pos], ["elbow_flex"])
# time.sleep(dt)
# # Move back from max_pos to min_pos in steps
# for i in range(steps + 1): # Include the last step
# current_pos = max_pos - int(i * increment)
# robot.arm_bus.write("Goal_Position", [current_pos], ["elbow_flex"])
# time.sleep(dt)
# exit()
#robot.arm_bus.write("Goal_Position", a # shoulder_calibration = robot.get_shoulder_calibration()
# print(shoulder_calibration)m_calibration["start_pos"])
# robot.arm_bus.write("Over_Current_Protection_Time", 50)
# robot.arm_bus.write("Protection_Current", 310, ["shoulder_pitch"])
# robot.arm_bus.write("Overload_Torque", 80, ["shoulder_pitch"])
# robot.arm_bus.write("Protection_Time", 100, ["shoulder_pitch"])
# robot.arm_bus.write("Over_Current_Protection_Time", 50, ["shoulder_pitch"])
# robot.arm_bus.write("Protective_Torque", 20, ["shoulder_pitch"])
# robot.arm_bus.write("Goal_Position", [600],["shoulder_pitch"])
# from test_torque.log_and_plot_feetech import log_and_plot_params
# params_to_log = [
# "Present_Current",
# "Protection_Current",
# "Overload_Torque",
# "Protection_Time",
# "Protective_Torque",
# "Present_Load",
# "Present_Position",
# ]
# servo_names = ["shoulder_pitch"]
#
#robot.arm_bus.write("Goal_Position", arm_calibration["start_pos"])
#robot.hand_bus.set_calibration(hand_calibration)
#interp = 0.3
#robot.arm_bus.write("Goal_Position", [int((i*interp+j*(1-interp))) for i, j in zip(arm_calibration["start_pos"], arm_calibration["end_pos"])])
#exit()
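# A small sketch generalizing the blend above; interp weights start_pos
# (interp=1.0 -> start_pos, interp=0.0 -> end_pos). The helper name is hypothetical.
def blend_pose(start_pos, end_pos, interp):
    return [int(s * interp + e * (1 - interp)) for s, e in zip(start_pos, end_pos)]
# e.g. robot.arm_bus.write("Goal_Position", blend_pose(arm_calibration["start_pos"], arm_calibration["end_pos"], 0.3))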
# glove = HomonculusGlove()
# glove.run_calibration()
####GOOD FOR GRASPING
# start_pos = [
# 500,
# 900,
# 500,
# 1000,
# 100,
# 450,#250
# 950,#750
# 100,
# 300,#400
# 50,#150
# 100,
# 120,
# 980,
# 100,
# 950,
# 750,
# ]
# end_pos = [
# start_pos[0] - 400,
# start_pos[1] - 300,
# start_pos[2] + 500,
# start_pos[3] - 50,
# start_pos[4] + 900,
# start_pos[5] + 500,
# start_pos[6] - 500,
# start_pos[7] + 900,
# start_pos[8] + 700,
# start_pos[9] + 700,
# start_pos[10] + 900,
# start_pos[11] + 700,
# start_pos[12] - 700,
# start_pos[13] + 900,
# start_pos[14] - 700,
# start_pos[15] - 700,
# ]
SCS_SERIES_CONTROL_TABLE = {
# "Max_Torque_Limit": (16, 2),
# "Phase": (18, 1),
# "Unloading_Condition": (19, 1),
"Protective_Torque": (37, 1),
"Protection_Time": (38, 1),
#Baud_Rate": (48, 1),
}
def read_and_print_scs_values(robot):
for param_name in SCS_SERIES_CONTROL_TABLE:
value = robot.hand_bus.read(param_name)
print(f"{param_name}: {value}")
motor_1_values = {
    "Lock": 255,
    # "Protection_Time": 20,  # Writing to these registers resets them to 0 for an unknown reason (Protection_Time was 100).
}
# motor_1_values = {
# "Lock": 1,
# "Protection_Time": 100,
# "Protective_Torque": 20,
# "Phase": 1,#thisu is bullshit
# "Unloading_Condition": 32,
# }
# Bug when writing to specific registers of the SCS0009.
# Write values to motor 2; overload torque is active there.
# Writes do succeed; the jittering is caused by the overload torque protection still being triggered.
# TODO: write a functioning version for the SCS0009.

View File

@@ -0,0 +1,18 @@
# Color gradient function (0-2024 scaled to 0-10)
def value_to_color(value):
# Clamp the value between 0 and 2024
value = max(0, min(2024, value))
# Scale from [0..2024] to [0..10]
scaled_value = (value / 2024) * 10
# Green to Yellow (scaled_value 0..5), then Yellow to Red (scaled_value 5..10)
if scaled_value <= 5:
r = int(255 * (scaled_value / 5))
g = 255
else:
r = 255
g = int(255 * (1 - (scaled_value - 5) / 5))
b = 0
return (r, g, b)
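# Example: map Present_Load-style readings (0-2024) to RGB.
# value_to_color(0)    -> (0, 255, 0)    green, no load
# value_to_color(1012) -> (255, 255, 0)  yellow, mid load
# value_to_color(2024) -> (255, 0, 0)    red, max load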

View File

@@ -1,222 +0,0 @@
import shutil
from pathlib import Path
import numpy as np
import torch
from lerobot.common.datasets.lerobot_dataset import LEROBOT_HOME, LeRobotDataset
from lerobot.common.datasets.push_dataset_to_hub._download_raw import download_raw
PUSHT_TASK = "Push the T-shaped blue block onto the T-shaped green target surface."
PUSHT_FEATURES = {
"observation.state": {
"dtype": "float32",
"shape": (2,),
"names": {
"axes": ["x", "y"],
},
},
"action": {
"dtype": "float32",
"shape": (2,),
"names": {
"axes": ["x", "y"],
},
},
"next.reward": {
"dtype": "float32",
"shape": (1,),
"names": None,
},
"next.success": {
"dtype": "bool",
"shape": (1,),
"names": None,
},
"observation.environment_state": {
"dtype": "float32",
"shape": (16,),
"names": [
"keypoints",
],
},
"observation.image": {
"dtype": None,
"shape": (3, 96, 96),
"names": [
"channel",
"height",
"width",
],
},
}
def build_features(mode: str) -> dict:
features = PUSHT_FEATURES
if mode == "keypoints":
features.pop("observation.image")
else:
features.pop("observation.environment_state")
features["observation.image"]["dtype"] = mode
return features
def load_raw_dataset(zarr_path: Path):
try:
from lerobot.common.datasets.push_dataset_to_hub._diffusion_policy_replay_buffer import (
ReplayBuffer as DiffusionPolicyReplayBuffer,
)
except ModuleNotFoundError as e:
print("`gym_pusht` is not installed. Please install it with `pip install 'lerobot[gym_pusht]'`")
raise e
zarr_data = DiffusionPolicyReplayBuffer.copy_from_path(zarr_path)
return zarr_data
def calculate_coverage(zarr_data):
try:
import pymunk
from gym_pusht.envs.pusht import PushTEnv, pymunk_to_shapely
except ModuleNotFoundError as e:
print("`gym_pusht` is not installed. Please install it with `pip install 'lerobot[gym_pusht]'`")
raise e
block_pos = zarr_data["state"][:, 2:4]
block_angle = zarr_data["state"][:, 4]
num_frames = len(block_pos)
coverage = np.zeros((num_frames,))
# 8 keypoints with 2 coords each
keypoints = np.zeros((num_frames, 16))
# Set x, y, theta (in radians)
goal_pos_angle = np.array([256, 256, np.pi / 4])
goal_body = PushTEnv.get_goal_pose_body(goal_pos_angle)
for i in range(num_frames):
space = pymunk.Space()
space.gravity = 0, 0
space.damping = 0
# Add walls.
walls = [
PushTEnv.add_segment(space, (5, 506), (5, 5), 2),
PushTEnv.add_segment(space, (5, 5), (506, 5), 2),
PushTEnv.add_segment(space, (506, 5), (506, 506), 2),
PushTEnv.add_segment(space, (5, 506), (506, 506), 2),
]
space.add(*walls)
block_body, block_shapes = PushTEnv.add_tee(space, block_pos[i].tolist(), block_angle[i].item())
goal_geom = pymunk_to_shapely(goal_body, block_body.shapes)
block_geom = pymunk_to_shapely(block_body, block_body.shapes)
intersection_area = goal_geom.intersection(block_geom).area
goal_area = goal_geom.area
coverage[i] = intersection_area / goal_area
keypoints[i] = torch.from_numpy(PushTEnv.get_keypoints(block_shapes).flatten())
return coverage, keypoints
def calculate_success(coverage: float, success_threshold: float):
return coverage > success_threshold
def calculate_reward(coverage: float, success_threshold: float):
return np.clip(coverage / success_threshold, 0, 1)
def main(raw_dir: Path, repo_id: str, mode: str = "video", push_to_hub: bool = True):
if mode not in ["video", "image", "keypoints"]:
raise ValueError(mode)
if (LEROBOT_HOME / repo_id).exists():
shutil.rmtree(LEROBOT_HOME / repo_id)
if not raw_dir.exists():
download_raw(raw_dir, repo_id="lerobot-raw/pusht_raw")
zarr_data = load_raw_dataset(zarr_path=raw_dir / "pusht_cchi_v7_replay.zarr")
env_state = zarr_data["state"][:]
agent_pos = env_state[:, :2]
action = zarr_data["action"][:]
image = zarr_data["img"] # (b, h, w, c)
episode_data_index = {
"from": np.concatenate(([0], zarr_data.meta["episode_ends"][:-1])),
"to": zarr_data.meta["episode_ends"],
}
# Calculate success and reward based on the overlapping area
# of the T-object and the T-area.
coverage, keypoints = calculate_coverage(zarr_data)
success = calculate_success(coverage, success_threshold=0.95)
reward = calculate_reward(coverage, success_threshold=0.95)
features = build_features(mode)
dataset = LeRobotDataset.create(
repo_id=repo_id,
fps=10,
robot_type="2d pointer",
features=features,
image_writer_threads=4,
)
episodes = range(len(episode_data_index["from"]))
for ep_idx in episodes:
from_idx = episode_data_index["from"][ep_idx]
to_idx = episode_data_index["to"][ep_idx]
num_frames = to_idx - from_idx
for frame_idx in range(num_frames):
i = from_idx + frame_idx
frame = {
"action": torch.from_numpy(action[i]),
# Shift reward and success by +1 until the last item of the episode
"next.reward": reward[i + (frame_idx < num_frames - 1)],
"next.success": success[i + (frame_idx < num_frames - 1)],
}
frame["observation.state"] = torch.from_numpy(agent_pos[i])
if mode == "keypoints":
frame["observation.environment_state"] = torch.from_numpy(keypoints[i])
else:
frame["observation.image"] = torch.from_numpy(image[i])
dataset.add_frame(frame)
dataset.save_episode(task=PUSHT_TASK)
dataset.consolidate()
if push_to_hub:
dataset.push_to_hub()
if __name__ == "__main__":
# To try this script, modify the repo id with your own HuggingFace user (e.g cadene/pusht)
repo_id = "lerobot/pusht"
modes = ["video", "image", "keypoints"]
# Uncomment if you want to try with a specific mode
# modes = ["video"]
# modes = ["image"]
# modes = ["keypoints"]
raw_dir = Path("data/lerobot-raw/pusht_raw")
for mode in modes:
if mode in ["image", "keypoints"]:
repo_id += f"_{mode}"
# download and load raw dataset, create LeRobotDataset, populate it, push to hub
main(raw_dir, repo_id=repo_id, mode=mode)
# Uncomment if you want to load the local dataset and explore it
# dataset = LeRobotDataset(repo_id=repo_id, local_files_only=True)
# breakpoint()

681
examples/test.py Normal file
View File

@@ -0,0 +1,681 @@
import threading
import time
from typing import Callable
import cv2
import numpy as np
# from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp
# from qai_hub_models.models.mediapipe_hand.model import (
# MediaPipeHand,
# )
# from qai_hub_models.utils.image_processing import (
# app_to_net_image_inputs,
# )
from lerobot.common.robot_devices.motors.feetech import (
CalibrationMode,
FeetechMotorsBus,
)
LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200
import serial
class HomonculusGlove:
def __init__(self):
self.serial_port = "/dev/tty.usbmodem21401"
self.baud_rate = 115200
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
self.thread = threading.Thread(target=self.async_read)
self.thread.start()
self.last_d = {
"thumb_0": 100,
"thumb_1": 100,
"thumb_2": 100,
"thumb_3": 100,
"index_0": 100,
"index_1": 100,
"index_2": 100,
"middle_0": 100,
"middle_1": 100,
"middle_2": 100,
"ring_0": 100,
"ring_1": 100,
"ring_2": 100,
"pinky_0": 100,
"pinky_1": 100,
"pinky_2": 100,
"battery_voltage": 100,
}
self.calibration = None
@property
def joint_names(self):
return list(self.last_d.keys())
def read(self, motor_names: list[str] | None = None):
if motor_names is None:
motor_names = self.joint_names
values = np.array([self.last_d[k] for k in motor_names])
print(motor_names)
print(values)
if self.calibration is not None:
values = self.apply_calibration(values, motor_names)
print(values)
return values
def async_read(self):
while True:
if self.serial.in_waiting > 0:
self.serial.flush()
vals = self.serial.readline().decode("utf-8").strip()
vals = vals.split(" ")
if len(vals) != 17:
continue
vals = [int(val) for val in vals]
d = {
"thumb_0": vals[0],
"thumb_1": vals[1],
"thumb_2": vals[2],
"thumb_3": vals[3],
"index_0": vals[4],
"index_1": vals[5],
"index_2": vals[6],
"middle_0": vals[7],
"middle_1": vals[8],
"middle_2": vals[9],
"ring_0": vals[10],
"ring_1": vals[11],
"ring_2": vals[12],
"pinky_0": vals[13],
"pinky_1": vals[14],
"pinky_2": vals[15],
"battery_voltage": vals[16],
}
self.last_d = d
# print(d.values())
def run_calibration(self):
print("\nMove arm to open position")
input("Press Enter to continue...")
open_pos_list = []
for _ in range(300):
open_pos = self.read()
open_pos_list.append(open_pos)
time.sleep(0.01)
open_pos = np.array(open_pos_list)
max_open_pos = open_pos.max(axis=0)
min_open_pos = open_pos.min(axis=0)
print(f"{max_open_pos=}")
print(f"{min_open_pos=}")
print("\nMove arm to closed position")
input("Press Enter to continue...")
closed_pos_list = []
for _ in range(300):
closed_pos = self.read()
closed_pos_list.append(closed_pos)
time.sleep(0.01)
closed_pos = np.array(closed_pos_list)
max_closed_pos = closed_pos.max(axis=0)
closed_pos[closed_pos < 1000] = 60000
min_closed_pos = closed_pos.min(axis=0)
print(f"{max_closed_pos=}")
print(f"{min_closed_pos=}")
open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)
# INVERSION: swap open/closed endpoints for joints whose sensor direction is reversed.
for i, jname in enumerate(self.joint_names):
if jname in ["thumb_0", "thumb_3", "index_2", "middle_2", "ring_2", "pinky_0", "pinky_2"]:
tmp_pos = open_pos[i]
open_pos[i] = closed_pos[i]
closed_pos[i] = tmp_pos
print()
print(f"{open_pos=}")
print(f"{closed_pos=}")
homing_offset = [0] * len(self.joint_names)
drive_mode = [0] * len(self.joint_names)
calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)
calib_dict = {
"homing_offset": homing_offset,
"drive_mode": drive_mode,
"start_pos": open_pos,
"end_pos": closed_pos,
"calib_mode": calib_modes,
"motor_names": self.joint_names,
}
# return calib_dict
self.set_calibration(calib_dict)
def set_calibration(self, calibration: dict[str, list]):
self.calibration = calibration
def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
"""Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
a "zero position" at 0 degree.
Note: We say "nominal degree range" since the motors can take values outside this range. For instance, 190 degrees, if the motor
rotate more than a half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.
Joints values are original in [0, 2**32[ (unsigned int32). Each motor are expected to complete a full rotation
when given a goal position that is + or - their resolution. For instance, feetech xl330-m077 have a resolution of 4096, and
at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between each motor.
To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
in the centered nominal degree range ]-180, 180[.
"""
if motor_names is None:
motor_names = self.motor_names
# Convert from unsigned int32 original range [0, 2**32] to signed float32 range
values = values.astype(np.float32)
for i, name in enumerate(motor_names):
calib_idx = self.calibration["motor_names"].index(name)
calib_mode = self.calibration["calib_mode"][calib_idx]
if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
start_pos = self.calibration["start_pos"][calib_idx]
end_pos = self.calibration["end_pos"][calib_idx]
# Rescale the present position to a nominal range [0, 100] %,
# useful for joints with linear motions like Aloha gripper
values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
values[i] = end_pos
else:
msg = (
f"Wrong motor position range detected for {name}. "
f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
f"but present value is {values[i]} %. "
"This might be due to a cable connection issue creating an artificial jump in motor values. "
"You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
)
print(msg)
# raise JointOutOfRangeError(msg)
return values
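# Worked example of the rescale above: with start_pos=500 and end_pos=900,
# a raw reading of 700 maps to (700 - 500) / (900 - 500) * 100 = 50.0 %,
# i.e. halfway through the joint's calibrated travel.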
# def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
# """Inverse of `apply_calibration`."""
# if motor_names is None:
# motor_names = self.motor_names
# for i, name in enumerate(motor_names):
# calib_idx = self.calibration["motor_names"].index(name)
# calib_mode = self.calibration["calib_mode"][calib_idx]
# if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
# start_pos = self.calibration["start_pos"][calib_idx]
# end_pos = self.calibration["end_pos"][calib_idx]
# # Convert from nominal linear range of [0, 100] % to
# # actual motor range of values which can be arbitrary.
# values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
# values = np.round(values).astype(np.int32)
# return values
class HopeJuniorRobot:
def __init__(self):
self.arm_bus = FeetechMotorsBus(
port="/dev/tty.usbmodem58760429571",
motors={
# "motor1": (2, "sts3250"),
# "motor2": (1, "scs0009"),
"shoulder_pitch": [1, "sts3250"],
"shoulder_yaw": [2, "sts3215"], # TODO: sts3250
"shoulder_roll": [3, "sts3215"], # TODO: sts3250
"elbow_flex": [4, "sts3250"],
"wrist_roll": [5, "sts3215"],
"wrist_yaw": [6, "sts3215"],
"wrist_pitch": [7, "sts3215"],
},
protocol_version=0,
)
self.hand_bus = FeetechMotorsBus(
port="/dev/tty.usbmodem585A0077581",
motors={
"thumb_basel_rotation": [30, "scs0009"],
"thumb_flexor": [27, "scs0009"],
"thumb_pinky_side": [26, "scs0009"],
"thumb_thumb_side": [28, "scs0009"],
"index_flexor": [25, "scs0009"],
"index_pinky_side": [31, "scs0009"],
"index_thumb_side": [32, "scs0009"],
"middle_flexor": [24, "scs0009"],
"middle_pinky_side": [33, "scs0009"],
"middle_thumb_side": [34, "scs0009"],
"ring_flexor": [21, "scs0009"],
"ring_pinky_side": [36, "scs0009"],
"ring_thumb_side": [35, "scs0009"],
"pinky_flexor": [23, "scs0009"],
"pinky_pinky_side": [38, "scs0009"],
"pinky_thumb_side": [37, "scs0009"],
},
protocol_version=1,
group_sync_read=False,
)
def get_hand_calibration(self):
homing_offset = [0] * len(self.hand_bus.motor_names)
drive_mode = [0] * len(self.hand_bus.motor_names)
start_pos = [
500,
900,
1000,
0,
100,
250,
750,
100,
400,
150,
100,
120,
980,
100,
950,
750,
]
end_pos = [
500 - 250,
900 - 300,
1000 - 550,
0 + 550,
1000,
250 + 700,
750 - 700,
1000,
400 + 700,
150 + 700,
1000,
120 + 700,
980 - 700,
1000,
950 - 700,
750 - 700,
]
calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)
calib_dict = {
"homing_offset": homing_offset,
"drive_mode": drive_mode,
"start_pos": start_pos,
"end_pos": end_pos,
"calib_mode": calib_modes,
"motor_names": self.hand_bus.motor_names,
}
return calib_dict
def connect(self):
self.arm_bus.connect()
self.hand_bus.connect()
ESCAPE_KEY_ID = 27
def capture_and_display_processed_frames(
frame_processor: Callable[[np.ndarray], np.ndarray],
window_display_name: str,
cap_device: int = 0,
) -> None:
"""
Capture frames from the given input camera device, run them through
the frame processor, and display the outputs in a window with the given name.
User should press Esc to exit.
Inputs:
frame_processor: Callable[[np.ndarray], np.ndarray]
Processes frames.
Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte.
window_display_name: str
Name of the window used to display frames.
cap_device: int
Identifier for the camera to use to capture frames.
"""
cv2.namedWindow(window_display_name)
capture = cv2.VideoCapture(cap_device)
if not capture.isOpened():
raise ValueError("Unable to open video capture.")
frame_count = 0
has_frame, frame = capture.read()
while has_frame:
assert isinstance(frame, np.ndarray)
frame_count = frame_count + 1
# mirror frame
frame = np.ascontiguousarray(frame[:, ::-1, ::-1])
# process & show frame
processed_frame = frame_processor(frame)
cv2.imshow(window_display_name, processed_frame[:, :, ::-1])
has_frame, frame = capture.read()
key = cv2.waitKey(1)
if key == ESCAPE_KEY_ID:
break
capture.release()
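# Example usage with an identity processor (camera index 0 is an assumption):
# capture_and_display_processed_frames(lambda frame: frame, "Camera Preview", 0)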
def main():
robot = HopeJuniorRobot()
robot.connect()
# robot.hand_bus.calibration = None
# breakpoint()
# print(robot.arm_bus.read("Present_Position"))
robot.arm_bus.write("Torque_Enable", 1)
robot.arm_bus.write("Acceleration", 20)
robot.arm_bus.read("Acceleration")
calibration = robot.get_hand_calibration()
robot.hand_bus.write("Goal_Position", calibration["start_pos"])
# robot.hand_bus.write("Goal_Position", calibration["end_pos"][:4], robot.hand_bus.motor_names[:4])
robot.hand_bus.set_calibration(calibration)
lol = 1
# # print(motors_bus.write("Goal_Position", 500))
# print(robot.hand_bus.read("Present_Position"))
# # pos = hand_bus.read("Present_Position")
# # hand_bus.write("Goal_Position", pos[0]+20, hand_bus.motor_names[0])
# # hand_bus.write("Goal_Position", pos[i]+delta, hand_bus.motor_names[i])
# robot.hand_bus.read("Acceleration")
# robot.hand_bus.write("Acceleration", 10)
# sleep = 1
# # robot.hand_bus.write(
# # "Goal_Position", [glove.last_d['index_2']-1500,300,300], ["index_pinky_side", "index_flexor", "index_thumb_side"]
# # )
# #time.sleep(sleep)
# time.sleep(sleep)
# robot.hand_bus.write(
# "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
# )
# time.sleep(sleep)
# robot.hand_bus.write(
# "Goal_Position", [100, 0, 0], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
# )
# time.sleep(sleep)
# robot.hand_bus.write(
# "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
# )
# time.sleep(sleep)
# robot.hand_bus.write(
# "Goal_Position", [200, 100, 600], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
# )
# time.sleep(sleep)
# breakpoint()
glove = HomonculusGlove()
glove.run_calibration()
# while True:
# joint_names = ["index_1", "index_2"]
# joint_values = glove.read(joint_names)
# print(joint_values)
input()
while True:
joint_names = []
joint_names += ["thumb_0", "thumb_2", "thumb_3"]
joint_names += ["index_1", "index_2"]
joint_names += ["middle_1", "middle_2"]
joint_names += ["ring_1", "ring_2"]
joint_names += ["pinky_1", "pinky_2"]
joint_values = glove.read(joint_names)
joint_values = joint_values.round().astype(int)
joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}
motor_values = []
motor_names = []
motor_names += ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"]
motor_values += [
joint_dict["thumb_3"],
joint_dict["thumb_0"],
joint_dict["thumb_2"],
joint_dict["thumb_2"],
]
motor_names += ["index_flexor", "index_pinky_side", "index_thumb_side"]
motor_values += [joint_dict["index_2"], joint_dict["index_1"], joint_dict["index_1"]]
motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
motor_values += [joint_dict["middle_2"], joint_dict["middle_1"], joint_dict["middle_1"]]
motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
motor_values += [joint_dict["ring_2"], joint_dict["ring_1"], joint_dict["ring_1"]]
motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
motor_values += [joint_dict["pinky_2"], joint_dict["pinky_1"], joint_dict["pinky_1"]]
motor_values = np.array(motor_values)
motor_values = np.clip(motor_values, 0, 100)
robot.hand_bus.write("Goal_Position", motor_values, motor_names)
time.sleep(0.02)
while True:
# print(glove.read()['index_2']-1500)
glove_index_flexor = glove.read()["index_2"] - 1500
glove_index_subflexor = glove.read()["index_1"] - 1500
glove_index_side = glove.read()["index_0"] - 2100
vals = [glove_index_flexor, 1000 - (glove_index_subflexor), glove_index_subflexor]
keys = ["index_flexor", "index_pinky_side", "index_thumb_side"]
glove_middle_flexor = glove.read()["middle_2"] - 1500
glove_middle_subflexor = 1000 - (glove.read()["middle_1"] - 1700)
vals += [glove_middle_flexor, glove_middle_subflexor, glove_middle_subflexor - 200]
keys += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
glove_ring_flexor = glove.read()["ring_2"] - 1300
print(glove_ring_flexor)
glove_ring_subflexor = glove.read()["ring_1"] - 1100
vals += [glove_ring_flexor, 1000 - glove_ring_subflexor, glove_ring_subflexor]
keys += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
glove_pinky_flexor = glove.read()["pinky_2"] - 1500
glove_pinky_subflexor = glove.read()["pinky_1"] - 1300
vals += [300 + glove_pinky_flexor, max(1000 - glove_pinky_subflexor - 100, 0), glove_pinky_subflexor]
keys += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
robot.hand_bus.write("Goal_Position", vals, keys)
time.sleep(0.1)
time.sleep(3)
def move_arm(loop=10):
sleep = 1
for i in range(loop):
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1195])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 2195])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1457, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 2357, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 974, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 2674, 1957, 1695])
time.sleep(sleep + 2)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 1632, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 1369, 1632, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 1330, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [2381, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1681, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
def move_hand(loop=10):
sleep = 0.5
for i in range(loop):
robot.hand_bus.write(
"Goal_Position",
[500, 1000, 0, 1000],
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position", [100, 1000, 150], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position", [200, 100, 700], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[500, 1000 - 250, 0 + 300, 1000 - 200],
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[100 + 450, 100 + 400, 100 + 400],
["index_flexor", "index_pinky_side", "index_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[100 + 350, 1000 - 450, 150 + 450],
["middle_flexor", "middle_pinky_side", "middle_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[200 + 650, 200 + 350, 0 + 350],
["ring_flexor", "ring_pinky_side", "ring_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[200 + 450, 100 + 400, 700 - 400],
["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"],
)
time.sleep(sleep)
move_hand(3)
move_arm(1)
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor() as executor:
executor.submit(move_arm)
executor.submit(move_hand)
# initial position
for i in range(3):
robot.hand_bus.write(
"Goal_Position", [500, 1000, 0, 1000, 100, 950, 100, 100, 1000, 150, 200, 200, 0, 200, 100, 700]
)
time.sleep(1)
# for i in range(3):
# robot.hand_bus.write("Goal_Position", [500, 1000-150, 0+250, 1000-150,
# 100+300, 950-250, 100+250,
# 100+200, 1000-300, 150+300,
# 200+500, 200+200, 0+200,
# 200+300, 100+200, 700-200])
# time.sleep(1)
# camera = 0
# score_threshold = 0.95
# iou_threshold = 0.3
# app = MediaPipeHandApp(MediaPipeHand.from_pretrained(), score_threshold, iou_threshold)
# def frame_processor(frame: np.ndarray) -> np.ndarray:
# # Input Prep
# NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs(frame)
# # Run Bounding Box & Keypoint Detector
# batched_selected_boxes, batched_selected_keypoints = app._run_box_detector(NCHW_fp32_torch_frames)
# # The region of interest ( bounding box of 4 (x, y) corners).
# # list[torch.Tensor(shape=[Num Boxes, 4, 2])],
# # where 2 == (x, y)
# #
# # A list element will be None if there is no selected ROI.
# batched_roi_4corners = app._compute_object_roi(batched_selected_boxes, batched_selected_keypoints)
# # selected landmarks for the ROI (if any)
# # list[torch.Tensor(shape=[Num Selected Landmarks, K, 3])],
# # where K == number of landmark keypoints, 3 == (x, y, confidence)
# #
# # A list element will be None if there is no ROI.
# landmarks_out = app._run_landmark_detector(NHWC_int_numpy_frames, batched_roi_4corners)
# app._draw_predictions(
# NHWC_int_numpy_frames,
# batched_selected_boxes,
# batched_selected_keypoints,
# batched_roi_4corners,
# *landmarks_out,
# )
# return NHWC_int_numpy_frames[0]
# capture_and_display_processed_frames(frame_processor, "QAIHM Mediapipe Hand Demo", camera)
if __name__ == "__main__":
main()

133
examples/test2.py Normal file
View File

@@ -0,0 +1,133 @@
#!/usr/bin/env python
#
# ********* Ping Example *********
#
#
# Available SCServo model on this example : All models using Protocol SCS
# This example is tested with a SCServo(STS/SMS/SCS), and an URT
# Be sure that the SCServo (STS/SMS/SCS) properties are already set: ID = 1, Baudnum = 6 (Baudrate: 1000000)
#
import os
if os.name == "nt":
import msvcrt
def getch():
return msvcrt.getch().decode()
else:
import sys
import termios
import tty
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
def getch():
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
from scservo_sdk import * # Uses SCServo SDK library
# Default setting
SCS_ID = 1 # SCServo ID : 1
BAUDRATE = 1000000 # SCServo default baudrate : 1000000
DEVICENAME = "/dev/tty.usbserial-2130" # Check which port is being used on your controller
# ex) Windows: "COM1" Linux: "/dev/ttyUSB0" Mac: "/dev/tty.usbserial-*"
protocol_end = 1 # SCServo bit end (STS/SMS=0, SCS=1)
# Initialize PortHandler instance
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
portHandler = PortHandler(DEVICENAME)
# Initialize PacketHandler instance
# Get methods and members of Protocol
packetHandler = PacketHandler(protocol_end)
# Open port
if portHandler.openPort():
print("Succeeded to open the port")
else:
print("Failed to open the port")
print("Press any key to terminate...")
getch()
quit()
# Set port baudrate
if portHandler.setBaudRate(BAUDRATE):
print("Succeeded to change the baudrate")
else:
print("Failed to change the baudrate")
print("Press any key to terminate...")
getch()
quit()
# Try to ping the SCServo
# Get SCServo model number
scs_model_number, scs_comm_result, scs_error = packetHandler.ping(portHandler, SCS_ID)
if scs_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(scs_comm_result))
elif scs_error != 0:
print("%s" % packetHandler.getRxPacketError(scs_error))
else:
print("[ID:%03d] ping Succeeded. SCServo model number : %d" % (SCS_ID, scs_model_number))
ADDR_SCS_PRESENT_POSITION = 56
scs_present_position, scs_comm_result, scs_error = packetHandler.read2ByteTxRx(
portHandler, SCS_ID, ADDR_SCS_PRESENT_POSITION
)
if scs_comm_result != COMM_SUCCESS:
print(packetHandler.getTxRxResult(scs_comm_result))
elif scs_error != 0:
print(packetHandler.getRxPacketError(scs_error))
# breakpoint()  # debug stop; uncomment to inspect the raw read result
scs_present_position = SCS_LOWORD(scs_present_position)
# scs_present_speed = SCS_HIWORD(scs_present_position_speed)
# print("[ID:%03d] PresPos:%03d PresSpd:%03d" % (SCS_ID, scs_present_position, SCS_TOHOST(scs_present_speed, 15)))
print("[ID:%03d] PresPos:%03d" % (SCS_ID, scs_present_position))
groupSyncRead = GroupSyncRead(portHandler, packetHandler, ADDR_SCS_PRESENT_POSITION, 2)
scs_addparam_result = groupSyncRead.addParam(SCS_ID)
if not scs_addparam_result:
print("[ID:%03d] groupSyncRead addparam failed" % SCS_ID)
quit()
# Syncread present position
scs_comm_result = groupSyncRead.txRxPacket()
if scs_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(scs_comm_result))
# Check if groupsyncread data of SCServo#1 is available
scs_getdata_result = groupSyncRead.isAvailable(SCS_ID, ADDR_SCS_PRESENT_POSITION, 2)
if scs_getdata_result:
# Get SCServo#1 present position value
scs_present_position = groupSyncRead.getData(SCS_ID, ADDR_SCS_PRESENT_POSITION, 2)
else:
scs_present_position = 0
print("[ID:%03d] groupSyncRead getdata failed" % SCS_ID)
# # Check if groupsyncread data of SCServo#2 is available
# scs_getdata_result = groupSyncRead.isAvailable(SCS2_ID, ADDR_SCS_PRESENT_POSITION, 2)
# if scs_getdata_result == True:
# # Get SCServo#2 present position value
# scs2_present_position_speed = groupSyncRead.getData(SCS2_ID, ADDR_SCS_PRESENT_POSITION, 2)
# else:
# print("[ID:%03d] groupSyncRead getdata failed" % SCS2_ID)
scs_present_position = SCS_LOWORD(scs_present_position)
print("[ID:%03d] PresPos:%03d" % (SCS_ID, scs_present_position))
# Close port
portHandler.closePort()

45
examples/test3.py Normal file
View File

@@ -0,0 +1,45 @@
import serial
class HomonculusGlove:
def __init__(self):
self.serial_port = "/dev/tty.usbmodem1101"
self.baud_rate = 115200
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
def read(self):
while True:
if self.serial.in_waiting > 0:
vals = self.serial.readline().decode("utf-8").strip()
vals = vals.split(" ")
vals = [int(val) for val in vals]
d = {
"thumb_0": vals[0],
"thumb_1": vals[1],
"thumb_2": vals[2],
"thumb_3": vals[3],
"index_0": vals[4],
"index_1": vals[5],
"index_2": vals[6],
"middle_0": vals[7],
"middle_1": vals[8],
"middle_2": vals[9],
"ring_0": vals[10],
"ring_1": vals[11],
"ring_2": vals[12],
"pinky_0": vals[13],
"pinky_1": vals[14],
"pinky_2": vals[15],
}
return d
# if ser.in_waiting > 0:
# line = ser.readline().decode('utf-8').strip()
# print(line)
if __name__ == "__main__":
glove = HomonculusGlove()
d = glove.read()
lol = 1

693
examples/test4.py Normal file
View File

@@ -0,0 +1,693 @@
import threading
import time
from typing import Callable
import cv2
import numpy as np
# from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp
# from qai_hub_models.models.mediapipe_hand.model import (
# MediaPipeHand,
# )
# from qai_hub_models.utils.image_processing import (
# app_to_net_image_inputs,
# )
from lerobot.common.robot_devices.motors.feetech import (
CalibrationMode,
FeetechMotorsBus,
)
LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200
import serial
class HomonculusGlove:
def __init__(self):
self.serial_port = "/dev/tty.usbmodem1401"
self.baud_rate = 115200
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
self.thread = threading.Thread(target=self.async_read)
self.thread.start()
self.last_d = {
"thumb_0": 100,
"thumb_1": 100,
"thumb_2": 100,
"thumb_3": 100,
"index_0": 100,
"index_1": 100,
"index_2": 100,
"middle_0": 100,
"middle_1": 100,
"middle_2": 100,
"ring_0": 100,
"ring_1": 100,
"ring_2": 100,
"pinky_0": 100,
"pinky_1": 100,
"pinky_2": 100,
"battery_voltage": 100,
}
self.calibration = None
@property
def joint_names(self):
return list(self.last_d.keys())
def read(self, motor_names: list[str] | None = None):
if motor_names is None:
motor_names = self.joint_names
values = np.array([self.last_d[k] for k in motor_names])
print(motor_names)
print(values)
if self.calibration is not None:
values = self.apply_calibration(values, motor_names)
print(values)
return values
def async_read(self):
while True:
if self.serial.in_waiting > 0:
self.serial.flush()
vals = self.serial.readline().decode("utf-8").strip()
vals = vals.split(" ")
if len(vals) != 17:
continue
vals = [int(val) for val in vals]
d = {
"thumb_0": vals[0],
"thumb_1": vals[1],
"thumb_2": vals[2],
"thumb_3": vals[3],
"index_0": vals[4],
"index_1": vals[5],
"index_2": vals[6],
"middle_0": vals[7],
"middle_1": vals[8],
"middle_2": vals[9],
"ring_0": vals[10],
"ring_1": vals[11],
"ring_2": vals[12],
"pinky_0": vals[13],
"pinky_1": vals[14],
"pinky_2": vals[15],
"battery_voltage": vals[16],
}
self.last_d = d
# print(d.values())
def run_calibration(self):
print("\nMove arm to open position")
input("Press Enter to continue...")
open_pos_list = []
for _ in range(300):
open_pos = self.read()
open_pos_list.append(open_pos)
time.sleep(0.01)
open_pos = np.array(open_pos_list)
max_open_pos = open_pos.max(axis=0)
min_open_pos = open_pos.min(axis=0)
print(f"{max_open_pos=}")
print(f"{min_open_pos=}")
print("\nMove arm to closed position")
input("Press Enter to continue...")
closed_pos_list = []
for _ in range(300):
closed_pos = self.read()
closed_pos_list.append(closed_pos)
time.sleep(0.01)
closed_pos = np.array(closed_pos_list)
max_closed_pos = closed_pos.max(axis=0)
closed_pos[closed_pos < 1000] = 60000
min_closed_pos = closed_pos.min(axis=0)
print(f"{max_closed_pos=}")
print(f"{min_closed_pos=}")
open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)
# INVERSION: swap open/closed endpoints for joints whose sensor direction is reversed.
for i, jname in enumerate(self.joint_names):
if jname in [
"thumb_0",
"thumb_3",
"index_2",
"middle_2",
"ring_2",
"pinky_0",
"pinky_2",
"index_0",
]:
tmp_pos = open_pos[i]
open_pos[i] = closed_pos[i]
closed_pos[i] = tmp_pos
print()
print(f"{open_pos=}")
print(f"{closed_pos=}")
homing_offset = [0] * len(self.joint_names)
drive_mode = [0] * len(self.joint_names)
calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)
calib_dict = {
"homing_offset": homing_offset,
"drive_mode": drive_mode,
"start_pos": open_pos,
"end_pos": closed_pos,
"calib_mode": calib_modes,
"motor_names": self.joint_names,
}
# return calib_dict
self.set_calibration(calib_dict)
def set_calibration(self, calibration: dict[str, list]):
self.calibration = calibration
def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
"""Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
a "zero position" at 0 degree.
Note: We say "nominal degree range" since the motors can take values outside this range. For instance, 190 degrees, if the motor
rotate more than a half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.
Joints values are original in [0, 2**32[ (unsigned int32). Each motor are expected to complete a full rotation
when given a goal position that is + or - their resolution. For instance, feetech xl330-m077 have a resolution of 4096, and
at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between each motor.
To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
in the centered nominal degree range ]-180, 180[.
"""
if motor_names is None:
motor_names = self.motor_names
# Convert from unsigned int32 original range [0, 2**32] to signed float32 range
values = values.astype(np.float32)
for i, name in enumerate(motor_names):
calib_idx = self.calibration["motor_names"].index(name)
calib_mode = self.calibration["calib_mode"][calib_idx]
if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
start_pos = self.calibration["start_pos"][calib_idx]
end_pos = self.calibration["end_pos"][calib_idx]
# Rescale the present position to a nominal range [0, 100] %,
# useful for joints with linear motions like Aloha gripper
values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
values[i] = end_pos
else:
msg = (
f"Wrong motor position range detected for {name}. "
f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
f"but present value is {values[i]} %. "
"This might be due to a cable connection issue creating an artificial jump in motor values. "
"You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
)
print(msg)
# raise JointOutOfRangeError(msg)
return values
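# Worked example: with start_pos=500 and end_pos=900, a raw reading of 800
# rescales to (800 - 500) / (900 - 500) * 100 = 75.0 %.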
# def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
# """Inverse of `apply_calibration`."""
# if motor_names is None:
# motor_names = self.motor_names
# for i, name in enumerate(motor_names):
# calib_idx = self.calibration["motor_names"].index(name)
# calib_mode = self.calibration["calib_mode"][calib_idx]
# if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
# start_pos = self.calibration["start_pos"][calib_idx]
# end_pos = self.calibration["end_pos"][calib_idx]
# # Convert from nominal linear range of [0, 100] % to
# # actual motor range of values which can be arbitrary.
# values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
# values = np.round(values).astype(np.int32)
# return values
class HopeJuniorRobot:
def __init__(self):
self.arm_bus = FeetechMotorsBus(
port="/dev/tty.usbmodem58760429571",
motors={
# "motor1": (2, "sts3250"),
# "motor2": (1, "scs0009"),
"shoulder_pitch": [1, "sts3250"],
"shoulder_yaw": [2, "sts3215"], # TODO: sts3250
"shoulder_roll": [3, "sts3215"], # TODO: sts3250
"elbow_flex": [4, "sts3250"],
"wrist_roll": [5, "sts3215"],
"wrist_yaw": [6, "sts3215"],
"wrist_pitch": [7, "sts3215"],
},
protocol_version=0,
)
self.hand_bus = FeetechMotorsBus(
port="/dev/tty.usbmodem585A0077581",
motors={
"thumb_basel_rotation": [30, "scs0009"],
"thumb_flexor": [27, "scs0009"],
"thumb_pinky_side": [26, "scs0009"],
"thumb_thumb_side": [28, "scs0009"],
"index_flexor": [25, "scs0009"],
"index_pinky_side": [31, "scs0009"],
"index_thumb_side": [32, "scs0009"],
"middle_flexor": [24, "scs0009"],
"middle_pinky_side": [33, "scs0009"],
"middle_thumb_side": [34, "scs0009"],
"ring_flexor": [21, "scs0009"],
"ring_pinky_side": [36, "scs0009"],
"ring_thumb_side": [35, "scs0009"],
"pinky_flexor": [23, "scs0009"],
"pinky_pinky_side": [38, "scs0009"],
"pinky_thumb_side": [37, "scs0009"],
},
protocol_version=1,
group_sync_read=False,
)
def get_hand_calibration(self):
homing_offset = [0] * len(self.hand_bus.motor_names)
drive_mode = [0] * len(self.hand_bus.motor_names)
start_pos = [
500,
900,
1000,
0,
100,
250,
750,
100,
400,
150,
100,
120,
980,
100,
950,
750,
]
end_pos = [
500 - 250,
900 - 300,
1000 - 550,
0 + 550,
1000,
start_pos[5] + 500,
start_pos[6] - 500,
1000,
400 + 700,
150 + 700,
1000,
120 + 700,
980 - 700,
1000,
950 - 700,
750 - 700,
]
calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)
calib_dict = {
"homing_offset": homing_offset,
"drive_mode": drive_mode,
"start_pos": start_pos,
"end_pos": end_pos,
"calib_mode": calib_modes,
"motor_names": self.hand_bus.motor_names,
}
return calib_dict
def connect(self):
self.arm_bus.connect()
self.hand_bus.connect()
ESCAPE_KEY_ID = 27
def capture_and_display_processed_frames(
frame_processor: Callable[[np.ndarray], np.ndarray],
window_display_name: str,
cap_device: int = 0,
) -> None:
"""
Capture frames from the given input camera device, run them through
the frame processor, and display the outputs in a window with the given name.
User should press Esc to exit.
Inputs:
frame_processor: Callable[[np.ndarray], np.ndarray]
Processes frames.
Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte.
window_display_name: str
Name of the window used to display frames.
cap_device: int
Identifier for the camera to use to capture frames.
"""
cv2.namedWindow(window_display_name)
capture = cv2.VideoCapture(cap_device)
if not capture.isOpened():
raise ValueError("Unable to open video capture.")
frame_count = 0
has_frame, frame = capture.read()
while has_frame:
assert isinstance(frame, np.ndarray)
frame_count = frame_count + 1
# mirror frame
frame = np.ascontiguousarray(frame[:, ::-1, ::-1])
# process & show frame
processed_frame = frame_processor(frame)
cv2.imshow(window_display_name, processed_frame[:, :, ::-1])
has_frame, frame = capture.read()
key = cv2.waitKey(1)
if key == ESCAPE_KEY_ID:
break
capture.release()
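# Example usage (identity processor, default camera):
# capture_and_display_processed_frames(lambda frame: frame, "Camera Preview")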
def main():
robot = HopeJuniorRobot()
robot.connect()
# robot.hand_bus.calibration = None
# breakpoint()
# print(robot.arm_bus.read("Present_Position"))
robot.arm_bus.write("Torque_Enable", 1)
robot.arm_bus.write("Acceleration", 20)
robot.arm_bus.read("Acceleration")
calibration = robot.get_hand_calibration()
robot.hand_bus.write("Goal_Position", calibration["start_pos"])
# robot.hand_bus.write("Goal_Position", calibration["end_pos"][:4], robot.hand_bus.motor_names[:4])
robot.hand_bus.set_calibration(calibration)
lol = 1
# # print(motors_bus.write("Goal_Position", 500))
# print(robot.hand_bus.read("Present_Position"))
# # pos = hand_bus.read("Present_Position")
# # hand_bus.write("Goal_Position", pos[0]+20, hand_bus.motor_names[0])
# # hand_bus.write("Goal_Position", pos[i]+delta, hand_bus.motor_names[i])
# robot.hand_bus.read("Acceleration")
# robot.hand_bus.write("Acceleration", 10)
# sleep = 1
# # robot.hand_bus.write(
# # "Goal_Position", [glove.last_d['index_2']-1500,300,300], ["index_pinky_side", "index_flexor", "index_thumb_side"]
# # )
# #time.sleep(sleep)
# time.sleep(sleep)
# robot.hand_bus.write(
# "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
# )
# time.sleep(sleep)
# robot.hand_bus.write(
# "Goal_Position", [100, 0, 0], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
# )
# time.sleep(sleep)
# robot.hand_bus.write(
# "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
# )
# time.sleep(sleep)
# robot.hand_bus.write(
# "Goal_Position", [200, 100, 600], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
# )
# time.sleep(sleep)
# breakpoint()
glove = HomonculusGlove()
glove.run_calibration()
# while True:
# joint_names = ["index_1", "index_2"]
# joint_values = glove.read(joint_names)
# print(joint_values)
input()
while True:
joint_names = []
# joint_names += ["thumb_0", "thumb_2", "thumb_3"]
joint_names += ["index_0", "index_1"]
# joint_names += ["middle_1", "middle_2"]
# joint_names += ["ring_1", "ring_2"]
# joint_names += ["pinky_0", "pinky_2"]
joint_values = glove.read(joint_names)
joint_values = joint_values.round().astype(int)
joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}
motor_values = []
motor_names = []
# motor_names += ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"]
# motor_values += [joint_dict["thumb_3"], joint_dict["thumb_0"], joint_dict["thumb_2"], joint_dict["thumb_2"]]
motor_names += ["index_pinky_side", "index_thumb_side"]
# if joint_dict["index_0"] -2100 > 0:
splayamount = 0.5
motor_values += [
(100 - joint_dict["index_0"]) * splayamount + joint_dict["index_1"] * (1 - splayamount),
(joint_dict["index_0"]) * splayamount + joint_dict["index_1"] * (1 - splayamount),
]
# else:
# motor_values += [100-joint_dict["index_0"], joint_dict["index_0"]]
# motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
# motor_values += [joint_dict["middle_2"], joint_dict["middle_1"], joint_dict["middle_1"]]
# motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
# motor_values += [joint_dict["ring_2"], joint_dict["ring_1"], joint_dict["ring_1"]]
# motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
# motor_values += [joint_dict["pinky_2"], joint_dict["pinky_0"], joint_dict["pinky_0"]]
motor_values = np.array(motor_values)
motor_values = np.clip(motor_values, 0, 100)
robot.hand_bus.write("Goal_Position", motor_values, motor_names)
time.sleep(0.02)
while True:
# print(glove.read()['index_2']-1500)
glove_index_flexor = glove.read()["index_2"] - 1500
glove_index_subflexor = glove.read()["index_1"] - 1500
glove_index_side = glove.read()["index_0"] - 2100
vals = [glove_index_flexor, 1000 - (glove_index_subflexor), glove_index_subflexor]
keys = ["index_flexor", "index_pinky_side", "index_thumb_side"]
glove_middle_flexor = glove.read()["middle_2"] - 1500
glove_middle_subflexor = 1000 - (glove.read()["middle_1"] - 1700)
vals += [glove_middle_flexor, glove_middle_subflexor, glove_middle_subflexor - 200]
keys += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
glove_ring_flexor = glove.read()["ring_2"] - 1300
print(glove_ring_flexor)
glove_ring_subflexor = glove.read()["ring_1"] - 1100
vals += [glove_ring_flexor, 1000 - glove_ring_subflexor, glove_ring_subflexor]
keys += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
glove_pinky_flexor = glove.read()["pinky_2"] - 1500
glove_pinky_subflexor = glove.read()["pinky_1"] - 1300
vals += [300 + glove_pinky_flexor, max(1000 - glove_pinky_subflexor - 100, 0), glove_pinky_subflexor]
keys += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
robot.hand_bus.write("Goal_Position", vals, keys)
time.sleep(0.1)
time.sleep(3)
def move_arm(loop=10):
sleep = 1
for i in range(loop):
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1195])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 2195])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1457, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 2357, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 974, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 2674, 1957, 1695])
time.sleep(sleep + 2)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 1632, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 1369, 1632, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 1330, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [2381, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1681, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
time.sleep(sleep)
def move_hand(loop=10):
sleep = 0.5
for i in range(loop):
robot.hand_bus.write(
"Goal_Position",
[500, 1000, 0, 1000],
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position", [100, 1000, 150], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position", [200, 100, 700], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[500, 1000 - 250, 0 + 300, 1000 - 200],
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[100 + 450, 100 + 400, 100 + 400],
["index_flexor", "index_pinky_side", "index_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[100 + 350, 1000 - 450, 150 + 450],
["middle_flexor", "middle_pinky_side", "middle_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[200 + 650, 200 + 350, 0 + 350],
["ring_flexor", "ring_pinky_side", "ring_thumb_side"],
)
time.sleep(sleep)
robot.hand_bus.write(
"Goal_Position",
[200 + 450, 100 + 400, 700 - 400],
["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"],
)
time.sleep(sleep)
move_hand(3)
move_arm(1)
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor() as executor:
executor.submit(move_arm)
executor.submit(move_hand)
# initial position
for i in range(3):
robot.hand_bus.write(
"Goal_Position", [500, 1000, 0, 1000, 100, 950, 100, 100, 1000, 150, 200, 200, 0, 200, 100, 700]
)
time.sleep(1)
# for i in range(3):
# robot.hand_bus.write("Goal_Position", [500, 1000-150, 0+250, 1000-150,
# 100+300, 950-250, 100+250,
# 100+200, 1000-300, 150+300,
# 200+500, 200+200, 0+200,
# 200+300, 100+200, 700-200])
# time.sleep(1)
# camera = 0
# score_threshold = 0.95
# iou_threshold = 0.3
# app = MediaPipeHandApp(MediaPipeHand.from_pretrained(), score_threshold, iou_threshold)
# def frame_processor(frame: np.ndarray) -> np.ndarray:
# # Input Prep
# NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs(frame)
# # Run Bounding Box & Keypoint Detector
# batched_selected_boxes, batched_selected_keypoints = app._run_box_detector(NCHW_fp32_torch_frames)
# # The region of interest ( bounding box of 4 (x, y) corners).
# # list[torch.Tensor(shape=[Num Boxes, 4, 2])],
# # where 2 == (x, y)
# #
# # A list element will be None if there is no selected ROI.
# batched_roi_4corners = app._compute_object_roi(batched_selected_boxes, batched_selected_keypoints)
# # selected landmarks for the ROI (if any)
# # list[torch.Tensor(shape=[Num Selected Landmarks, K, 3])],
# # where K == number of landmark keypoints, 3 == (x, y, confidence)
# #
# # A list element will be None if there is no ROI.
# landmarks_out = app._run_landmark_detector(NHWC_int_numpy_frames, batched_roi_4corners)
# app._draw_predictions(
# NHWC_int_numpy_frames,
# batched_selected_boxes,
# batched_selected_keypoints,
# batched_roi_4corners,
# *landmarks_out,
# )
# return NHWC_int_numpy_frames[0]
# capture_and_display_processed_frames(frame_processor, "QAIHM Mediapipe Hand Demo", camera)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,97 @@
# Faulty servo
Model = [777]
ID = [7]
Baud_Rate = [0]
Return_Delay = [0]
Response_Status_Level = [1]
Min_Angle_Limit = [1140]
Max_Angle_Limit = [2750]
Max_Temperature_Limit = [70]
Max_Voltage_Limit = [140]
Min_Voltage_Limit = [40]
Max_Torque_Limit = [1000]
Phase = [12]
Unloading_Condition = [44]
LED_Alarm_Condition = [47]
P_Coefficient = [32]
D_Coefficient = [32]
I_Coefficient = [0]
Minimum_Startup_Force = [16]
CW_Dead_Zone = [1]
CCW_Dead_Zone = [1]
Protection_Current = [310]
Angular_Resolution = [1]
Offset = [1047]
Mode = [0]
Protective_Torque = [20]
Protection_Time = [200]
Overload_Torque = [80]
Speed_closed_loop_P_proportional_coefficient = [10]
Over_Current_Protection_Time = [200]
Velocity_closed_loop_I_integral_coefficient = [200]
Torque_Enable = [1]
Acceleration = [20]
Goal_Position = [0]
Goal_Time = [0]
Goal_Speed = [0]
Torque_Limit = [1000]
Lock = [1]
Present_Position = [1494]
Present_Speed = [0]
Present_Load = [0]
Present_Voltage = [123]
Present_Temperature = [24]
Status = [0]
Moving = [0]
Present_Current = [0]
Maximum_Acceleration = [306]
# All servos of HopeJr
Model = [2825 777 777 2825 777 777 777]
ID = [1 2 3 4 5 6 7]
Baud_Rate = [0 0 0 0 0 0 0]
Return_Delay = [0 0 0 0 0 0 0]
Response_Status_Level = [1 1 1 1 1 1 1]
Min_Angle_Limit = [ 650 1300 1300 1200 600 1725 0]
Max_Angle_Limit = [2600 2050 2800 2500 4096 2250 4095]
Max_Temperature_Limit = [80 70 70 80 70 70 70]
Max_Voltage_Limit = [160 140 140 160 140 140 80]
Min_Voltage_Limit = [60 40 40 60 40 40 40]
Max_Torque_Limit = [1000 1000 1000 1000 1000 1000 1000]
Phase = [12 12 12 12 12 12 12]
Unloading_Condition = [45 44 44 45 44 44 44]
LED_Alarm_Condition = [45 47 47 45 47 47 47]
P_Coefficient = [32 32 32 32 32 32 32]
D_Coefficient = [32 32 32 32 32 32 32]
I_Coefficient = [0 0 0 0 0 0 0]
Minimum_Startup_Force = [15 16 16 12 16 16 16]
CW_Dead_Zone = [0 1 1 0 1 1 1]
CCW_Dead_Zone = [0 1 1 0 1 1 1]
Protection_Current = [310 310 310 310 310 310 500]
Angular_Resolution = [1 1 1 1 1 1 1]
Offset = [ 0 1047 1024 1047 1024 1024 0]
Mode = [0 0 0 0 0 0 0]
Protective_Torque = [20 20 20 20 20 20 20]
Protection_Time = [200 200 200 200 200 200 200]
Overload_Torque = [80 80 80 80 80 80 80]
Speed_closed_loop_P_proportional_coefficient = [10 10 10 10 10 10 10]
Over_Current_Protection_Time = [250 200 200 250 200 200 200]
Velocity_closed_loop_I_integral_coefficient = [200 200 200 200 200 200 200]
Torque_Enable = [1 1 1 1 1 1 1]
Acceleration = [20 20 20 20 20 20 20]
Goal_Position = [1909 1977 1820 1000 707 1941 1036]
Goal_Time = [0 0 0 0 0 0 0]
Goal_Speed = [0 0 0 0 0 0 0]
Torque_Limit = [1000 1000 1000 200 1000 1000 1000]
Lock = [1 1 1 1 1 1 1]
Present_Position = [1909 1982 1821 1200 710 1941 1036]
Present_Speed = [0 0 0 0 0 0 0]
Present_Load = [ 0 48 0 0 32 0 0]
Present_Voltage = [122 123 122 123 122 122 122]
Present_Temperature = [23 28 28 26 29 28 28]
Status = [0 0 0 0 0 0 1]
Moving = [0 0 0 0 0 0 0]
Present_Current = [0 1 0 1 1 0 1]
Maximum_Acceleration = [1530 306 306 1530 306 306 254]
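# A minimal sketch of how a dump like the above could be produced, assuming a
# connected FeetechMotorsBus whose control table exposes these register names:
#
# for name in ["Model", "ID", "Present_Position", "Present_Load", "Present_Temperature"]:
#     print(f"{name} = {robot.arm_bus.read(name)}")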

View File

@@ -0,0 +1,192 @@
import threading
import time
from typing import Callable
import cv2
import numpy as np
import serial
from lerobot.common.robot_devices.motors.feetech import (
CalibrationMode,
FeetechMotorsBus,
)
LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200
ESCAPE_KEY_ID = 27
class HopeJuniorRobot:
def __init__(self):
self.arm_bus = FeetechMotorsBus(
port="/dev/ttyACM1",
motors={
# "motor1": (2, "sts3250"),
# "motor2": (1, "scs0009"),
#"shoulder_pitch": [1, "sts3250"],
#"shoulder_yaw": [2, "sts3215"], # TODO: sts3250
#"shoulder_roll": [3, "sts3215"], # TODO: sts3250
#"elbow_flex": [4, "sts3250"],
#"wrist_roll": [5, "sts3215"],
#"wrist_yaw": [6, "sts3215"],
"wrist_pitch": [7, "sts3215"],
},
protocol_version=0,
)
self.hand_bus = FeetechMotorsBus(
port="/dev/ttyACM1",
motors={
"thumb_basel_rotation": [30, "scs0009"],
"thumb_flexor": [27, "scs0009"],
"thumb_pinky_side": [26, "scs0009"],
"thumb_thumb_side": [28, "scs0009"],
"index_flexor": [25, "scs0009"],
"index_pinky_side": [31, "scs0009"],
"index_thumb_side": [32, "scs0009"],
"middle_flexor": [24, "scs0009"],
"middle_pinky_side": [33, "scs0009"],
"middle_thumb_side": [34, "scs0009"],
"ring_flexor": [21, "scs0009"],
"ring_pinky_side": [36, "scs0009"],
"ring_thumb_side": [35, "scs0009"],
"pinky_flexor": [23, "scs0009"],
"pinky_pinky_side": [38, "scs0009"],
"pinky_thumb_side": [37, "scs0009"],
},
protocol_version=1,
group_sync_read=False,
)
def get_hand_calibration(self):
"""
Returns a dictionary containing calibration settings for each motor
on the hand bus.
"""
homing_offset = [0] * len(self.hand_bus.motor_names)
drive_mode = [0] * len(self.hand_bus.motor_names)
start_pos = [
500, 900, 0, 1000, 100, 250, 750, 100, 400, 150, 100, 120, 980, 100, 950, 750,
]
end_pos = [
start_pos[0] - 400, # 500 - 400 = 100
start_pos[1] - 300, # 900 - 300 = 600
start_pos[2] + 550, # 0 + 550 = 550
start_pos[3] - 550, # 1000 - 550 = 450
start_pos[4] + 900, # 100 + 900 = 1000
start_pos[5] + 500, # 250 + 500 = 750
start_pos[6] - 500, # 750 - 500 = 250
start_pos[7] + 900, # 100 + 900 = 1000
start_pos[8] + 700, # 400 + 700 = 1100
start_pos[9] + 700, # 150 + 700 = 850
start_pos[10] + 900, # 100 + 900 = 1000
start_pos[11] + 700, # 120 + 700 = 820
start_pos[12] - 700, # 980 - 700 = 280
start_pos[13] + 900, # 100 + 900 = 1000
start_pos[14] - 700, # 950 - 700 = 250
start_pos[15] - 700, # 750 - 700 = 50
]
calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)
calib_dict = {
"homing_offset": homing_offset,
"drive_mode": drive_mode,
"start_pos": start_pos,
"end_pos": end_pos,
"calib_mode": calib_modes,
"motor_names": self.hand_bus.motor_names,
}
return calib_dict
def get_arm_calibration(self):
"""
Returns a dictionary containing calibration settings for each motor
on the arm bus.
"""
homing_offset = [0] * len(self.arm_bus.motor_names)
drive_mode = [0] * len(self.arm_bus.motor_names)
# Example placeholders
start_pos = [
600, # shoulder_up
1500, # shoulder_forward
1300, # shoulder_yaw
1000, # bend_elbow
1600, # wrist_roll
1700, # wrist_yaw
600, # wrist_pitch
]
end_pos = [
2300, # shoulder_up
2300, # shoulder_forward
2800, # shoulder_yaw
2500, # bend_elbow
2800, # wrist_roll
2200, # wrist_yaw
1700, # wrist_pitch
]
calib_modes = [CalibrationMode.LINEAR.name] * len(self.arm_bus.motor_names)
calib_dict = {
"homing_offset": homing_offset,
"drive_mode": drive_mode,
"start_pos": start_pos,
"end_pos": end_pos,
"calib_mode": calib_modes,
"motor_names": self.arm_bus.motor_names,
}
return calib_dict
def connect(self):
"""Connect to the Feetech buses."""
self.arm_bus.connect()
# self.hand_bus.connect()
def capture_and_display_processed_frames(
frame_processor: Callable[[np.ndarray], np.ndarray],
window_display_name: str,
cap_device: int = 0,
) -> None:
"""
Capture frames from the given input camera device, run them through
the frame processor, and display the outputs in a window with the given name.
User should press Esc to exit.
Inputs:
frame_processor: Callable[[np.ndarray], np.ndarray]
Processes frames.
Input and output are numpy arrays of shape (H W C) with RGB channel layout and dtype uint8 / byte (frames are converted from OpenCV's BGR before being passed in).
window_display_name: str
Name of the window used to display frames.
cap_device: int
Identifier for the camera to use to capture frames.
"""
cv2.namedWindow(window_display_name)
capture = cv2.VideoCapture(cap_device)
if not capture.isOpened():
raise ValueError("Unable to open video capture.")
frame_count = 0
has_frame, frame = capture.read()
while has_frame:
frame_count = frame_count + 1
# Mirror frame horizontally and convert BGR -> RGB before processing
frame = np.ascontiguousarray(frame[:, ::-1, ::-1])
# process & show frame
processed_frame = frame_processor(frame)
cv2.imshow(window_display_name, processed_frame[:, :, ::-1])
has_frame, frame = capture.read()
key = cv2.waitKey(1)
if key == ESCAPE_KEY_ID:
break
capture.release()
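A short usage sketch for the file above, assuming it is saved as `hopejr.py` and the arm bus is reachable (camera index and window name are illustrative):

```python
from hopejr import HopeJuniorRobot, capture_and_display_processed_frames

robot = HopeJuniorRobot()
robot.connect()  # only the arm bus is connected; the hand bus is commented out
print("Arm position:", robot.arm_bus.read("Present_Position"))

# Show the camera feed unmodified; press Esc to exit.
capture_and_display_processed_frames(lambda frame: frame, "HopeJr demo", cap_device=0)
```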

Binary file not shown (added image, 56 KiB).

Binary file not shown (added image, 54 KiB).

Binary file not shown (added image, 56 KiB).

View File

@@ -0,0 +1,44 @@
import matplotlib.pyplot as plt
import time
from typing import List, Tuple
def log_and_plot_params(bus, params_to_log: list, servo_names: list,
test_id="servo_log", interval=0.1, duration=5, save_plot=True) -> Tuple[dict, List[float]]:
"""
Logs specific servo parameters for a given duration and generates a plot.
"""
servo_data = {servo_name: {param: [] for param in params_to_log} for servo_name in servo_names}
timestamps = []
start_time = time.time()
while time.time() - start_time < duration:
timestamp = time.time() - start_time
timestamps.append(timestamp)
for param in params_to_log:
values = bus.read(param, servo_names)
for servo_name, value in zip(servo_names, values):
servo_data[servo_name][param].append(value)
time.sleep(interval)
if save_plot:
for servo_name, data in servo_data.items():
plt.figure(figsize=(10, 6))
for param in params_to_log:
if all(v is not None for v in data[param]):
plt.plot(timestamps, data[param], label=param)
plt.xlabel("Time (s)")
plt.ylabel("Parameter Values")
plt.title(f"Parameter Trends for Servo: {servo_name}")
plt.legend()
plt.grid(True)
plt.tight_layout()
plot_filename = f"{test_id}_{servo_name}.png"
plt.savefig(plot_filename)
print(f"Plot saved as {plot_filename}")
return servo_data, timestamps
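A usage sketch for `log_and_plot_params`, assuming a connected `HopeJuniorRobot` from `hopejr.py` above (`wrist_pitch` is the only motor currently enabled on the arm bus):

```python
from hopejr import HopeJuniorRobot

robot = HopeJuniorRobot()
robot.connect()
servo_data, timestamps = log_and_plot_params(
    robot.arm_bus,
    params_to_log=["Present_Position", "Present_Current", "Present_Temperature"],
    servo_names=["wrist_pitch"],
    test_id="wrist_pitch_idle",
    interval=0.1,
    duration=5,
)
```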

Binary file not shown (added image, 65 KiB).

Binary file not shown (added image, 60 KiB).

Binary file not shown (added image, 54 KiB).

View File

@@ -0,0 +1,68 @@
STS_SERIES_CONTROL_TABLE = {
"Model": (3, 2),
"ID": (5, 1),
"Baud_Rate": (6, 1),
"Return_Delay": (7, 1),
"Response_Status_Level": (8, 1),
"Min_Angle_Limit": (9, 2),
"Max_Angle_Limit": (11, 2),
"Max_Temperature_Limit": (13, 1),
"Max_Voltage_Limit": (14, 1),
"Min_Voltage_Limit": (15, 1),
"Max_Torque_Limit": (16, 2),
"Phase": (18, 1),
"Unloading_Condition": (19, 1),
"LED_Alarm_Condition": (20, 1),
"P_Coefficient": (21, 1),
"D_Coefficient": (22, 1),
"I_Coefficient": (23, 1),
"Minimum_Startup_Force": (24, 2),
"CW_Dead_Zone": (26, 1),
"CCW_Dead_Zone": (27, 1),
"Protection_Current": (28, 2),
"Angular_Resolution": (30, 1),
"Offset": (31, 2),
"Mode": (33, 1),
"Protective_Torque": (34, 1),
"Protection_Time": (35, 1),
"Overload_Torque": (36, 1),
"Speed_closed_loop_P_proportional_coefficient": (37, 1),
"Over_Current_Protection_Time": (38, 1),
"Velocity_closed_loop_I_integral_coefficient": (39, 1),
"Torque_Enable": (40, 1),
"Acceleration": (41, 1),
"Goal_Position": (42, 2),
"Goal_Time": (44, 2),
"Goal_Speed": (46, 2),
"Torque_Limit": (48, 2),
"Lock": (55, 1),
"Present_Position": (56, 2),
"Present_Speed": (58, 2),
"Present_Load": (60, 2),
"Present_Voltage": (62, 1),
"Present_Temperature": (63, 1),
"Status": (65, 1),
"Moving": (66, 1),
"Present_Current": (69, 2),
# Not in the Memory Table
"Maximum_Acceleration": (85, 2),
}
import time
# Assuming STS_SERIES_CONTROL_TABLE is defined globally
def print_all_params(robot):
"""
Reads all parameters from the STS_SERIES_CONTROL_TABLE and prints their values.
"""
for param in STS_SERIES_CONTROL_TABLE.keys():
try:
val = robot.arm_bus.read(param)
print(f"{param} = {val}")
except Exception as e:
print(f"{param} read failed: {e}")
# Example usage (requires a connected HopeJuniorRobot instance):
# print_all_params(robot)
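Since each entry is an `(address, size)` pair, the table can be sanity-checked for overlapping registers; a small sketch (gaps are expected, since the Feetech memory map reserves some addresses):

```python
entries = sorted(STS_SERIES_CONTROL_TABLE.items(), key=lambda kv: kv[1][0])
for (name_a, (addr_a, size_a)), (name_b, (addr_b, _)) in zip(entries, entries[1:]):
    assert addr_a + size_a <= addr_b, f"{name_a} overlaps {name_b}"

addr, size = STS_SERIES_CONTROL_TABLE["Present_Position"]
print(f"Present_Position lives at register {addr}, {size} bytes")  # 56, 2
```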

View File

@@ -0,0 +1,26 @@
#include <DFRobot_VisualRotaryEncoder.h>
DFRobot_VisualRotaryEncoder_I2C sensor(0x54, &Wire);
void setup()
{
Serial.begin(115200);
// Attempt to initialize the sensor
while (NO_ERR != sensor.begin()) {
// Failed? Just wait a bit and try again
delay(3000);
}
}
void loop()
{
// Read the encoder value
uint16_t encoderValue = sensor.getEncoderValue();
// Print it followed by a newline
Serial.println(encoderValue);
// Delay 10ms between readings
delay(10);
}
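A host-side counterpart sketch in Python (pyserial is already used elsewhere in this branch); the serial port path is an assumption, adjust for your setup:

```python
import serial  # pyserial

# /dev/ttyUSB0 is an assumption; use the port your Arduino enumerates on.
with serial.Serial("/dev/ttyUSB0", 115200, timeout=1) as port:
    for _ in range(100):
        line = port.readline().strip()
        if line:
            print("encoder:", int(line.decode()))
```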

View File

@@ -0,0 +1,544 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"what are the actual interest values on the hopejr? make like a map"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"can change these dynamically so if the arm is moving down we can relax it instead of tensing it? so for example decreasing torque if the target position is lower than the actual position, for example. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"arm_calibration = robot.get_arm_calibration()\n",
"robot.arm_bus.write(\"Goal_Position\", arm_calibration[\"start_pos\"])"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Present Position: [1494]\n",
"Acceleration Read: [20]\n"
]
}
],
"source": [
"import time\n",
"from hopejr import HopeJuniorRobot\n",
"\n",
"\n",
"robot = HopeJuniorRobot()\n",
"robot.connect()\n",
"\n",
"# Example read of the current position\n",
"print(\"Present Position:\", robot.arm_bus.read(\"Present_Position\"))\n",
"\n",
"# Enable torque and set acceleration\n",
"robot.arm_bus.write(\"Torque_Enable\", 1)\n",
"robot.arm_bus.write(\"Acceleration\", 20)\n",
"print(\"Acceleration Read:\", robot.arm_bus.read(\"Acceleration\"))\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"robot.arm_bus.write(\"Torque_Limit\", 100,[\"elbow_flex\"])\n",
"robot.arm_bus.write(\"Protective_Torque\", 0, [\"elbow_flex\"])\n",
"robot.arm_bus.write(\"Acceleration\", 20)\n",
"robot.arm_bus.write(\"Goal_Position\", [2000], [\"elbow_flex\"])"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([1000, 1000, 1000, 1000, 1000, 1000, 1000])"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"robot.arm_bus.read(\"Max_Torque_Limit\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"robot.arm_bus.write(\"Goal_Position\", [1909, 1977, 1820, 1000, 707, 1941, 1036]) #"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"robot.arm_bus.write(\"Max_Voltage_Limit\", [160, 140, 140, 160, 140, 140, 140]) #so its not torque limit nor max torque limit, , no protective torque or overload torque\n",
"#it's 1) max voltage limit, min-max angle limits are arbitrairly set for all the motors, max temp is only set for the shoulder\n",
"#max acceleration is also set, we could lower that in the elbow to make it less responsive to commands basically\n",
"#so we limit speed and temperature, maybe we should limit torque thouhg, minimum startup force is also important. protection current as well\n",
"#changed that to 310.\n",
"#\"Max_Voltage_Limit\" also needs to be changed, different from torque_limit"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model = [777]\n",
"ID = [7]\n",
"Baud_Rate = [0]\n",
"Return_Delay = [0]\n",
"Response_Status_Level = [1]\n",
"Min_Angle_Limit = [1140]\n",
"Max_Angle_Limit = [2750]\n",
"Max_Temperature_Limit = [70]\n",
"Max_Voltage_Limit = [140]\n",
"Min_Voltage_Limit = [40]\n",
"Max_Torque_Limit = [1000]\n",
"Phase = [12]\n",
"Unloading_Condition = [44]\n",
"LED_Alarm_Condition = [47]\n",
"P_Coefficient = [32]\n",
"D_Coefficient = [32]\n",
"I_Coefficient = [0]\n",
"Minimum_Startup_Force = [16]\n",
"CW_Dead_Zone = [1]\n",
"CCW_Dead_Zone = [1]\n",
"Protection_Current = [310]\n",
"Angular_Resolution = [1]\n",
"Offset = [1047]\n",
"Mode = [0]\n",
"Protective_Torque = [20]\n",
"Protection_Time = [200]\n",
"Overload_Torque = [80]\n",
"Speed_closed_loop_P_proportional_coefficient = [10]\n",
"Over_Current_Protection_Time = [200]\n",
"Velocity_closed_loop_I_integral_coefficient = [200]\n",
"Torque_Enable = [1]\n",
"Acceleration = [20]\n",
"Goal_Position = [0]\n",
"Goal_Time = [0]\n",
"Goal_Speed = [0]\n",
"Torque_Limit = [1000]\n",
"Lock = [1]\n",
"Present_Position = [1494]\n",
"Present_Speed = [0]\n",
"Present_Load = [0]\n",
"Present_Voltage = [123]\n",
"Present_Temperature = [24]\n",
"Status = [0]\n",
"Moving = [0]\n",
"Present_Current = [0]\n",
"Maximum_Acceleration = [306]\n"
]
}
],
"source": [
"STS_SERIES_CONTROL_TABLE = {\n",
" \"Model\": (3, 2),\n",
" \"ID\": (5, 1),\n",
" \"Baud_Rate\": (6, 1),\n",
" \"Return_Delay\": (7, 1),\n",
" \"Response_Status_Level\": (8, 1),\n",
" \"Min_Angle_Limit\": (9, 2),\n",
" \"Max_Angle_Limit\": (11, 2),\n",
" \"Max_Temperature_Limit\": (13, 1),\n",
" \"Max_Voltage_Limit\": (14, 1),\n",
" \"Min_Voltage_Limit\": (15, 1),\n",
" \"Max_Torque_Limit\": (16, 2),\n",
" \"Phase\": (18, 1),\n",
" \"Unloading_Condition\": (19, 1),\n",
" \"LED_Alarm_Condition\": (20, 1),\n",
" \"P_Coefficient\": (21, 1),\n",
" \"D_Coefficient\": (22, 1),\n",
" \"I_Coefficient\": (23, 1),\n",
" \"Minimum_Startup_Force\": (24, 2),\n",
" \"CW_Dead_Zone\": (26, 1),\n",
" \"CCW_Dead_Zone\": (27, 1),\n",
" \"Protection_Current\": (28, 2),\n",
" \"Angular_Resolution\": (30, 1),\n",
" \"Offset\": (31, 2),\n",
" \"Mode\": (33, 1),\n",
" \"Protective_Torque\": (34, 1),\n",
" \"Protection_Time\": (35, 1),\n",
" \"Overload_Torque\": (36, 1),\n",
" \"Speed_closed_loop_P_proportional_coefficient\": (37, 1),\n",
" \"Over_Current_Protection_Time\": (38, 1),\n",
" \"Velocity_closed_loop_I_integral_coefficient\": (39, 1),\n",
" \"Torque_Enable\": (40, 1),\n",
" \"Acceleration\": (41, 1),\n",
" \"Goal_Position\": (42, 2),\n",
" \"Goal_Time\": (44, 2),\n",
" \"Goal_Speed\": (46, 2),\n",
" \"Torque_Limit\": (48, 2),\n",
" \"Lock\": (55, 1),\n",
" \"Present_Position\": (56, 2),\n",
" \"Present_Speed\": (58, 2),\n",
" \"Present_Load\": (60, 2),\n",
" \"Present_Voltage\": (62, 1),\n",
" \"Present_Temperature\": (63, 1),\n",
" \"Status\": (65, 1),\n",
" \"Moving\": (66, 1),\n",
" \"Present_Current\": (69, 2),\n",
" # Not in the Memory Table\n",
" \"Maximum_Acceleration\": (85, 2),\n",
"}\n",
"\n",
"import time\n",
"\n",
"# Assuming STS_SERIES_CONTROL_TABLE is defined globally\n",
"\n",
"def print_all_params(robot):\n",
" \"\"\"\n",
" Reads all parameters from the STS_SERIES_CONTROL_TABLE and prints their values.\n",
" \"\"\"\n",
" for param in STS_SERIES_CONTROL_TABLE.keys():\n",
" try:\n",
" val = robot.arm_bus.read(param)\n",
" print(f\"{param} = {val}\")\n",
" except Exception as e:\n",
" print(f\"{param} read failed: {e}\")\n",
"\n",
"\n",
"# Example usage:\n",
"print_all_params(robot)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"probably max input voltage, we can also look at temperature and "
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Acceleration Read: [20 20]\n"
]
}
],
"source": [
"\n",
"print(\"Acceleration Read:\", robot.arm_bus.read(\"Acceleration\"))"
]
},
{
"cell_type": "code",
"execution_count": 37,
"metadata": {},
"outputs": [],
"source": [
"robot.arm_bus.write(\"LED_Alarm_Condition\", 2, [\"elbow_flex\"])"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"robot.arm_bus.write(\"Acceleration\", 20, [\"elbow_flex\"])\n",
"robot.arm_bus.write(\"Acceleration\", 100, [\"wrist_yaw\"])"
]
},
{
"cell_type": "code",
"execution_count": 73,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"robot.arm_bus.write(\"Goal_Position\", [1000, 1000], [\"elbow_flex\", \"wrist_yaw\"])\n",
"time.sleep(2)\n",
"robot.arm_bus.write(\"Goal_Position\", [2000, 2000], [\"elbow_flex\", \"wrist_yaw\"])"
]
},
{
"cell_type": "code",
"execution_count": 68,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Elbow Flex Current: [1]\n",
"Elbow Flex Current: [0]\n",
"Elbow Flex Current: [3]\n",
"Elbow Flex Current: [1]\n",
"Elbow Flex Current: [1]\n",
"Elbow Flex Current: [2]\n",
"Elbow Flex Current: [1]\n",
"Elbow Flex Current: [1]\n",
"Elbow Flex Current: [2]\n",
"Elbow Flex Current: [1]\n",
"Elbow Flex Current: [1]\n",
"Elbow Flex Current: [0]\n"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[68], line 25\u001b[0m\n\u001b[1;32m 22\u001b[0m time\u001b[38;5;241m.\u001b[39msleep(\u001b[38;5;241m2\u001b[39m)\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 24\u001b[0m \u001b[38;5;66;03m# If current is zero, hold at pos_a for a bit\u001b[39;00m\n\u001b[0;32m---> 25\u001b[0m \u001b[43mtime\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msleep\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"import time\n",
"\n",
"# Enable torque on elbow_flex\n",
"robot.arm_bus.write(\"Torque_Enable\", 1, [\"elbow_flex\"])\n",
"\n",
"pos_a = 2500\n",
"pos_b = 1000\n",
"\n",
"robot.arm_bus.write(\"Goal_Position\", pos_a, [\"elbow_flex\"])\n",
"time.sleep(2)\n",
"\n",
"while True:\n",
" current_val = robot.arm_bus.read(\"Present_Current\", \"elbow_flex\")\n",
" print(\"Elbow Flex Current:\", current_val)\n",
" \n",
" # If the servo is under non-zero load/current, switch position\n",
" if current_val > 1:\n",
" robot.arm_bus.write(\"Goal_Position\", pos_b, [\"elbow_flex\"])\n",
" time.sleep(2)\n",
" # Go back to pos_a again\n",
" robot.arm_bus.write(\"Goal_Position\", pos_a, [\"elbow_flex\"])\n",
" time.sleep(2)\n",
" else:\n",
" # If current is zero, hold at pos_a for a bit\n",
" time.sleep(1)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\"Acceleration\" = 0, infinitely fast\n",
"\"Acceleration\" = 20 slow\n",
"elbow_flex is the LED one (4)\n",
"\n",
"robot.arm_bus.write(\"LED_Alarm_Condition\", 2, [\"elbow_flex\"]) #on constantly\n",
"robot.arm_bus.write(\"LED_Alarm_Condition\", 1, [\"elbow_flex\"]) #beeping\n",
"robot.arm_bus.write(\"LED_Alarm_Condition\", 0, [\"elbow_flex\"]) #off\n",
"\n",
"\"Max_Torque_Limit\": (16, 2), is what i have o play around with or \"Protective_Torque\": (37, 1), maybe\n",
"\n",
"robot.arm_bus.write(\"Torque_Enable\", 1, [\"elbow_flex\"]) 1 can move 0 cant move\n",
"\n",
"robot.arm_bus.write(\"Torque_Limit\", 300, [\"elbow_flex\"]) #how strong/weak the servo is. 0 makes it so that it cannot move basically, but i'd like to have that value change honestly and for it to be waeaker\n",
"\n",
"robot.arm_bus.write(\"Torque_Limit\", 20,[\"elbow_flex\"]) 20 in ordre to get some motion\n",
"\n",
"default is 1000\n",
"\n",
"robot.arm_bus.write(\"Goal_Speed\", -s, [\"elbow_flex\"]) #changes how fast the servo moves when going to the target, does not make it move with a specific speed "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"import time\n",
"\n",
"# Enable torque on elbow_flex\n",
"robot.arm_bus.write(\"Torque_Enable\", 1, [\"elbow_flex\"])\n",
"\n",
"pos_a = 1000\n",
"pos_b = 2500\n",
"\n",
"robot.arm_bus.write(\"Goal_Position\", pos_a, [\"elbow_flex\"])\n",
"time.sleep(2)\n",
"\n",
"while True:\n",
" current_val = robot.arm_bus.read(\"Present_Current\", \"elbow_flex\")\n",
" print(\"Elbow Flex Current:\", current_val)\n",
" \n",
" # If the servo is under non-zero load/current, switch position\n",
" if current_val > 1:\n",
" robot.arm_bus.write(\"Goal_Position\", pos_b, [\"elbow_flex\"])\n",
" time.sleep(2)\n",
" # Go back to pos_a again\n",
" robot.arm_bus.write(\"Goal_Position\", pos_a, [\"elbow_flex\"])\n",
" time.sleep(2)\n",
" else:\n",
" # If current is zero, hold at pos_a for a bit\n",
" time.sleep(1)\n",
"\n",
"\n",
"so if current is larger than x then you disable it \n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"robot.arm_bus.write(\"LED_Alarm_Condition\", 2, [\"elbow_flex\"])"
]
},
{
"cell_type": "code",
"execution_count": 43,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0]\n",
"[0]\n",
"[2]\n",
"[4]\n",
"[0]\n",
"[0]\n",
"[0]\n"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[43], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28mprint\u001b[39m(robot\u001b[38;5;241m.\u001b[39marm_bus\u001b[38;5;241m.\u001b[39mread(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mPresent_Current\u001b[39m\u001b[38;5;124m\"\u001b[39m, [\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124melbow_flex\u001b[39m\u001b[38;5;124m\"\u001b[39m]))\n\u001b[0;32m----> 3\u001b[0m \u001b[43mtime\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msleep\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"while True:\n",
" print(robot.arm_bus.read(\"Present_Current\", [\"elbow_flex\"]))\n",
" time.sleep(1)"
]
},
{
"cell_type": "code",
"execution_count": 47,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Max_Voltage_Limit = [160 140 140 160 140 140 80]\n",
"Min_Angle_Limit = [ 650 1300 1300 1200 600 1725 0]\n",
"Max_Angle_Limit = [2600 2050 2800 2500 4096 2250 4095]\n",
"Max_Temperature_Limit = [80 70 70 80 70 70 70]\n",
"Acceleration = [20 20 20 20 20 20 20]\n",
"Torque_Limit = [1000 1000 1000 200 1000 1000 1000]\n",
"Minimum_Startup_Force = [15 16 16 12 16 16 16]\n",
"Protection_Current = [310 310 310 310 310 310 500]\n"
]
}
],
"source": [
"import time\n",
"\n",
"def print_important_params(robot):\n",
"\n",
" # Example parameters you mentioned; adjust as needed\n",
" param_list = [\n",
" \"Max_Voltage_Limit\",\n",
" \"Min_Angle_Limit\",\n",
" \"Max_Angle_Limit\",\n",
" \"Max_Temperature_Limit\",\n",
" \"Acceleration\", # or \"Maximum_Acceleration\" if you prefer that register\n",
" \"Torque_Limit\", # or \"Max_Torque_Limit\" if your table uses that\n",
" \"Minimum_Startup_Force\",\n",
" \"Protection_Current\",\n",
" ]\n",
" \n",
" for param in param_list:\n",
" try:\n",
" val = robot.arm_bus.read(param)\n",
" print(f\"{param} = {val}\")\n",
" except Exception as e:\n",
" print(f\"{param} read failed: {e}\")\n",
"\n",
"# -------------------------------\n",
"# Example usage\n",
"\n",
"print_important_params(robot)\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "lerobot",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.16"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
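The torque-relaxation idea from the notebook's opening markdown cells could look like the following sketch, using the `arm_bus` read/write API exercised above (the torque thresholds are illustrative guesses, not tuned values):

```python
RELAXED_TORQUE = 100   # illustrative
NORMAL_TORQUE = 1000   # default Torque_Limit per the dumps above

def move_with_relaxation(robot, goal: int, motor: str = "elbow_flex"):
    """Lower the torque limit when the target is below the present position."""
    present = robot.arm_bus.read("Present_Position", [motor])[0]
    torque = RELAXED_TORQUE if goal < present else NORMAL_TORQUE
    robot.arm_bus.write("Torque_Limit", torque, [motor])
    robot.arm_bus.write("Goal_Position", goal, [motor])
```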

View File

@@ -0,0 +1,27 @@
import time
from hopejr import HopeJuniorRobot
def main():
# Instantiate and connect to the robot
robot = HopeJuniorRobot()
robot.connect()
# Example read of the current position
print("Present Position:", robot.arm_bus.read("Present_Position"))
# Enable torque and set acceleration
robot.arm_bus.write("Torque_Enable", 1)
robot.arm_bus.write("Acceleration", 20)
print("Acceleration Read:", robot.arm_bus.read("Acceleration"))
# Move elbow_flex and wrist_yaw a few times
robot.arm_bus.write("Goal_Position", [1000, 1000], ["elbow_flex", "wrist_yaw"])
time.sleep(2)
robot.arm_bus.write("Goal_Position", [1500, 1500], ["elbow_flex", "wrist_yaw"])
time.sleep(2)
robot.arm_bus.write("Goal_Position", [1000, 1000], ["elbow_flex", "wrist_yaw"])
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,49 @@
STS_SERIES_CONTROL_TABLE = {
"Model": (3, 2),
"ID": (5, 1),
"Baud_Rate": (6, 1),
"Return_Delay": (7, 1),
"Response_Status_Level": (8, 1),
"Min_Angle_Limit": (9, 2),
"Max_Angle_Limit": (11, 2),
"Max_Temperature_Limit": (13, 1),
"Max_Voltage_Limit": (14, 1),
"Min_Voltage_Limit": (15, 1),
"Max_Torque_Limit": (16, 2),
"Phase": (18, 1),
"Unloading_Condition": (19, 1),
"LED_Alarm_Condition": (20, 1),
"P_Coefficient": (21, 1),
"D_Coefficient": (22, 1),
"I_Coefficient": (23, 1),
"Minimum_Startup_Force": (24, 2),
"CW_Dead_Zone": (26, 1),
"CCW_Dead_Zone": (27, 1),
"Protection_Current": (28, 2),
"Angular_Resolution": (30, 1),
"Offset": (31, 2),
"Mode": (33, 1),
"Protective_Torque": (34, 1),
"Protection_Time": (35, 1),
"Overload_Torque": (36, 1),
"Speed_closed_loop_P_proportional_coefficient": (37, 1),
"Over_Current_Protection_Time": (38, 1),
"Velocity_closed_loop_I_integral_coefficient": (39, 1),
"Torque_Enable": (40, 1),
"Acceleration": (41, 1),
"Goal_Position": (42, 2),
"Goal_Time": (44, 2),
"Goal_Speed": (46, 2),
"Torque_Limit": (48, 2),
"Lock": (55, 1),
"Present_Position": (56, 2),
"Present_Speed": (58, 2),
"Present_Load": (60, 2),
"Present_Voltage": (62, 1),
"Present_Temperature": (63, 1),
"Status": (65, 1),
"Moving": (66, 1),
"Present_Current": (69, 2),
# Not in the Memory Table
"Maximum_Acceleration": (85, 2),
}

View File

@@ -0,0 +1,16 @@
The first check that kicks in is current:
Protection_Current (310, in the servo's raw current units)
Present_Current, compared against the protection current
Over_Current_Protection_Time, how long until you shut it down
make a quick update about this
variables of interest are
Max_Torque_Limit = 1000,
Present_Load = 1000-something, which triggered the overload torque mechanism
Overload_Torque = 80, how much of the max torque limit do we allow?
Protection_Time = 200, after how long do we set Torque_Enable to 1? *not true lol
Protective_Torque = 20, after we trigger the safety mechanism, how much torque do we allow the motor to have?
there's actually no temperature or voltage check that the feetechs perform; the only two are current and torque, which work as described above
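Restated as a sketch, this is our reading of the failsafe behavior (not vendor-confirmed; register names as in `STS_SERIES_CONTROL_TABLE`):

```python
def check_failsafe(bus, motor: str) -> str | None:
    """Return which protection would trip for this motor, if any."""
    # First check: overcurrent. The servo compares Present_Current against
    # Protection_Current and cuts torque after Over_Current_Protection_Time.
    if bus.read("Present_Current", [motor])[0] > bus.read("Protection_Current", [motor])[0]:
        return "overcurrent"
    # Second check: overload. Overload_Torque is a percentage of
    # Max_Torque_Limit; after Protection_Time the servo clamps output
    # to Protective_Torque.
    load = bus.read("Present_Load", [motor])[0]
    max_torque = bus.read("Max_Torque_Limit", [motor])[0]
    overload_pct = bus.read("Overload_Torque", [motor])[0]
    if load > max_torque * overload_pct / 100:
        return "overload"
    return None
```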

View File

@@ -181,8 +181,8 @@ available_real_world_datasets = [
"lerobot/usc_cloth_sim",
]
available_datasets = sorted(
set(itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets))
available_datasets = list(
itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets)
)
# lists all available policies from `lerobot/common/policies`

View File

@@ -1,27 +0,0 @@
---
# For reference on dataset card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1
# Doc / guide: https://huggingface.co/docs/hub/datasets-cards
{{ card_data }}
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
{{ dataset_description | default("", true) }}
- **Homepage:** {{ url | default("[More Information Needed]", true)}}
- **Paper:** {{ paper | default("[More Information Needed]", true)}}
- **License:** {{ license | default("[More Information Needed]", true)}}
## Dataset Structure
{{ dataset_structure | default("[More Information Needed]", true)}}
## Citation
**BibTeX:**
```bibtex
{{ citation_bibtex | default("[More Information Needed]", true)}}
```

View File

@@ -19,6 +19,9 @@ from math import ceil
import einops
import torch
import tqdm
from datasets import Image
from lerobot.common.datasets.video_utils import VideoFrame
def get_stats_einops_patterns(dataset, num_workers=0):
@@ -36,13 +39,15 @@ def get_stats_einops_patterns(dataset, num_workers=0):
batch = next(iter(dataloader))
stats_patterns = {}
for key, feats_type in dataset.features.items():
# NOTE: skip language_instruction embedding in stats computation
if key == "language_instruction":
continue
for key in dataset.features:
# sanity check that tensors are not float64
assert batch[key].dtype != torch.float64
# if isinstance(feats_type, (VideoFrame, Image)):
if key in dataset.meta.camera_keys:
if isinstance(feats_type, (VideoFrame, Image)):
# sanity check that images are channel first
_, c, h, w = batch[key].shape
assert c < h and c < w, f"expect channel first images, but instead {batch[key].shape}"
@@ -58,7 +63,7 @@ def get_stats_einops_patterns(dataset, num_workers=0):
elif batch[key].ndim == 1:
stats_patterns[key] = "b -> 1"
else:
raise ValueError(f"{key}, {batch[key].shape}")
raise ValueError(f"{key}, {feats_type}, {batch[key].shape}")
return stats_patterns
@@ -170,45 +175,39 @@ def aggregate_stats(ls_datasets) -> dict[str, torch.Tensor]:
"""
data_keys = set()
for dataset in ls_datasets:
data_keys.update(dataset.meta.stats.keys())
data_keys.update(dataset.stats.keys())
stats = {k: {} for k in data_keys}
for data_key in data_keys:
for stat_key in ["min", "max"]:
# compute `max(dataset_0["max"], dataset_1["max"], ...)`
stats[data_key][stat_key] = einops.reduce(
torch.stack(
[ds.meta.stats[data_key][stat_key] for ds in ls_datasets if data_key in ds.meta.stats],
dim=0,
),
torch.stack([d.stats[data_key][stat_key] for d in ls_datasets if data_key in d.stats], dim=0),
"n ... -> ...",
stat_key,
)
total_samples = sum(d.num_frames for d in ls_datasets if data_key in d.meta.stats)
total_samples = sum(d.num_samples for d in ls_datasets if data_key in d.stats)
# Compute the "sum" statistic by multiplying each mean by the number of samples in the respective
# dataset, then divide by total_samples to get the overall "mean".
# NOTE: the brackets around (d.num_frames / total_samples) are needed to minimize the risk of
# NOTE: the brackets around (d.num_samples / total_samples) are needed to minimize the risk of
# numerical overflow!
stats[data_key]["mean"] = sum(
d.meta.stats[data_key]["mean"] * (d.num_frames / total_samples)
d.stats[data_key]["mean"] * (d.num_samples / total_samples)
for d in ls_datasets
if data_key in d.meta.stats
if data_key in d.stats
)
# The derivation for standard deviation is a little more involved but is much in the same spirit as
# the computation of the mean.
# Given two sets of data where the statistics are known:
# σ_combined = sqrt[ (n1 * (σ1^2 + d1^2) + n2 * (σ2^2 + d2^2)) / (n1 + n2) ]
# where d1 = μ1 - μ_combined, d2 = μ2 - μ_combined
# NOTE: the brackets around (d.num_frames / total_samples) are needed to minimize the risk of
# NOTE: the brackets around (d.num_samples / total_samples) are needed to minimize the risk of
# numerical overflow!
stats[data_key]["std"] = torch.sqrt(
sum(
(
d.meta.stats[data_key]["std"] ** 2
+ (d.meta.stats[data_key]["mean"] - stats[data_key]["mean"]) ** 2
)
* (d.num_frames / total_samples)
(d.stats[data_key]["std"] ** 2 + (d.stats[data_key]["mean"] - stats[data_key]["mean"]) ** 2)
* (d.num_samples / total_samples)
for d in ls_datasets
if data_key in d.meta.stats
if data_key in d.stats
)
)
return stats
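The combined-std identity in the comments above is easy to sanity-check numerically (population std, i.e. `unbiased=False`); a quick sketch:

```python
import torch

n1, n2 = 100, 300
x1, x2 = torch.randn(n1), torch.randn(n2) + 1.0
mu = (x1.mean() * n1 + x2.mean() * n2) / (n1 + n2)
d1, d2 = x1.mean() - mu, x2.mean() - mu
sigma = torch.sqrt(
    (n1 * (x1.var(unbiased=False) + d1**2) + n2 * (x2.var(unbiased=False) + d2**2)) / (n1 + n2)
)
assert torch.allclose(sigma, torch.cat([x1, x2]).std(unbiased=False))
```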

View File

@@ -91,9 +91,9 @@ def make_dataset(cfg, split: str = "train") -> LeRobotDataset | MultiLeRobotData
)
if isinstance(cfg.dataset_repo_id, str):
# TODO (aliberts): add 'episodes' arg from config after removing hydra
dataset = LeRobotDataset(
cfg.dataset_repo_id,
split=split,
delta_timestamps=cfg.training.get("delta_timestamps"),
image_transforms=image_transforms,
video_backend=cfg.video_backend,
@@ -101,6 +101,7 @@ def make_dataset(cfg, split: str = "train") -> LeRobotDataset | MultiLeRobotData
else:
dataset = MultiLeRobotDataset(
cfg.dataset_repo_id,
split=split,
delta_timestamps=cfg.training.get("delta_timestamps"),
image_transforms=image_transforms,
video_backend=cfg.video_backend,
@@ -111,6 +112,6 @@ def make_dataset(cfg, split: str = "train") -> LeRobotDataset | MultiLeRobotData
for stats_type, listconfig in stats_dict.items():
# example of stats_type: min, max, mean, std
stats = OmegaConf.to_container(listconfig, resolve=True)
dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32)
dataset.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32)
return dataset

View File

@@ -1,160 +0,0 @@
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import queue
import threading
from pathlib import Path
import numpy as np
import PIL.Image
import torch
def safe_stop_image_writer(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
dataset = kwargs.get("dataset")
image_writer = getattr(dataset, "image_writer", None) if dataset else None
if image_writer is not None:
print("Waiting for image writer to terminate...")
image_writer.stop()
raise e
return wrapper
def image_array_to_image(image_array: np.ndarray) -> PIL.Image.Image:
# TODO(aliberts): handle 1 channel and 4 for depth images
if image_array.ndim == 3 and image_array.shape[0] in [1, 3]:
# Transpose from pytorch convention (C, H, W) to (H, W, C)
image_array = image_array.transpose(1, 2, 0)
if image_array.dtype != np.uint8:
# Assume the image is in [0, 1] range for floating-point data
image_array = np.clip(image_array, 0, 1)
image_array = (image_array * 255).astype(np.uint8)
return PIL.Image.fromarray(image_array)
def write_image(image: np.ndarray | PIL.Image.Image, fpath: Path):
try:
if isinstance(image, np.ndarray):
img = image_array_to_image(image)
elif isinstance(image, PIL.Image.Image):
img = image
else:
raise TypeError(f"Unsupported image type: {type(image)}")
img.save(fpath)
except Exception as e:
print(f"Error writing image {fpath}: {e}")
def worker_thread_loop(queue: queue.Queue):
while True:
item = queue.get()
if item is None:
queue.task_done()
break
image_array, fpath = item
write_image(image_array, fpath)
queue.task_done()
def worker_process(queue: queue.Queue, num_threads: int):
threads = []
for _ in range(num_threads):
t = threading.Thread(target=worker_thread_loop, args=(queue,))
t.daemon = True
t.start()
threads.append(t)
for t in threads:
t.join()
class AsyncImageWriter:
"""
This class abstract away the initialisation of processes or/and threads to
save images on disk asynchrounously, which is critical to control a robot and record data
at a high frame rate.
When `num_processes=0`, it creates a threads pool of size `num_threads`.
When `num_processes>0`, it creates processes pool of size `num_processes`, where each subprocess starts
their own threads pool of size `num_threads`.
The optimal number of processes and threads depends on your computer capabilities.
We advise to use 4 threads per camera with 0 processes. If the fps is not stable, try to increase or lower
the number of threads. If it is still not stable, try to use 1 subprocess, or more.
"""
def __init__(self, num_processes: int = 0, num_threads: int = 1):
self.num_processes = num_processes
self.num_threads = num_threads
self.queue = None
self.threads = []
self.processes = []
self._stopped = False
if num_threads <= 0 and num_processes <= 0:
raise ValueError("Number of threads and processes must be greater than zero.")
if self.num_processes == 0:
# Use threading
self.queue = queue.Queue()
for _ in range(self.num_threads):
t = threading.Thread(target=worker_thread_loop, args=(self.queue,))
t.daemon = True
t.start()
self.threads.append(t)
else:
# Use multiprocessing
self.queue = multiprocessing.JoinableQueue()
for _ in range(self.num_processes):
p = multiprocessing.Process(target=worker_process, args=(self.queue, self.num_threads))
p.daemon = True
p.start()
self.processes.append(p)
def save_image(self, image: torch.Tensor | np.ndarray | PIL.Image.Image, fpath: Path):
if isinstance(image, torch.Tensor):
# Convert tensor to numpy array to minimize main process time
image = image.cpu().numpy()
self.queue.put((image, fpath))
def wait_until_done(self):
self.queue.join()
def stop(self):
if self._stopped:
return
if self.num_processes == 0:
for _ in self.threads:
self.queue.put(None)
for t in self.threads:
t.join()
else:
num_nones = self.num_processes * self.num_threads
for _ in range(num_nones):
self.queue.put(None)
for p in self.processes:
p.join()
if p.is_alive():
p.terminate()
self.queue.close()
self.queue.join_thread()
self._stopped = True
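For reference, the removed `AsyncImageWriter` was driven like this (a minimal sketch; paths are illustrative):

```python
import numpy as np
from pathlib import Path

Path("frames").mkdir(exist_ok=True)
writer = AsyncImageWriter(num_processes=0, num_threads=4)
frame = np.zeros((480, 640, 3), dtype=np.uint8)  # (H, W, C) uint8 image
writer.save_image(frame, Path("frames/frame_000000.png"))
writer.wait_until_done()
writer.stop()
```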

File diff suppressed because it is too large.

View File

@@ -187,7 +187,7 @@ class OnlineBuffer(torch.utils.data.Dataset):
assert data[OnlineBuffer.INDEX_KEY][0].item() == 0
# Shift the incoming indices if necessary.
if self.num_frames > 0:
if self.num_samples > 0:
last_episode_index = self._data[OnlineBuffer.EPISODE_INDEX_KEY][next_index - 1]
last_data_index = self._data[OnlineBuffer.INDEX_KEY][next_index - 1]
data[OnlineBuffer.EPISODE_INDEX_KEY] += last_episode_index + 1
@@ -227,11 +227,11 @@ class OnlineBuffer(torch.utils.data.Dataset):
)
@property
def num_frames(self) -> int:
def num_samples(self) -> int:
return np.count_nonzero(self._data[OnlineBuffer.OCCUPANCY_MASK_KEY])
def __len__(self):
return self.num_frames
return self.num_samples
def _item_to_tensors(self, item: dict) -> dict:
item_ = {}

View File

@@ -0,0 +1,468 @@
"""Functions to create an empty dataset, and populate it with frames."""
# TODO(rcadene, aliberts): to adapt as class methods of next version of LeRobotDataset
import concurrent
import json
import logging
import multiprocessing
import shutil
from pathlib import Path
import torch
import tqdm
from PIL import Image
from lerobot.common.datasets.compute_stats import compute_stats
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
from lerobot.common.datasets.push_dataset_to_hub.aloha_hdf5_format import to_hf_dataset
from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, get_default_encoding
from lerobot.common.datasets.utils import calculate_episode_data_index, create_branch
from lerobot.common.datasets.video_utils import encode_video_frames
from lerobot.common.utils.utils import log_say
from lerobot.scripts.push_dataset_to_hub import (
push_dataset_card_to_hub,
push_meta_data_to_hub,
push_videos_to_hub,
save_meta_data,
)
########################################################################################
# Asynchronous saving of images on disk
########################################################################################
def safe_stop_image_writer(func):
# TODO(aliberts): Allow to pass custom exceptions
# (e.g. ThreadServiceExit, KeyboardInterrupt, SystemExit, UnpluggedError, DynamixelCommError)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
image_writer = kwargs.get("dataset", {}).get("image_writer")
if image_writer is not None:
print("Waiting for image writer to terminate...")
stop_image_writer(image_writer, timeout=20)
raise e
return wrapper
def save_image(img_tensor, key, frame_index, episode_index, videos_dir: str):
img = Image.fromarray(img_tensor.numpy())
path = Path(videos_dir) / f"{key}_episode_{episode_index:06d}" / f"frame_{frame_index:06d}.png"
path.parent.mkdir(parents=True, exist_ok=True)
img.save(str(path), quality=100)
def loop_to_save_images_in_threads(image_queue, num_threads):
if num_threads < 1:
raise NotImplementedError(f"Only `num_threads>=1` is supported for now, but {num_threads=} given.")
with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
futures = []
while True:
# Blocks until a frame is available
frame_data = image_queue.get()
# As usually done, exit loop when receiving None to stop the worker
if frame_data is None:
break
image, key, frame_index, episode_index, videos_dir = frame_data
futures.append(executor.submit(save_image, image, key, frame_index, episode_index, videos_dir))
# Before exiting function, wait for all threads to complete
with tqdm.tqdm(total=len(futures), desc="Writing images") as progress_bar:
concurrent.futures.wait(futures)
progress_bar.update(len(futures))
def start_image_writer_processes(image_queue, num_processes, num_threads_per_process):
if num_processes < 1:
raise ValueError(f"Only `num_processes>=1` is supported, but {num_processes=} given.")
if num_threads_per_process < 1:
raise NotImplementedError(
"Only `num_threads_per_process>=1` is supported for now, but {num_threads_per_process=} given."
)
processes = []
for _ in range(num_processes):
process = multiprocessing.Process(
target=loop_to_save_images_in_threads,
args=(image_queue, num_threads_per_process),
)
process.start()
processes.append(process)
return processes
def stop_processes(processes, queue, timeout):
# Send None to each process to signal them to stop
for _ in processes:
queue.put(None)
# Wait up to `timeout` seconds for all processes to terminate
for process in processes:
process.join(timeout=timeout)
# If not terminated within the timeout, force termination
if process.is_alive():
process.terminate()
# Close the queue, no more items can be put in the queue
queue.close()
# Ensure all background queue threads have finished
queue.join_thread()
def start_image_writer(num_processes, num_threads):
"""This function abstract away the initialisation of processes or/and threads to
save images on disk asynchrounously, which is critical to control a robot and record data
at a high frame rate.
When `num_processes=0`, it returns a dictionary containing a threads pool of size `num_threads`.
When `num_processes>0`, it returns a dictionary containing a processes pool of size `num_processes`,
where each subprocess starts their own threads pool of size `num_threads`.
The optimal number of processes and threads depends on your computer capabilities.
We advise to use 4 threads per camera with 0 processes. If the fps is not stable, try to increase or lower
the number of threads. If it is still not stable, try to use 1 subprocess, or more.
"""
image_writer = {}
if num_processes == 0:
futures = []
threads_pool = concurrent.futures.ThreadPoolExecutor(max_workers=num_threads)
image_writer["threads_pool"], image_writer["futures"] = threads_pool, futures
else:
# TODO(rcadene): When using num_processes>1, `multiprocessing.Manager().Queue()`
# might be better than `multiprocessing.Queue()`. Source: https://www.geeksforgeeks.org/python-multiprocessing-queue-vs-multiprocessing-manager-queue
image_queue = multiprocessing.Queue()
processes_pool = start_image_writer_processes(
image_queue, num_processes=num_processes, num_threads_per_process=num_threads
)
image_writer["processes_pool"], image_writer["image_queue"] = processes_pool, image_queue
return image_writer
def async_save_image(image_writer, image, key, frame_index, episode_index, videos_dir):
"""This function abstract away the saving of an image on disk asynchrounously. It uses a dictionary
called image writer which contains either a pool of processes or a pool of threads.
"""
if "threads_pool" in image_writer:
threads_pool, futures = image_writer["threads_pool"], image_writer["futures"]
futures.append(threads_pool.submit(save_image, image, key, frame_index, episode_index, videos_dir))
else:
image_queue = image_writer["image_queue"]
image_queue.put((image, key, frame_index, episode_index, videos_dir))
def stop_image_writer(image_writer, timeout):
if "threads_pool" in image_writer:
futures = image_writer["futures"]
# Before exiting function, wait for all threads to complete
with tqdm.tqdm(total=len(futures), desc="Writing images") as progress_bar:
concurrent.futures.wait(futures, timeout=timeout)
progress_bar.update(len(futures))
else:
processes_pool, image_queue = image_writer["processes_pool"], image_writer["image_queue"]
stop_processes(processes_pool, image_queue, timeout=timeout)
########################################################################################
# Functions to initialize, resume and populate a dataset
########################################################################################
def init_dataset(
repo_id,
root,
force_override,
fps,
video,
write_images,
num_image_writer_processes,
num_image_writer_threads,
):
local_dir = Path(root) / repo_id
if local_dir.exists() and force_override:
shutil.rmtree(local_dir)
episodes_dir = local_dir / "episodes"
episodes_dir.mkdir(parents=True, exist_ok=True)
videos_dir = local_dir / "videos"
videos_dir.mkdir(parents=True, exist_ok=True)
# Logic to resume data recording
rec_info_path = episodes_dir / "data_recording_info.json"
if rec_info_path.exists():
with open(rec_info_path) as f:
rec_info = json.load(f)
num_episodes = rec_info["last_episode_index"] + 1
else:
num_episodes = 0
dataset = {
"repo_id": repo_id,
"local_dir": local_dir,
"videos_dir": videos_dir,
"episodes_dir": episodes_dir,
"fps": fps,
"video": video,
"rec_info_path": rec_info_path,
"num_episodes": num_episodes,
}
if write_images:
# Initialize the processes and/or threads dedicated to saving images on disk asynchronously,
# which is critical to control a robot and record data at a high frame rate.
image_writer = start_image_writer(
num_processes=num_image_writer_processes,
num_threads=num_image_writer_threads,
)
dataset["image_writer"] = image_writer
return dataset
def add_frame(dataset, observation, action):
if "current_episode" not in dataset:
# initialize episode dictionary
ep_dict = {}
for key in observation:
if key not in ep_dict:
ep_dict[key] = []
for key in action:
if key not in ep_dict:
ep_dict[key] = []
ep_dict["episode_index"] = []
ep_dict["frame_index"] = []
ep_dict["timestamp"] = []
ep_dict["next.done"] = []
dataset["current_episode"] = ep_dict
dataset["current_frame_index"] = 0
ep_dict = dataset["current_episode"]
episode_index = dataset["num_episodes"]
frame_index = dataset["current_frame_index"]
videos_dir = dataset["videos_dir"]
video = dataset["video"]
fps = dataset["fps"]
ep_dict["episode_index"].append(episode_index)
ep_dict["frame_index"].append(frame_index)
ep_dict["timestamp"].append(frame_index / fps)
ep_dict["next.done"].append(False)
img_keys = [key for key in observation if "image" in key]
non_img_keys = [key for key in observation if "image" not in key]
# Save all observed modalities except images
for key in non_img_keys:
ep_dict[key].append(observation[key])
# Save actions
for key in action:
ep_dict[key].append(action[key])
if "image_writer" not in dataset:
dataset["current_frame_index"] += 1
return
# Save images
image_writer = dataset["image_writer"]
for key in img_keys:
imgs_dir = videos_dir / f"{key}_episode_{episode_index:06d}"
async_save_image(
image_writer,
image=observation[key],
key=key,
frame_index=frame_index,
episode_index=episode_index,
videos_dir=str(videos_dir),
)
if video:
fname = f"{key}_episode_{episode_index:06d}.mp4"
frame_info = {"path": f"videos/{fname}", "timestamp": frame_index / fps}
else:
frame_info = str(imgs_dir / f"frame_{frame_index:06d}.png")
ep_dict[key].append(frame_info)
dataset["current_frame_index"] += 1
def delete_current_episode(dataset):
del dataset["current_episode"]
del dataset["current_frame_index"]
# delete temporary images
episode_index = dataset["num_episodes"]
videos_dir = dataset["videos_dir"]
for tmp_imgs_dir in videos_dir.glob(f"*_episode_{episode_index:06d}"):
shutil.rmtree(tmp_imgs_dir)
def save_current_episode(dataset):
episode_index = dataset["num_episodes"]
ep_dict = dataset["current_episode"]
episodes_dir = dataset["episodes_dir"]
rec_info_path = dataset["rec_info_path"]
ep_dict["next.done"][-1] = True
for key in ep_dict:
if "observation" in key and "image" not in key:
ep_dict[key] = torch.stack(ep_dict[key])
ep_dict["action"] = torch.stack(ep_dict["action"])
ep_dict["episode_index"] = torch.tensor(ep_dict["episode_index"])
ep_dict["frame_index"] = torch.tensor(ep_dict["frame_index"])
ep_dict["timestamp"] = torch.tensor(ep_dict["timestamp"])
ep_dict["next.done"] = torch.tensor(ep_dict["next.done"])
ep_path = episodes_dir / f"episode_{episode_index}.pth"
torch.save(ep_dict, ep_path)
rec_info = {
"last_episode_index": episode_index,
}
with open(rec_info_path, "w") as f:
json.dump(rec_info, f)
# force re-initialization of the episode dictionary during add_frame
del dataset["current_episode"]
dataset["num_episodes"] += 1
def encode_videos(dataset, image_keys, play_sounds):
log_say("Encoding videos", play_sounds)
num_episodes = dataset["num_episodes"]
videos_dir = dataset["videos_dir"]
local_dir = dataset["local_dir"]
fps = dataset["fps"]
# Use ffmpeg to convert frames stored as png into mp4 videos
for episode_index in tqdm.tqdm(range(num_episodes)):
for key in image_keys:
# key = f"observation.images.{name}"
tmp_imgs_dir = videos_dir / f"{key}_episode_{episode_index:06d}"
fname = f"{key}_episode_{episode_index:06d}.mp4"
video_path = local_dir / "videos" / fname
if video_path.exists():
# Skip if video is already encoded. Could be the case when resuming data recording.
continue
# note: `encode_video_frames` is a blocking call. Making it asynchronous shouldn't speed up encoding,
# since video encoding with ffmpeg is already using multithreading.
encode_video_frames(tmp_imgs_dir, video_path, fps, overwrite=True)
shutil.rmtree(tmp_imgs_dir)
def from_dataset_to_lerobot_dataset(dataset, play_sounds):
log_say("Consolidate episodes", play_sounds)
num_episodes = dataset["num_episodes"]
episodes_dir = dataset["episodes_dir"]
videos_dir = dataset["videos_dir"]
video = dataset["video"]
fps = dataset["fps"]
repo_id = dataset["repo_id"]
ep_dicts = []
for episode_index in tqdm.tqdm(range(num_episodes)):
ep_path = episodes_dir / f"episode_{episode_index}.pth"
ep_dict = torch.load(ep_path)
ep_dicts.append(ep_dict)
data_dict = concatenate_episodes(ep_dicts)
if video:
image_keys = [key for key in data_dict if "image" in key]
encode_videos(dataset, image_keys, play_sounds)
hf_dataset = to_hf_dataset(data_dict, video)
episode_data_index = calculate_episode_data_index(hf_dataset)
info = {
"codebase_version": CODEBASE_VERSION,
"fps": fps,
"video": video,
}
if video:
info["encoding"] = get_default_encoding()
lerobot_dataset = LeRobotDataset.from_preloaded(
repo_id=repo_id,
hf_dataset=hf_dataset,
episode_data_index=episode_data_index,
info=info,
videos_dir=videos_dir,
)
return lerobot_dataset
def save_lerobot_dataset_on_disk(lerobot_dataset):
hf_dataset = lerobot_dataset.hf_dataset
info = lerobot_dataset.info
stats = lerobot_dataset.stats
episode_data_index = lerobot_dataset.episode_data_index
local_dir = lerobot_dataset.videos_dir.parent
meta_data_dir = local_dir / "meta_data"
hf_dataset = hf_dataset.with_format(None) # to remove transforms that cant be saved
hf_dataset.save_to_disk(str(local_dir / "train"))
save_meta_data(info, stats, episode_data_index, meta_data_dir)
def push_lerobot_dataset_to_hub(lerobot_dataset, tags):
hf_dataset = lerobot_dataset.hf_dataset
local_dir = lerobot_dataset.videos_dir.parent
videos_dir = lerobot_dataset.videos_dir
repo_id = lerobot_dataset.repo_id
video = lerobot_dataset.video
meta_data_dir = local_dir / "meta_data"
if not (local_dir / "train").exists():
raise ValueError(
"You need to run `save_lerobot_dataset_on_disk(lerobot_dataset)` before pushing to the hub."
)
hf_dataset.push_to_hub(repo_id, revision="main")
push_meta_data_to_hub(repo_id, meta_data_dir, revision="main")
push_dataset_card_to_hub(repo_id, revision="main", tags=tags)
if video:
push_videos_to_hub(repo_id, videos_dir, revision="main")
create_branch(repo_id, repo_type="dataset", branch=CODEBASE_VERSION)
def create_lerobot_dataset(dataset, run_compute_stats, push_to_hub, tags, play_sounds):
if "image_writer" in dataset:
logging.info("Waiting for image writer to terminate...")
image_writer = dataset["image_writer"]
stop_image_writer(image_writer, timeout=20)
lerobot_dataset = from_dataset_to_lerobot_dataset(dataset, play_sounds)
if run_compute_stats:
log_say("Computing dataset statistics", play_sounds)
lerobot_dataset.stats = compute_stats(lerobot_dataset)
else:
logging.info("Skipping computation of the dataset statistics")
lerobot_dataset.stats = {}
save_lerobot_dataset_on_disk(lerobot_dataset)
if push_to_hub:
push_lerobot_dataset_to_hub(lerobot_dataset, tags)
return lerobot_dataset
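An end-to-end usage sketch for the helpers above (arguments illustrative; `get_observation_and_action` is a hypothetical stand-in for the teleop loop):

```python
dataset = init_dataset(
    repo_id="lerobot/my_dataset",
    root="data",
    force_override=False,
    fps=30,
    video=True,
    write_images=True,
    num_image_writer_processes=0,
    num_image_writer_threads=4,
)
for _ in range(300):
    observation, action = get_observation_and_action()  # hypothetical
    add_frame(dataset, observation, action)
save_current_episode(dataset)
lerobot_dataset = create_lerobot_dataset(
    dataset, run_compute_stats=True, push_to_hub=False, tags=None, play_sounds=False
)
```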

View File

@@ -30,12 +30,12 @@ from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub.utils import (
calculate_episode_data_index,
concatenate_episodes,
get_default_encoding,
save_images_concurrently,
)
from lerobot.common.datasets.utils import (
calculate_episode_data_index,
hf_transform_to_torch,
)
from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames

View File

@@ -24,11 +24,8 @@ from datasets import Dataset, Features, Image, Value
from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub.utils import (
calculate_episode_data_index,
concatenate_episodes,
)
from lerobot.common.datasets.utils import hf_transform_to_torch
from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes
from lerobot.common.datasets.utils import calculate_episode_data_index, hf_transform_to_torch
from lerobot.common.datasets.video_utils import VideoFrame

View File

@@ -26,8 +26,8 @@ import torch
from datasets import Dataset, Features, Image, Sequence, Value
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub.utils import calculate_episode_data_index
from lerobot.common.datasets.utils import (
calculate_episode_data_index,
hf_transform_to_torch,
)
from lerobot.common.datasets.video_utils import VideoFrame

View File

@@ -0,0 +1,639 @@
OPENX_DATASET_CONFIGS:
fractal20220817_data:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- base_pose_tool_reached
- gripper_closed
fps: 3
kuka:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- clip_function_input/base_pose_tool_reached
- gripper_closed
fps: 10
bridge_openx:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- EEF_state
- gripper_state
fps: 5
taco_play:
image_obs_keys:
- rgb_static
- rgb_gripper
depth_obs_keys:
- depth_static
- depth_gripper
state_obs_keys:
- state_eef
- state_gripper
fps: 15
jaco_play:
image_obs_keys:
- image
- image_wrist
depth_obs_keys:
- null
state_obs_keys:
- state_eef
- state_gripper
fps: 10
berkeley_cable_routing:
image_obs_keys:
- image
- top_image
- wrist45_image
- wrist225_image
depth_obs_keys:
- null
state_obs_keys:
- robot_state
fps: 10
roboturk:
image_obs_keys:
- front_rgb
depth_obs_keys:
- null
state_obs_keys:
- null
fps: 10
nyu_door_opening_surprising_effectiveness:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- null
fps: 3
viola:
image_obs_keys:
- agentview_rgb
- eye_in_hand_rgb
depth_obs_keys:
- null
state_obs_keys:
- joint_states
- gripper_states
fps: 20
berkeley_autolab_ur5:
image_obs_keys:
- image
- hand_image
depth_obs_keys:
- image_with_depth
state_obs_keys:
- state
fps: 5
toto:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 30
language_table:
image_obs_keys:
- rgb
depth_obs_keys:
- null
state_obs_keys:
- effector_translation
fps: 10
columbia_cairlab_pusht_real:
image_obs_keys:
- image
- wrist_image
depth_obs_keys:
- null
state_obs_keys:
- robot_state
fps: 10
stanford_kuka_multimodal_dataset_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- depth_image
state_obs_keys:
- ee_position
- ee_orientation
fps: 20
nyu_rot_dataset_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- eef_state
- gripper_state
fps: 3
io_ai_tech:
image_obs_keys:
- image
- image_fisheye
- image_left_side
- image_right_side
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 3
stanford_hydra_dataset_converted_externally_to_rlds:
image_obs_keys:
- image
- wrist_image
depth_obs_keys:
- null
state_obs_keys:
- eef_state
- gripper_state
fps: 10
austin_buds_dataset_converted_externally_to_rlds:
image_obs_keys:
- image
- wrist_image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 20
nyu_franka_play_dataset_converted_externally_to_rlds:
image_obs_keys:
- image
- image_additional_view
depth_obs_keys:
- depth
- depth_additional_view
state_obs_keys:
- eef_state
fps: 3
maniskill_dataset_converted_externally_to_rlds:
image_obs_keys:
- image
- wrist_image
depth_obs_keys:
- depth
- wrist_depth
state_obs_keys:
- tcp_pose
- gripper_state
fps: 20
furniture_bench_dataset_converted_externally_to_rlds:
image_obs_keys:
- image
- wrist_image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 10
cmu_franka_exploration_dataset_converted_externally_to_rlds:
image_obs_keys:
- highres_image
depth_obs_keys:
- null
state_obs_keys:
- null
fps: 10
ucsd_kitchen_dataset_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- joint_state
fps: 2
ucsd_pick_and_place_dataset_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- eef_state
- gripper_state
fps: 3
spoc:
image_obs_keys:
- image
- image_manipulation
depth_obs_keys:
- null
state_obs_keys:
- null
fps: 3
austin_sailor_dataset_converted_externally_to_rlds:
image_obs_keys:
- image
- wrist_image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 20
austin_sirius_dataset_converted_externally_to_rlds:
image_obs_keys:
- image
- wrist_image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 20
bc_z:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- present/xyz
- present/axis_angle
- present/sensed_close
fps: 10
utokyo_pr2_opening_fridge_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- eef_state
- gripper_state
fps: 10
utokyo_pr2_tabletop_manipulation_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- eef_state
- gripper_state
fps: 10
utokyo_xarm_pick_and_place_converted_externally_to_rlds:
image_obs_keys:
- image
- image2
- hand_image
depth_obs_keys:
- null
state_obs_keys:
- end_effector_pose
fps: 10
utokyo_xarm_bimanual_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- pose_r
fps: 10
robo_net:
image_obs_keys:
- image
- image1
depth_obs_keys:
- null
state_obs_keys:
- eef_state
- gripper_state
fps: 1
robo_set:
image_obs_keys:
- image_left
- image_right
- image_wrist
depth_obs_keys:
- null
state_obs_keys:
- state
- state_velocity
fps: 5
berkeley_mvp_converted_externally_to_rlds:
image_obs_keys:
- hand_image
depth_obs_keys:
- null
state_obs_keys:
- gripper
- pose
- joint_pos
fps: 5
berkeley_rpt_converted_externally_to_rlds:
image_obs_keys:
- hand_image
depth_obs_keys:
- null
state_obs_keys:
- joint_pos
- gripper
fps: 30
kaist_nonprehensile_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 10
stanford_mask_vit_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- eef_state
- gripper_state
tokyo_u_lsmo_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- eef_state
- gripper_state
fps: 10
dlr_sara_pour_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 10
dlr_sara_grid_clamp_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 10
dlr_edan_shared_control_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 5
asu_table_top_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- eef_state
- gripper_state
fps: 12.5
stanford_robocook_converted_externally_to_rlds:
image_obs_keys:
- image_1
- image_2
depth_obs_keys:
- depth_1
- depth_2
state_obs_keys:
- eef_state
- gripper_state
fps: 5
imperialcollege_sawyer_wrist_cam:
image_obs_keys:
- image
- wrist_image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 10
iamlab_cmu_pickup_insert_converted_externally_to_rlds:
image_obs_keys:
- image
- wrist_image
depth_obs_keys:
- null
state_obs_keys:
- joint_state
- gripper_state
fps: 20
uiuc_d3field:
image_obs_keys:
- image_1
- image_2
depth_obs_keys:
- depth_1
- depth_2
state_obs_keys:
- null
fps: 1
utaustin_mutex:
image_obs_keys:
- image
- wrist_image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 20
berkeley_fanuc_manipulation:
image_obs_keys:
- image
- wrist_image
depth_obs_keys:
- null
state_obs_keys:
- joint_state
- gripper_state
fps: 10
cmu_playing_with_food:
image_obs_keys:
- image
- finger_vision_1
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 10
cmu_play_fusion:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 5
cmu_stretch:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- eef_state
- gripper_state
fps: 10
berkeley_gnm_recon:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- state
- position
- yaw
fps: 3
berkeley_gnm_cory_hall:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- state
- position
- yaw
fps: 5
berkeley_gnm_sac_son:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- state
- position
- yaw
fps: 10
droid:
image_obs_keys:
- exterior_image_1_left
- exterior_image_2_left
- wrist_image_left
depth_obs_keys:
- null
state_obs_keys:
- proprio
fps: 15
droid_100:
image_obs_keys:
- exterior_image_1_left
- exterior_image_2_left
- wrist_image_left
depth_obs_keys:
- null
state_obs_keys:
- proprio
fps: 15
fmb:
image_obs_keys:
- image_side_1
- image_side_2
- image_wrist_1
- image_wrist_2
depth_obs_keys:
- image_side_1_depth
- image_side_2_depth
- image_wrist_1_depth
- image_wrist_2_depth
state_obs_keys:
- proprio
fps: 10
dobbe:
image_obs_keys:
- wrist_image
depth_obs_keys:
- null
state_obs_keys:
- proprio
fps: 3.75
usc_cloth_sim_converted_externally_to_rlds:
image_obs_keys:
- image
depth_obs_keys:
- null
state_obs_keys:
- null
fps: 10
plex_robosuite:
image_obs_keys:
- image
- wrist_image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 20
conq_hose_manipulation:
image_obs_keys:
- frontleft_fisheye_image
- frontright_fisheye_image
- hand_color_image
depth_obs_keys:
- null
state_obs_keys:
- state
fps: 30
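
Each entry above maps an RLDS dataset name to its camera keys, optional depth keys, the state keys that get stacked into `observation.state`, and the recording fps. A minimal sketch of how the conversion script later in this diff reads this file (assumes PyYAML is installed; the path matches this PR's layout):

```python
import yaml

# Load the per-dataset configs, mirroring the loading code in the conversion script.
with open("lerobot/common/datasets/push_dataset_to_hub/openx/configs.yaml") as f:
    OPENX_DATASET_CONFIGS = yaml.safe_load(f)["OPENX_DATASET_CONFIGS"]

cfg = OPENX_DATASET_CONFIGS["bridge_openx"]
print(cfg["image_obs_keys"])  # ['image']
print(cfg["state_obs_keys"])  # ['EEF_state', 'gripper_state']
print(cfg["fps"])             # 5
```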

View File

@@ -0,0 +1,106 @@
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NOTE(YL): Adapted from:
Octo: https://github.com/octo-models/octo/blob/main/octo/data/utils/data_utils.py
data_utils.py
Additional utils for data processing.
"""
from typing import Any, Dict, List
import tensorflow as tf
def binarize_gripper_actions(actions: tf.Tensor) -> tf.Tensor:
"""
Converts gripper actions from continuous to binary values (0 and 1).
We exploit the fact that most of the time, the gripper is fully open (near 1.0) or fully closed (near 0.0). As it
transitions between the two, it sometimes passes through a few intermediate values. We relabel those intermediate
values based on the state that is reached _after_ those intermediate values.
In the edge case that the trajectory ends with an intermediate value, we give up on binarizing and relabel that
chunk of intermediate values as the last action in the trajectory.
The `scan_fn` implements the following logic:
new_actions = np.empty_like(actions)
carry = actions[-1]
for i in reversed(range(actions.shape[0])):
if in_between_mask[i]:
carry = carry
else:
carry = float(open_mask[i])
new_actions[i] = carry
"""
open_mask, closed_mask = actions > 0.95, actions < 0.05
in_between_mask = tf.logical_not(tf.logical_or(open_mask, closed_mask))
is_open_float = tf.cast(open_mask, tf.float32)
def scan_fn(carry, i):
return tf.cond(in_between_mask[i], lambda: tf.cast(carry, tf.float32), lambda: is_open_float[i])
return tf.scan(scan_fn, tf.range(tf.shape(actions)[0]), actions[-1], reverse=True)
def invert_gripper_actions(actions: tf.Tensor) -> tf.Tensor:
return 1 - actions
def rel2abs_gripper_actions(actions: tf.Tensor) -> tf.Tensor:
"""
Converts relative gripper actions (+1 for closing, -1 for opening) to absolute actions (0 = closed; 1 = open).
Assumes that the first relative gripper action is not redundant (i.e., closing when already closed)!
"""
# Note =>> -1 for closing, 1 for opening, 0 for no change
opening_mask, closing_mask = actions < -0.1, actions > 0.1
thresholded_actions = tf.where(opening_mask, 1, tf.where(closing_mask, -1, 0))
def scan_fn(carry, i):
return tf.cond(thresholded_actions[i] == 0, lambda: carry, lambda: thresholded_actions[i])
# If no relative grasp, assumes open for whole trajectory
start = -1 * thresholded_actions[tf.argmax(thresholded_actions != 0, axis=0)]
start = tf.cond(start == 0, lambda: 1, lambda: start)
# Note =>> -1 for closed, 1 for open
new_actions = tf.scan(scan_fn, tf.range(tf.shape(actions)[0]), start)
new_actions = tf.cast(new_actions, tf.float32) / 2 + 0.5
return new_actions
# === Bridge-V2 =>> Dataset-Specific Transform ===
def relabel_bridge_actions(traj: Dict[str, Any]) -> Dict[str, Any]:
"""Relabels actions to use reached proprioceptive state; discards last timestep (no-action)."""
movement_actions = traj["observation"]["state"][1:, :6] - traj["observation"]["state"][:-1, :6]
traj_truncated = tf.nest.map_structure(lambda x: x[:-1], traj)
traj_truncated["action"] = tf.concat([movement_actions, traj["action"][:-1, -1:]], axis=1)
return traj_truncated
# === RLDS Dataset Initialization Utilities ===
def pprint_data_mixture(dataset_kwargs_list: List[Dict[str, Any]], dataset_weights: List[int]) -> None:
print("\n######################################################################################")
print(f"# Loading the following {len(dataset_kwargs_list)} datasets (incl. sampling weight):{'': >24} #")
for dataset_kwargs, weight in zip(dataset_kwargs_list, dataset_weights, strict=False):
pad = 80 - len(dataset_kwargs["name"])
print(f"# {dataset_kwargs['name']}: {weight:=>{pad}f} #")
print("######################################################################################\n")

View File

@@ -0,0 +1,200 @@
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NOTE(YL): Adapted from:
OpenVLA: https://github.com/openvla/openvla
Episode transforms for DROID dataset.
"""
from typing import Any, Dict
import tensorflow as tf
import tensorflow_graphics.geometry.transformation as tfg
def rmat_to_euler(rot_mat):
return tfg.euler.from_rotation_matrix(rot_mat)
def euler_to_rmat(euler):
return tfg.rotation_matrix_3d.from_euler(euler)
def invert_rmat(rot_mat):
return tfg.rotation_matrix_3d.inverse(rot_mat)
def rotmat_to_rot6d(mat):
"""
Converts rotation matrix to R6 rotation representation (first two rows in rotation matrix).
Args:
mat: rotation matrix
Returns: 6d vector (first two rows of rotation matrix)
"""
r6 = mat[..., :2, :]
r6_0, r6_1 = r6[..., 0, :], r6[..., 1, :]
r6_flat = tf.concat([r6_0, r6_1], axis=-1)
return r6_flat
def velocity_act_to_wrist_frame(velocity, wrist_in_robot_frame):
"""
Translates velocity actions (translation + rotation) from the robot's base frame to the wrist frame.
Args:
velocity: 6d velocity action (3 x translation, 3 x rotation)
wrist_in_robot_frame: 6d pose of the end-effector in robot base frame
Returns: 9d velocity action in robot wrist frame (3 x translation, 6 x rotation as R6)
"""
r_frame = euler_to_rmat(wrist_in_robot_frame[:, 3:6])
r_frame_inv = invert_rmat(r_frame)
# world to wrist: dT_pi = R^-1 dT_rbt
vel_t = (r_frame_inv @ velocity[:, :3][..., None])[..., 0]
# world to wrist: dR_pi = R^-1 dR_rbt R
dr_ = euler_to_rmat(velocity[:, 3:6])
dr_ = r_frame_inv @ (dr_ @ r_frame)
dr_r6 = rotmat_to_rot6d(dr_)
return tf.concat([vel_t, dr_r6], axis=-1)
def rand_swap_exterior_images(img1, img2):
"""
Randomly swaps the two exterior images (for training with single exterior input).
"""
return tf.cond(tf.random.uniform(shape=[]) > 0.5, lambda: (img1, img2), lambda: (img2, img1))
def droid_baseact_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
"""
DROID dataset transformation for actions expressed in *base* frame of the robot.
"""
dt = trajectory["action_dict"]["cartesian_velocity"][:, :3]
dr_ = trajectory["action_dict"]["cartesian_velocity"][:, 3:6]
trajectory["action"] = tf.concat(
(
dt,
dr_,
1 - trajectory["action_dict"]["gripper_position"],
),
axis=-1,
)
trajectory["observation"]["exterior_image_1_left"], trajectory["observation"]["exterior_image_2_left"] = (
rand_swap_exterior_images(
trajectory["observation"]["exterior_image_1_left"],
trajectory["observation"]["exterior_image_2_left"],
)
)
trajectory["observation"]["proprio"] = tf.concat(
(
trajectory["observation"]["cartesian_position"],
trajectory["observation"]["gripper_position"],
),
axis=-1,
)
return trajectory
def droid_wristact_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
"""
DROID dataset transformation for actions expressed in *wrist* frame of the robot.
"""
wrist_act = velocity_act_to_wrist_frame(
trajectory["action_dict"]["cartesian_velocity"], trajectory["observation"]["cartesian_position"]
)
trajectory["action"] = tf.concat(
(
wrist_act,
trajectory["action_dict"]["gripper_position"],
),
axis=-1,
)
trajectory["observation"]["exterior_image_1_left"], trajectory["observation"]["exterior_image_2_left"] = (
rand_swap_exterior_images(
trajectory["observation"]["exterior_image_1_left"],
trajectory["observation"]["exterior_image_2_left"],
)
)
trajectory["observation"]["proprio"] = tf.concat(
(
trajectory["observation"]["cartesian_position"],
trajectory["observation"]["gripper_position"],
),
axis=-1,
)
return trajectory
def droid_finetuning_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
"""
DROID dataset transformation for actions expressed in *base* frame of the robot.
"""
dt = trajectory["action_dict"]["cartesian_velocity"][:, :3]
dr_ = trajectory["action_dict"]["cartesian_velocity"][:, 3:6]
trajectory["action"] = tf.concat(
(
dt,
dr_,
1 - trajectory["action_dict"]["gripper_position"],
),
axis=-1,
)
trajectory["observation"]["proprio"] = tf.concat(
(
trajectory["observation"]["cartesian_position"],
trajectory["observation"]["gripper_position"],
),
axis=-1,
)
return trajectory
def zero_action_filter(traj: Dict) -> bool:
"""
Filters transitions whose actions are all-0 (only relative actions, no gripper action).
Note: this filter is applied *after* action normalization, so we need to compare to "normalized 0".
"""
droid_q01 = tf.convert_to_tensor(
[
-0.7776297926902771,
-0.5803514122962952,
-0.5795090794563293,
-0.6464047729969025,
-0.7041108310222626,
-0.8895104378461838,
]
)
droid_q99 = tf.convert_to_tensor(
[
0.7597932070493698,
0.5726242214441299,
0.7351000607013702,
0.6705610305070877,
0.6464948207139969,
0.8897542208433151,
]
)
droid_norm_0_act = (
2 * (tf.zeros_like(traj["action"][:, :6]) - droid_q01) / (droid_q99 - droid_q01 + 1e-8) - 1
)
return tf.reduce_any(tf.math.abs(traj["action"][:, :6] - droid_norm_0_act) > 1e-5)
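
To make the 9d wrist-frame action concrete: its rotation part is the R6 representation, i.e. the first two rows of a rotation matrix, flattened. A small sketch (assumes `tensorflow` and `tensorflow_graphics` are installed):

```python
import tensorflow as tf

from lerobot.common.datasets.push_dataset_to_hub.openx.droid_utils import rotmat_to_rot6d

# The identity rotation maps to its first two rows, flattened: [1 0 0 0 1 0].
identity = tf.eye(3, batch_shape=[1])
print(rotmat_to_rot6d(identity).numpy())  # [[1. 0. 0. 0. 1. 0.]]
```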

View File

@@ -0,0 +1,859 @@
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NOTE(YL): Adapted from:
OpenVLA: https://github.com/openvla/openvla
Octo: https://github.com/octo-models/octo
transforms.py
Defines a registry of per-dataset standardization transforms for each dataset in Open-X Embodiment.
Transforms adopt the following structure:
Input: Dictionary of *batched* features (i.e., has leading time dimension)
Output: Dictionary `step` =>> {
"observation": {
<image_keys, depth_image_keys>
State (in chosen state representation)
},
"action": Action (in chosen action representation),
"language_instruction": str
}
"""
from typing import Any, Dict
import tensorflow as tf
from lerobot.common.datasets.push_dataset_to_hub.openx.data_utils import (
binarize_gripper_actions,
invert_gripper_actions,
rel2abs_gripper_actions,
relabel_bridge_actions,
)
def droid_baseact_transform_fn():
from lerobot.common.datasets.push_dataset_to_hub.openx.droid_utils import droid_baseact_transform
return droid_baseact_transform
def bridge_openx_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
"""
Applies to the version of Bridge V2 in the Open X-Embodiment mixture.
Note =>> In original Bridge V2 dataset, the first timestep has an all-zero action, so we remove it!
"""
for key in trajectory:
if key == "traj_metadata":
continue
elif key in ["observation", "action"]:
for key2 in trajectory[key]:
trajectory[key][key2] = trajectory[key][key2][1:]
else:
trajectory[key] = trajectory[key][1:]
trajectory["action"] = tf.concat(
(
trajectory["action"]["world_vector"],
trajectory["action"]["rotation_delta"],
tf.cast(trajectory["action"]["open_gripper"][:, None], tf.float32),
),
axis=-1,
)
trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
trajectory = relabel_bridge_actions(trajectory)
trajectory["observation"]["EEF_state"] = trajectory["observation"]["state"][:, :6]
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
return trajectory
def bridge_orig_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
"""
Applies to the original version of Bridge V2 from the official project website.
Note =>> In original Bridge V2 dataset, the first timestep has an all-zero action, so we remove it!
"""
for key in trajectory:
if key == "traj_metadata":
continue
elif key == "observation":
for key2 in trajectory[key]:
trajectory[key][key2] = trajectory[key][key2][1:]
else:
trajectory[key] = trajectory[key][1:]
trajectory["action"] = tf.concat(
[
trajectory["action"][:, :6],
binarize_gripper_actions(trajectory["action"][:, -1])[:, None],
],
axis=1,
)
trajectory = relabel_bridge_actions(trajectory)
trajectory["observation"]["EEF_state"] = trajectory["observation"]["state"][:, :6]
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
return trajectory
def ppgm_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["action"] = tf.concat(
[
trajectory["action"][:, :6],
binarize_gripper_actions(trajectory["action"][:, -1])[:, None],
],
axis=1,
)
trajectory["observation"]["EEF_state"] = trajectory["observation"]["cartesian_position"][:, :6]
trajectory["observation"]["gripper_state"] = trajectory["observation"]["gripper_position"][:, -1:]
return trajectory
def rt1_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# make gripper action absolute action, +1 = open, 0 = close
gripper_action = trajectory["action"]["gripper_closedness_action"][:, 0]
gripper_action = rel2abs_gripper_actions(gripper_action)
trajectory["action"] = tf.concat(
(
trajectory["action"]["world_vector"],
trajectory["action"]["rotation_delta"],
gripper_action[:, None],
),
axis=-1,
)
trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
return trajectory
def kuka_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# make gripper action absolute action, +1 = open, 0 = close
gripper_action = trajectory["action"]["gripper_closedness_action"][:, 0]
gripper_action = rel2abs_gripper_actions(gripper_action)
trajectory["action"] = tf.concat(
(
trajectory["action"]["world_vector"],
trajectory["action"]["rotation_delta"],
gripper_action[:, None],
),
axis=-1,
)
# decode compressed state
eef_value = tf.io.decode_compressed(
trajectory["observation"]["clip_function_input/base_pose_tool_reached"],
compression_type="ZLIB",
)
eef_value = tf.io.decode_raw(eef_value, tf.float32)
trajectory["observation"]["clip_function_input/base_pose_tool_reached"] = tf.reshape(eef_value, (-1, 7))
gripper_value = tf.io.decode_compressed(
trajectory["observation"]["gripper_closed"], compression_type="ZLIB"
)
gripper_value = tf.io.decode_raw(gripper_value, tf.float32)
trajectory["observation"]["gripper_closed"] = tf.reshape(gripper_value, (-1, 1))
trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
return trajectory
def taco_play_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["state_eef"] = trajectory["observation"]["robot_obs"][:, :6]
trajectory["observation"]["state_gripper"] = trajectory["observation"]["robot_obs"][:, 7:8]
trajectory["action"] = trajectory["action"]["rel_actions_world"]
# invert gripper action + clip, +1 = open, 0 = close
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :6],
tf.clip_by_value(trajectory["action"][:, -1:], 0, 1),
),
axis=-1,
)
trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
return trajectory
def jaco_play_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["state_eef"] = trajectory["observation"]["end_effector_cartesian_pos"][:, :6]
trajectory["observation"]["state_gripper"] = trajectory["observation"]["end_effector_cartesian_pos"][
:, -1:
]
# make gripper action absolute action, +1 = open, 0 = close
gripper_action = trajectory["action"]["gripper_closedness_action"][:, 0]
gripper_action = rel2abs_gripper_actions(gripper_action)
trajectory["action"] = tf.concat(
(
trajectory["action"]["world_vector"],
tf.zeros_like(trajectory["action"]["world_vector"]),
gripper_action[:, None],
),
axis=-1,
)
trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
return trajectory
def berkeley_cable_routing_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["action"] = tf.concat(
(
trajectory["action"]["world_vector"],
trajectory["action"]["rotation_delta"],
tf.zeros_like(trajectory["action"]["world_vector"][:, :1]),
),
axis=-1,
)
trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
return trajectory
def roboturk_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# invert absolute gripper action, +1 = open, 0 = close
gripper_action = invert_gripper_actions(
tf.clip_by_value(trajectory["action"]["gripper_closedness_action"], 0, 1)
)
trajectory["action"] = tf.concat(
(
trajectory["action"]["world_vector"],
trajectory["action"]["rotation_delta"],
gripper_action,
),
axis=-1,
)
trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
trajectory["language_embedding"] = trajectory["observation"]["natural_language_embedding"]
return trajectory
def nyu_door_opening_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# make gripper action absolute action, +1 = open, 0 = close
gripper_action = trajectory["action"]["gripper_closedness_action"][:, 0]
gripper_action = rel2abs_gripper_actions(gripper_action)
trajectory["action"] = tf.concat(
(
trajectory["action"]["world_vector"],
trajectory["action"]["rotation_delta"],
gripper_action[:, None],
),
axis=-1,
)
trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
return trajectory
def viola_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# make gripper action, +1 = open, 0 = close
gripper_action = trajectory["action"]["gripper_closedness_action"][:, None]
gripper_action = tf.clip_by_value(gripper_action, 0, 1)
gripper_action = invert_gripper_actions(gripper_action)
trajectory["action"] = tf.concat(
(
trajectory["action"]["world_vector"],
trajectory["action"]["rotation_delta"],
gripper_action,
),
axis=-1,
)
trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
return trajectory
def berkeley_autolab_ur5_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["state"] = trajectory["observation"]["robot_state"][:, 6:14]
# make gripper action absolute action, +1 = open, 0 = close
gripper_action = trajectory["action"]["gripper_closedness_action"]
gripper_action = rel2abs_gripper_actions(gripper_action)
trajectory["action"] = tf.concat(
(
trajectory["action"]["world_vector"],
trajectory["action"]["rotation_delta"],
gripper_action[:, None],
),
axis=-1,
)
trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
return trajectory
def toto_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["action"] = tf.concat(
(
trajectory["action"]["world_vector"],
trajectory["action"]["rotation_delta"],
tf.cast(trajectory["action"]["open_gripper"][:, None], tf.float32),
),
axis=-1,
)
trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
return trajectory
def language_table_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# default to "open" gripper
trajectory["action"] = tf.concat(
(
trajectory["action"],
tf.zeros_like(trajectory["action"]),
tf.zeros_like(trajectory["action"]),
tf.ones_like(trajectory["action"][:, :1]),
),
axis=-1,
)
# decode language instruction
instruction_bytes = trajectory["observation"]["instruction"]
instruction_encoded = tf.strings.unicode_encode(instruction_bytes, output_encoding="UTF-8")
# Remove trailing padding --> convert RaggedTensor to regular Tensor.
trajectory["language_instruction"] = tf.strings.split(instruction_encoded, "\x00")[:, :1].to_tensor()[
:, 0
]
return trajectory
def pusht_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["action"] = tf.concat(
(
trajectory["action"]["world_vector"],
trajectory["action"]["rotation_delta"],
trajectory["action"]["gripper_closedness_action"][:, None],
),
axis=-1,
)
trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
return trajectory
def stanford_kuka_multimodal_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["depth_image"] = trajectory["observation"]["depth_image"][..., 0]
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :3],
tf.zeros_like(trajectory["action"][:, :3]),
trajectory["action"][:, -1:],
),
axis=-1,
)
return trajectory
def nyu_rot_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][..., :6]
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][..., -1:]
trajectory["action"] = trajectory["action"][..., :7]
return trajectory
def stanford_hydra_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# invert gripper action, +1 = open, 0 = close
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :6],
invert_gripper_actions(trajectory["action"][:, -1:]),
),
axis=-1,
)
trajectory["observation"]["eef_state"] = tf.concat(
(
trajectory["observation"]["state"][:, :3],
trajectory["observation"]["state"][:, 7:10],
),
axis=-1,
)
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -3:-2]
return trajectory
def austin_buds_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# invert gripper action + clip, +1 = open, 0 = close
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :6],
invert_gripper_actions(tf.clip_by_value(trajectory["action"][:, -1:], 0, 1)),
),
axis=-1,
)
trajectory["observation"]["state"] = trajectory["observation"]["state"][:, :8]
return trajectory
def nyu_franka_play_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["depth"] = tf.cast(trajectory["observation"]["depth"][..., 0], tf.float32)
trajectory["observation"]["depth_additional_view"] = tf.cast(
trajectory["observation"]["depth_additional_view"][..., 0], tf.float32
)
trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][:, -6:]
# clip gripper action, +1 = open, 0 = close
trajectory["action"] = tf.concat(
(
trajectory["action"][:, -8:-2],
tf.clip_by_value(trajectory["action"][:, -2:-1], 0, 1),
),
axis=-1,
)
return trajectory
def maniskill_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][..., 7:8]
return trajectory
def furniture_bench_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
import tensorflow_graphics.geometry.transformation as tft
trajectory["observation"]["state"] = tf.concat(
(
trajectory["observation"]["state"][:, :7],
trajectory["observation"]["state"][:, -1:],
),
axis=-1,
)
# invert gripper action + clip, +1 = open, 0 = close
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :3],
tft.euler.from_quaternion(trajectory["action"][:, 3:7]),
invert_gripper_actions(tf.clip_by_value(trajectory["action"][:, -1:], 0, 1)),
),
axis=-1,
)
return trajectory
def cmu_franka_exploration_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["action"] = trajectory["action"][..., :-1]
return trajectory
def ucsd_kitchen_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["joint_state"] = trajectory["observation"]["state"][:, :7]
trajectory["action"] = trajectory["action"][..., :-1]
return trajectory
def ucsd_pick_place_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][:, :6]
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :3],
tf.zeros_like(trajectory["action"][:, :3]),
trajectory["action"][:, -1:],
),
axis=-1,
)
return trajectory
def austin_sailor_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# invert gripper action + clip, +1 = open, 0 = close
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :6],
invert_gripper_actions(tf.clip_by_value(trajectory["action"][:, -1:], 0, 1)),
),
axis=-1,
)
return trajectory
def austin_sirius_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# invert gripper action + clip, +1 = open, 0 = close
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :6],
invert_gripper_actions(tf.clip_by_value(trajectory["action"][:, -1:], 0, 1)),
),
axis=-1,
)
return trajectory
def bc_z_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["action"] = tf.concat(
(
trajectory["action"]["future/xyz_residual"][:, :3],
trajectory["action"]["future/axis_angle_residual"][:, :3],
invert_gripper_actions(tf.cast(trajectory["action"]["future/target_close"][:, :1], tf.float32)),
),
axis=-1,
)
trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
return trajectory
def tokyo_pr2_opening_fridge_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][:, :6]
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
trajectory["action"] = trajectory["action"][..., :-1]
return trajectory
def tokyo_pr2_tabletop_manipulation_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][:, :6]
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
trajectory["action"] = trajectory["action"][..., :-1]
return trajectory
def utokyo_xarm_bimanual_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["action"] = trajectory["action"][..., -7:]
return trajectory
def robo_net_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["eef_state"] = tf.concat(
(
trajectory["observation"]["state"][:, :4],
tf.zeros_like(trajectory["observation"]["state"][:, :2]),
),
axis=-1,
)
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :4],
tf.zeros_like(trajectory["action"][:, :2]),
trajectory["action"][:, -1:],
),
axis=-1,
)
return trajectory
def berkeley_mvp_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
"""
trajectory["observation"]["state"] = tf.concat((
tf.cast(trajectory["observation"]["gripper"][:, None], tf.float32),
trajectory["observation"]["pose"],
trajectory["observation"]["joint_pos"],),
axis=-1,)
"""
trajectory["observation"]["gripper"] = tf.cast(trajectory["observation"]["gripper"][:, None], tf.float32)
return trajectory
def berkeley_rpt_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["gripper"] = tf.cast(trajectory["observation"]["gripper"][:, None], tf.float32)
return trajectory
def kaist_nonprehensible_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["state"] = trajectory["observation"]["state"][:, -7:]
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :6],
tf.zeros_like(trajectory["action"][:, :1]),
),
axis=-1,
)
return trajectory
def stanford_mask_vit_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["eef_state"] = tf.concat(
(
trajectory["observation"]["end_effector_pose"][:, :4],
tf.zeros_like(trajectory["observation"]["end_effector_pose"][:, :2]),
),
axis=-1,
)
trajectory["observation"]["gripper_state"] = trajectory["observation"]["end_effector_pose"][:, -1:]
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :4],
tf.zeros_like(trajectory["action"][:, :2]),
trajectory["action"][:, -1:],
),
axis=-1,
)
return trajectory
def tokyo_lsmo_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][:, :6]
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
return trajectory
def dlr_sara_grid_clamp_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["state"] = trajectory["observation"]["state"][:, :6]
return trajectory
def dlr_edan_shared_control_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# invert gripper action, +1 = open, 0 = close
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :6],
invert_gripper_actions(trajectory["action"][:, -1:]),
),
axis=-1,
)
return trajectory
def asu_table_top_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["eef_state"] = trajectory["ground_truth_states"]["EE"]
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
return trajectory
def robocook_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][:, :6]
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
return trajectory
def imperial_wristcam_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["action"] = trajectory["action"][..., :-1]
return trajectory
def iamlab_pick_insert_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
import tensorflow_graphics.geometry.transformation as tft
trajectory["observation"]["joint_state"] = trajectory["observation"]["state"][:, :7]
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, 7:8]
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :3],
tft.euler.from_quaternion(trajectory["action"][:, 3:7]),
trajectory["action"][:, 7:8],
),
axis=-1,
)
return trajectory
def uiuc_d3field_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["action"] = tf.concat(
(
trajectory["action"],
tf.zeros_like(trajectory["action"]),
tf.zeros_like(trajectory["action"][:, :1]),
),
axis=-1,
)
return trajectory
def utaustin_mutex_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["state"] = trajectory["observation"]["state"][:, :8]
# invert gripper action + clip, +1 = open, 0 = close
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :6],
invert_gripper_actions(tf.clip_by_value(trajectory["action"][:, -1:], 0, 1)),
),
axis=-1,
)
return trajectory
def berkeley_fanuc_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["joint_state"] = trajectory["observation"]["state"][:, :6]
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, 6:7]
# dataset does not store gripper actions, so use gripper state info, invert so +1 = open, 0 = close
trajectory["action"] = tf.concat(
(
trajectory["action"],
invert_gripper_actions(trajectory["observation"]["gripper_state"]),
),
axis=-1,
)
return trajectory
def cmu_playing_with_food_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
import tensorflow_graphics.geometry.transformation as tft
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :3],
tft.euler.from_quaternion(trajectory["action"][:, 3:7]),
trajectory["action"][:, -1:],
),
axis=-1,
)
return trajectory
def playfusion_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :3],
trajectory["action"][:, -4:],
),
axis=-1,
)
return trajectory
def cmu_stretch_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["eef_state"] = tf.concat(
(
trajectory["observation"]["state"][:, :3],
tf.zeros_like(trajectory["observation"]["state"][:, :3]),
),
axis=-1,
)
trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
trajectory["action"] = trajectory["action"][..., :-1]
return trajectory
def gnm_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
trajectory["observation"]["state"] = tf.concat(
(
trajectory["observation"]["position"],
tf.zeros_like(trajectory["observation"]["state"][:, :3]),
trajectory["observation"]["yaw"],
),
axis=-1,
)
trajectory["action"] = tf.concat(
(
trajectory["action"],
tf.zeros_like(trajectory["action"]),
tf.zeros_like(trajectory["action"]),
tf.zeros_like(trajectory["action"][:, :1]),
),
axis=-1,
)
return trajectory
def fmb_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# every input feature is batched, i.e., has a leading batch dimension
trajectory["observation"]["proprio"] = tf.concat(
(
trajectory["observation"]["eef_pose"],
trajectory["observation"]["state_gripper_pose"][..., None],
),
axis=-1,
)
return trajectory
def dobbe_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# every input feature is batched, i.e., has a leading batch dimension
trajectory["observation"]["proprio"] = trajectory["observation"]["state"]
return trajectory
def robo_set_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# gripper action is in -1...1 --> clip to 0...1, flip
gripper_action = trajectory["action"][:, -1:]
gripper_action = invert_gripper_actions(tf.clip_by_value(gripper_action, 0, 1))
trajectory["action"] = tf.concat(
(
trajectory["action"][:, :7],
gripper_action,
),
axis=-1,
)
return trajectory
def identity_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
return trajectory
# === Registry ===
OPENX_STANDARDIZATION_TRANSFORMS = {
"bridge_openx": bridge_openx_dataset_transform,
"bridge_orig": bridge_orig_dataset_transform,
"bridge_dataset": bridge_orig_dataset_transform,
"ppgm": ppgm_dataset_transform,
"ppgm_static": ppgm_dataset_transform,
"ppgm_wrist": ppgm_dataset_transform,
"fractal20220817_data": rt1_dataset_transform,
"kuka": kuka_dataset_transform,
"taco_play": taco_play_dataset_transform,
"jaco_play": jaco_play_dataset_transform,
"berkeley_cable_routing": berkeley_cable_routing_dataset_transform,
"roboturk": roboturk_dataset_transform,
"nyu_door_opening_surprising_effectiveness": nyu_door_opening_dataset_transform,
"viola": viola_dataset_transform,
"berkeley_autolab_ur5": berkeley_autolab_ur5_dataset_transform,
"toto": toto_dataset_transform,
"language_table": language_table_dataset_transform,
"columbia_cairlab_pusht_real": pusht_dataset_transform,
"stanford_kuka_multimodal_dataset_converted_externally_to_rlds": stanford_kuka_multimodal_dataset_transform,
"nyu_rot_dataset_converted_externally_to_rlds": nyu_rot_dataset_transform,
"stanford_hydra_dataset_converted_externally_to_rlds": stanford_hydra_dataset_transform,
"austin_buds_dataset_converted_externally_to_rlds": austin_buds_dataset_transform,
"nyu_franka_play_dataset_converted_externally_to_rlds": nyu_franka_play_dataset_transform,
"maniskill_dataset_converted_externally_to_rlds": maniskill_dataset_transform,
"furniture_bench_dataset_converted_externally_to_rlds": furniture_bench_dataset_transform,
"cmu_franka_exploration_dataset_converted_externally_to_rlds": cmu_franka_exploration_dataset_transform,
"ucsd_kitchen_dataset_converted_externally_to_rlds": ucsd_kitchen_dataset_transform,
"ucsd_pick_and_place_dataset_converted_externally_to_rlds": ucsd_pick_place_dataset_transform,
"austin_sailor_dataset_converted_externally_to_rlds": austin_sailor_dataset_transform,
"austin_sirius_dataset_converted_externally_to_rlds": austin_sirius_dataset_transform,
"bc_z": bc_z_dataset_transform,
"utokyo_pr2_opening_fridge_converted_externally_to_rlds": tokyo_pr2_opening_fridge_dataset_transform,
"utokyo_pr2_tabletop_manipulation_converted_externally_to_rlds": tokyo_pr2_tabletop_manipulation_dataset_transform,
"utokyo_xarm_pick_and_place_converted_externally_to_rlds": identity_transform,
"utokyo_xarm_bimanual_converted_externally_to_rlds": utokyo_xarm_bimanual_dataset_transform,
"robo_net": robo_net_dataset_transform,
"berkeley_mvp_converted_externally_to_rlds": berkeley_mvp_dataset_transform,
"berkeley_rpt_converted_externally_to_rlds": berkeley_rpt_dataset_transform,
"kaist_nonprehensile_converted_externally_to_rlds": kaist_nonprehensible_dataset_transform,
"stanford_mask_vit_converted_externally_to_rlds": stanford_mask_vit_dataset_transform,
"tokyo_u_lsmo_converted_externally_to_rlds": tokyo_lsmo_dataset_transform,
"dlr_sara_pour_converted_externally_to_rlds": identity_transform,
"dlr_sara_grid_clamp_converted_externally_to_rlds": dlr_sara_grid_clamp_dataset_transform,
"dlr_edan_shared_control_converted_externally_to_rlds": dlr_edan_shared_control_dataset_transform,
"asu_table_top_converted_externally_to_rlds": asu_table_top_dataset_transform,
"stanford_robocook_converted_externally_to_rlds": robocook_dataset_transform,
"imperialcollege_sawyer_wrist_cam": imperial_wristcam_dataset_transform,
"iamlab_cmu_pickup_insert_converted_externally_to_rlds": iamlab_pick_insert_dataset_transform,
"uiuc_d3field": uiuc_d3field_dataset_transform,
"utaustin_mutex": utaustin_mutex_dataset_transform,
"berkeley_fanuc_manipulation": berkeley_fanuc_dataset_transform,
"cmu_playing_with_food": cmu_playing_with_food_dataset_transform,
"cmu_play_fusion": playfusion_dataset_transform,
"cmu_stretch": cmu_stretch_dataset_transform,
"berkeley_gnm_recon": gnm_dataset_transform,
"berkeley_gnm_cory_hall": gnm_dataset_transform,
"berkeley_gnm_sac_son": gnm_dataset_transform,
"droid": droid_baseact_transform_fn(),
"droid_100": droid_baseact_transform_fn(), # first 100 episodes of droid
"fmb": fmb_transform,
"dobbe": dobbe_dataset_transform,
"robo_set": robo_set_dataset_transform,
"usc_cloth_sim_converted_externally_to_rlds": identity_transform,
"plex_robosuite": identity_transform,
"conq_hose_manipulation": identity_transform,
"io_ai_tech": identity_transform,
"spoc": identity_transform,
}
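
The registry keys match the RLDS dataset names, so callers can look up and apply the right transform generically; the conversion script later in this diff does exactly this. A minimal sketch (`standardize` is a hypothetical helper, not part of this PR):

```python
from lerobot.common.datasets.push_dataset_to_hub.openx.transforms import (
    OPENX_STANDARDIZATION_TRANSFORMS,
)

def standardize(dataset, openx_dataset_name: str):
    """Apply the per-dataset transform to a tf.data.Dataset whose elements are
    dicts of *batched* trajectory features (leading time dimension)."""
    assert openx_dataset_name in OPENX_STANDARDIZATION_TRANSFORMS
    transform_fn = OPENX_STANDARDIZATION_TRANSFORMS[openx_dataset_name]
    return dataset.map(transform_fn)
```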

View File

@@ -14,16 +14,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""
For all datasets in the RLDS format.
For https://github.com/google-deepmind/open_x_embodiment (OPENX) datasets.
NOTE: You need to install tensorflow and tensorflow_datasets before running this script.
Example:
python lerobot/scripts/push_dataset_to_hub.py \
--raw-dir /path/to/data/bridge_dataset/1.0.0/ \
--repo-id your_hub/sampled_bridge_data_v2 \
--raw-format rlds \
--raw-dir /hdd/tensorflow_datasets/bridge_dataset/1.0.0/ \
--repo-id youliangtan/sampled_bridge_data_v2 \
--raw-format openx_rlds.bridge_orig \
--episodes 3 4 5 8 9
Exact dataset fps defined in openx/configs.yaml, obtained from:
@@ -38,21 +35,28 @@ import tensorflow as tf
import tensorflow_datasets as tfds
import torch
import tqdm
import yaml
from datasets import Dataset, Features, Image, Sequence, Value
from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub.openx.transforms import OPENX_STANDARDIZATION_TRANSFORMS
from lerobot.common.datasets.push_dataset_to_hub.utils import (
calculate_episode_data_index,
concatenate_episodes,
get_default_encoding,
save_images_concurrently,
)
from lerobot.common.datasets.utils import (
calculate_episode_data_index,
hf_transform_to_torch,
)
from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames
with open("lerobot/common/datasets/push_dataset_to_hub/openx/configs.yaml") as f:
_openx_list = yaml.safe_load(f)
OPENX_DATASET_CONFIGS = _openx_list["OPENX_DATASET_CONFIGS"]
np.set_printoptions(precision=2)
@@ -104,6 +108,7 @@ def load_from_raw(
video: bool,
episodes: list[int] | None = None,
encoding: dict | None = None,
openx_dataset_name: str | None = None,
):
"""
Args:
@@ -131,17 +136,16 @@ def load_from_raw(
# we apply the standardization transform if the dataset name is provided
# if the dataset name is not provided (i.e., the goal is to convert any
# RLDS-formatted dataset), we search for 'image' keys in the observations
image_keys = []
state_keys = []
observation_info = dataset_info.features["steps"]["observation"]
for key in observation_info:
# check whether the key is for an image or a vector observation
if len(observation_info[key].shape) == 3:
# only adding uint8 images discards depth images
if observation_info[key].dtype == tf.uint8:
image_keys.append(key)
else:
state_keys.append(key)
if openx_dataset_name is not None:
print(" - applying standardization transform for dataset: ", openx_dataset_name)
assert openx_dataset_name in OPENX_STANDARDIZATION_TRANSFORMS
transform_fn = OPENX_STANDARDIZATION_TRANSFORMS[openx_dataset_name]
dataset = dataset.map(transform_fn)
image_keys = OPENX_DATASET_CONFIGS[openx_dataset_name]["image_obs_keys"]
else:
obs_keys = dataset_info.features["steps"]["observation"].keys()
image_keys = [key for key in obs_keys if "image" in key]
lang_key = "language_instruction" if "language_instruction" in dataset.element_spec else None
@@ -189,31 +193,50 @@ def load_from_raw(
num_frames = episode["action"].shape[0]
ep_dict = {}
for key in state_keys:
ep_dict[f"observation.{key}"] = tf_to_torch(episode["observation"][key])
###########################################################
# Handle the episodic data
ep_dict["action"] = tf_to_torch(episode["action"])
ep_dict["next.reward"] = tf_to_torch(episode["reward"]).float()
ep_dict["next.done"] = tf_to_torch(episode["is_last"])
ep_dict["is_terminal"] = tf_to_torch(episode["is_terminal"])
ep_dict["is_first"] = tf_to_torch(episode["is_first"])
ep_dict["discount"] = tf_to_torch(episode["discount"])
# last step of demonstration is considered done
done = torch.zeros(num_frames, dtype=torch.bool)
done[-1] = True
ep_dict = {}
langs = [] # TODO: might be located in "observation"
image_array_dict = {key: [] for key in image_keys}
# We will create the state observation tensor by stacking the state
# obs keys defined in openx/configs.yaml
if openx_dataset_name is not None:
state_obs_keys = OPENX_DATASET_CONFIGS[openx_dataset_name]["state_obs_keys"]
# stack the state observations, if is None, pad with zeros
states = []
for key in state_obs_keys:
if key in episode["observation"]:
states.append(tf_to_torch(episode["observation"][key]))
else:
states.append(torch.zeros(num_frames, 1)) # pad with zeros
states = torch.cat(states, dim=1)
# assert states.shape == (num_frames, 8), f"states shape: {states.shape}"
else:
states = tf_to_torch(episode["observation"]["state"])
actions = tf_to_torch(episode["action"])
rewards = tf_to_torch(episode["reward"]).float()
# If lang_key is present, convert the entire tensor at once
if lang_key is not None:
ep_dict["language_instruction"] = [x.numpy().decode("utf-8") for x in episode[lang_key]]
ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames)
ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
image_array_dict = {key: [] for key in image_keys}
langs = [str(x) for x in episode[lang_key]]
for im_key in image_keys:
imgs = episode["observation"][im_key]
image_array_dict[im_key] = [tf_img_convert(img) for img in imgs]
# simple assertions
for item in [states, actions, rewards, done]:
assert len(item) == num_frames
###########################################################
# loop through all cameras
for im_key in image_keys:
img_key = f"observation.images.{im_key}"
@@ -239,6 +262,17 @@ def load_from_raw(
else:
ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]
if lang_key is not None:
ep_dict["language_instruction"] = langs
ep_dict["observation.state"] = states
ep_dict["action"] = actions
ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames)
ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
ep_dict["next.reward"] = rewards
ep_dict["next.done"] = done
path_ep_dict = tmp_ep_dicts_dir.joinpath(
"ep_dict_" + "0" * (10 - len(str(ep_idx))) + str(ep_idx) + ".pt"
)
@@ -256,28 +290,30 @@ def load_from_raw(
def to_hf_dataset(data_dict, video) -> Dataset:
features = {}
for key in data_dict:
# check if vector state obs
if key.startswith("observation.") and "observation.images." not in key:
features[key] = Sequence(length=data_dict[key].shape[1], feature=Value(dtype="float32", id=None))
# check if image obs
elif "observation.images." in key:
if video:
features[key] = VideoFrame()
else:
features[key] = Image()
keys = [key for key in data_dict if "observation.images." in key]
for key in keys:
if video:
features[key] = VideoFrame()
else:
features[key] = Image()
features["observation.state"] = Sequence(
length=data_dict["observation.state"].shape[1], feature=Value(dtype="float32", id=None)
)
if "observation.velocity" in data_dict:
features["observation.velocity"] = Sequence(
length=data_dict["observation.velocity"].shape[1], feature=Value(dtype="float32", id=None)
)
if "observation.effort" in data_dict:
features["observation.effort"] = Sequence(
length=data_dict["observation.effort"].shape[1], feature=Value(dtype="float32", id=None)
)
if "language_instruction" in data_dict:
features["language_instruction"] = Value(dtype="string", id=None)
features["action"] = Sequence(
length=data_dict["action"].shape[1], feature=Value(dtype="float32", id=None)
)
features["is_terminal"] = Value(dtype="bool", id=None)
features["is_first"] = Value(dtype="bool", id=None)
features["discount"] = Value(dtype="float32", id=None)
features["episode_index"] = Value(dtype="int64", id=None)
features["frame_index"] = Value(dtype="int64", id=None)
features["timestamp"] = Value(dtype="float32", id=None)
@@ -297,8 +333,19 @@ def from_raw_to_lerobot_format(
video: bool = True,
episodes: list[int] | None = None,
encoding: dict | None = None,
openx_dataset_name: str | None = None,
):
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding)
"""This is a test impl for rlds conversion"""
if openx_dataset_name is None:
# set a default rlds frame rate if the dataset is not from openx
fps = 30
elif "fps" not in OPENX_DATASET_CONFIGS[openx_dataset_name]:
raise ValueError(
"fps for this dataset is not specified in openx/configs.py yet," "means it is not yet tested"
)
fps = OPENX_DATASET_CONFIGS[openx_dataset_name]["fps"]
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding, openx_dataset_name)
hf_dataset = to_hf_dataset(data_dict, video)
episode_data_index = calculate_episode_data_index(hf_dataset)
info = {
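
For reference, the fps resolution above boils down to the following standalone sketch (`resolve_fps` is a hypothetical helper; `OPENX_DATASET_CONFIGS` is the dict loaded from openx/configs.yaml earlier in this file):

```python
def resolve_fps(openx_dataset_name: str | None) -> float:
    if openx_dataset_name is None:
        # default RLDS frame rate for datasets that are not from OpenX
        return 30
    if "fps" not in OPENX_DATASET_CONFIGS[openx_dataset_name]:
        raise ValueError(
            "fps for this dataset is not specified in openx/configs.yaml yet, "
            "which means it is not yet tested"
        )
    return OPENX_DATASET_CONFIGS[openx_dataset_name]["fps"]
```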

View File

@@ -27,12 +27,12 @@ from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub.utils import (
calculate_episode_data_index,
concatenate_episodes,
get_default_encoding,
save_images_concurrently,
)
from lerobot.common.datasets.utils import (
calculate_episode_data_index,
hf_transform_to_torch,
)
from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames

View File

@@ -28,12 +28,12 @@ from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub._umi_imagecodecs_numcodecs import register_codecs
from lerobot.common.datasets.push_dataset_to_hub.utils import (
calculate_episode_data_index,
concatenate_episodes,
get_default_encoding,
save_images_concurrently,
)
from lerobot.common.datasets.utils import (
calculate_episode_data_index,
hf_transform_to_torch,
)
from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames

View File

@@ -16,9 +16,7 @@
import inspect
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Dict
import datasets
import numpy
import PIL
import torch
@@ -74,58 +72,3 @@ def check_repo_id(repo_id: str) -> None:
f"""`repo_id` is expected to contain a community or user id `/` the name of the dataset
(e.g. 'lerobot/pusht'), but contains '{repo_id}'."""
)
# TODO(aliberts): remove
def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> Dict[str, torch.Tensor]:
"""
Calculate episode data index for the provided HuggingFace Dataset. Relies on episode_index column of hf_dataset.
Parameters:
- hf_dataset (datasets.Dataset): A HuggingFace dataset containing the episode index.
Returns:
- episode_data_index: A dictionary containing the data index for each episode. The dictionary has two keys:
- "from": A tensor containing the starting index of each episode.
- "to": A tensor containing the ending index of each episode.
"""
episode_data_index = {"from": [], "to": []}
current_episode = None
"""
The episode_index is a list of integers, each representing the episode index of the corresponding example.
For instance, the following is a valid episode_index:
[0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]
Below, we iterate through the episode_index and populate the episode_data_index dictionary with the starting and
ending index of each episode. For the episode_index above, the episode_data_index dictionary will look like this:
{
"from": [0, 3, 7],
"to": [3, 7, 12]
}
"""
if len(hf_dataset) == 0:
episode_data_index = {
"from": torch.tensor([]),
"to": torch.tensor([]),
}
return episode_data_index
for idx, episode_idx in enumerate(hf_dataset["episode_index"]):
if episode_idx != current_episode:
# We encountered a new episode, so we append its starting location to the "from" list
episode_data_index["from"].append(idx)
# If this is not the first episode, we append the ending location of the previous episode to the "to" list
if current_episode is not None:
episode_data_index["to"].append(idx)
# Let's keep track of the current episode index
current_episode = episode_idx
else:
# We are still in the same episode, so there is nothing for us to do here
pass
# We have reached the end of the dataset, so we append the ending location of the last episode to the "to" list
episode_data_index["to"].append(idx + 1)
for k in ["from", "to"]:
episode_data_index[k] = torch.tensor(episode_data_index[k])
return episode_data_index
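
The docstring example can be checked directly; after this PR the function is imported from `lerobot.common.datasets.utils`, as the import changes elsewhere in this diff show:

```python
import datasets

from lerobot.common.datasets.utils import calculate_episode_data_index

hf_dataset = datasets.Dataset.from_dict(
    {"episode_index": [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]}
)
index = calculate_episode_data_index(hf_dataset)
print(index["from"])  # tensor([0, 3, 7])
print(index["to"])    # tensor([3, 7, 12])
```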

View File

@@ -27,12 +27,12 @@ from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub.utils import (
calculate_episode_data_index,
concatenate_episodes,
get_default_encoding,
save_images_concurrently,
)
from lerobot.common.datasets.utils import (
calculate_episode_data_index,
hf_transform_to_torch,
)
from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames

View File

@@ -13,60 +13,31 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.resources
import json
import logging
import textwrap
from collections.abc import Iterator
from itertools import accumulate
import re
import warnings
from functools import cache
from pathlib import Path
from pprint import pformat
from types import SimpleNamespace
from typing import Any
from typing import Dict
import datasets
import jsonlines
import numpy as np
import pyarrow.compute as pc
import torch
from datasets.table import embed_table_storage
from huggingface_hub import DatasetCard, DatasetCardData, HfApi
from datasets import load_dataset, load_from_disk
from huggingface_hub import DatasetCard, HfApi, hf_hub_download, snapshot_download
from PIL import Image as PILImage
from safetensors.torch import load_file
from torchvision import transforms
from lerobot.common.robot_devices.robots.utils import Robot
DEFAULT_CHUNK_SIZE = 1000 # Max number of episodes per chunk
INFO_PATH = "meta/info.json"
EPISODES_PATH = "meta/episodes.jsonl"
STATS_PATH = "meta/stats.json"
TASKS_PATH = "meta/tasks.jsonl"
DEFAULT_VIDEO_PATH = "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4"
DEFAULT_PARQUET_PATH = "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet"
DEFAULT_IMAGE_PATH = "images/{image_key}/episode_{episode_index:06d}/frame_{frame_index:06d}.png"
DATASET_CARD_TEMPLATE = """
---
# Metadata will go there
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## {}
"""
DEFAULT_FEATURES = {
"timestamp": {"dtype": "float32", "shape": (1,), "names": None},
"frame_index": {"dtype": "int64", "shape": (1,), "names": None},
"episode_index": {"dtype": "int64", "shape": (1,), "names": None},
"index": {"dtype": "int64", "shape": (1,), "names": None},
"task_index": {"dtype": "int64", "shape": (1,), "names": None},
}
def flatten_dict(d: dict, parent_key: str = "", sep: str = "/") -> dict:
def flatten_dict(d, parent_key="", sep="/"):
"""Flatten a nested dictionary structure by collapsing nested keys into one key with a separator.
For example:
@@ -85,7 +56,7 @@ def flatten_dict(d: dict, parent_key: str = "", sep: str = "/") -> dict:
return dict(items)
def unflatten_dict(d: dict, sep: str = "/") -> dict:
def unflatten_dict(d, sep="/"):
outdict = {}
for key, value in d.items():
parts = key.split(sep)
@@ -98,82 +69,6 @@ def unflatten_dict(d: dict, sep: str = "/") -> dict:
return outdict
def serialize_dict(stats: dict[str, torch.Tensor | np.ndarray | dict]) -> dict:
serialized_dict = {key: value.tolist() for key, value in flatten_dict(stats).items()}
return unflatten_dict(serialized_dict)
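A quick sketch of how `flatten_dict`, `unflatten_dict` and `serialize_dict` compose (assuming they are in scope; the `action` stats are made up for the example):
```python
import numpy as np
import torch

stats = {"action": {"mean": torch.tensor([0.0, 1.0]), "std": np.array([1.0, 2.0])}}
print(list(flatten_dict(stats)))  # ['action/mean', 'action/std']
print(serialize_dict(stats))      # {'action': {'mean': [0.0, 1.0], 'std': [1.0, 2.0]}}
```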
def write_parquet(dataset: datasets.Dataset, fpath: Path) -> None:
# Embed image bytes into the table before saving to parquet
format = dataset.format
dataset = dataset.with_format("arrow")
dataset = dataset.map(embed_table_storage, batched=False)
dataset = dataset.with_format(**format)
dataset.to_parquet(fpath)
def load_json(fpath: Path) -> Any:
with open(fpath) as f:
return json.load(f)
def write_json(data: dict, fpath: Path) -> None:
fpath.parent.mkdir(exist_ok=True, parents=True)
with open(fpath, "w") as f:
json.dump(data, f, indent=4, ensure_ascii=False)
def load_jsonlines(fpath: Path) -> list[Any]:
with jsonlines.open(fpath, "r") as reader:
return list(reader)
def write_jsonlines(data: dict, fpath: Path) -> None:
fpath.parent.mkdir(exist_ok=True, parents=True)
with jsonlines.open(fpath, "w") as writer:
writer.write_all(data)
def append_jsonlines(data: dict, fpath: Path) -> None:
fpath.parent.mkdir(exist_ok=True, parents=True)
with jsonlines.open(fpath, "a") as writer:
writer.write(data)
def load_info(local_dir: Path) -> dict:
info = load_json(local_dir / INFO_PATH)
for ft in info["features"].values():
ft["shape"] = tuple(ft["shape"])
return info
def load_stats(local_dir: Path) -> dict:
if not (local_dir / STATS_PATH).exists():
return None
stats = load_json(local_dir / STATS_PATH)
stats = {key: torch.tensor(value) for key, value in flatten_dict(stats).items()}
return unflatten_dict(stats)
def load_tasks(local_dir: Path) -> dict:
tasks = load_jsonlines(local_dir / TASKS_PATH)
return {item["task_index"]: item["task"] for item in sorted(tasks, key=lambda x: x["task_index"])}
def load_episodes(local_dir: Path) -> dict:
return load_jsonlines(local_dir / EPISODES_PATH)
def load_image_as_numpy(fpath: str | Path, dtype="float32", channel_first: bool = True) -> np.ndarray:
img = PILImage.open(fpath).convert("RGB")
img_array = np.array(img, dtype=dtype)
if channel_first: # (H, W, C) -> (C, H, W)
img_array = np.transpose(img_array, (2, 0, 1))
if "float" in dtype:
img_array /= 255.0
return img_array
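As a sanity check, a small sketch of the expected shapes and scaling (the file name and image are hypothetical):
```python
from PIL import Image

# Write a hypothetical 64x48 RGB frame to disk, then load it back.
Image.new("RGB", (64, 48), color=(255, 0, 0)).save("frame.png")
img = load_image_as_numpy("frame.png", dtype="float32", channel_first=True)
print(img.shape, img.max())  # (3, 48, 64) 1.0
```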
def hf_transform_to_torch(items_dict: dict[torch.Tensor | None]):
"""Get a transform function that convert items from Hugging Face dataset (pyarrow)
to torch tensors. Importantly, images are converted from PIL, which corresponds to
@@ -185,6 +80,14 @@ def hf_transform_to_torch(items_dict: dict[torch.Tensor | None]):
if isinstance(first_item, PILImage.Image):
to_tensor = transforms.ToTensor()
items_dict[key] = [to_tensor(img) for img in items_dict[key]]
elif isinstance(first_item, str):
# TODO (michel-aractingi): add str2embedding via language tokenizer
# For now we leave this part up to the user to choose how to address
# language conditioned tasks
pass
elif isinstance(first_item, dict) and "path" in first_item and "timestamp" in first_item:
# video frame will be processed downstream
pass
elif first_item is None:
pass
else:
@@ -192,67 +95,19 @@ def hf_transform_to_torch(items_dict: dict[torch.Tensor | None]):
return items_dict
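A minimal sketch of the transform in action on a toy table (assuming the function is in scope); numeric columns come back as torch tensors:
```python
import datasets

hf_dataset = datasets.Dataset.from_dict({"action": [[0.1, 0.2], [0.3, 0.4]]})
hf_dataset.set_transform(hf_transform_to_torch)
print(hf_dataset[0]["action"])  # tensor([0.1000, 0.2000])
```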
def _get_major_minor(version: str) -> tuple[int]:
split = version.strip("v").split(".")
return int(split[0]), int(split[1])
class BackwardCompatibilityError(Exception):
def __init__(self, repo_id, version):
message = textwrap.dedent(f"""
BackwardCompatibilityError: The dataset you requested ({repo_id}) is in {version} format.
We introduced a new format since v2.0 which is not backward compatible with v1.x.
Please, use our conversion script. Modify the following command with your own task description:
```
python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \\
--repo-id {repo_id} \\
--single-task "TASK DESCRIPTION." # <---- /!\\ Replace TASK DESCRIPTION /!\\
```
A few examples to replace TASK DESCRIPTION: "Pick up the blue cube and place it into the bin.",
"Insert the peg into the socket.", "Slide open the ziploc bag.", "Take the elevator to the 1st floor.",
"Open the top cabinet, store the pot inside it then close the cabinet.", "Push the T-shaped block onto the T-shaped target.",
"Grab the spray paint on the shelf and place it in the bin on top of the robot dog.", "Fold the sweatshirt.", ...
If you encounter a problem, contact LeRobot maintainers on [Discord](https://discord.com/invite/s3KuuzsPFb)
or open an [issue on GitHub](https://github.com/huggingface/lerobot/issues/new/choose).
""")
super().__init__(message)
def check_version_compatibility(
repo_id: str, version_to_check: str, current_version: str, enforce_breaking_major: bool = True
) -> None:
current_major, _ = _get_major_minor(current_version)
major_to_check, _ = _get_major_minor(version_to_check)
if major_to_check < current_major and enforce_breaking_major:
raise BackwardCompatibilityError(repo_id, version_to_check)
elif float(version_to_check.strip("v")) < float(current_version.strip("v")):
logging.warning(
f"""The dataset you requested ({repo_id}) was created with a previous version ({version_to_check}) of the
codebase. The current codebase version is {current_version}. You should be fine since
backward compatibility is maintained. If you encounter a problem, contact LeRobot maintainers on
Discord ('https://discord.com/invite/s3KuuzsPFb') or open an issue on github.""",
)
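For instance, under the versioning scheme above (the repo id and version strings are illustrative):
```python
# Only the minor version differs: logs a warning and returns.
check_version_compatibility("lerobot/pusht", version_to_check="v2.0", current_version="v2.1")
# The major version is older: raises BackwardCompatibilityError.
check_version_compatibility("lerobot/pusht", version_to_check="v1.6", current_version="v2.1")
```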
def get_hub_safe_version(repo_id: str, version: str) -> str:
    api = HfApi()
    dataset_info = api.list_repo_refs(repo_id, repo_type="dataset")
    branches = [b.name for b in dataset_info.branches]
    if version not in branches:
        num_version = float(version.strip("v"))
        hub_num_versions = [float(v.strip("v")) for v in branches if v.startswith("v")]
        if num_version >= 2.0 and all(v < 2.0 for v in hub_num_versions):
            raise BackwardCompatibilityError(repo_id, version)
        logging.warning(
            f"""You are trying to load a dataset from {repo_id} created with a previous version of the
            codebase. The following versions are available: {branches}.
            The requested version ('{version}') is not found. You should be fine since
            backward compatibility is maintained. If you encounter a problem, contact LeRobot maintainers on
            Discord ('https://discord.com/invite/s3KuuzsPFb') or open an issue on github.""",
        )
        if "main" not in branches:
            raise ValueError(f"Version 'main' not found on {repo_id}")
@cache
def get_hf_dataset_safe_version(repo_id: str, version: str) -> str:
    api = HfApi()
    dataset_info = api.list_repo_refs(repo_id, repo_type="dataset")
    branches = [b.name for b in dataset_info.branches]
    if version not in branches:
        warnings.warn(
            f"""You are trying to load a dataset from {repo_id} created with a previous version of the
            codebase. The following versions are available: {branches}.
            The requested version ('{version}') is not found. You should be fine since
            backward compatibility is maintained. If you encounter a problem, contact LeRobot maintainers on
            Discord ('https://discord.com/invite/s3KuuzsPFb') or open an issue on github.""",
            stacklevel=1,
        )
@@ -261,184 +116,275 @@ def get_hub_safe_version(repo_id: str, version: str) -> str:
return version
def get_hf_features_from_features(features: dict) -> datasets.Features:
hf_features = {}
for key, ft in features.items():
if ft["dtype"] == "video":
continue
elif ft["dtype"] == "image":
hf_features[key] = datasets.Image()
elif ft["shape"] == (1,):
hf_features[key] = datasets.Value(dtype=ft["dtype"])
else:
assert len(ft["shape"]) == 1
hf_features[key] = datasets.Sequence(
length=ft["shape"][0], feature=datasets.Value(dtype=ft["dtype"])
)
    return datasets.Features(hf_features)
def load_hf_dataset(repo_id: str, version: str, root: Path, split: str) -> datasets.Dataset:
    """hf_dataset contains all the observations, states, actions, rewards, etc."""
    if root is not None:
        hf_dataset = load_from_disk(str(Path(root) / repo_id / "train"))
        # TODO(rcadene): clean this which enables getting a subset of dataset
        if split != "train":
            if "%" in split:
                raise NotImplementedError(f"We don't support splitting based on percentage for now ({split}).")
            match_from = re.search(r"train\[(\d+):\]", split)
            match_to = re.search(r"train\[:(\d+)\]", split)
            if match_from:
                from_frame_index = int(match_from.group(1))
                hf_dataset = hf_dataset.select(range(from_frame_index, len(hf_dataset)))
            elif match_to:
                to_frame_index = int(match_to.group(1))
                hf_dataset = hf_dataset.select(range(to_frame_index))
            else:
                raise ValueError(
                    f'`split` ({split}) should either be "train", "train[INT:]", or "train[:INT]"'
                )
    else:
        safe_version = get_hf_dataset_safe_version(repo_id, version)
        hf_dataset = load_dataset(repo_id, revision=safe_version, split=split)
    hf_dataset.set_transform(hf_transform_to_torch)
    return hf_dataset
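A short sketch of the feature conversion in `get_hf_features_from_features` above (the feature spec is made up; "video" features are skipped because frames live in mp4 files rather than in the arrow table):
```python
features = {
    "action": {"dtype": "float32", "shape": (6,), "names": None},
    "timestamp": {"dtype": "float32", "shape": (1,), "names": None},
    "observation.images.top": {"dtype": "video", "shape": (3, 480, 640), "names": None},
}
hf_features = get_hf_features_from_features(features)
print(list(hf_features))  # ['action', 'timestamp'] -- the video key is dropped
```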
def get_features_from_robot(robot: Robot, use_videos: bool = True) -> dict:
camera_ft = {}
if robot.cameras:
camera_ft = {
key: {"dtype": "video" if use_videos else "image", **ft}
for key, ft in robot.camera_features.items()
}
return {**robot.motor_features, **camera_ft, **DEFAULT_FEATURES}
def load_episode_data_index(repo_id, version, root) -> dict[str, torch.Tensor]:
"""episode_data_index contains the range of indices for each episode
Example:
```python
from_id = episode_data_index["from"][episode_id].item()
to_id = episode_data_index["to"][episode_id].item()
episode_frames = [dataset[i] for i in range(from_id, to_id)]
```
"""
if root is not None:
path = Path(root) / repo_id / "meta_data" / "episode_data_index.safetensors"
else:
safe_version = get_hf_dataset_safe_version(repo_id, version)
path = hf_hub_download(
repo_id, "meta_data/episode_data_index.safetensors", repo_type="dataset", revision=safe_version
)
return load_file(path)
def create_empty_dataset_info(
codebase_version: str,
fps: int,
robot_type: str,
features: dict,
use_videos: bool,
) -> dict:
return {
"codebase_version": codebase_version,
"robot_type": robot_type,
"total_episodes": 0,
"total_frames": 0,
"total_tasks": 0,
"total_videos": 0,
"total_chunks": 0,
"chunks_size": DEFAULT_CHUNK_SIZE,
"fps": fps,
"splits": {},
"data_path": DEFAULT_PARQUET_PATH,
"video_path": DEFAULT_VIDEO_PATH if use_videos else None,
"features": features,
}
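Calling it on a fresh dataset yields an all-zero skeleton (the robot type here is illustrative):
```python
info = create_empty_dataset_info(
    codebase_version="v2.0",
    fps=30,
    robot_type="koch",
    features=DEFAULT_FEATURES,
    use_videos=True,
)
print(info["total_episodes"], info["chunks_size"])  # 0 1000
```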
def load_stats(repo_id, version, root) -> dict[str, dict[str, torch.Tensor]]:
"""stats contains the statistics per modality computed over the full dataset, such as max, min, mean, std
Example:
```python
normalized_action = (action - stats["action"]["mean"]) / stats["action"]["std"]
```
"""
if root is not None:
path = Path(root) / repo_id / "meta_data" / "stats.safetensors"
else:
safe_version = get_hf_dataset_safe_version(repo_id, version)
path = hf_hub_download(
repo_id, "meta_data/stats.safetensors", repo_type="dataset", revision=safe_version
)
stats = load_file(path)
return unflatten_dict(stats)
def get_episode_data_index(
    episode_dicts: list[dict], episodes: list[int] | None = None
) -> dict[str, torch.Tensor]:
    episode_lengths = {ep_idx: ep_dict["length"] for ep_idx, ep_dict in enumerate(episode_dicts)}
    if episodes is not None:
        episode_lengths = {ep_idx: episode_lengths[ep_idx] for ep_idx in episodes}
    cumulative_lengths = list(accumulate(episode_lengths.values()))
    return {
        "from": torch.LongTensor([0] + cumulative_lengths[:-1]),
        "to": torch.LongTensor(cumulative_lengths),
    }
def load_info(repo_id, version, root) -> dict:
    """info contains useful information regarding the dataset that are not stored elsewhere
    Example:
    ```python
    print("frame per second used to collect the video", info["fps"])
    ```
    """
    if root is not None:
        path = Path(root) / repo_id / "meta_data" / "info.json"
    else:
        safe_version = get_hf_dataset_safe_version(repo_id, version)
        path = hf_hub_download(repo_id, "meta_data/info.json", repo_type="dataset", revision=safe_version)
    with open(path) as f:
        info = json.load(f)
    return info
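Since `get_episode_data_index` above builds the ranges from episode lengths alone, a toy sketch:
```python
episode_dicts = [{"length": 3}, {"length": 4}, {"length": 5}]
index = get_episode_data_index(episode_dicts)
print(index["from"])  # tensor([0, 3, 7])
print(index["to"])    # tensor([3, 7, 12])
# Restricting to a subset of episodes re-bases the cumulative sum:
subset = get_episode_data_index(episode_dicts, episodes=[1, 2])
print(subset["from"], subset["to"])  # tensor([0, 4]) tensor([4, 9])
```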
def calculate_total_episode(
    hf_dataset: datasets.Dataset, raise_if_not_contiguous: bool = True
) -> int:
episode_indices = sorted(hf_dataset.unique("episode_index"))
total_episodes = len(episode_indices)
if raise_if_not_contiguous and episode_indices != list(range(total_episodes)):
raise ValueError("episode_index values are not sorted and contiguous.")
return total_episodes
def load_videos(repo_id, version, root) -> Path:
if root is not None:
path = Path(root) / repo_id / "videos"
else:
# TODO(rcadene): we download the whole repo here. see if we can avoid this
safe_version = get_hf_dataset_safe_version(repo_id, version)
repo_dir = snapshot_download(repo_id, repo_type="dataset", revision=safe_version)
path = Path(repo_dir) / "videos"
return path
def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> dict[str, torch.Tensor]:
episode_lengths = []
table = hf_dataset.data.table
total_episodes = calculate_total_episode(hf_dataset)
for ep_idx in range(total_episodes):
ep_table = table.filter(pc.equal(table["episode_index"], ep_idx))
episode_lengths.insert(ep_idx, len(ep_table))
    cumulative_lengths = list(accumulate(episode_lengths))
    return {
        "from": torch.LongTensor([0] + cumulative_lengths[:-1]),
        "to": torch.LongTensor(cumulative_lengths),
    }
def check_timestamps_sync(
    hf_dataset: datasets.Dataset,
    episode_data_index: dict[str, torch.Tensor],
    fps: int,
    tolerance_s: float,
    raise_value_error: bool = True,
) -> bool:
    """
    This check is to make sure that each timestamp is separated from the next by 1/fps +/- tolerance to
    account for possible numerical error.
    """
    timestamps = torch.stack(hf_dataset["timestamp"])
    diffs = torch.diff(timestamps)
    within_tolerance = torch.abs(diffs - 1 / fps) <= tolerance_s
    # We mask differences between the timestamp at the end of an episode
    # and the one at the start of the next episode since these are expected
    # to be outside tolerance.
    mask = torch.ones(len(diffs), dtype=torch.bool)
    ignored_diffs = episode_data_index["to"][:-1] - 1
    mask[ignored_diffs] = False
    filtered_within_tolerance = within_tolerance[mask]
    if not torch.all(filtered_within_tolerance):
        # Track original indices before masking
        original_indices = torch.arange(len(diffs))
        filtered_indices = original_indices[mask]
        outside_tolerance_filtered_indices = torch.nonzero(~filtered_within_tolerance)  # .squeeze()
        outside_tolerance_indices = filtered_indices[outside_tolerance_filtered_indices]
        episode_indices = torch.stack(hf_dataset["episode_index"])
        outside_tolerances = []
        for idx in outside_tolerance_indices:
            entry = {
                "timestamps": [timestamps[idx], timestamps[idx + 1]],
                "diff": diffs[idx],
                "episode_index": episode_indices[idx].item(),
            }
            outside_tolerances.append(entry)
        if raise_value_error:
            raise ValueError(
                f"""One or several timestamps unexpectedly violate the tolerance inside episode range.
                This might be due to synchronization issues with timestamps during data collection.
                \n{pformat(outside_tolerances)}"""
            )
        return False
    return True
def load_previous_and_future_frames(
    item: dict[str, torch.Tensor],
    hf_dataset: datasets.Dataset,
    episode_data_index: dict[str, torch.Tensor],
    delta_timestamps: dict[str, list[float]],
    tolerance_s: float,
) -> dict[torch.Tensor]:
    """
    Given a current item in the dataset containing a timestamp (e.g. 0.6 seconds), and a list of time differences of
    some modalities (e.g. delta_timestamps={"observation.image": [-0.8, -0.2, 0, 0.2]}), this function computes for each
    given modality (e.g. "observation.image") a list of query timestamps (e.g. [-0.2, 0.4, 0.6, 0.8]) and loads the closest
    frames in the dataset.
    Importantly, when no frame can be found around a query timestamp within a specified tolerance window, this function
    raises an AssertionError. When a timestamp is queried before the first available timestamp of the episode or after
    the last available timestamp, the violation of the tolerance doesn't raise an AssertionError, and the function
    populates a boolean array indicating which frames are outside of the episode range. For instance, this boolean array
    is useful during batched training to not supervise actions associated to timestamps coming after the end of the
    episode, or to pad the observations in a specific way. Note that by default the observation frames before the start
    of the episode are the same as the first frame of the episode.
    Parameters:
    - item (dict): A dictionary containing all the data related to a frame. It is the result of `dataset[idx]`. Each key
      corresponds to a different modality (e.g., "timestamp", "observation.image", "action").
    - hf_dataset (datasets.Dataset): A dictionary containing the full dataset. Each key corresponds to a different
      modality (e.g., "timestamp", "observation.image", "action").
    - episode_data_index (dict): A dictionary containing two keys ("from" and "to") associated to dataset indices.
      They indicate the start index and end index of each episode in the dataset.
    - delta_timestamps (dict): A dictionary containing lists of delta timestamps for each possible modality to be
      retrieved. These deltas are added to the item timestamp to form the query timestamps.
    - tolerance_s (float, optional): The tolerance level (in seconds) used to determine if a data point is close enough to the query
      timestamp by asserting `tol > difference`. It is suggested to set `tol` to a smaller value than the
      smallest expected inter-frame period, but large enough to account for jitter.
    Returns:
    - The same item with the queried frames for each modality specified in delta_timestamps, with an additional key for
      each modality (e.g. "observation.image_is_pad").
    Raises:
    - AssertionError: If any of the frames unexpectedly violate the tolerance level. This could indicate synchronization
      issues with timestamps during data collection.
    """
    # get indices of the frames associated to the episode, and their timestamps
    ep_id = item["episode_index"].item()
    ep_data_id_from = episode_data_index["from"][ep_id].item()
    ep_data_id_to = episode_data_index["to"][ep_id].item()
    ep_data_ids = torch.arange(ep_data_id_from, ep_data_id_to, 1)
    # load timestamps
    ep_timestamps = hf_dataset.select_columns("timestamp")[ep_data_id_from:ep_data_id_to]["timestamp"]
    ep_timestamps = torch.stack(ep_timestamps)
    # we make the assumption that the timestamps are sorted
    ep_first_ts = ep_timestamps[0]
    ep_last_ts = ep_timestamps[-1]
    current_ts = item["timestamp"].item()
    for key in delta_timestamps:
        # get timestamps used as query to retrieve data of previous/future frames
        delta_ts = delta_timestamps[key]
        query_ts = current_ts + torch.tensor(delta_ts)
        # compute distances between each query timestamp and all timestamps of all the frames belonging to the episode
        dist = torch.cdist(query_ts[:, None], ep_timestamps[:, None], p=1)
        min_, argmin_ = dist.min(1)
        # TODO(rcadene): synchronize timestamps + interpolation if needed
        is_pad = min_ > tolerance_s
        # check violated query timestamps are all outside the episode range
        assert ((query_ts[is_pad] < ep_first_ts) | (ep_last_ts < query_ts[is_pad])).all(), (
            f"One or several timestamps unexpectedly violate the tolerance ({min_} > {tolerance_s=}) inside episode range."
            "This might be due to synchronization issues with timestamps during data collection."
        )
        # get dataset indices corresponding to frames to be loaded
        data_ids = ep_data_ids[argmin_]
        # load frames modality
        item[key] = hf_dataset.select_columns(key)[data_ids][key]
        if isinstance(item[key][0], dict) and "path" in item[key][0]:
            # video mode where frame are expressed as dict of path and timestamp
            item[key] = item[key]
        else:
            item[key] = torch.stack(item[key])
        item[f"{key}_is_pad"] = is_pad
    return item
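To make the query mechanism concrete, a self-contained sketch on a toy 10 Hz episode (it assumes `hf_transform_to_torch` and `load_previous_and_future_frames` above are in scope; all values are made up):
```python
import datasets
import torch

hf_dataset = datasets.Dataset.from_dict(
    {
        "timestamp": [0.0, 0.1, 0.2, 0.3],
        "episode_index": [0, 0, 0, 0],
        "action": [[0.0], [1.0], [2.0], [3.0]],
    }
)
hf_dataset.set_transform(hf_transform_to_torch)
episode_data_index = {"from": torch.tensor([0]), "to": torch.tensor([4])}
item = load_previous_and_future_frames(
    hf_dataset[2],
    hf_dataset,
    episode_data_index,
    delta_timestamps={"action": [0.0, 0.1, 0.2]},
    tolerance_s=1e-4,
)
print(item["action"])         # actions at t=0.2 and t=0.3; the last query (t=0.4) is padded with t=0.3
print(item["action_is_pad"])  # tensor([False, False,  True])
```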
def check_delta_timestamps(
    delta_timestamps: dict[str, list[float]], fps: int, tolerance_s: float, raise_value_error: bool = True
) -> bool:
    """This will check if all the values in delta_timestamps are multiples of 1/fps +/- tolerance.
    This is to ensure that these delta_timestamps added to any timestamp from a dataset will themselves be
    actual timestamps from the dataset.
    """
    outside_tolerance = {}
    for key, delta_ts in delta_timestamps.items():
        within_tolerance = [abs(ts * fps - round(ts * fps)) / fps <= tolerance_s for ts in delta_ts]
        if not all(within_tolerance):
            outside_tolerance[key] = [
                ts for ts, is_within in zip(delta_ts, within_tolerance, strict=True) if not is_within
            ]
    if len(outside_tolerance) > 0:
        if raise_value_error:
            raise ValueError(
                f"""
                The following delta_timestamps are found outside of tolerance range.
                Please make sure they are multiples of 1/{fps} +/- tolerance and adjust
                their values accordingly.
                \n{pformat(outside_tolerance)}
                """
            )
        return False
    return True
def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> Dict[str, torch.Tensor]:
    """
    Calculate episode data index for the provided HuggingFace Dataset. Relies on episode_index column of hf_dataset.
    Parameters:
    - hf_dataset (datasets.Dataset): A HuggingFace dataset containing the episode index.
    Returns:
    - episode_data_index: A dictionary containing the data index for each episode. The dictionary has two keys:
        - "from": A tensor containing the starting index of each episode.
        - "to": A tensor containing the ending index of each episode.
    """
    episode_data_index = {"from": [], "to": []}
    current_episode = None
    """
    The episode_index is a list of integers, each representing the episode index of the corresponding example.
    For instance, the following is a valid episode_index:
      [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]
    Below, we iterate through the episode_index and populate the episode_data_index dictionary with the starting and
    ending index of each episode. For the episode_index above, the episode_data_index dictionary will look like this:
      {
          "from": [0, 3, 7],
          "to": [3, 7, 12]
      }
    """
    if len(hf_dataset) == 0:
        episode_data_index = {
            "from": torch.tensor([]),
            "to": torch.tensor([]),
        }
        return episode_data_index
    for idx, episode_idx in enumerate(hf_dataset["episode_index"]):
        if episode_idx != current_episode:
            # We encountered a new episode, so we append its starting location to the "from" list
            episode_data_index["from"].append(idx)
            # If this is not the first episode, we append the ending location of the previous episode to the "to" list
            if current_episode is not None:
                episode_data_index["to"].append(idx)
            # Let's keep track of the current episode index
            current_episode = episode_idx
        else:
            # We are still in the same episode, so there is nothing for us to do here
            pass
    # We have reached the end of the dataset, so we append the ending location of the last episode to the "to" list
    episode_data_index["to"].append(idx + 1)
    for k in ["from", "to"]:
        episode_data_index[k] = torch.tensor(episode_data_index[k])
    return episode_data_index
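A toy check of `check_delta_timestamps` above at 10 fps (values invented for the example):
```python
fps = 10
# Multiples of 1/fps pass:
print(check_delta_timestamps({"action": [-0.1, 0.0, 0.2]}, fps=fps, tolerance_s=1e-4))  # True
# Off-grid values fail (or raise, unless raise_value_error=False):
print(check_delta_timestamps({"action": [0.05]}, fps=fps, tolerance_s=1e-4, raise_value_error=False))  # False
```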
def get_delta_indices(delta_timestamps: dict[str, list[float]], fps: int) -> dict[str, list[int]]:
    delta_indices = {}
    for key, delta_ts in delta_timestamps.items():
        delta_indices[key] = (torch.tensor(delta_ts) * fps).long().tolist()
    return delta_indices
def reset_episode_index(hf_dataset: datasets.Dataset) -> datasets.Dataset:
    """Reset the `episode_index` of the provided HuggingFace Dataset.
    `episode_data_index` (and related functionality such as `load_previous_and_future_frames`) requires the
    `episode_index` to be sorted, continuous (1,1,1 and not 1,2,1) and start at 0.
    This brings the `episode_index` to the required format.
    """
    if len(hf_dataset) == 0:
        return hf_dataset
    unique_episode_idxs = torch.stack(hf_dataset["episode_index"]).unique().tolist()
    episode_idx_to_reset_idx_mapping = {
        ep_id: reset_ep_id for reset_ep_id, ep_id in enumerate(unique_episode_idxs)
    }
    def modify_ep_idx_func(example):
        example["episode_index"] = episode_idx_to_reset_idx_mapping[example["episode_index"].item()]
        return example
    hf_dataset = hf_dataset.map(modify_ep_idx_func)
    return hf_dataset
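Two quick sketches of the helpers above (toy values; they assume the helpers and `hf_transform_to_torch` are in scope):
```python
import datasets

# Delta timestamps become frame offsets at the dataset fps.
print(get_delta_indices({"action": [-0.2, 0.0, 0.1]}, fps=10))  # {'action': [-2, 0, 1]}

# Non-contiguous episode indices are re-based to start at 0.
hf_dataset = datasets.Dataset.from_dict({"episode_index": [3, 3, 7, 7]})
hf_dataset.set_transform(hf_transform_to_torch)
hf_dataset = reset_episode_index(hf_dataset)
print([int(x) for x in hf_dataset["episode_index"]])  # [0, 0, 1, 1]
```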
def cycle(iterable):
@@ -454,7 +400,7 @@ def cycle(iterable):
iterator = iter(iterable)
def create_branch(repo_id, *, branch: str, repo_type: str | None = None) -> None:
def create_branch(repo_id, *, branch: str, repo_type: str | None = None):
"""Create a branch on a existing Hugging Face repo. Delete the branch if it already
exists before creating it.
"""
@@ -469,94 +415,12 @@ def create_branch(repo_id, *, branch: str, repo_type: str | None = None) -> None
api.create_branch(repo_id, repo_type=repo_type, branch=branch)
def create_lerobot_dataset_card(
tags: list | None = None,
dataset_info: dict | None = None,
**kwargs,
) -> DatasetCard:
"""
Keyword arguments will be used to replace values in ./lerobot/common/datasets/card_template.md.
Note: If specified, license must be one of https://huggingface.co/docs/hub/repositories-licenses.
"""
card_tags = ["LeRobot"]
if tags:
card_tags += tags
if dataset_info:
dataset_structure = "[meta/info.json](meta/info.json):\n"
dataset_structure += f"```json\n{json.dumps(dataset_info, indent=4)}\n```\n"
kwargs = {**kwargs, "dataset_structure": dataset_structure}
card_data = DatasetCardData(
license=kwargs.get("license"),
tags=card_tags,
task_categories=["robotics"],
configs=[
{
"config_name": "default",
"data_files": "data/*/*.parquet",
}
],
)
card_template = (importlib.resources.files("lerobot.common.datasets") / "card_template.md").read_text()
return DatasetCard.from_template(
card_data=card_data,
template_str=card_template,
**kwargs,
)
class IterableNamespace(SimpleNamespace):
"""
A namespace object that supports both dictionary-like iteration and dot notation access.
Automatically converts nested dictionaries into IterableNamespaces.
This class extends SimpleNamespace to provide:
- Dictionary-style iteration over keys
- Access to items via both dot notation (obj.key) and brackets (obj["key"])
- Dictionary-like methods: items(), keys(), values()
- Recursive conversion of nested dictionaries
Args:
dictionary: Optional dictionary to initialize the namespace
**kwargs: Additional keyword arguments passed to SimpleNamespace
Examples:
>>> data = {"name": "Alice", "details": {"age": 25}}
>>> ns = IterableNamespace(data)
>>> ns.name
'Alice'
>>> ns.details.age
25
>>> list(ns.keys())
['name', 'details']
>>> for key, value in ns.items():
... print(f"{key}: {value}")
name: Alice
details: IterableNamespace(age=25)
"""
def __init__(self, dictionary: dict[str, Any] = None, **kwargs):
super().__init__(**kwargs)
if dictionary is not None:
for key, value in dictionary.items():
if isinstance(value, dict):
setattr(self, key, IterableNamespace(value))
else:
setattr(self, key, value)
def __iter__(self) -> Iterator[str]:
return iter(vars(self))
def __getitem__(self, key: str) -> Any:
return vars(self)[key]
def items(self):
return vars(self).items()
def values(self):
return vars(self).values()
def keys(self):
return vars(self).keys()
def create_lerobot_dataset_card(tags: list | None = None, text: str | None = None) -> DatasetCard:
card = DatasetCard(DATASET_CARD_TEMPLATE)
card.data.task_categories = ["robotics"]
card.data.tags = ["LeRobot"]
if tags is not None:
card.data.tags += tags
if text is not None:
card.text += text
return card

View File

@@ -1,882 +0,0 @@
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is for internal use to convert all datasets under the 'lerobot' hub user account to v2.
Note: Since the original Aloha datasets don't use shadow motors, you need to comment those out in
lerobot/configs/robot/aloha.yaml before running this script.
"""
import traceback
from pathlib import Path
from textwrap import dedent
from lerobot import available_datasets
from lerobot.common.datasets.v2.convert_dataset_v1_to_v2 import convert_dataset, parse_robot_config
LOCAL_DIR = Path("data/")
ALOHA_CONFIG = Path("lerobot/configs/robot/aloha.yaml")
ALOHA_MOBILE_INFO = {
"robot_config": parse_robot_config(ALOHA_CONFIG),
"license": "mit",
"url": "https://mobile-aloha.github.io/",
"paper": "https://arxiv.org/abs/2401.02117",
"citation_bibtex": dedent(r"""
@inproceedings{fu2024mobile,
author = {Fu, Zipeng and Zhao, Tony Z. and Finn, Chelsea},
title = {Mobile ALOHA: Learning Bimanual Mobile Manipulation with Low-Cost Whole-Body Teleoperation},
booktitle = {arXiv},
year = {2024},
}""").lstrip(),
}
ALOHA_STATIC_INFO = {
"robot_config": parse_robot_config(ALOHA_CONFIG),
"license": "mit",
"url": "https://tonyzhaozh.github.io/aloha/",
"paper": "https://arxiv.org/abs/2304.13705",
"citation_bibtex": dedent(r"""
@article{Zhao2023LearningFB,
title={Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware},
author={Tony Zhao and Vikash Kumar and Sergey Levine and Chelsea Finn},
journal={RSS},
year={2023},
volume={abs/2304.13705},
url={https://arxiv.org/abs/2304.13705}
}""").lstrip(),
}
PUSHT_INFO = {
"license": "mit",
"url": "https://diffusion-policy.cs.columbia.edu/",
"paper": "https://arxiv.org/abs/2303.04137v5",
"citation_bibtex": dedent(r"""
@article{chi2024diffusionpolicy,
author = {Cheng Chi and Zhenjia Xu and Siyuan Feng and Eric Cousineau and Yilun Du and Benjamin Burchfiel and Russ Tedrake and Shuran Song},
title ={Diffusion Policy: Visuomotor Policy Learning via Action Diffusion},
journal = {The International Journal of Robotics Research},
year = {2024},
}""").lstrip(),
}
XARM_INFO = {
"license": "mit",
"url": "https://www.nicklashansen.com/td-mpc/",
"paper": "https://arxiv.org/abs/2203.04955",
"citation_bibtex": dedent(r"""
@inproceedings{Hansen2022tdmpc,
title={Temporal Difference Learning for Model Predictive Control},
author={Nicklas Hansen and Xiaolong Wang and Hao Su},
booktitle={ICML},
year={2022}
}
"""),
}
UNITREEH_INFO = {
"license": "apache-2.0",
}
DATASETS = {
"aloha_mobile_cabinet": {
"single_task": "Open the top cabinet, store the pot inside it then close the cabinet.",
**ALOHA_MOBILE_INFO,
},
"aloha_mobile_chair": {
"single_task": "Push the chairs in front of the desk to place them against it.",
**ALOHA_MOBILE_INFO,
},
"aloha_mobile_elevator": {
"single_task": "Take the elevator to the 1st floor.",
**ALOHA_MOBILE_INFO,
},
"aloha_mobile_shrimp": {
"single_task": "Sauté the raw shrimp on both sides, then serve it in the bowl.",
**ALOHA_MOBILE_INFO,
},
"aloha_mobile_wash_pan": {
"single_task": "Pick up the pan, rinse it in the sink and then place it in the drying rack.",
**ALOHA_MOBILE_INFO,
},
"aloha_mobile_wipe_wine": {
"single_task": "Pick up the wet cloth on the faucet and use it to clean the spilled wine on the table and underneath the glass.",
**ALOHA_MOBILE_INFO,
},
"aloha_static_battery": {
"single_task": "Place the battery into the slot of the remote controller.",
**ALOHA_STATIC_INFO,
},
"aloha_static_candy": {"single_task": "Pick up the candy and unwrap it.", **ALOHA_STATIC_INFO},
"aloha_static_coffee": {
"single_task": "Place the coffee capsule inside the capsule container, then place the cup onto the center of the cup tray, then push the 'Hot Water' and 'Travel Mug' buttons.",
**ALOHA_STATIC_INFO,
},
"aloha_static_coffee_new": {
"single_task": "Place the coffee capsule inside the capsule container, then place the cup onto the center of the cup tray.",
**ALOHA_STATIC_INFO,
},
"aloha_static_cups_open": {
"single_task": "Pick up the plastic cup and open its lid.",
**ALOHA_STATIC_INFO,
},
"aloha_static_fork_pick_up": {
"single_task": "Pick up the fork and place it on the plate.",
**ALOHA_STATIC_INFO,
},
"aloha_static_pingpong_test": {
"single_task": "Transfer one of the two balls in the right glass into the left glass, then transfer it back to the right glass.",
**ALOHA_STATIC_INFO,
},
"aloha_static_pro_pencil": {
"single_task": "Pick up the pencil with the right arm, hand it over to the left arm then place it back onto the table.",
**ALOHA_STATIC_INFO,
},
"aloha_static_screw_driver": {
"single_task": "Pick up the screwdriver with the right arm, hand it over to the left arm then place it into the cup.",
**ALOHA_STATIC_INFO,
},
"aloha_static_tape": {
"single_task": "Cut a small piece of tape from the tape dispenser then place it on the cardboard box's edge.",
**ALOHA_STATIC_INFO,
},
"aloha_static_thread_velcro": {
"single_task": "Pick up the velcro cable tie with the left arm, then insert the end of the velcro tie into the other end's loop with the right arm.",
**ALOHA_STATIC_INFO,
},
"aloha_static_towel": {
"single_task": "Pick up a piece of paper towel and place it on the spilled liquid.",
**ALOHA_STATIC_INFO,
},
"aloha_static_vinh_cup": {
"single_task": "Pick up the plastic cup with the right arm, then pop its lid open with the left arm.",
**ALOHA_STATIC_INFO,
},
"aloha_static_vinh_cup_left": {
"single_task": "Pick up the plastic cup with the left arm, then pop its lid open with the right arm.",
**ALOHA_STATIC_INFO,
},
"aloha_static_ziploc_slide": {"single_task": "Slide open the ziploc bag.", **ALOHA_STATIC_INFO},
"aloha_sim_insertion_scripted": {"single_task": "Insert the peg into the socket.", **ALOHA_STATIC_INFO},
"aloha_sim_insertion_scripted_image": {
"single_task": "Insert the peg into the socket.",
**ALOHA_STATIC_INFO,
},
"aloha_sim_insertion_human": {"single_task": "Insert the peg into the socket.", **ALOHA_STATIC_INFO},
"aloha_sim_insertion_human_image": {
"single_task": "Insert the peg into the socket.",
**ALOHA_STATIC_INFO,
},
"aloha_sim_transfer_cube_scripted": {
"single_task": "Pick up the cube with the right arm and transfer it to the left arm.",
**ALOHA_STATIC_INFO,
},
"aloha_sim_transfer_cube_scripted_image": {
"single_task": "Pick up the cube with the right arm and transfer it to the left arm.",
**ALOHA_STATIC_INFO,
},
"aloha_sim_transfer_cube_human": {
"single_task": "Pick up the cube with the right arm and transfer it to the left arm.",
**ALOHA_STATIC_INFO,
},
"aloha_sim_transfer_cube_human_image": {
"single_task": "Pick up the cube with the right arm and transfer it to the left arm.",
**ALOHA_STATIC_INFO,
},
"pusht": {"single_task": "Push the T-shaped block onto the T-shaped target.", **PUSHT_INFO},
"pusht_image": {"single_task": "Push the T-shaped block onto the T-shaped target.", **PUSHT_INFO},
"unitreeh1_fold_clothes": {"single_task": "Fold the sweatshirt.", **UNITREEH_INFO},
"unitreeh1_rearrange_objects": {"single_task": "Put the object into the bin.", **UNITREEH_INFO},
"unitreeh1_two_robot_greeting": {
"single_task": "Greet the other robot with a high five.",
**UNITREEH_INFO,
},
"unitreeh1_warehouse": {
"single_task": "Grab the spray paint on the shelf and place it in the bin on top of the robot dog.",
**UNITREEH_INFO,
},
"xarm_lift_medium": {"single_task": "Pick up the cube and lift it.", **XARM_INFO},
"xarm_lift_medium_image": {"single_task": "Pick up the cube and lift it.", **XARM_INFO},
"xarm_lift_medium_replay": {"single_task": "Pick up the cube and lift it.", **XARM_INFO},
"xarm_lift_medium_replay_image": {"single_task": "Pick up the cube and lift it.", **XARM_INFO},
"xarm_push_medium": {"single_task": "Push the cube onto the target.", **XARM_INFO},
"xarm_push_medium_image": {"single_task": "Push the cube onto the target.", **XARM_INFO},
"xarm_push_medium_replay": {"single_task": "Push the cube onto the target.", **XARM_INFO},
"xarm_push_medium_replay_image": {"single_task": "Push the cube onto the target.", **XARM_INFO},
"umi_cup_in_the_wild": {
"single_task": "Put the cup on the plate.",
"license": "apache-2.0",
},
"asu_table_top": {
"tasks_col": "language_instruction",
"license": "mit",
"paper": "https://link.springer.com/article/10.1007/s10514-023-10129-1",
"citation_bibtex": dedent(r"""
@inproceedings{zhou2023modularity,
title={Modularity through Attention: Efficient Training and Transfer of Language-Conditioned Policies for Robot Manipulation},
author={Zhou, Yifan and Sonawani, Shubham and Phielipp, Mariano and Stepputtis, Simon and Amor, Heni},
booktitle={Conference on Robot Learning},
pages={1684--1695},
year={2023},
organization={PMLR}
}
@article{zhou2023learning,
title={Learning modular language-conditioned robot policies through attention},
author={Zhou, Yifan and Sonawani, Shubham and Phielipp, Mariano and Ben Amor, Heni and Stepputtis, Simon},
journal={Autonomous Robots},
pages={1--21},
year={2023},
publisher={Springer}
}""").lstrip(),
},
"austin_buds_dataset": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://ut-austin-rpl.github.io/BUDS-website/",
"paper": "https://arxiv.org/abs/2109.13841",
"citation_bibtex": dedent(r"""
@article{zhu2022bottom,
title={Bottom-Up Skill Discovery From Unsegmented Demonstrations for Long-Horizon Robot Manipulation},
author={Zhu, Yifeng and Stone, Peter and Zhu, Yuke},
journal={IEEE Robotics and Automation Letters},
volume={7},
number={2},
pages={4126--4133},
year={2022},
publisher={IEEE}
}""").lstrip(),
},
"austin_sailor_dataset": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://ut-austin-rpl.github.io/sailor/",
"paper": "https://arxiv.org/abs/2210.11435",
"citation_bibtex": dedent(r"""
@inproceedings{nasiriany2022sailor,
title={Learning and Retrieval from Prior Data for Skill-based Imitation Learning},
author={Soroush Nasiriany and Tian Gao and Ajay Mandlekar and Yuke Zhu},
booktitle={Conference on Robot Learning (CoRL)},
year={2022}
}""").lstrip(),
},
"austin_sirius_dataset": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://ut-austin-rpl.github.io/sirius/",
"paper": "https://arxiv.org/abs/2211.08416",
"citation_bibtex": dedent(r"""
@inproceedings{liu2022robot,
title = {Robot Learning on the Job: Human-in-the-Loop Autonomy and Learning During Deployment},
author = {Huihan Liu and Soroush Nasiriany and Lance Zhang and Zhiyao Bao and Yuke Zhu},
booktitle = {Robotics: Science and Systems (RSS)},
year = {2023}
}""").lstrip(),
},
"berkeley_autolab_ur5": {
"tasks_col": "language_instruction",
"license": "cc-by-4.0",
"url": "https://sites.google.com/view/berkeley-ur5/home",
"citation_bibtex": dedent(r"""
@misc{BerkeleyUR5Website,
title = {Berkeley {UR5} Demonstration Dataset},
author = {Lawrence Yunliang Chen and Simeon Adebola and Ken Goldberg},
howpublished = {https://sites.google.com/view/berkeley-ur5/home},
}""").lstrip(),
},
"berkeley_cable_routing": {
"tasks_col": "language_instruction",
"license": "cc-by-4.0",
"url": "https://sites.google.com/view/cablerouting/home",
"paper": "https://arxiv.org/abs/2307.08927",
"citation_bibtex": dedent(r"""
@article{luo2023multistage,
author = {Jianlan Luo and Charles Xu and Xinyang Geng and Gilbert Feng and Kuan Fang and Liam Tan and Stefan Schaal and Sergey Levine},
title = {Multi-Stage Cable Routing through Hierarchical Imitation Learning},
journal = {arXiv pre-print},
year = {2023},
url = {https://arxiv.org/abs/2307.08927},
}""").lstrip(),
},
"berkeley_fanuc_manipulation": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://sites.google.com/berkeley.edu/fanuc-manipulation",
"citation_bibtex": dedent(r"""
@article{fanuc_manipulation2023,
title={Fanuc Manipulation: A Dataset for Learning-based Manipulation with FANUC Mate 200iD Robot},
author={Zhu, Xinghao and Tian, Ran and Xu, Chenfeng and Ding, Mingyu and Zhan, Wei and Tomizuka, Masayoshi},
year={2023},
}""").lstrip(),
},
"berkeley_gnm_cory_hall": {
"tasks_col": "language_instruction",
"license": "mit",
"paper": "https://arxiv.org/abs/1709.10489",
"citation_bibtex": dedent(r"""
@inproceedings{kahn2018self,
title={Self-supervised deep reinforcement learning with generalized computation graphs for robot navigation},
author={Kahn, Gregory and Villaflor, Adam and Ding, Bosen and Abbeel, Pieter and Levine, Sergey},
booktitle={2018 IEEE international conference on robotics and automation (ICRA)},
pages={5129--5136},
year={2018},
organization={IEEE}
}""").lstrip(),
},
"berkeley_gnm_recon": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://sites.google.com/view/recon-robot",
"paper": "https://arxiv.org/abs/2104.05859",
"citation_bibtex": dedent(r"""
@inproceedings{shah2021rapid,
title={Rapid Exploration for Open-World Navigation with Latent Goal Models},
author={Dhruv Shah and Benjamin Eysenbach and Nicholas Rhinehart and Sergey Levine},
booktitle={5th Annual Conference on Robot Learning },
year={2021},
url={https://openreview.net/forum?id=d_SWJhyKfVw}
}""").lstrip(),
},
"berkeley_gnm_sac_son": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://sites.google.com/view/SACSoN-review",
"paper": "https://arxiv.org/abs/2306.01874",
"citation_bibtex": dedent(r"""
@article{hirose2023sacson,
title={SACSoN: Scalable Autonomous Data Collection for Social Navigation},
author={Hirose, Noriaki and Shah, Dhruv and Sridhar, Ajay and Levine, Sergey},
journal={arXiv preprint arXiv:2306.01874},
year={2023}
}""").lstrip(),
},
"berkeley_mvp": {
"tasks_col": "language_instruction",
"license": "mit",
"paper": "https://arxiv.org/abs/2203.06173",
"citation_bibtex": dedent(r"""
@InProceedings{Radosavovic2022,
title = {Real-World Robot Learning with Masked Visual Pre-training},
author = {Ilija Radosavovic and Tete Xiao and Stephen James and Pieter Abbeel and Jitendra Malik and Trevor Darrell},
booktitle = {CoRL},
year = {2022}
}""").lstrip(),
},
"berkeley_rpt": {
"tasks_col": "language_instruction",
"license": "mit",
"paper": "https://arxiv.org/abs/2306.10007",
"citation_bibtex": dedent(r"""
@article{Radosavovic2023,
title={Robot Learning with Sensorimotor Pre-training},
author={Ilija Radosavovic and Baifeng Shi and Letian Fu and Ken Goldberg and Trevor Darrell and Jitendra Malik},
year={2023},
journal={arXiv:2306.10007}
}""").lstrip(),
},
"cmu_franka_exploration_dataset": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://human-world-model.github.io/",
"paper": "https://arxiv.org/abs/2308.10901",
"citation_bibtex": dedent(r"""
@inproceedings{mendonca2023structured,
title={Structured World Models from Human Videos},
author={Mendonca, Russell and Bahl, Shikhar and Pathak, Deepak},
journal={RSS},
year={2023}
}""").lstrip(),
},
"cmu_play_fusion": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://play-fusion.github.io/",
"paper": "https://arxiv.org/abs/2312.04549",
"citation_bibtex": dedent(r"""
@inproceedings{chen2023playfusion,
title={PlayFusion: Skill Acquisition via Diffusion from Language-Annotated Play},
author={Chen, Lili and Bahl, Shikhar and Pathak, Deepak},
booktitle={CoRL},
year={2023}
}""").lstrip(),
},
"cmu_stretch": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://robo-affordances.github.io/",
"paper": "https://arxiv.org/abs/2304.08488",
"citation_bibtex": dedent(r"""
@inproceedings{bahl2023affordances,
title={Affordances from Human Videos as a Versatile Representation for Robotics},
author={Bahl, Shikhar and Mendonca, Russell and Chen, Lili and Jain, Unnat and Pathak, Deepak},
booktitle={CVPR},
year={2023}
}
@article{mendonca2023structured,
title={Structured World Models from Human Videos},
author={Mendonca, Russell and Bahl, Shikhar and Pathak, Deepak},
journal={CoRL},
year={2023}
}""").lstrip(),
},
"columbia_cairlab_pusht_real": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://diffusion-policy.cs.columbia.edu/",
"paper": "https://arxiv.org/abs/2303.04137v5",
"citation_bibtex": dedent(r"""
@inproceedings{chi2023diffusionpolicy,
title={Diffusion Policy: Visuomotor Policy Learning via Action Diffusion},
author={Chi, Cheng and Feng, Siyuan and Du, Yilun and Xu, Zhenjia and Cousineau, Eric and Burchfiel, Benjamin and Song, Shuran},
booktitle={Proceedings of Robotics: Science and Systems (RSS)},
year={2023}
}""").lstrip(),
},
"conq_hose_manipulation": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://sites.google.com/view/conq-hose-manipulation-dataset/home",
"citation_bibtex": dedent(r"""
@misc{ConqHoseManipData,
author={Peter Mitrano and Dmitry Berenson},
title={Conq Hose Manipulation Dataset, v1.15.0},
year={2024},
howpublished={https://sites.google.com/view/conq-hose-manipulation-dataset}
}""").lstrip(),
},
"dlr_edan_shared_control": {
"tasks_col": "language_instruction",
"license": "mit",
"paper": "https://ieeexplore.ieee.org/document/9341156",
"citation_bibtex": dedent(r"""
@inproceedings{vogel_edan_2020,
title = {EDAN - an EMG-Controlled Daily Assistant to Help People with Physical Disabilities},
language = {en},
booktitle = {2020 {IEEE}/{RSJ} {International} {Conference} on {Intelligent} {Robots} and {Systems} ({IROS})},
author = {Vogel, Jörn and Hagengruber, Annette and Iskandar, Maged and Quere, Gabriel and Leipscher, Ulrike and Bustamante, Samuel and Dietrich, Alexander and Hoeppner, Hannes and Leidner, Daniel and Albu-Schäffer, Alin},
year = {2020}
}
@inproceedings{quere_shared_2020,
address = {Paris, France},
title = {Shared {Control} {Templates} for {Assistive} {Robotics}},
language = {en},
booktitle = {2020 {IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
author = {Quere, Gabriel and Hagengruber, Annette and Iskandar, Maged and Bustamante, Samuel and Leidner, Daniel and Stulp, Freek and Vogel, Joern},
year = {2020},
pages = {7},
}""").lstrip(),
},
"dlr_sara_grid_clamp": {
"tasks_col": "language_instruction",
"license": "mit",
"paper": "https://www.researchsquare.com/article/rs-3289569/v1",
"citation_bibtex": dedent(r"""
@article{padalkar2023guided,
title={A guided reinforcement learning approach using shared control templates for learning manipulation skills in the real world},
author={Padalkar, Abhishek and Quere, Gabriel and Raffin, Antonin and Silv{\'e}rio, Jo{\~a}o and Stulp, Freek},
journal={Research square preprint rs-3289569/v1},
year={2023}
}""").lstrip(),
},
"dlr_sara_pour": {
"tasks_col": "language_instruction",
"license": "mit",
"paper": "https://elib.dlr.de/193739/1/padalkar2023rlsct.pdf",
"citation_bibtex": dedent(r"""
@inproceedings{padalkar2023guiding,
title={Guiding Reinforcement Learning with Shared Control Templates},
author={Padalkar, Abhishek and Quere, Gabriel and Steinmetz, Franz and Raffin, Antonin and Nieuwenhuisen, Matthias and Silv{\'e}rio, Jo{\~a}o and Stulp, Freek},
booktitle={40th IEEE International Conference on Robotics and Automation, ICRA 2023},
year={2023},
organization={IEEE}
}""").lstrip(),
},
"droid_100": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://droid-dataset.github.io/",
"paper": "https://arxiv.org/abs/2403.12945",
"citation_bibtex": dedent(r"""
@article{khazatsky2024droid,
title = {DROID: A Large-Scale In-The-Wild Robot Manipulation Dataset},
author = {Alexander Khazatsky and Karl Pertsch and Suraj Nair and Ashwin Balakrishna and Sudeep Dasari and Siddharth Karamcheti and Soroush Nasiriany and Mohan Kumar Srirama and Lawrence Yunliang Chen and Kirsty Ellis and Peter David Fagan and Joey Hejna and Masha Itkina and Marion Lepert and Yecheng Jason Ma and Patrick Tree Miller and Jimmy Wu and Suneel Belkhale and Shivin Dass and Huy Ha and Arhan Jain and Abraham Lee and Youngwoon Lee and Marius Memmel and Sungjae Park and Ilija Radosavovic and Kaiyuan Wang and Albert Zhan and Kevin Black and Cheng Chi and Kyle Beltran Hatch and Shan Lin and Jingpei Lu and Jean Mercat and Abdul Rehman and Pannag R Sanketi and Archit Sharma and Cody Simpson and Quan Vuong and Homer Rich Walke and Blake Wulfe and Ted Xiao and Jonathan Heewon Yang and Arefeh Yavary and Tony Z. Zhao and Christopher Agia and Rohan Baijal and Mateo Guaman Castro and Daphne Chen and Qiuyu Chen and Trinity Chung and Jaimyn Drake and Ethan Paul Foster and Jensen Gao and David Antonio Herrera and Minho Heo and Kyle Hsu and Jiaheng Hu and Donovon Jackson and Charlotte Le and Yunshuang Li and Kevin Lin and Roy Lin and Zehan Ma and Abhiram Maddukuri and Suvir Mirchandani and Daniel Morton and Tony Nguyen and Abigail O'Neill and Rosario Scalise and Derick Seale and Victor Son and Stephen Tian and Emi Tran and Andrew E. Wang and Yilin Wu and Annie Xie and Jingyun Yang and Patrick Yin and Yunchu Zhang and Osbert Bastani and Glen Berseth and Jeannette Bohg and Ken Goldberg and Abhinav Gupta and Abhishek Gupta and Dinesh Jayaraman and Joseph J Lim and Jitendra Malik and Roberto Martín-Martín and Subramanian Ramamoorthy and Dorsa Sadigh and Shuran Song and Jiajun Wu and Michael C. Yip and Yuke Zhu and Thomas Kollar and Sergey Levine and Chelsea Finn},
year = {2024},
}""").lstrip(),
},
"fmb": {
"tasks_col": "language_instruction",
"license": "cc-by-4.0",
"url": "https://functional-manipulation-benchmark.github.io/",
"paper": "https://arxiv.org/abs/2401.08553",
"citation_bibtex": dedent(r"""
@article{luo2024fmb,
title={FMB: a Functional Manipulation Benchmark for Generalizable Robotic Learning},
author={Luo, Jianlan and Xu, Charles and Liu, Fangchen and Tan, Liam and Lin, Zipeng and Wu, Jeffrey and Abbeel, Pieter and Levine, Sergey},
journal={arXiv preprint arXiv:2401.08553},
year={2024}
}""").lstrip(),
},
"iamlab_cmu_pickup_insert": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://openreview.net/forum?id=WuBv9-IGDUA",
"paper": "https://arxiv.org/abs/2401.14502",
"citation_bibtex": dedent(r"""
@inproceedings{saxena2023multiresolution,
title={Multi-Resolution Sensing for Real-Time Control with Vision-Language Models},
author={Saumya Saxena and Mohit Sharma and Oliver Kroemer},
booktitle={7th Annual Conference on Robot Learning},
year={2023},
url={https://openreview.net/forum?id=WuBv9-IGDUA}
}""").lstrip(),
},
"imperialcollege_sawyer_wrist_cam": {
"tasks_col": "language_instruction",
"license": "mit",
},
"jaco_play": {
"tasks_col": "language_instruction",
"license": "cc-by-4.0",
"url": "https://github.com/clvrai/clvr_jaco_play_dataset",
"citation_bibtex": dedent(r"""
@software{dass2023jacoplay,
author = {Dass, Shivin and Yapeter, Jullian and Zhang, Jesse and Zhang, Jiahui
and Pertsch, Karl and Nikolaidis, Stefanos and Lim, Joseph J.},
title = {CLVR Jaco Play Dataset},
url = {https://github.com/clvrai/clvr_jaco_play_dataset},
version = {1.0.0},
year = {2023}
}""").lstrip(),
},
"kaist_nonprehensile": {
"tasks_col": "language_instruction",
"license": "cc-by-4.0",
"url": "https://github.com/JaeHyung-Kim/rlds_dataset_builder",
"citation_bibtex": dedent(r"""
@article{kimpre,
title={Pre-and post-contact policy decomposition for non-prehensile manipulation with zero-shot sim-to-real transfer},
author={Kim, Minchan and Han, Junhyek and Kim, Jaehyung and Kim, Beomjoon},
booktitle={2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
year={2023},
organization={IEEE}
}""").lstrip(),
},
"nyu_door_opening_surprising_effectiveness": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://jyopari.github.io/VINN/",
"paper": "https://arxiv.org/abs/2112.01511",
"citation_bibtex": dedent(r"""
@misc{pari2021surprising,
title={The Surprising Effectiveness of Representation Learning for Visual Imitation},
author={Jyothish Pari and Nur Muhammad Shafiullah and Sridhar Pandian Arunachalam and Lerrel Pinto},
year={2021},
eprint={2112.01511},
archivePrefix={arXiv},
primaryClass={cs.RO}
}""").lstrip(),
},
"nyu_franka_play_dataset": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://play-to-policy.github.io/",
"paper": "https://arxiv.org/abs/2210.10047",
"citation_bibtex": dedent(r"""
@article{cui2022play,
title = {From Play to Policy: Conditional Behavior Generation from Uncurated Robot Data},
author = {Cui, Zichen Jeff and Wang, Yibin and Shafiullah, Nur Muhammad Mahi and Pinto, Lerrel},
journal = {arXiv preprint arXiv:2210.10047},
year = {2022}
}""").lstrip(),
},
"nyu_rot_dataset": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://rot-robot.github.io/",
"paper": "https://arxiv.org/abs/2206.15469",
"citation_bibtex": dedent(r"""
@inproceedings{haldar2023watch,
title={Watch and match: Supercharging imitation with regularized optimal transport},
author={Haldar, Siddhant and Mathur, Vaibhav and Yarats, Denis and Pinto, Lerrel},
booktitle={Conference on Robot Learning},
pages={32--43},
year={2023},
organization={PMLR}
}""").lstrip(),
},
"roboturk": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://roboturk.stanford.edu/dataset_real.html",
"paper": "PAPER",
"citation_bibtex": dedent(r"""
@inproceedings{mandlekar2019scaling,
title={Scaling robot supervision to hundreds of hours with roboturk: Robotic manipulation dataset through human reasoning and dexterity},
author={Mandlekar, Ajay and Booher, Jonathan and Spero, Max and Tung, Albert and Gupta, Anchit and Zhu, Yuke and Garg, Animesh and Savarese, Silvio and Fei-Fei, Li},
booktitle={2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
pages={1048--1055},
year={2019},
organization={IEEE}
}""").lstrip(),
},
"stanford_hydra_dataset": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://sites.google.com/view/hydra-il-2023",
"paper": "https://arxiv.org/abs/2306.17237",
"citation_bibtex": dedent(r"""
@article{belkhale2023hydra,
title={HYDRA: Hybrid Robot Actions for Imitation Learning},
author={Belkhale, Suneel and Cui, Yuchen and Sadigh, Dorsa},
journal={arxiv},
year={2023}
}""").lstrip(),
},
"stanford_kuka_multimodal_dataset": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://sites.google.com/view/visionandtouch",
"paper": "https://arxiv.org/abs/1810.10191",
"citation_bibtex": dedent(r"""
@inproceedings{lee2019icra,
title={Making sense of vision and touch: Self-supervised learning of multimodal representations for contact-rich tasks},
author={Lee, Michelle A and Zhu, Yuke and Srinivasan, Krishnan and Shah, Parth and Savarese, Silvio and Fei-Fei, Li and Garg, Animesh and Bohg, Jeannette},
booktitle={2019 IEEE International Conference on Robotics and Automation (ICRA)},
year={2019},
url={https://arxiv.org/abs/1810.10191}
}""").lstrip(),
},
"stanford_robocook": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://hshi74.github.io/robocook/",
"paper": "https://arxiv.org/abs/2306.14447",
"citation_bibtex": dedent(r"""
@article{shi2023robocook,
title={RoboCook: Long-Horizon Elasto-Plastic Object Manipulation with Diverse Tools},
author={Shi, Haochen and Xu, Huazhe and Clarke, Samuel and Li, Yunzhu and Wu, Jiajun},
journal={arXiv preprint arXiv:2306.14447},
year={2023}
}""").lstrip(),
},
"taco_play": {
"tasks_col": "language_instruction",
"license": "cc-by-4.0",
"url": "https://www.kaggle.com/datasets/oiermees/taco-robot",
"paper": "https://arxiv.org/abs/2209.08959, https://arxiv.org/abs/2210.01911",
"citation_bibtex": dedent(r"""
@inproceedings{rosete2022tacorl,
author = {Erick Rosete-Beas and Oier Mees and Gabriel Kalweit and Joschka Boedecker and Wolfram Burgard},
title = {Latent Plans for Task Agnostic Offline Reinforcement Learning},
journal = {Proceedings of the 6th Conference on Robot Learning (CoRL)},
year = {2022}
}
@inproceedings{mees23hulc2,
title={Grounding Language with Visual Affordances over Unstructured Data},
author={Oier Mees and Jessica Borja-Diaz and Wolfram Burgard},
booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA)},
year={2023},
address = {London, UK}
}""").lstrip(),
},
"tokyo_u_lsmo": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "URL",
"paper": "https://arxiv.org/abs/2107.05842",
"citation_bibtex": dedent(r"""
@Article{Osa22,
author = {Takayuki Osa},
journal = {The International Journal of Robotics Research},
title = {Motion Planning by Learning the Solution Manifold in Trajectory Optimization},
year = {2022},
number = {3},
pages = {291--311},
volume = {41},
}""").lstrip(),
},
"toto": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://toto-benchmark.org/",
"paper": "https://arxiv.org/abs/2306.00942",
"citation_bibtex": dedent(r"""
@inproceedings{zhou2023train,
author={Zhou, Gaoyue and Dean, Victoria and Srirama, Mohan Kumar and Rajeswaran, Aravind and Pari, Jyothish and Hatch, Kyle and Jain, Aryan and Yu, Tianhe and Abbeel, Pieter and Pinto, Lerrel and Finn, Chelsea and Gupta, Abhinav},
booktitle={2023 IEEE International Conference on Robotics and Automation (ICRA)},
title={Train Offline, Test Online: A Real Robot Learning Benchmark},
year={2023},
}""").lstrip(),
},
"ucsd_kitchen_dataset": {
"tasks_col": "language_instruction",
"license": "mit",
"citation_bibtex": dedent(r"""
@ARTICLE{ucsd_kitchens,
author = {Ge Yan and Kris Wu and Xiaolong Wang},
title = {{ucsd kitchens Dataset}},
year = {2023},
month = {August}
}""").lstrip(),
},
"ucsd_pick_and_place_dataset": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://owmcorl.github.io/#",
"paper": "https://arxiv.org/abs/2310.16029",
"citation_bibtex": dedent(r"""
@misc{Feng2023Finetuning,
title={Finetuning Offline World Models in the Real World},
author={Yunhai Feng and Nicklas Hansen and Ziyan Xiong and Chandramouli Rajagopalan and Xiaolong Wang},
year={2023}
}""").lstrip(),
},
"uiuc_d3field": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://robopil.github.io/d3fields/",
"paper": "https://arxiv.org/abs/2309.16118",
"citation_bibtex": dedent(r"""
@article{wang2023d3field,
title={D$^3$Field: Dynamic 3D Descriptor Fields for Generalizable Robotic Manipulation},
author={Wang, Yixuan and Li, Zhuoran and Zhang, Mingtong and Driggs-Campbell, Katherine and Wu, Jiajun and Fei-Fei, Li and Li, Yunzhu},
journal={arXiv preprint arXiv:2309.16118},
year={2023},
}""").lstrip(),
},
"usc_cloth_sim": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://uscresl.github.io/dmfd/",
"paper": "https://arxiv.org/abs/2207.10148",
"citation_bibtex": dedent(r"""
@article{salhotra2022dmfd,
author={Salhotra, Gautam and Liu, I-Chun Arthur and Dominguez-Kuhne, Marcus and Sukhatme, Gaurav S.},
journal={IEEE Robotics and Automation Letters},
title={Learning Deformable Object Manipulation From Expert Demonstrations},
year={2022},
volume={7},
number={4},
pages={8775-8782},
doi={10.1109/LRA.2022.3187843}
}""").lstrip(),
},
"utaustin_mutex": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://ut-austin-rpl.github.io/MUTEX/",
"paper": "https://arxiv.org/abs/2309.14320",
"citation_bibtex": dedent(r"""
@inproceedings{shah2023mutex,
title={{MUTEX}: Learning Unified Policies from Multimodal Task Specifications},
author={Rutav Shah and Roberto Mart{\'\i}n-Mart{\'\i}n and Yuke Zhu},
booktitle={7th Annual Conference on Robot Learning},
year={2023},
url={https://openreview.net/forum?id=PwqiqaaEzJ}
}""").lstrip(),
},
"utokyo_pr2_opening_fridge": {
"tasks_col": "language_instruction",
"license": "mit",
"citation_bibtex": dedent(r"""
@misc{oh2023pr2utokyodatasets,
author={Jihoon Oh and Naoaki Kanazawa and Kento Kawaharazuka},
title={X-Embodiment U-Tokyo PR2 Datasets},
year={2023},
url={https://github.com/ojh6404/rlds_dataset_builder},
}""").lstrip(),
},
"utokyo_pr2_tabletop_manipulation": {
"tasks_col": "language_instruction",
"license": "mit",
"citation_bibtex": dedent(r"""
@misc{oh2023pr2utokyodatasets,
author={Jihoon Oh and Naoaki Kanazawa and Kento Kawaharazuka},
title={X-Embodiment U-Tokyo PR2 Datasets},
year={2023},
url={https://github.com/ojh6404/rlds_dataset_builder},
}""").lstrip(),
},
"utokyo_saytap": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://saytap.github.io/",
"paper": "https://arxiv.org/abs/2306.07580",
"citation_bibtex": dedent(r"""
@article{saytap2023,
author = {Yujin Tang and Wenhao Yu and Jie Tan and Heiga Zen and Aleksandra Faust and
Tatsuya Harada},
title = {SayTap: Language to Quadrupedal Locomotion},
eprint = {arXiv:2306.07580},
url = {https://saytap.github.io},
note = {https://saytap.github.io},
year = {2023}
}""").lstrip(),
},
"utokyo_xarm_bimanual": {
"tasks_col": "language_instruction",
"license": "cc-by-4.0",
"citation_bibtex": dedent(r"""
@misc{matsushima2023weblab,
title={Weblab xArm Dataset},
author={Tatsuya Matsushima and Hiroki Furuta and Yusuke Iwasawa and Yutaka Matsuo},
year={2023},
}""").lstrip(),
},
"utokyo_xarm_pick_and_place": {
"tasks_col": "language_instruction",
"license": "cc-by-4.0",
"citation_bibtex": dedent(r"""
@misc{matsushima2023weblab,
title={Weblab xArm Dataset},
author={Tatsuya Matsushima and Hiroki Furuta and Yusuke Iwasawa and Yutaka Matsuo},
year={2023},
}""").lstrip(),
},
"viola": {
"tasks_col": "language_instruction",
"license": "mit",
"url": "https://ut-austin-rpl.github.io/VIOLA/",
"paper": "https://arxiv.org/abs/2210.11339",
"citation_bibtex": dedent(r"""
@article{zhu2022viola,
title={VIOLA: Imitation Learning for Vision-Based Manipulation with Object Proposal Priors},
author={Zhu, Yifeng and Joshi, Abhishek and Stone, Peter and Zhu, Yuke},
journal={6th Annual Conference on Robot Learning (CoRL)},
year={2022}
}""").lstrip(),
},
}
def batch_convert():
logfile = LOCAL_DIR / "conversion_log.txt"
assert set(DATASETS) == {id_.split("/")[1] for id_ in available_datasets}
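# The check above ensures the conversion list exactly matches the dataset repos registered in available_datasets.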
for num, (name, kwargs) in enumerate(DATASETS.items(), start=1):
repo_id = f"lerobot/{name}"
print(f"\nConverting {repo_id} ({num}/{len(DATASETS)})")
print("---------------------------------------------------------")
try:
convert_dataset(repo_id, LOCAL_DIR, **kwargs)
status = f"{repo_id}: success."
with open(logfile, "a") as file:
file.write(status + "\n")
except Exception:
status = f"{repo_id}: failed\n {traceback.format_exc()}"
with open(logfile, "a") as file:
file.write(status + "\n")
continue
if __name__ == "__main__":
batch_convert()

View File

@@ -1,665 +0,0 @@
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 1.6 to
2.0. You will be required to provide the 'tasks': a short but accurate description in plain English
for each of the tasks performed in the dataset. This will allow you to easily train models with task conditioning.
We support 3 different scenarios for these tasks (see instructions below):
1. Single task dataset: all episodes of your dataset have the same single task.
2. Single task episodes: the episodes of your dataset each contain a single task but they can differ from
one episode to the next.
3. Multi task episodes: episodes of your dataset may each contain several different tasks.
You can also provide a robot config .yaml file (not mandatory) to this script via the option
'--robot-config' so that it writes information about the robot (robot type, motors names) this dataset was
recorded with. For now, only Aloha/Koch type robots are supported with this option.
# 1. Single task dataset
If your dataset contains a single task, you can simply provide it directly via the CLI with the
'--single-task' option.
Examples:
```bash
python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
--repo-id lerobot/aloha_sim_insertion_human_image \
--single-task "Insert the peg into the socket." \
--robot-config lerobot/configs/robot/aloha.yaml \
--local-dir data
```
```bash
python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
--repo-id aliberts/koch_tutorial \
--single-task "Pick the Lego block and drop it in the box on the right." \
--robot-config lerobot/configs/robot/koch.yaml \
--local-dir data
```
# 2. Single task episodes
If your dataset is a multi-task dataset, you have two options to provide the tasks to this script:
- If your dataset already contains a language instruction column in its parquet file, you can simply provide
this column's name with the '--tasks-col' arg.
Example:
```bash
python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
--repo-id lerobot/stanford_kuka_multimodal_dataset \
--tasks-col "language_instruction" \
--local-dir data
```
- If your dataset doesn't contain a language instruction, you should provide the path to a .json file with the
'--tasks-path' arg. This file should have the following structure where keys correspond to each
episode_index in the dataset, and values are the language instruction for that episode.
Example:
```json
{
"0": "Do something",
"1": "Do something else",
"2": "Do something",
"3": "Go there",
...
}
```
# 3. Multi task episodes
If you have multiple tasks per episode, your dataset should contain a language instruction column in its
parquet file, and you must provide this column's name with the '--tasks-col' arg.
Example:
```bash
python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
--repo-id lerobot/stanford_kuka_multimodal_dataset \
--tasks-col "language_instruction" \
--local-dir data
```
"""
import argparse
import contextlib
import filecmp
import json
import logging
import math
import os
import shutil
import subprocess
import tempfile
from pathlib import Path
import datasets
import pyarrow.compute as pc
import pyarrow.parquet as pq
import torch
from datasets import Dataset
from huggingface_hub import HfApi
from huggingface_hub.errors import EntryNotFoundError, HfHubHTTPError
from safetensors.torch import load_file
from lerobot.common.datasets.utils import (
DEFAULT_CHUNK_SIZE,
DEFAULT_PARQUET_PATH,
DEFAULT_VIDEO_PATH,
EPISODES_PATH,
INFO_PATH,
STATS_PATH,
TASKS_PATH,
create_branch,
create_lerobot_dataset_card,
flatten_dict,
get_hub_safe_version,
load_json,
unflatten_dict,
write_json,
write_jsonlines,
)
from lerobot.common.datasets.video_utils import (
VideoFrame, # noqa: F401
get_image_pixel_channels,
get_video_info,
)
from lerobot.common.utils.utils import init_hydra_config
V16 = "v1.6"
V20 = "v2.0"
GITATTRIBUTES_REF = "aliberts/gitattributes_reference"
V1_VIDEO_FILE = "{video_key}_episode_{episode_index:06d}.mp4"
V1_INFO_PATH = "meta_data/info.json"
V1_STATS_PATH = "meta_data/stats.safetensors"
def parse_robot_config(config_path: Path, config_overrides: list[str] | None = None) -> tuple[str, dict]:
robot_cfg = init_hydra_config(config_path, config_overrides)
if robot_cfg["robot_type"] in ["aloha", "koch"]:
state_names = [
f"{arm}_{motor}" if len(robot_cfg["follower_arms"]) > 1 else motor
for arm in robot_cfg["follower_arms"]
for motor in robot_cfg["follower_arms"][arm]["motors"]
]
action_names = [
# f"{arm}_{motor}" for arm in ["left", "right"] for motor in robot_cfg["leader_arms"][arm]["motors"]
f"{arm}_{motor}" if len(robot_cfg["leader_arms"]) > 1 else motor
for arm in robot_cfg["leader_arms"]
for motor in robot_cfg["leader_arms"][arm]["motors"]
]
# elif robot_cfg["robot_type"] == "stretch3": TODO
else:
raise NotImplementedError(
"Please provide robot_config={'robot_type': ..., 'names': ...} directly to convert_dataset()."
)
return {
"robot_type": robot_cfg["robot_type"],
"names": {
"observation.state": state_names,
"observation.effort": state_names,
"action": action_names,
},
}
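# Illustrative output: motor names are prefixed with the arm name only on multi-arm robots,
# e.g. "left_shoulder_pan" on a bimanual aloha vs. plain "shoulder_pan" on a single-arm koch (names hypothetical).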
def convert_stats_to_json(v1_dir: Path, v2_dir: Path) -> None:
safetensor_path = v1_dir / V1_STATS_PATH
stats = load_file(safetensor_path)
serialized_stats = {key: value.tolist() for key, value in stats.items()}
serialized_stats = unflatten_dict(serialized_stats)
json_path = v2_dir / STATS_PATH
json_path.parent.mkdir(exist_ok=True, parents=True)
with open(json_path, "w") as f:
json.dump(serialized_stats, f, indent=4)
# Sanity check
with open(json_path) as f:
stats_json = json.load(f)
stats_json = flatten_dict(stats_json)
stats_json = {key: torch.tensor(value) for key, value in stats_json.items()}
for key in stats:
torch.testing.assert_close(stats_json[key], stats[key])
def get_features_from_hf_dataset(dataset: Dataset, robot_config: dict | None = None) -> dict[str, list]:
features = {}
for key, ft in dataset.features.items():
if isinstance(ft, datasets.Value):
dtype = ft.dtype
shape = (1,)
names = None
if isinstance(ft, datasets.Sequence):
assert isinstance(ft.feature, datasets.Value)
dtype = ft.feature.dtype
shape = (ft.length,)
motor_names = (
robot_config["names"][key] if robot_config else [f"motor_{i}" for i in range(ft.length)]
)
assert len(motor_names) == shape[0]
names = {"motors": motor_names}
elif isinstance(ft, datasets.Image):
dtype = "image"
image = dataset[0][key] # Assuming first row
channels = get_image_pixel_channels(image)
shape = (image.height, image.width, channels)
names = ["height", "width", "channel"]
elif ft._type == "VideoFrame":
dtype = "video"
shape = None # Add shape later
names = ["height", "width", "channel"]
features[key] = {
"dtype": dtype,
"shape": shape,
"names": names,
}
return features
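# For illustration, without a robot_config a 6-dim state Sequence column maps to something like:
# {"dtype": "float32", "shape": (6,), "names": {"motors": ["motor_0", ..., "motor_5"]}}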
def add_task_index_by_episodes(dataset: Dataset, tasks_by_episodes: dict) -> tuple[Dataset, list[str]]:
df = dataset.to_pandas()
tasks = list(set(tasks_by_episodes.values()))
tasks_to_task_index = {task: task_idx for task_idx, task in enumerate(tasks)}
episodes_to_task_index = {ep_idx: tasks_to_task_index[task] for ep_idx, task in tasks_by_episodes.items()}
df["task_index"] = df["episode_index"].map(episodes_to_task_index).astype(int)
features = dataset.features
features["task_index"] = datasets.Value(dtype="int64")
dataset = Dataset.from_pandas(df, features=features, split="train")
return dataset, tasks
def add_task_index_from_tasks_col(
dataset: Dataset, tasks_col: str
) -> tuple[Dataset, dict[str, list[str]], list[str]]:
df = dataset.to_pandas()
# HACK: This is to clean some of the instructions in our version of Open X datasets
prefix_to_clean = "tf.Tensor(b'"
suffix_to_clean = "', shape=(), dtype=string)"
df[tasks_col] = df[tasks_col].str.removeprefix(prefix_to_clean).str.removesuffix(suffix_to_clean)
# Create task_index col
tasks_by_episode = df.groupby("episode_index")[tasks_col].unique().apply(lambda x: x.tolist()).to_dict()
tasks = df[tasks_col].unique().tolist()
tasks_to_task_index = {task: idx for idx, task in enumerate(tasks)}
df["task_index"] = df[tasks_col].map(tasks_to_task_index).astype(int)
# Build the dataset back from df
features = dataset.features
features["task_index"] = datasets.Value(dtype="int64")
dataset = Dataset.from_pandas(df, features=features, split="train")
dataset = dataset.remove_columns(tasks_col)
return dataset, tasks, tasks_by_episode
def split_parquet_by_episodes(
dataset: Dataset,
total_episodes: int,
total_chunks: int,
output_dir: Path,
) -> list:
table = dataset.data.table
episode_lengths = []
for ep_chunk in range(total_chunks):
ep_chunk_start = DEFAULT_CHUNK_SIZE * ep_chunk
ep_chunk_end = min(DEFAULT_CHUNK_SIZE * (ep_chunk + 1), total_episodes)
chunk_dir = "/".join(DEFAULT_PARQUET_PATH.split("/")[:-1]).format(episode_chunk=ep_chunk)
(output_dir / chunk_dir).mkdir(parents=True, exist_ok=True)
for ep_idx in range(ep_chunk_start, ep_chunk_end):
ep_table = table.filter(pc.equal(table["episode_index"], ep_idx))
episode_lengths.insert(ep_idx, len(ep_table))
output_file = output_dir / DEFAULT_PARQUET_PATH.format(
episode_chunk=ep_chunk, episode_index=ep_idx
)
pq.write_table(ep_table, output_file)
return episode_lengths
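# The resulting layout follows DEFAULT_PARQUET_PATH: one parquet file per episode, grouped into
# chunk subdirectories, e.g. data/chunk-000/episode_000000.parquet (illustrative path).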
def move_videos(
repo_id: str,
video_keys: list[str],
total_episodes: int,
total_chunks: int,
work_dir: Path,
clean_gittatributes: Path,
branch: str = "main",
) -> None:
"""
HACK: Since HfApi() doesn't provide a way to move files directly in a repo, this function will run git
commands to fetch git lfs video files references to move them into subdirectories without having to
actually download them.
"""
_lfs_clone(repo_id, work_dir, branch)
videos_moved = False
video_files = [str(f.relative_to(work_dir)) for f in work_dir.glob("videos*/*.mp4")]
if len(video_files) == 0:
video_files = [str(f.relative_to(work_dir)) for f in work_dir.glob("videos*/*/*/*.mp4")]
videos_moved = True # Videos have already been moved
assert len(video_files) == total_episodes * len(video_keys)
lfs_untracked_videos = _get_lfs_untracked_videos(work_dir, video_files)
current_gittatributes = work_dir / ".gitattributes"
if not filecmp.cmp(current_gittatributes, clean_gittatributes, shallow=False):
fix_gitattributes(work_dir, current_gittatributes, clean_gittatributes)
if lfs_untracked_videos:
fix_lfs_video_files_tracking(work_dir, lfs_untracked_videos)
if videos_moved:
return
video_dirs = sorted(work_dir.glob("videos*/"))
for ep_chunk in range(total_chunks):
ep_chunk_start = DEFAULT_CHUNK_SIZE * ep_chunk
ep_chunk_end = min(DEFAULT_CHUNK_SIZE * (ep_chunk + 1), total_episodes)
for vid_key in video_keys:
chunk_dir = "/".join(DEFAULT_VIDEO_PATH.split("/")[:-1]).format(
episode_chunk=ep_chunk, video_key=vid_key
)
(work_dir / chunk_dir).mkdir(parents=True, exist_ok=True)
for ep_idx in range(ep_chunk_start, ep_chunk_end):
target_path = DEFAULT_VIDEO_PATH.format(
episode_chunk=ep_chunk, video_key=vid_key, episode_index=ep_idx
)
video_file = V1_VIDEO_FILE.format(video_key=vid_key, episode_index=ep_idx)
if len(video_dirs) == 1:
video_path = video_dirs[0] / video_file
else:
for dir in video_dirs:
if (dir / video_file).is_file():
video_path = dir / video_file
break
video_path.rename(work_dir / target_path)
commit_message = "Move video files into chunk subdirectories"
subprocess.run(["git", "add", "."], cwd=work_dir, check=True)
subprocess.run(["git", "commit", "-m", commit_message], cwd=work_dir, check=True)
subprocess.run(["git", "push"], cwd=work_dir, check=True)
def fix_lfs_video_files_tracking(work_dir: Path, lfs_untracked_videos: list[str]) -> None:
"""
HACK: This function fixes the tracking by git lfs which was not properly set on some repos. In that case,
there's no other option than to download the actual files and reupload them with lfs tracking.
"""
for i in range(0, len(lfs_untracked_videos), 100):
files = lfs_untracked_videos[i : i + 100]
try:
subprocess.run(["git", "rm", "--cached", *files], cwd=work_dir, capture_output=True, check=True)
except subprocess.CalledProcessError as e:
print("git rm --cached ERROR:")
print(e.stderr)
subprocess.run(["git", "add", *files], cwd=work_dir, check=True)
commit_message = "Track video files with git lfs"
subprocess.run(["git", "commit", "-m", commit_message], cwd=work_dir, check=True)
subprocess.run(["git", "push"], cwd=work_dir, check=True)
def fix_gitattributes(work_dir: Path, current_gittatributes: Path, clean_gittatributes: Path) -> None:
shutil.copyfile(clean_gittatributes, current_gittatributes)
subprocess.run(["git", "add", ".gitattributes"], cwd=work_dir, check=True)
subprocess.run(["git", "commit", "-m", "Fix .gitattributes"], cwd=work_dir, check=True)
subprocess.run(["git", "push"], cwd=work_dir, check=True)
def _lfs_clone(repo_id: str, work_dir: Path, branch: str) -> None:
subprocess.run(["git", "lfs", "install"], cwd=work_dir, check=True)
repo_url = f"https://huggingface.co/datasets/{repo_id}"
env = {"GIT_LFS_SKIP_SMUDGE": "1"} # Prevent downloading LFS files
subprocess.run(
["git", "clone", "--branch", branch, "--single-branch", "--depth", "1", repo_url, str(work_dir)],
check=True,
env=env,
)
def _get_lfs_untracked_videos(work_dir: Path, video_files: list[str]) -> list[str]:
lfs_tracked_files = subprocess.run(
["git", "lfs", "ls-files", "-n"], cwd=work_dir, capture_output=True, text=True, check=True
)
lfs_tracked_files = set(lfs_tracked_files.stdout.splitlines())
return [f for f in video_files if f not in lfs_tracked_files]
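# "git lfs ls-files -n" prints one tracked path per line; any video file missing from that
# list needs its LFS tracking fixed before being moved.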
def get_videos_info(repo_id: str, local_dir: Path, video_keys: list[str], branch: str) -> dict:
# Assumes first episode
video_files = [
DEFAULT_VIDEO_PATH.format(episode_chunk=0, video_key=vid_key, episode_index=0)
for vid_key in video_keys
]
hub_api = HfApi()
hub_api.snapshot_download(
repo_id=repo_id, repo_type="dataset", local_dir=local_dir, revision=branch, allow_patterns=video_files
)
videos_info_dict = {}
for vid_key, vid_path in zip(video_keys, video_files, strict=True):
videos_info_dict[vid_key] = get_video_info(local_dir / vid_path)
return videos_info_dict
def convert_dataset(
repo_id: str,
local_dir: Path,
single_task: str | None = None,
tasks_path: Path | None = None,
tasks_col: Path | None = None,
robot_config: dict | None = None,
test_branch: str | None = None,
**card_kwargs,
):
v1 = get_hub_safe_version(repo_id, V16)
v1x_dir = local_dir / V16 / repo_id
v20_dir = local_dir / V20 / repo_id
v1x_dir.mkdir(parents=True, exist_ok=True)
v20_dir.mkdir(parents=True, exist_ok=True)
hub_api = HfApi()
hub_api.snapshot_download(
repo_id=repo_id, repo_type="dataset", revision=v1, local_dir=v1x_dir, ignore_patterns="videos*/"
)
branch = "main"
if test_branch:
branch = test_branch
create_branch(repo_id=repo_id, branch=test_branch, repo_type="dataset")
metadata_v1 = load_json(v1x_dir / V1_INFO_PATH)
dataset = datasets.load_dataset("parquet", data_dir=v1x_dir / "data", split="train")
features = get_features_from_hf_dataset(dataset, robot_config)
video_keys = [key for key, ft in features.items() if ft["dtype"] == "video"]
if single_task and "language_instruction" in dataset.column_names:
logging.warning(
"'single_task' provided but 'language_instruction' tasks_col found. Using 'language_instruction'.",
)
single_task = None
tasks_col = "language_instruction"
# Episodes & chunks
episode_indices = sorted(dataset.unique("episode_index"))
total_episodes = len(episode_indices)
assert episode_indices == list(range(total_episodes))
total_videos = total_episodes * len(video_keys)
total_chunks = total_episodes // DEFAULT_CHUNK_SIZE
if total_episodes % DEFAULT_CHUNK_SIZE != 0:
total_chunks += 1
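# Ceiling division: e.g. if DEFAULT_CHUNK_SIZE were 1000, 2050 episodes would yield 3 chunks
# (two full chunks plus one holding the remaining 50 episodes).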
# Tasks
if single_task:
tasks_by_episodes = {ep_idx: single_task for ep_idx in episode_indices}
dataset, tasks = add_task_index_by_episodes(dataset, tasks_by_episodes)
tasks_by_episodes = {ep_idx: [task] for ep_idx, task in tasks_by_episodes.items()}
elif tasks_path:
tasks_by_episodes = load_json(tasks_path)
tasks_by_episodes = {int(ep_idx): task for ep_idx, task in tasks_by_episodes.items()}
dataset, tasks = add_task_index_by_episodes(dataset, tasks_by_episodes)
tasks_by_episodes = {ep_idx: [task] for ep_idx, task in tasks_by_episodes.items()}
elif tasks_col:
dataset, tasks, tasks_by_episodes = add_task_index_from_tasks_col(dataset, tasks_col)
else:
raise ValueError("One of 'single_task', 'tasks_path' or 'tasks_col' must be provided.")
assert set(tasks) == {task for ep_tasks in tasks_by_episodes.values() for task in ep_tasks}
tasks = [{"task_index": task_idx, "task": task} for task_idx, task in enumerate(tasks)]
write_jsonlines(tasks, v20_dir / TASKS_PATH)
features["task_index"] = {
"dtype": "int64",
"shape": (1,),
"names": None,
}
# Videos
if video_keys:
assert metadata_v1.get("video", False)
dataset = dataset.remove_columns(video_keys)
clean_gitattr = Path(
hub_api.hf_hub_download(
repo_id=GITATTRIBUTES_REF, repo_type="dataset", local_dir=local_dir, filename=".gitattributes"
)
).absolute()
with tempfile.TemporaryDirectory() as tmp_video_dir:
move_videos(
repo_id, video_keys, total_episodes, total_chunks, Path(tmp_video_dir), clean_gitattr, branch
)
videos_info = get_videos_info(repo_id, v1x_dir, video_keys=video_keys, branch=branch)
for key in video_keys:
features[key]["shape"] = (
videos_info[key].pop("video.height"),
videos_info[key].pop("video.width"),
videos_info[key].pop("video.channels"),
)
features[key]["video_info"] = videos_info[key]
assert math.isclose(videos_info[key]["video.fps"], metadata_v1["fps"], rel_tol=1e-3)
if "encoding" in metadata_v1:
assert videos_info[key]["video.pix_fmt"] == metadata_v1["encoding"]["pix_fmt"]
else:
assert metadata_v1.get("video", 0) == 0
videos_info = None
# Split data into one parquet file per episode
episode_lengths = split_parquet_by_episodes(dataset, total_episodes, total_chunks, v20_dir)
if robot_config is not None:
robot_type = robot_config["robot_type"]
repo_tags = [robot_type]
else:
robot_type = "unknown"
repo_tags = None
# Episodes
episodes = [
{"episode_index": ep_idx, "tasks": tasks_by_episodes[ep_idx], "length": episode_lengths[ep_idx]}
for ep_idx in episode_indices
]
write_jsonlines(episodes, v20_dir / EPISODES_PATH)
# Assemble metadata v2.0
metadata_v2_0 = {
"codebase_version": V20,
"robot_type": robot_type,
"total_episodes": total_episodes,
"total_frames": len(dataset),
"total_tasks": len(tasks),
"total_videos": total_videos,
"total_chunks": total_chunks,
"chunks_size": DEFAULT_CHUNK_SIZE,
"fps": metadata_v1["fps"],
"splits": {"train": f"0:{total_episodes}"},
"data_path": DEFAULT_PARQUET_PATH,
"video_path": DEFAULT_VIDEO_PATH if video_keys else None,
"features": features,
}
write_json(metadata_v2_0, v20_dir / INFO_PATH)
convert_stats_to_json(v1x_dir, v20_dir)
card = create_lerobot_dataset_card(tags=repo_tags, dataset_info=metadata_v2_0, **card_kwargs)
with contextlib.suppress(EntryNotFoundError, HfHubHTTPError):
hub_api.delete_folder(repo_id=repo_id, path_in_repo="data", repo_type="dataset", revision=branch)
with contextlib.suppress(EntryNotFoundError, HfHubHTTPError):
hub_api.delete_folder(repo_id=repo_id, path_in_repo="meta_data", repo_type="dataset", revision=branch)
with contextlib.suppress(EntryNotFoundError, HfHubHTTPError):
hub_api.delete_folder(repo_id=repo_id, path_in_repo="meta", repo_type="dataset", revision=branch)
hub_api.upload_folder(
repo_id=repo_id,
path_in_repo="data",
folder_path=v20_dir / "data",
repo_type="dataset",
revision=branch,
)
hub_api.upload_folder(
repo_id=repo_id,
path_in_repo="meta",
folder_path=v20_dir / "meta",
repo_type="dataset",
revision=branch,
)
card.push_to_hub(repo_id=repo_id, repo_type="dataset", revision=branch)
if not test_branch:
create_branch(repo_id=repo_id, branch=V20, repo_type="dataset")
def main():
parser = argparse.ArgumentParser()
task_args = parser.add_mutually_exclusive_group(required=True)
parser.add_argument(
"--repo-id",
type=str,
required=True,
help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset (e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).",
)
task_args.add_argument(
"--single-task",
type=str,
help="A short but accurate description of the single task performed in the dataset.",
)
task_args.add_argument(
"--tasks-col",
type=str,
help="The name of the column containing language instructions",
)
task_args.add_argument(
"--tasks-path",
type=Path,
help="The path to a .json file containing one language instruction for each episode_index",
)
parser.add_argument(
"--robot-config",
type=Path,
default=None,
help="Path to the robot's config yaml the dataset during conversion.",
)
parser.add_argument(
"--robot-overrides",
type=str,
nargs="*",
help="Any key=value arguments to override the robot config values (use dots for.nested=overrides)",
)
parser.add_argument(
"--local-dir",
type=Path,
default=None,
help="Local directory to store the dataset during conversion. Defaults to /tmp/lerobot_dataset_v2",
)
parser.add_argument(
"--license",
type=str,
default="apache-2.0",
help="Repo license. Must be one of https://huggingface.co/docs/hub/repositories-licenses. Defaults to mit.",
)
parser.add_argument(
"--test-branch",
type=str,
default=None,
help="Repo branch to test your conversion first (e.g. 'v2.0.test')",
)
args = parser.parse_args()
if not args.local_dir:
args.local_dir = Path("/tmp/lerobot_dataset_v2")
robot_config = parse_robot_config(args.robot_config, args.robot_overrides) if args.robot_config else None
del args.robot_config, args.robot_overrides
convert_dataset(**vars(args), robot_config=robot_config)
if __name__ == "__main__":
main()

View File

@@ -13,7 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import subprocess
import warnings
@@ -26,11 +25,47 @@ import pyarrow as pa
import torch
import torchvision
from datasets.features.features import register_feature
from PIL import Image
def load_from_videos(
item: dict[str, torch.Tensor],
video_frame_keys: list[str],
videos_dir: Path,
tolerance_s: float,
backend: str = "pyav",
):
"""Note: When using data workers (e.g. DataLoader with num_workers>0), do not call this function
in the main process (e.g. by using a second DataLoader with num_workers=0). It will result in a segmentation fault.
This probably happens because a memory reference to the video loader is created in the main process and a
subprocess fails to access it.
"""
# since video path already contains "videos" (e.g. videos_dir="data/videos", path="videos/episode_0.mp4")
data_dir = videos_dir.parent
for key in video_frame_keys:
if isinstance(item[key], list):
# load multiple frames at once (expected when delta_timestamps is not None)
timestamps = [frame["timestamp"] for frame in item[key]]
paths = [frame["path"] for frame in item[key]]
if len(set(paths)) > 1:
raise NotImplementedError("All video paths are expected to be the same for now.")
video_path = data_dir / paths[0]
frames = decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend)
item[key] = frames
else:
# load one frame
timestamps = [item[key]["timestamp"]]
video_path = data_dir / item[key]["path"]
frames = decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend)
item[key] = frames[0]
return item
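# For reference, each item[key] entry is expected to look like
# {"path": "videos/episode_000000.mp4", "timestamp": 0.04} (illustrative values),
# or a list of such dicts when delta_timestamps is set.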
def decode_video_frames_torchvision(
video_path: Path | str,
video_path: str,
timestamps: list[float],
tolerance_s: float,
backend: str = "pyav",
@@ -128,8 +163,8 @@ def decode_video_frames_torchvision(
def encode_video_frames(
imgs_dir: Path | str,
video_path: Path | str,
imgs_dir: Path,
video_path: Path,
fps: int,
vcodec: str = "libsvtav1",
pix_fmt: str = "yuv420p",
@@ -212,104 +247,3 @@ with warnings.catch_warnings():
)
# to make VideoFrame available in HuggingFace `datasets`
register_feature(VideoFrame, "VideoFrame")
def get_audio_info(video_path: Path | str) -> dict:
ffprobe_audio_cmd = [
"ffprobe",
"-v",
"error",
"-select_streams",
"a:0",
"-show_entries",
"stream=channels,codec_name,bit_rate,sample_rate,bit_depth,channel_layout,duration",
"-of",
"json",
str(video_path),
]
result = subprocess.run(ffprobe_audio_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
if result.returncode != 0:
raise RuntimeError(f"Error running ffprobe: {result.stderr}")
info = json.loads(result.stdout)
audio_stream_info = info["streams"][0] if info.get("streams") else None
if audio_stream_info is None:
return {"has_audio": False}
# Return the information, defaulting to None if no audio stream is present
return {
"has_audio": True,
"audio.channels": audio_stream_info.get("channels", None),
"audio.codec": audio_stream_info.get("codec_name", None),
"audio.bit_rate": int(audio_stream_info["bit_rate"]) if audio_stream_info.get("bit_rate") else None,
"audio.sample_rate": int(audio_stream_info["sample_rate"])
if audio_stream_info.get("sample_rate")
else None,
"audio.bit_depth": audio_stream_info.get("bit_depth", None),
"audio.channel_layout": audio_stream_info.get("channel_layout", None),
}
def get_video_info(video_path: Path | str) -> dict:
ffprobe_video_cmd = [
"ffprobe",
"-v",
"error",
"-select_streams",
"v:0",
"-show_entries",
"stream=r_frame_rate,width,height,codec_name,nb_frames,duration,pix_fmt",
"-of",
"json",
str(video_path),
]
result = subprocess.run(ffprobe_video_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
if result.returncode != 0:
raise RuntimeError(f"Error running ffprobe: {result.stderr}")
info = json.loads(result.stdout)
video_stream_info = info["streams"][0]
# Calculate fps from r_frame_rate
r_frame_rate = video_stream_info["r_frame_rate"]
num, denom = map(int, r_frame_rate.split("/"))
fps = num / denom
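# e.g. an NTSC stream reports r_frame_rate "30000/1001", giving fps ~29.97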
pixel_channels = get_video_pixel_channels(video_stream_info["pix_fmt"])
video_info = {
"video.fps": fps,
"video.height": video_stream_info["height"],
"video.width": video_stream_info["width"],
"video.channels": pixel_channels,
"video.codec": video_stream_info["codec_name"],
"video.pix_fmt": video_stream_info["pix_fmt"],
"video.is_depth_map": False,
**get_audio_info(video_path),
}
return video_info
def get_video_pixel_channels(pix_fmt: str) -> int:
if "gray" in pix_fmt or "depth" in pix_fmt or "monochrome" in pix_fmt:
return 1
elif "rgba" in pix_fmt or "yuva" in pix_fmt:
return 4
elif "rgb" in pix_fmt or "yuv" in pix_fmt:
return 3
else:
raise ValueError("Unknown format")
def get_image_pixel_channels(image: Image.Image):
if image.mode == "L":
return 1 # Grayscale
elif image.mode == "LA":
return 2 # Grayscale + Alpha
elif image.mode == "RGB":
return 3 # RGB
elif image.mode == "RGBA":
return 4 # RGBA
else:
raise ValueError("Unknown format")

View File

@@ -14,13 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from collections import deque
import gymnasium as gym
import numpy as np
import torch
from omegaconf import DictConfig
from mani_skill.utils import common
def make_env(cfg: DictConfig, n_envs: int | None = None) -> gym.vector.VectorEnv | None:
@@ -34,10 +30,6 @@ def make_env(cfg: DictConfig, n_envs: int | None = None) -> gym.vector.VectorEnv
if cfg.env.name == "real_world":
return
if "maniskill" in cfg.env.name:
env = make_maniskill_env(cfg, n_envs if n_envs is not None else cfg.eval.batch_size)
return env
package_name = f"gym_{cfg.env.name}"
try:
@@ -64,86 +56,3 @@ def make_env(cfg: DictConfig, n_envs: int | None = None) -> gym.vector.VectorEnv
)
return env
def make_maniskill_env(cfg: DictConfig, n_envs: int | None = None) -> gym.vector.VectorEnv | None:
"""Make ManiSkill3 gym environment"""
from mani_skill.vector.wrappers.gymnasium import ManiSkillVectorEnv
env = gym.make(
cfg.env.task,
obs_mode=cfg.env.obs,
control_mode=cfg.env.control_mode,
render_mode=cfg.env.render_mode,
sensor_configs=dict(width=cfg.env.image_size, height=cfg.env.image_size),
num_envs=n_envs,
)
# cfg.env_cfg.control_mode = cfg.eval_env_cfg.control_mode = env.control_mode
env = ManiSkillVectorEnv(env, ignore_terminations=True)
# state should have the size of 25
# env = ConvertToLeRobotEnv(env, n_envs)
# env = PixelWrapper(cfg, env, n_envs)
env._max_episode_steps = env.max_episode_steps = 50 # gym_utils.find_max_episode_steps_value(env)
env.unwrapped.metadata["render_fps"] = 20
return env
class PixelWrapper(gym.Wrapper):
"""
Wrapper for pixel observations. Works with Maniskill vectorized environments
"""
def __init__(self, cfg, env, num_envs, num_frames=3):
super().__init__(env)
self.cfg = cfg
self.env = env
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(num_envs, num_frames * 3, cfg.env.render_size, cfg.env.render_size),
dtype=np.uint8,
)
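# e.g. with num_frames=3 RGB frames stacked along the channel dim, each env observes 9 channels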
self._frames = deque([], maxlen=num_frames)
self._render_size = cfg.env.render_size
def _get_obs(self, obs):
frame = obs["sensor_data"]["base_camera"]["rgb"].cpu().permute(0, 3, 1, 2)
self._frames.append(frame)
return {"pixels": torch.from_numpy(np.concatenate(self._frames, axis=1)).to(self.env.device)}
def reset(self, seed=None, options=None):
obs, info = self.env.reset() # (seed=seed)
for _ in range(self._frames.maxlen):
obs_frames = self._get_obs(obs)
return obs_frames, info
def step(self, action):
obs, reward, terminated, truncated, info = self.env.step(action)
return self._get_obs(obs), reward, terminated, truncated, info
class ConvertToLeRobotEnv(gym.Wrapper):
def __init__(self, env, num_envs):
super().__init__(env)
def reset(self, seed=None, options=None):
obs, info = self.env.reset(seed=seed, options={})
return self._get_obs(obs), info
def step(self, action):
obs, reward, terminated, truncated, info = self.env.step(action)
return self._get_obs(obs), reward, terminated, truncated, info
def _get_obs(self, observation):
sensor_data = observation.pop("sensor_data")
del observation["sensor_param"]
images = []
for cam_data in sensor_data.values():
images.append(cam_data["rgb"])
images = torch.concat(images, axis=-1)
# flatten the rest of the data which should just be state data
observation = common.flatten_state_dict(
observation, use_torch=True, device=self.base_env.device
)
ret = dict()
ret["state"] = observation
ret["pixels"] = images
return ret

View File

@@ -28,9 +28,6 @@ def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Ten
"""
# map to expected inputs for the policy
return_observations = {}
# TODO: You have to merge all tensors from agent key and extra key
# You don't keep sensor param key in the observation
# And you keep sensor data rgb
if "pixels" in observations:
if isinstance(observations["pixels"], dict):
imgs = {f"observation.images.{key}": img for key, img in observations["pixels"].items()}
@@ -53,8 +50,6 @@ def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Ten
img /= 255
return_observations[imgkey] = img
# obs state agent qpos and qvel
# image
if "environment_state" in observations:
return_observations["observation.environment_state"] = torch.from_numpy(
@@ -65,38 +60,3 @@ def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Ten
# requirement for "agent_pos"
return_observations["observation.state"] = torch.from_numpy(observations["agent_pos"]).float()
return return_observations
def preprocess_maniskill_observation(observations: dict[str, np.ndarray]) -> dict[str, Tensor]:
"""Convert environment observation to LeRobot format observation.
Args:
observation: Dictionary of observation batches from a Gym vector environment.
Returns:
Dictionary of observation batches with keys renamed to LeRobot format and values as tensors.
"""
# map to expected inputs for the policy
return_observations = {}
# TODO: You have to merge all tensors from agent key and extra key
# You don't keep sensor param key in the observation
# And you keep sensor data rgb
q_pos = observations["agent"]["qpos"]
q_vel = observations["agent"]["qvel"]
tcp_pos = observations["extra"]["tcp_pose"]
img = observations["sensor_data"]["base_camera"]["rgb"]
_, h, w, c = img.shape
assert c < h and c < w, f"expect channel last images, but instead got {img.shape=}"
# sanity check that images are uint8
assert img.dtype == torch.uint8, f"expect torch.uint8, but instead {img.dtype=}"
# convert to channel first of type float32 in range [0,1]
img = einops.rearrange(img, "b h w c -> b c h w").contiguous()
img = img.type(torch.float32)
img /= 255
state = torch.cat([q_pos, q_vel, tcp_pos], dim=-1)
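# e.g. for a 9-DoF ManiSkill arm this gives 9 (qpos) + 9 (qvel) + 7 (tcp pose: position + quaternion)
# = 25 state dims (illustrative breakdown).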
return_observations["observation.image"] = img
return_observations["observation.state"] = state
return return_observations

View File

@@ -25,7 +25,6 @@ from glob import glob
from pathlib import Path
import torch
import wandb
from huggingface_hub.constants import SAFETENSORS_SINGLE_FILE
from omegaconf import DictConfig, OmegaConf
from termcolor import colored
@@ -108,6 +107,8 @@ class Logger:
self._wandb = None
else:
os.environ["WANDB_SILENT"] = "true"
import wandb
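# Lazy import so wandb is only required when wandb logging is actually enabled.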
wandb_run_id = None
if cfg.resume:
wandb_run_id = get_wandb_run_id_from_filesystem(self.checkpoints_dir)
@@ -231,7 +232,7 @@ class Logger:
# TODO(alexander-soare): Add local text log.
if self._wandb is not None:
for k, v in d.items():
if not isinstance(v, (int, float, str, wandb.Table)):
if not isinstance(v, (int, float, str)):
logging.warning(
f'WandB logging of key "{k}" was ignored as its type is not handled by this wrapper.'
)

View File

@@ -67,7 +67,6 @@ class DiffusionConfig:
use_group_norm: Whether to replace batch normalization with group normalization in the backbone.
The group sizes are set to be about 16 (to be precise, feature_dim // 16).
spatial_softmax_num_keypoints: Number of keypoints for SpatialSoftmax.
use_separate_rgb_encoders_per_camera: Whether to use a separate RGB encoder for each camera view.
down_dims: Feature dimension for each stage of temporal downsampling in the diffusion modeling Unet.
You may provide a variable number of dimensions, therefore also controlling the degree of
downsampling.
@@ -131,7 +130,6 @@ class DiffusionConfig:
pretrained_backbone_weights: str | None = None
use_group_norm: bool = True
spatial_softmax_num_keypoints: int = 32
use_separate_rgb_encoder_per_camera: bool = False
# Unet.
down_dims: tuple[int, ...] = (512, 1024, 2048)
kernel_size: int = 5

View File

@@ -182,13 +182,8 @@ class DiffusionModel(nn.Module):
self._use_env_state = False
if num_images > 0:
self._use_images = True
if self.config.use_separate_rgb_encoder_per_camera:
encoders = [DiffusionRgbEncoder(config) for _ in range(num_images)]
self.rgb_encoder = nn.ModuleList(encoders)
global_cond_dim += encoders[0].feature_dim * num_images
else:
self.rgb_encoder = DiffusionRgbEncoder(config)
global_cond_dim += self.rgb_encoder.feature_dim * num_images
self.rgb_encoder = DiffusionRgbEncoder(config)
global_cond_dim += self.rgb_encoder.feature_dim * num_images
if "observation.environment_state" in config.input_shapes:
self._use_env_state = True
global_cond_dim += config.input_shapes["observation.environment_state"][0]
@@ -244,32 +239,16 @@ class DiffusionModel(nn.Module):
"""Encode image features and concatenate them all together along with the state vector."""
batch_size, n_obs_steps = batch["observation.state"].shape[:2]
global_cond_feats = [batch["observation.state"]]
# Extract image features.
# Extract image feature (first combine batch, sequence, and camera index dims).
if self._use_images:
if self.config.use_separate_rgb_encoder_per_camera:
# Combine batch and sequence dims while rearranging to make the camera index dimension first.
images_per_camera = einops.rearrange(batch["observation.images"], "b s n ... -> n (b s) ...")
img_features_list = torch.cat(
[
encoder(images)
for encoder, images in zip(self.rgb_encoder, images_per_camera, strict=True)
]
)
# Separate batch and sequence dims back out. The camera index dim gets absorbed into the
# feature dim (effectively concatenating the camera features).
img_features = einops.rearrange(
img_features_list, "(n b s) ... -> b s (n ...)", b=batch_size, s=n_obs_steps
)
else:
# Combine batch, sequence, and "which camera" dims before passing to shared encoder.
img_features = self.rgb_encoder(
einops.rearrange(batch["observation.images"], "b s n ... -> (b s n) ...")
)
# Separate batch dim and sequence dim back out. The camera index dim gets absorbed into the
# feature dim (effectively concatenating the camera features).
img_features = einops.rearrange(
img_features, "(b s n) ... -> b s (n ...)", b=batch_size, s=n_obs_steps
)
img_features = self.rgb_encoder(
einops.rearrange(batch["observation.images"], "b s n ... -> (b s n) ...")
)
# Separate batch dim and sequence dim back out. The camera index dim gets absorbed into the
# feature dim (effectively concatenating the camera features).
img_features = einops.rearrange(
img_features, "(b s n) ... -> b s (n ...)", b=batch_size, s=n_obs_steps
)
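# Illustrative shapes: with b=8, s=2, n=2 cameras and a 64-dim encoder output,
# (8*2*2, 64) -> (8, 2, 128).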
global_cond_feats.append(img_features)
if self._use_env_state:

Some files were not shown because too many files have changed in this diff.