Compare commits
1 commit
user/mruss...Cadene-pat

| Author | SHA1 | Date |
|---|---|---|
|  | b4aef34c8e |  |

@@ -1,68 +0,0 @@
{
    "homing_offset": [
        2048,
        3072,
        3072,
        -1024,
        -1024,
        2048,
        -2048,
        2048,
        -2048
    ],
    "drive_mode": [
        1,
        1,
        1,
        0,
        0,
        1,
        0,
        1,
        0
    ],
    "start_pos": [
        2015,
        3058,
        3061,
        1071,
        1071,
        2035,
        2152,
        2029,
        2499
    ],
    "end_pos": [
        -1008,
        -1963,
        -1966,
        2141,
        2143,
        -971,
        3043,
        -1077,
        3144
    ],
    "calib_mode": [
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "LINEAR"
    ],
    "motor_names": [
        "waist",
        "shoulder",
        "shoulder_shadow",
        "elbow",
        "elbow_shadow",
        "forearm_roll",
        "wrist_angle",
        "wrist_rotate",
        "gripper"
    ]
}

@@ -1,68 +0,0 @@
{
    "homing_offset": [
        2048,
        3072,
        3072,
        -1024,
        -1024,
        2048,
        -2048,
        2048,
        -1024
    ],
    "drive_mode": [
        1,
        1,
        1,
        0,
        0,
        1,
        0,
        1,
        0
    ],
    "start_pos": [
        2035,
        3024,
        3019,
        979,
        981,
        1982,
        2166,
        2124,
        1968
    ],
    "end_pos": [
        -990,
        -2017,
        -2015,
        2078,
        2076,
        -1030,
        3117,
        -1016,
        2556
    ],
    "calib_mode": [
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "LINEAR"
    ],
    "motor_names": [
        "waist",
        "shoulder",
        "shoulder_shadow",
        "elbow",
        "elbow_shadow",
        "forearm_roll",
        "wrist_angle",
        "wrist_rotate",
        "gripper"
    ]
}

@@ -1,68 +0,0 @@
{
    "homing_offset": [
        2048,
        3072,
        3072,
        -1024,
        -1024,
        2048,
        -2048,
        2048,
        -2048
    ],
    "drive_mode": [
        1,
        1,
        1,
        0,
        0,
        1,
        0,
        1,
        0
    ],
    "start_pos": [
        2056,
        2895,
        2896,
        1191,
        1190,
        2018,
        2051,
        2056,
        2509
    ],
    "end_pos": [
        -1040,
        -2004,
        -2006,
        2126,
        2127,
        -1010,
        3050,
        -1117,
        3143
    ],
    "calib_mode": [
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "LINEAR"
    ],
    "motor_names": [
        "waist",
        "shoulder",
        "shoulder_shadow",
        "elbow",
        "elbow_shadow",
        "forearm_roll",
        "wrist_angle",
        "wrist_rotate",
        "gripper"
    ]
}

@@ -1,68 +0,0 @@
{
    "homing_offset": [
        2048,
        3072,
        3072,
        -1024,
        -1024,
        2048,
        -2048,
        2048,
        -2048
    ],
    "drive_mode": [
        1,
        1,
        1,
        0,
        0,
        1,
        0,
        1,
        0
    ],
    "start_pos": [
        2068,
        3034,
        3030,
        1038,
        1041,
        1991,
        1948,
        2090,
        1985
    ],
    "end_pos": [
        -1025,
        -2014,
        -2015,
        2058,
        2060,
        -955,
        3091,
        -940,
        2576
    ],
    "calib_mode": [
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "LINEAR"
    ],
    "motor_names": [
        "waist",
        "shoulder",
        "shoulder_shadow",
        "elbow",
        "elbow_shadow",
        "forearm_roll",
        "wrist_angle",
        "wrist_rotate",
        "gripper"
    ]
}

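The four deleted files above are motor calibration snapshots built from parallel arrays: entry i of each array describes motor i. As a purely illustrative sketch (the compare view does not preserve the original file paths, so the path below is hypothetical), such a file can be inspected like this:

```python
import json

# Hypothetical path: the diff above does not show the original file names.
with open(".cache/calibration/main_follower.json") as f:
    calib = json.load(f)

# The arrays are parallel: entry i of each array describes motor i.
for name, offset, mode in zip(
    calib["motor_names"], calib["homing_offset"], calib["calib_mode"]
):
    print(f"{name}: homing_offset={offset}, calib_mode={mode}")
```
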
@@ -65,6 +65,7 @@ htmlcov/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
@@ -72,11 +73,6 @@ coverage.xml
.hypothesis/
.pytest_cache/

# Ignore .cache except calibration
.cache/*
!.cache/calibration/
!.cache/calibration/**

# Translations
*.mo
*.pot

2  .gitattributes  (vendored)
@@ -3,4 +3,4 @@
*.safetensors filter=lfs diff=lfs merge=lfs -text
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.json !text !filter !merge !diff
*.json filter=lfs diff=lfs merge=lfs -text

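As an editorial aside on the `.gitattributes` change above: the two `*.json` lines are the before and after of a single edit, switching JSON files from bypassing all filters to being stored through Git LFS. The attributes that end up applying to a given path can be checked with `git check-attr`; the file name below is hypothetical:

```bash
# Prints the filter/diff/merge/text attributes resolved for the path.
git check-attr filter diff merge text -- some_file.json
```
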
15  .github/workflows/test.yml  (vendored)
@@ -11,7 +11,6 @@ on:
      - ".github/**"
      - "poetry.lock"
      - "Makefile"
      - ".cache/**"
  push:
    branches:
      - main
@@ -22,7 +21,6 @@ on:
      - ".github/**"
      - "poetry.lock"
      - "Makefile"
      - ".cache/**"

jobs:
  pytest:
@@ -37,17 +35,13 @@ jobs:
          lfs: true # Ensure LFS files are pulled

      - name: Install apt dependencies
        # portaudio19-dev is needed to install pyaudio
        run: |
          sudo apt-get update && \
          sudo apt-get install -y libegl1-mesa-dev ffmpeg portaudio19-dev
        run: sudo apt-get update && sudo apt-get install -y libegl1-mesa-dev ffmpeg

      - name: Install poetry
        run: |
          pipx install poetry && poetry config virtualenvs.in-project true
          echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH

      # TODO(rcadene, aliberts): python 3.12 seems to be used in the tests, not python 3.10
      - name: Set up Python 3.10
        uses: actions/setup-python@v5
        with:
@@ -66,6 +60,7 @@ jobs:
          -W ignore::UserWarning:gymnasium.utils.env_checker:247 \
          && rm -rf tests/outputs outputs


  pytest-minimal:
    name: Pytest (minimal install)
    runs-on: ubuntu-latest
@@ -85,7 +80,6 @@ jobs:
          pipx install poetry && poetry config virtualenvs.in-project true
          echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH

      # TODO(rcadene, aliberts): python 3.12 seems to be used in the tests, not python 3.10
      - name: Set up Python 3.10
        uses: actions/setup-python@v5
        with:
@@ -116,10 +110,7 @@ jobs:
          lfs: true # Ensure LFS files are pulled

      - name: Install apt dependencies
        # portaudio19-dev is needed to install pyaudio
        run: |
          sudo apt-get update && \
          sudo apt-get install -y libegl1-mesa-dev portaudio19-dev
        run: sudo apt-get update && sudo apt-get install -y libegl1-mesa-dev

      - name: Install poetry
        run: |

7  .gitignore  (vendored)
@@ -66,6 +66,7 @@ htmlcov/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
@@ -73,11 +74,6 @@ coverage.xml
.hypothesis/
.pytest_cache/

# Ignore .cache except calibration
.cache/*
!.cache/calibration/
!.cache/calibration/**

# Translations
*.mo
*.pot
@@ -125,6 +121,7 @@ celerybeat.pid
# Environments
.env
.venv
env/
venv/
env.bak/
venv.bak/

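A brief editorial note on the "`# Ignore .cache except calibration`" block removed above: `.cache/*` ignores everything inside `.cache`, while the two `!` rules re-include the calibration directory and its contents. Which rule decides a given path can be verified with `git check-ignore`; the path below is hypothetical:

```bash
# -v prints the .gitignore source (file and line) of the matching rule.
git check-ignore -v .cache/calibration/main_follower.json
```
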
@@ -20,7 +20,7 @@ Some of the ways you can contribute to 🤗 LeRobot:
* Contributing to the examples or to the documentation.
* Submitting issues related to bugs or desired new features.

Following the guides below, feel free to open issues and PRs and to coordinate your efforts with the community on our [Discord Channel](https://discord.gg/VjFz58wn3R). For specific inquiries, reach out to [Remi Cadene](mailto:remi.cadene@huggingface.co).
Following the guides below, feel free to open issues and PRs and to coordinate your efforts with the community on our [Discord Channel](https://discord.gg/VjFz58wn3R). For specific inquiries, reach out to [Remi Cadene](remi.cadene@huggingface.co).

If you are not sure how to contribute or want to know which features we are working on next, look at this project page: [LeRobot TODO](https://github.com/orgs/huggingface/projects/46)

20  README.md
@@ -23,21 +23,20 @@
</div>

<h2 align="center">
    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">New robot in town: SO-100</a></p>
    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md">Hot new tutorial: Getting started with real-world robots</a></p>
</h2>

<div align="center">
    <img src="media/so100/leader_follower.webp?raw=true" alt="SO-100 leader and follower arms" title="SO-100 leader and follower arms" width="50%">
    <p>We just added a new tutorial on how to build a more affordable robot, at the price of $110 per arm!</p>
    <img src="media/tutorial/koch_v1_1_leader_follower.webp?raw=true" alt="Koch v1.1 leader and follower arms" title="Koch v1.1 leader and follower arms" width="50%">
    <p>We just dropped an in-depth tutorial on how to build your own robot!</p>
    <p>Teach it new skills by showing it a few moves with just a laptop.</p>
    <p>Then watch your homemade robot act autonomously 🤯</p>
    <p>Follow the link to the <a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">full tutorial for SO-100</a>.</p>
    <p>For more info, see <a href="https://x.com/RemiCadene/status/1825455895561859185">our thread on X</a> or <a href="https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md">our tutorial page</a>.</p>
</div>

<br/>

<h3 align="center">
    <p>LeRobot: State-of-the-art AI for real-world robotics</p>
    <p>State-of-the-art AI for real-world robotics</p>
</h3>

---
@@ -267,20 +266,13 @@ checkpoints
│   └── training_state.pth  # optimizer/scheduler/rng state and training step
```

To resume training from a checkpoint, you can add these arguments to the `train.py` python command:
```bash
hydra.run.dir=your/original/experiment/dir resume=true
```

It will load the pretrained model as well as the optimizer and scheduler states for training. For more information, please see our tutorial on training resumption [here](https://github.com/huggingface/lerobot/blob/main/examples/5_resume_training.md).

To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding:

```bash
wandb.enable=true
```

A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](https://github.com/huggingface/lerobot/blob/main/examples/4_train_policy_with_script.md#typical-logs-and-metrics) for an explanation of some commonly used metrics in logs.
A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser:



@@ -22,7 +22,7 @@ RUN echo "source /opt/venv/bin/activate" >> /root/.bashrc
COPY . /lerobot
WORKDIR /lerobot
RUN pip install --upgrade --no-cache-dir pip
RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]" \
RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, koch]" \
    --extra-index-url https://download.pytorch.org/whl/cpu

# Set EGL as the rendering backend for MuJoCo

@@ -24,7 +24,7 @@ RUN echo "source /opt/venv/bin/activate" >> /root/.bashrc
COPY . /lerobot
WORKDIR /lerobot
RUN pip install --upgrade --no-cache-dir pip
RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]"
RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, koch]"

# Set EGL as the rendering backend for MuJoCo
ENV MUJOCO_GL="egl"

BIN  examples.zip

@@ -1,280 +0,0 @@
This tutorial explains how to use [SO-100](https://github.com/TheRobotStudio/SO-ARM100) with LeRobot.

## Source the parts

Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with links to source the parts, as well as the instructions to 3D print the parts, and advice if it's your first time printing or if you don't already own a 3D printer.

**Important**: Before assembling, you will first need to configure your motors. To this end, we provide a handy script, so let's first install LeRobot. After configuration, we will also guide you through assembly.

## Install LeRobot

On your computer:

1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
```bash
mkdir -p ~/miniconda3
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
rm ~/miniconda3/miniconda.sh
~/miniconda3/bin/conda init bash
```

2. Restart your shell or run `source ~/.bashrc`

3. Create and activate a fresh conda environment for lerobot:
```bash
conda create -y -n lerobot python=3.10 && conda activate lerobot
```

4. Clone LeRobot:
```bash
git clone https://github.com/huggingface/lerobot.git ~/lerobot
```

5. Install LeRobot with dependencies for the feetech motors:
```bash
cd ~/lerobot && pip install -e ".[feetech]"
```

For Linux only (not Mac), install extra dependencies for recording datasets:
```bash
conda install -y -c conda-forge ffmpeg
pip uninstall -y opencv-python
conda install -y -c conda-forge "opencv>=4.10.0"
```

## Configure the motors

Follow step 1 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I), which illustrates the use of our scripts below.

**Find USB ports associated with your arms**
To find the correct ports for each arm, run the utility script twice:
```bash
python lerobot/scripts/find_motors_bus_port.py
```

Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.

[...Disconnect leader arm and press Enter...]

The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
Reconnect the usb cable.
```

Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.

[...Disconnect follower arm and press Enter...]

The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
Reconnect the usb cable.
```

Troubleshooting: On Linux, you might need to give access to the USB ports by running:
```bash
sudo chmod 666 /dev/ttyACM0
sudo chmod 666 /dev/ttyACM1
```

**Configure your motors**
Plug in your first motor and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate:
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand feetech \
  --model sts3215 \
  --baudrate 1000000 \
  --ID 1
```

Note: These motors are currently limited. They can only take values between 0 and 4096, which corresponds to one full turn; they can't turn beyond that. 2048 is the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) or +2048 steps (180 degrees clockwise) and reach the end of the range in either direction. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift of up to ±2048 steps (±180 degrees).
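To make the step/degree arithmetic above concrete, here is a small editorial sketch (not a script from the repo), using the 4096-steps-per-turn resolution just described:

```python
STEPS_PER_TURN = 4096  # one full mechanical turn of the sts3215
DEGREES_PER_STEP = 360 / STEPS_PER_TURN  # 0.087890625 degrees

def steps_to_degrees(position: int, center: int = 2048) -> float:
    """Convert a raw motor position to degrees relative to the centered position."""
    return (position - center) * DEGREES_PER_STEP

print(steps_to_degrees(4096))  # +180.0: half a turn clockwise from center
print(steps_to_degrees(0))     # -180.0: half a turn anticlockwise from center
```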

Then unplug your motor, plug in the second motor, and set its ID to 2:
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand feetech \
  --model sts3215 \
  --baudrate 1000000 \
  --ID 2
```

Repeat the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm.

**Remove the gears of the 6 leader motors**
Follow step 2 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). You need to remove the gears from the motors of the leader arm. As a result, you will only use the position encoding of the motors and reduce friction, making the leader arm easier to operate.

**Add motor horns to the motors**
Follow step 3 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). For SO-100, you need to align the holes on the motor horn to the motor spline at approximately the 1:30, 4:30, 7:30 and 10:30 positions.
Try to avoid rotating the motor while doing so, to keep the position 2048 set during configuration. This is especially tricky for the leader motors, which are more sensitive without the gears, but it's okay if they end up a bit rotated.

## Assemble the arms

Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can do the second arm in under 1 hour.

## Calibrate

Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one SO-100 robot to work on another.

**Auto-calibration of follower arm**
Follow step 5 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I), which illustrates the auto-calibration of the follower arm. You first need to manually move your follower arm to this initial position:

<div style="text-align:center;">
    <img src="../media/so100/follower_initial.webp?raw=true" alt="SO-100 follower arm initial position" title="SO-100 follower arm initial position" width="50%">
</div>

Then run this script to launch auto-calibration:
```bash
python lerobot/scripts/control_robot.py calibrate \
  --robot-path lerobot/configs/robot/so100.yaml \
  --robot-overrides '~cameras' --arms main_follower
```

Note: You can't run auto-calibration for the leader arm, since we removed the gears. Thus, you will need to manually calibrate the leader arm. It's less precise than auto-calibration, but precision is not as critical for the leader arm.

**Manual calibration of leader arm**
Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I), which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:

| 1. Zero position | 2. Rotated position | 3. Rest position |
|---|---|---|
| <img src="../media/so100/leader_zero.webp?raw=true" alt="SO-100 leader arm zero position" title="SO-100 leader arm zero position" style="width:100%;"> | <img src="../media/so100/leader_rotated.webp?raw=true" alt="SO-100 leader arm rotated position" title="SO-100 leader arm rotated position" style="width:100%;"> | <img src="../media/so100/leader_rest.webp?raw=true" alt="SO-100 leader arm rest position" title="SO-100 leader arm rest position" style="width:100%;"> |

Run this script to launch manual calibration:
```bash
python lerobot/scripts/control_robot.py calibrate \
  --robot-path lerobot/configs/robot/so100.yaml \
  --robot-overrides '~cameras' --arms main_leader
```

## Teleoperate

**Simple teleop**
You are now ready to teleoperate your robot! Run this simple script (it won't connect to or display the cameras):
```bash
python lerobot/scripts/control_robot.py teleoperate \
  --robot-path lerobot/configs/robot/so100.yaml \
  --robot-overrides '~cameras' \
  --display-cameras 0
```

**Teleop with displaying cameras**
Follow [this guide to set up your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while teleoperating, by running the following command. This is useful to prepare your setup before recording your first dataset.
```bash
python lerobot/scripts/control_robot.py teleoperate \
  --robot-path lerobot/configs/robot/so100.yaml
```

## Record a dataset

Once you're familiar with teleoperation, you can record your first dataset with SO-100.

If you want to use the Hugging Face hub features for uploading your dataset and you haven't done so before, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```

Store your Hugging Face repository name in a variable to run these commands:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```

Record 2 episodes and upload your dataset to the hub:
```bash
python lerobot/scripts/control_robot.py record \
  --robot-path lerobot/configs/robot/so100.yaml \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/so100_test \
  --tags so100 tutorial \
  --warmup-time-s 5 \
  --episode-time-s 40 \
  --reset-time-s 10 \
  --num-episodes 2 \
  --push-to-hub 1
```

## Visualize a dataset

If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id, given by:
```bash
echo ${HF_USER}/so100_test
```

If you didn't upload it (`--push-to-hub 0`), you can also visualize it locally with:
```bash
python lerobot/scripts/visualize_dataset_html.py \
  --root data \
  --repo-id ${HF_USER}/so100_test
```

## Replay an episode

Now try to replay the first episode on your robot:
```bash
DATA_DIR=data python lerobot/scripts/control_robot.py replay \
  --robot-path lerobot/configs/robot/so100.yaml \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/so100_test \
  --episode 0
```

## Train a policy

To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
DATA_DIR=data python lerobot/scripts/train.py \
  dataset_repo_id=${HF_USER}/so100_test \
  policy=act_so100_real \
  env=so100_real \
  hydra.run.dir=outputs/train/act_so100_test \
  hydra.job.name=act_so100_test \
  device=cuda \
  wandb.enable=true
```

Let's explain it:
1. We provided the dataset as an argument with `dataset_repo_id=${HF_USER}/so100_test`.
2. We provided the policy with `policy=act_so100_real`. This loads configurations from [`lerobot/configs/policy/act_so100_real.yaml`](../lerobot/configs/policy/act_so100_real.yaml). Importantly, this policy uses 2 cameras as input: `laptop` and `phone`.
3. We provided an environment as an argument with `env=so100_real`. This loads configurations from [`lerobot/configs/env/so100_real.yaml`](../lerobot/configs/env/so100_real.yaml).
4. We provided `device=cuda` since we are training on an Nvidia GPU, but you can also use `device=mps` if you are on a Mac with Apple silicon, or `device=cpu` otherwise.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.
6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you don't provide `DATA_DIR`, your dataset will be downloaded from the Hugging Face hub to your cache folder `$HOME/.cache/huggingface`. In future versions of `lerobot`, both directories will be kept in sync.

Training should take several hours. You will find checkpoints in `outputs/train/act_so100_test/checkpoints`.

## Evaluate your policy

You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
python lerobot/scripts/control_robot.py record \
  --robot-path lerobot/configs/robot/so100.yaml \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/eval_act_so100_test \
  --tags so100 tutorial eval \
  --warmup-time-s 5 \
  --episode-time-s 40 \
  --reset-time-s 10 \
  --num-episodes 10 \
  -p outputs/train/act_so100_test/checkpoints/last/pretrained_model
```

As you can see, it's almost the same command you previously used to record your training dataset. Two things changed:
1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/eval_so100_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_so100_test`).
2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_so100_test`).

## More

See this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for more in-depth guidance on controlling real robots with LeRobot.

If you have any questions or need help, please reach out on Discord in the channel [`#so100-arm`](https://discord.com/channels/1216765309076115607/1237741463832363039).

@@ -1,280 +0,0 @@
This tutorial explains how to use [Moss v1](https://github.com/jess-moss/moss-robot-arms) with LeRobot.

## Source the parts

Follow this [README](https://github.com/jess-moss/moss-robot-arms). It contains the bill of materials, with links to source the parts, as well as the instructions to 3D print the parts, and advice if it's your first time printing or if you don't already own a 3D printer.

**Important**: Before assembling, you will first need to configure your motors. To this end, we provide a handy script, so let's first install LeRobot. After configuration, we will also guide you through assembly.

## Install LeRobot

On your computer:

1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
```bash
mkdir -p ~/miniconda3
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
rm ~/miniconda3/miniconda.sh
~/miniconda3/bin/conda init bash
```

2. Restart your shell or run `source ~/.bashrc`

3. Create and activate a fresh conda environment for lerobot:
```bash
conda create -y -n lerobot python=3.10 && conda activate lerobot
```

4. Clone LeRobot:
```bash
git clone https://github.com/huggingface/lerobot.git ~/lerobot
```

5. Install LeRobot with dependencies for the feetech motors:
```bash
cd ~/lerobot && pip install -e ".[feetech]"
```

For Linux only (not Mac), install extra dependencies for recording datasets:
```bash
conda install -y -c conda-forge ffmpeg
pip uninstall -y opencv-python
conda install -y -c conda-forge "opencv>=4.10.0"
```

## Configure the motors

Follow step 1 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic), which illustrates the use of our scripts below.

**Find USB ports associated with your arms**
To find the correct ports for each arm, run the utility script twice:
```bash
python lerobot/scripts/find_motors_bus_port.py
```

Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.

[...Disconnect leader arm and press Enter...]

The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
Reconnect the usb cable.
```

Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.

[...Disconnect follower arm and press Enter...]

The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
Reconnect the usb cable.
```

Troubleshooting: On Linux, you might need to give access to the USB ports by running:
```bash
sudo chmod 666 /dev/ttyACM0
sudo chmod 666 /dev/ttyACM1
```

**Configure your motors**
Plug in your first motor and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate:
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand feetech \
  --model sts3215 \
  --baudrate 1000000 \
  --ID 1
```

Note: These motors are currently limited. They can only take values between 0 and 4096, which corresponds to one full turn; they can't turn beyond that. 2048 is the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) or +2048 steps (180 degrees clockwise) and reach the end of the range in either direction. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift of up to ±2048 steps (±180 degrees).

Then unplug your motor, plug in the second motor, and set its ID to 2:
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand feetech \
  --model sts3215 \
  --baudrate 1000000 \
  --ID 2
```

Repeat the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm.

**Remove the gears of the 6 leader motors**
Follow step 2 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). You need to remove the gears from the motors of the leader arm. As a result, you will only use the position encoding of the motors and reduce friction, making the leader arm easier to operate.

**Add motor horns to the motors**
Follow step 3 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). For Moss v1, you need to align the holes on the motor horn to the motor spline at approximately the 3, 6, 9 and 12 o'clock positions.
Try to avoid rotating the motor while doing so, to keep the position 2048 set during configuration. This is especially tricky for the leader motors, which are more sensitive without the gears, but it's okay if they end up a bit rotated.

## Assemble the arms

Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can do the second arm in under 1 hour.

## Calibrate

Next, you'll need to calibrate your Moss v1 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one Moss v1 robot to work on another.

**Auto-calibration of follower arm**
Follow step 5 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic), which illustrates the auto-calibration of the follower arm. You first need to manually move your follower arm to this initial position:

<div style="text-align:center;">
    <img src="../media/moss/follower_initial.webp?raw=true" alt="Moss v1 follower arm initial position" title="Moss v1 follower arm initial position" width="50%">
</div>

Then run this script to launch auto-calibration:
```bash
python lerobot/scripts/control_robot.py calibrate \
  --robot-path lerobot/configs/robot/moss.yaml \
  --robot-overrides '~cameras' --arms main_follower
```

Note: You can't run auto-calibration for the leader arm, since we removed the gears. Thus, you will need to manually calibrate the leader arm. It's less precise than auto-calibration, but precision is not as critical for the leader arm.

**Manual calibration of leader arm**
Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic), which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:

| 1. Zero position | 2. Rotated position | 3. Rest position |
|---|---|---|
| <img src="../media/moss/leader_zero.webp?raw=true" alt="Moss v1 leader arm zero position" title="Moss v1 leader arm zero position" style="width:100%;"> | <img src="../media/moss/leader_rotated.webp?raw=true" alt="Moss v1 leader arm rotated position" title="Moss v1 leader arm rotated position" style="width:100%;"> | <img src="../media/moss/leader_rest.webp?raw=true" alt="Moss v1 leader arm rest position" title="Moss v1 leader arm rest position" style="width:100%;"> |

Run this script to launch manual calibration:
```bash
python lerobot/scripts/control_robot.py calibrate \
  --robot-path lerobot/configs/robot/moss.yaml \
  --robot-overrides '~cameras' --arms main_leader
```

## Teleoperate

**Simple teleop**
You are now ready to teleoperate your robot! Run this simple script (it won't connect to or display the cameras):
```bash
python lerobot/scripts/control_robot.py teleoperate \
  --robot-path lerobot/configs/robot/moss.yaml \
  --robot-overrides '~cameras' \
  --display-cameras 0
```

**Teleop with displaying cameras**
Follow [this guide to set up your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while teleoperating, by running the following command. This is useful to prepare your setup before recording your first dataset.
```bash
python lerobot/scripts/control_robot.py teleoperate \
  --robot-path lerobot/configs/robot/moss.yaml
```

## Record a dataset

Once you're familiar with teleoperation, you can record your first dataset with Moss v1.

If you want to use the Hugging Face hub features for uploading your dataset and you haven't done so before, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```

Store your Hugging Face repository name in a variable to run these commands:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```

Record 2 episodes and upload your dataset to the hub:
```bash
python lerobot/scripts/control_robot.py record \
  --robot-path lerobot/configs/robot/moss.yaml \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/moss_test \
  --tags moss tutorial \
  --warmup-time-s 5 \
  --episode-time-s 40 \
  --reset-time-s 10 \
  --num-episodes 2 \
  --push-to-hub 1
```

## Visualize a dataset

If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id, given by:
```bash
echo ${HF_USER}/moss_test
```

If you didn't upload it (`--push-to-hub 0`), you can also visualize it locally with:
```bash
python lerobot/scripts/visualize_dataset_html.py \
  --root data \
  --repo-id ${HF_USER}/moss_test
```

## Replay an episode

Now try to replay the first episode on your robot:
```bash
DATA_DIR=data python lerobot/scripts/control_robot.py replay \
  --robot-path lerobot/configs/robot/moss.yaml \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/moss_test \
  --episode 0
```

## Train a policy

To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
DATA_DIR=data python lerobot/scripts/train.py \
  dataset_repo_id=${HF_USER}/moss_test \
  policy=act_moss_real \
  env=moss_real \
  hydra.run.dir=outputs/train/act_moss_test \
  hydra.job.name=act_moss_test \
  device=cuda \
  wandb.enable=true
```

Let's explain it:
1. We provided the dataset as an argument with `dataset_repo_id=${HF_USER}/moss_test`.
2. We provided the policy with `policy=act_moss_real`. This loads configurations from [`lerobot/configs/policy/act_moss_real.yaml`](../lerobot/configs/policy/act_moss_real.yaml). Importantly, this policy uses 2 cameras as input: `laptop` and `phone`.
3. We provided an environment as an argument with `env=moss_real`. This loads configurations from [`lerobot/configs/env/moss_real.yaml`](../lerobot/configs/env/moss_real.yaml).
4. We provided `device=cuda` since we are training on an Nvidia GPU, but you can also use `device=mps` if you are on a Mac with Apple silicon, or `device=cpu` otherwise.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.
6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you don't provide `DATA_DIR`, your dataset will be downloaded from the Hugging Face hub to your cache folder `$HOME/.cache/huggingface`. In future versions of `lerobot`, both directories will be kept in sync.

Training should take several hours. You will find checkpoints in `outputs/train/act_moss_test/checkpoints`.

## Evaluate your policy

You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
python lerobot/scripts/control_robot.py record \
  --robot-path lerobot/configs/robot/moss.yaml \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/eval_act_moss_test \
  --tags moss tutorial eval \
  --warmup-time-s 5 \
  --episode-time-s 40 \
  --reset-time-s 10 \
  --num-episodes 10 \
  -p outputs/train/act_moss_test/checkpoints/last/pretrained_model
```

As you can see, it's almost the same command you previously used to record your training dataset. Two things changed:
1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/eval_moss_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_moss_test`).
2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_moss_test`).

## More

See this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for more in-depth guidance on controlling real robots with LeRobot.

If you have any questions or need help, please reach out on Discord in the channel [`#moss-arm`](https://discord.com/channels/1216765309076115607/1275374638985252925).

@@ -170,36 +170,6 @@ python lerobot/scripts/train.py --config-dir outputs/train/my_experiment/checkpo

Note that you may still use the regular syntax for config parameter overrides (e.g. by adding `training.offline_steps=200000`).

## Typical logs and metrics

When you start the training process, you will first see your full configuration being printed in the terminal. You can check it to make sure that you configured it correctly and that your config is not overridden by other files. The final configuration will also be saved with the checkpoint.

After that, you will see a training log like this one:

```
INFO 2024-08-14 13:35:12 ts/train.py:192 step:0 smpl:64 ep:1 epch:0.00 loss:1.112 grdn:15.387 lr:2.0e-07 updt_s:1.738 data_s:4.774
```

or an evaluation log like:

```
INFO 2024-08-14 13:38:45 ts/train.py:226 step:100 smpl:6K ep:52 epch:0.25 ∑rwrd:20.693 success:0.0% eval_s:120.266
```

These logs will also be saved in wandb if `wandb.enable` is set to `true`. Here are the meanings of some abbreviations:

- `smpl`: number of samples seen during training.
- `ep`: number of episodes seen during training. An episode contains multiple samples in a complete manipulation task.
- `epch`: number of times all unique samples have been seen (epochs).
- `grdn`: gradient norm.
- `∑rwrd`: sum of rewards within each evaluation episode, averaged over episodes.
- `success`: average success rate of eval episodes. Reward and success are usually different, except in the sparse-reward setting where reward=1 only when the task is completed successfully.
- `eval_s`: time to evaluate the policy in the environment, in seconds.
- `updt_s`: time to update the network parameters, in seconds.
- `data_s`: time to load a batch of data, in seconds.
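As an editorial aside (not part of the original tutorial), the `key:value` fields in these log lines are easy to pull out programmatically, which can be handy for quick offline analysis:

```python
line = (
    "INFO 2024-08-14 13:35:12 ts/train.py:192 step:0 smpl:64 ep:1 "
    "epch:0.00 loss:1.112 grdn:15.387 lr:2.0e-07 updt_s:1.738 data_s:4.774"
)

metrics = {}
for token in line.split():
    key, sep, value = token.partition(":")
    # Keeps "loss:1.112" style tokens; skips the timestamp and file:line prefix.
    if sep and key.isidentifier():
        metrics[key] = value

print(float(metrics["loss"]), float(metrics["data_s"]))  # 1.112 4.774
```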

Some metrics are useful for initial performance profiling. For example, if you find via the `nvidia-smi` command that GPU utilization is low and `data_s` is sometimes too high, you may need to modify the batch size or the number of dataloading workers to accelerate dataloading. We also recommend the [pytorch profiler](https://github.com/huggingface/lerobot?tab=readme-ov-file#improve-your-code-with-profiling) for detailed performance probing.

---

So far we've seen how to train Diffusion Policy for PushT and ACT for ALOHA. Now, what if we want to train ACT for PushT? Well, there are aspects of the ACT configuration that are specific to the ALOHA environments, and these happen to be incompatible with PushT. Therefore, trying to run the following will almost certainly raise an exception of some sort (e.g. a feature dimension mismatch):

@@ -11,7 +11,7 @@ This tutorial will guide you through the process of setting up and training a ne

By following these steps, you'll be able to replicate tasks like picking up a Lego block and placing it in a bin with a high success rate, as demonstrated in [this video](https://x.com/RemiCadene/status/1814680760592572934).

This tutorial is specifically made for the affordable [Koch v1.1](https://github.com/jess-moss/koch-v1-1) robot, but it contains additional information to be easily adapted to various types of robots like the [Aloha bimanual robot](https://aloha-2.github.io) by changing some configurations. The Koch v1.1 consists of a leader arm and a follower arm, each with 6 motors. It can work with one or several cameras to record the scene, which serve as visual sensors for the robot.
Although this tutorial is general and can be easily adapted to various types of robots by changing the configuration, it is specifically based on the [Koch v1.1](https://github.com/jess-moss/koch-v1-1), an affordable robot. The Koch v1.1 consists of a leader arm and a follower arm, each with 6 motors. It can work with one or several cameras to record the scene, which serve as visual sensors for the robot.

During the data collection phase, you will control the follower arm by moving the leader arm. This process is known as "teleoperation." This technique is used to collect robot trajectories. Afterward, you'll train a neural network to imitate these trajectories and deploy the network to enable your robot to operate autonomously.

@@ -29,23 +29,16 @@ For a visual walkthrough of the assembly process, you can refer to [this video t

## 2. Configure motors, calibrate arms, teleoperate your Koch v1.1

First, install the additional dependencies required for robots built with dynamixel motors like Koch v1.1 by running one of the following commands.
First, install the additional dependencies required for Koch v1.1 by running one of the following commands.

Using `pip`:
```bash
pip install -e ".[dynamixel]"
pip install -e ".[koch]"
```

Or using `poetry`:
```bash
poetry install --sync --extras "dynamixel"
```

/!\ For Linux only, ffmpeg and opencv require a conda install for now. Run this exact sequence of commands:
```bash
conda install -c conda-forge ffmpeg
pip uninstall opencv-python
conda install -c conda-forge "opencv>=4.10.0"
poetry install --sync --extras "koch"
```

You are now ready to plug the 5V power supply into the motor bus of the leader arm (the smaller one), since all its motors only require 5V.
@@ -78,12 +71,12 @@ To begin, create two instances of the [`DynamixelMotorsBus`](../lerobot/common/

To find the correct ports for each arm, run the utility script twice:
```bash
python lerobot/scripts/find_motors_bus_port.py
python lerobot/common/robot_devices/motors/dynamixel.py
```

Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
```
Finding all available ports for the MotorBus.
Finding all available ports for the DynamixelMotorsBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.

@@ -95,7 +88,7 @@ Reconnect the usb cable.

Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
```
Finding all available ports for the MotorBus.
Finding all available ports for the DynamixelMotorsBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.

@@ -154,7 +147,6 @@ follower_arm = DynamixelMotorsBus(
Next, update the port values in the YAML configuration file for the Koch robot at [`lerobot/configs/robot/koch.yaml`](../lerobot/configs/robot/koch.yaml) with the ports you've identified:
```yaml
[...]
robot_type: koch
leader_arms:
  main:
    _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
@@ -182,8 +174,6 @@ follower_arms:
[...]
```

Don't forget to set `robot_type: aloha` if you follow this tutorial with the [Aloha bimanual robot](https://aloha-2.github.io) instead of Koch v1.1.

This configuration file is used to instantiate your robot across all scripts. We'll cover how this works later on.

**Connect and Configure your Motors**
@@ -308,37 +298,32 @@ Alternatively, you can unplug the power cord, which will automatically disable t

*/!\ Warning*: These motors tend to overheat, especially under torque or if left plugged in for too long. Unplug after use.

### b. Teleoperate your Koch v1.1 with ManipulatorRobot
### b. Teleoperate your Koch v1.1 with KochRobot

**Instantiate the ManipulatorRobot**
**Instantiate the KochRobot**

Before you can teleoperate your robot, you need to instantiate the [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) using the previously defined `leader_arm` and `follower_arm`.
Before you can teleoperate your robot, you need to instantiate the [`KochRobot`](../lerobot/common/robot_devices/robots/koch.py) using the previously defined `leader_arm` and `follower_arm`.

For the Koch v1.1 robot, we only have one leader, so we refer to it as `"main"` and define it as `leader_arms={"main": leader_arm}`. We do the same for the follower arm. For other robots (like the Aloha), which may have two pairs of leader and follower arms, you would define them like this: `leader_arms={"left": left_leader_arm, "right": right_leader_arm}`. Same thing for the follower arms.
For the Koch robot, we only have one leader, so we refer to it as `"main"` and define it as `leader_arms={"main": leader_arm}`. We do the same for the follower arm. For other robots (like the Aloha), which may have two pairs of leader and follower arms, you would define them like this: `leader_arms={"left": left_leader_arm, "right": right_leader_arm}`. Same thing for the follower arms.

You also need to provide a path to a calibration directory, such as `calibration_dir=".cache/calibration/koch"`. More on this in the next section.
You also need to provide a path to a calibration file, such as `calibration_path=".cache/calibration/koch.pkl"`. More on this in the next section.

Run the following code to instantiate your manipulator robot:
Run the following code to instantiate your Koch robot:
```python
from lerobot.common.robot_devices.robots.manipulator import ManipulatorRobot
from lerobot.common.robot_devices.robots.koch import KochRobot

robot = ManipulatorRobot(
    robot_type="koch",
robot = KochRobot(
    leader_arms={"main": leader_arm},
    follower_arms={"main": follower_arm},
    calibration_dir=".cache/calibration/koch",
    calibration_path=".cache/calibration/koch.pkl",
)
```

The `robot_type="koch"` argument is used to set the associated settings and calibration process. For instance, we activate the torque of the gripper of the leader Koch v1.1 arm and position it at a 40-degree angle to use it as a trigger.
**Calibrate and Connect the KochRobot**

For the [Aloha bimanual robot](https://aloha-2.github.io), we would use `robot_type="aloha"` to set different settings, such as a secondary ID for shadow joints (shoulder, elbow). Specific to Aloha, LeRobot comes with default calibration files stored in `.cache/calibration/aloha_default`. Assuming the motors have been properly assembled, no manual calibration step is expected. If you need to run manual calibration, simply update `calibration_dir` to `.cache/calibration/aloha`.
Next, you'll need to calibrate your robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one Koch robot to work on another.

**Calibrate and Connect the ManipulatorRobot**

Next, you'll need to calibrate your Koch robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one Koch robot to work on another.

When you connect your robot for the first time, the [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) will detect if the calibration file is missing and trigger the calibration procedure. During this process, you will be guided to move each arm to three different positions.
When you connect your robot for the first time, the [`KochRobot`](../lerobot/common/robot_devices/robots/koch.py) will detect if the calibration file is missing and trigger the calibration procedure. During this process, you will be guided to move each arm to three different positions.
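(An editorial usage sketch, assuming `robot` was instantiated as shown above: connecting is a single call, and the calibration prompts shown below appear on the first connection.)

```python
# The first call triggers the calibration procedure when the calibration
# file is missing; subsequent connections load the saved file instead.
robot.connect()
```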
Here are the positions you'll move the follower arm to:
|
||||
|
||||
@@ -369,26 +354,27 @@ The output will look like this:
|
||||
```
|
||||
Connecting main follower arm
|
||||
Connecting main leader arm
|
||||
Missing calibration file '.cache/calibration/koch.pkl'. Starting calibration procedure.
|
||||
|
||||
Running calibration of main follower...
|
||||
|
||||
Missing calibration file '.cache/calibration/koch/main_follower.json'
|
||||
Running calibration of koch main follower...
|
||||
Move arm to zero position
|
||||
[...]
|
||||
Move arm to rotated position
|
||||
[...]
|
||||
Move arm to rest position
|
||||
[...]
|
||||
Calibration is done! Saving calibration file '.cache/calibration/koch/main_follower.json'
|
||||
|
||||
Missing calibration file '.cache/calibration/koch/main_leader.json'
|
||||
Running calibration of koch main leader...
|
||||
Running calibration of main leader...
|
||||
|
||||
Move arm to zero position
|
||||
[...]
|
||||
Move arm to rotated position
|
||||
[...]
|
||||
Move arm to rest position
|
||||
[...]
|
||||
Calibration is done! Saving calibration file '.cache/calibration/koch/main_leader.json'
|
||||
|
||||
Calibration is done! Saving calibration file '.cache/calibration/koch.pkl'
|
||||
```
|
||||
|
||||
*Verifying Calibration*
|
||||
*Using `teleop_step` for Teleoperation*

Alternatively, you can teleoperate the robot using the `teleop_step` method from [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py).

Run this code to teleoperate:
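The snippet itself is elided in this diff view. A minimal sketch of such a loop, assuming placeholder `seconds` and `frequency` values and an already-connected `robot`, could look like this:

```python
import tqdm

seconds = 30      # how long to teleoperate (placeholder)
frequency = 200   # control loop rate in Hz (placeholder)

for _ in tqdm.tqdm(range(seconds * frequency)):
    # Each call reads the leader arm positions and commands
    # the follower arm to mirror them.
    robot.teleop_step()
```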
Additionally, you can set up your robot to work with your cameras.

Modify the following Python code with the appropriate camera names and configurations:
```python
robot = ManipulatorRobot(
    leader_arms={"main": leader_arm},
    follower_arms={"main": follower_arm},
    calibration_dir=".cache/calibration/koch",
    cameras={
        "laptop": OpenCVCamera(0, fps=30, width=640, height=480),
        "phone": OpenCVCamera(1, fps=30, width=640, height=480),
    },
)
```
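Once connected, camera frames are returned alongside the robot state. As a hedged sketch (the key names follow the `observation.images.<camera>` pattern used elsewhere in LeRobot; shapes depend on your setup):

```python
robot.connect()
# With record_data=True, teleop_step returns observation and action dicts.
observation, action = robot.teleop_step(record_data=True)
print(observation["observation.images.laptop"].shape)  # e.g. (480, 640, 3)
```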
Before trying `record`, if you want to push your dataset to the hub, make sure you've logged in using a write-access token:
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```
Also, store your Hugging Face repository name in a variable (e.g. `cadene` or `lerobot`). For instance, run this to use your Hugging Face user name as repository:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```
## 5. Evaluate your policy

Now that you have a policy checkpoint, you can easily control your robot with it using methods from [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) and the policy.

Try this code for running inference for 60 seconds at 30 fps:
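The code block is elided in this diff view. Here is a minimal sketch of such an inference loop using the `ManipulatorRobot` methods named above; the tensor pre- and post-processing done in the full tutorial is omitted for brevity:

```python
import time

inference_time_s = 60
fps = 30

for _ in range(inference_time_s * fps):
    start_time = time.perf_counter()

    # Read the follower arm state and camera frames.
    observation = robot.capture_observation()

    # Query the policy (conversion of the observation to batched tensors,
    # device placement, and unbatching of the action are omitted here).
    action = policy.select_action(observation)

    # Order the robot to move.
    robot.send_action(action)

    # Sleep for the remainder of the control period to hold 30 fps.
    dt_s = time.perf_counter() - start_time
    time.sleep(max(0.0, 1 / fps - dt_s))
```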
---
This tutorial explains how to use [Stretch 3](https://hello-robot.com/stretch-3-product) with LeRobot.

## Setup

Familiarize yourself with Stretch by following its [tutorials](https://docs.hello-robot.com/0.3/getting_started/hello_robot/) (recommended).

To use LeRobot on Stretch, 3 options are available:
- [tethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup)
- [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup)
- ssh directly into Stretch (you will first need to install and configure openssh-server on Stretch using one of the two setups above)

## Install LeRobot

On Stretch's CLI, follow these steps:

1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
```bash
mkdir -p ~/miniconda3
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
rm ~/miniconda3/miniconda.sh
~/miniconda3/bin/conda init bash
```

2. Comment out these lines in `~/.profile` (they can mess up paths used by conda, and `~/.local/bin` should already be in your PATH anyway):
```
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/.local/bin" ] ; then
    PATH="$HOME/.local/bin:$PATH"
fi
```

3. Restart shell or `source ~/.bashrc`

4. Create and activate a fresh conda environment for lerobot
```bash
conda create -y -n lerobot python=3.10 && conda activate lerobot
```

5. Clone LeRobot:
```bash
git clone https://github.com/huggingface/lerobot.git ~/lerobot
```

6. Install LeRobot with stretch dependencies:
```bash
cd ~/lerobot && pip install -e ".[stretch]"
```

> **Note:** If you get this message, you can ignore it: `ERROR: pip's dependency resolver does not currently take into account all the packages that are installed.`

For Linux only (not Mac), install extra dependencies for recording datasets:
```bash
conda install -y -c conda-forge ffmpeg
pip uninstall -y opencv-python
conda install -y -c conda-forge "opencv>=4.10.0"
```

7. Run a [system check](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#system-check) to make sure your robot is ready:
```bash
stretch_system_check.py
```

> **Note:** You may need to free the "robot process" after booting Stretch by running `stretch_free_robot_process.py`. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#turning-off-gamepad-teleoperation).

You should get something like this:
```bash
For use with S T R E T C H (R) from Hello Robot Inc.
---------------------------------------------------------------------

Model = Stretch 3
Tool = DexWrist 3 w/ Gripper
Serial Number = stretch-se3-3054

---- Checking Hardware ----
[Pass] Comms are ready
[Pass] Actuators are ready
[Warn] Sensors not ready (IMU AZ = -10.19 out of range -10.1 to -9.5)
[Pass] Battery voltage is 13.6 V

---- Checking Software ----
[Pass] Ubuntu 22.04 is ready
[Pass] All APT pkgs are setup correctly
[Pass] Firmware is up-to-date
[Pass] Python pkgs are up-to-date
[Pass] ROS2 Humble is ready
```

## Teleoperate, record a dataset and run a policy

**Calibrate (Optional)**
Before operating Stretch, you need to [home](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#homing) it first. Be mindful about giving Stretch some space, as this procedure will move the robot's arm and gripper. Now run this command:
```bash
python lerobot/scripts/control_robot.py calibrate \
  --robot-path lerobot/configs/robot/stretch.yaml
```
This is equivalent to running `stretch_robot_home.py`.

> **Note:** If you run any of the LeRobot scripts below and Stretch is not properly homed, it will automatically home/calibrate first.

**Teleoperate**
Before trying teleoperation, you need to activate the gamepad controller by pressing the middle button. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/hello_robot/#gamepad-teleoperation).

Now try out teleoperation (see the documentation above to learn about the gamepad controls):
```bash
python lerobot/scripts/control_robot.py teleoperate \
  --robot-path lerobot/configs/robot/stretch.yaml
```
This is essentially the same as running `stretch_gamepad_teleop.py`.

**Record a dataset**
Once you're familiar with the gamepad controls and after a bit of practice, you can try to record your first dataset with Stretch.

If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```

Store your Hugging Face repository name in a variable to run these commands:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```

Record one episode:
```bash
python lerobot/scripts/control_robot.py record \
  --robot-path lerobot/configs/robot/stretch.yaml \
  --fps 20 \
  --root data \
  --repo-id ${HF_USER}/stretch_test \
  --tags stretch tutorial \
  --warmup-time-s 3 \
  --episode-time-s 40 \
  --reset-time-s 10 \
  --num-episodes 1 \
  --push-to-hub 0
```

> **Note:** If you're using ssh to connect to Stretch and run this script, you won't be able to visualize its camera feeds (though they will still be recording). To see the camera streams, use the [tethered](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup) or [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup).

**Replay an episode**
Now try to replay this episode (make sure the robot's initial position is the same):
```bash
python lerobot/scripts/control_robot.py replay \
  --robot-path lerobot/configs/robot/stretch.yaml \
  --fps 20 \
  --root data \
  --repo-id ${HF_USER}/stretch_test \
  --episode 0
```

Follow the [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) to train a policy on your data and run inference on your robot. You will need to adapt the code for Stretch.

> TODO(rcadene, aliberts): Add already set up environment and policy yaml configuration files

If you need help, please reach out on Discord in the channel `#stretch3-mobile-arm`.
---
This tutorial explains how to use [Aloha and Aloha 2 stationary](https://www.trossenrobotics.com/aloha-stationary) with LeRobot.

## Setup

Follow the [documentation from Trossen Robotics](https://docs.trossenrobotics.com/aloha_docs/getting_started/stationary/hardware_setup.html) for setting up the hardware and plugging the 4 arms and 4 cameras into your computer.

## Install LeRobot

On your computer:

1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
```bash
mkdir -p ~/miniconda3
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
rm ~/miniconda3/miniconda.sh
~/miniconda3/bin/conda init bash
```

2. Restart shell or `source ~/.bashrc`

3. Create and activate a fresh conda environment for lerobot
```bash
conda create -y -n lerobot python=3.10 && conda activate lerobot
```

4. Clone LeRobot:
```bash
git clone https://github.com/huggingface/lerobot.git ~/lerobot
```

5. Install LeRobot with dependencies for the Aloha motors (dynamixel) and cameras (intelrealsense):
```bash
cd ~/lerobot && pip install -e ".[dynamixel, intelrealsense]"
```

For Linux only (not Mac), install extra dependencies for recording datasets:
```bash
conda install -y -c conda-forge ffmpeg
pip uninstall -y opencv-python
conda install -y -c conda-forge "opencv>=4.10.0"
```

## Teleoperate

**/!\ FOR SAFETY, READ THIS /!\**
Teleoperation consists of manually operating the leader arms to move the follower arms. Importantly:
1. Make sure your leader arms are in the same position as the follower arms, so that the follower arms don't move too fast to match the leader arms,
2. Our code assumes that your robot has been assembled following Trossen Robotics instructions. This allows us to skip calibration, as we use the pre-defined calibration files in `.cache/calibration/aloha_default`. If you replace a motor, make sure you follow the exact instructions from Trossen Robotics.

By running the following code, you can start your first **SAFE** teleoperation:
```bash
python lerobot/scripts/control_robot.py teleoperate \
  --robot-path lerobot/configs/robot/aloha.yaml \
  --robot-overrides max_relative_target=5
```

By adding `--robot-overrides max_relative_target=5`, we override the default value for `max_relative_target` defined in `lerobot/configs/robot/aloha.yaml`. It is expected to be `5` to limit the magnitude of the movement for more safety, but the teleoperation won't be smooth. When you feel confident, you can disable this limit by adding `--robot-overrides max_relative_target=null` to the command line:
```bash
python lerobot/scripts/control_robot.py teleoperate \
  --robot-path lerobot/configs/robot/aloha.yaml \
  --robot-overrides max_relative_target=null
```

## Record a dataset

Once you're familiar with teleoperation, you can record your first dataset with Aloha.

If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```

Store your Hugging Face repository name in a variable to run these commands:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```

Record 2 episodes and upload your dataset to the hub:
```bash
python lerobot/scripts/control_robot.py record \
  --robot-path lerobot/configs/robot/aloha.yaml \
  --robot-overrides max_relative_target=null \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/aloha_test \
  --tags aloha tutorial \
  --warmup-time-s 5 \
  --episode-time-s 40 \
  --reset-time-s 10 \
  --num-episodes 2 \
  --push-to-hub 1
```

## Visualize a dataset

If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
```bash
echo ${HF_USER}/aloha_test
```

If you didn't upload it (`--push-to-hub 0`), you can also visualize it locally with:
```bash
python lerobot/scripts/visualize_dataset_html.py \
  --root data \
  --repo-id ${HF_USER}/aloha_test
```

## Replay an episode

**/!\ FOR SAFETY, READ THIS /!\**
Replay consists of automatically replaying the sequence of actions (i.e. goal positions for your motors) recorded in a given dataset episode. Make sure the current initial position of your robot is similar to the one in your episode, so that your follower arms don't move too fast as they go to the first goal positions. For safety, you might want to add `--robot-overrides max_relative_target=5` to your command line as explained above.

Now try to replay the first episode on your robot:
```bash
python lerobot/scripts/control_robot.py replay \
  --robot-path lerobot/configs/robot/aloha.yaml \
  --robot-overrides max_relative_target=null \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/aloha_test \
  --episode 0
```

## Train a policy

To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
DATA_DIR=data python lerobot/scripts/train.py \
  dataset_repo_id=${HF_USER}/aloha_test \
  policy=act_aloha_real \
  env=aloha_real \
  hydra.run.dir=outputs/train/act_aloha_test \
  hydra.job.name=act_aloha_test \
  device=cuda \
  wandb.enable=true
```

Let's explain it:
1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/aloha_test`.
2. We provided the policy with `policy=act_aloha_real`. This loads configurations from [`lerobot/configs/policy/act_aloha_real.yaml`](../lerobot/configs/policy/act_aloha_real.yaml). Importantly, this policy uses 4 cameras as input: `cam_right_wrist`, `cam_left_wrist`, `cam_high`, and `cam_low`.
3. We provided an environment as argument with `env=aloha_real`. This loads configurations from [`lerobot/configs/env/aloha_real.yaml`](../lerobot/configs/env/aloha_real.yaml). Note: this yaml defines 18 dimensions for `state_dim` and `action_dim`, corresponding to 18 motors, not the 14 motors used in previous Aloha work. This is because we include the `shoulder_shadow` and `elbow_shadow` motors for simplicity.
4. We provided `device=cuda` since we are training on an Nvidia GPU.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.
6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you don't provide `DATA_DIR`, your dataset will be downloaded from the Hugging Face hub to your cache folder `$HOME/.cache/huggingface`. In future versions of `lerobot`, both directories will be in sync.

Training should take several hours. You will find checkpoints in `outputs/train/act_aloha_test/checkpoints`.

## Evaluate your policy

You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
python lerobot/scripts/control_robot.py record \
  --robot-path lerobot/configs/robot/aloha.yaml \
  --robot-overrides max_relative_target=null \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/eval_act_aloha_test \
  --tags aloha tutorial eval \
  --warmup-time-s 5 \
  --episode-time-s 40 \
  --reset-time-s 10 \
  --num-episodes 10 \
  --num-image-writer-processes 1 \
  -p outputs/train/act_aloha_test/checkpoints/last/pretrained_model
```

As you can see, it's almost the same command as previously used to record your training dataset. Three things changed:
1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/act_aloha_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_aloha_test`).
2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_aloha_test`).
3. We use `--num-image-writer-processes 1` instead of the default value (`0`). On our computer, using a dedicated process to write images from the 4 cameras to disk allows us to reach a constant 30 fps during inference. Feel free to explore different values for `--num-image-writer-processes`.

## More

Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth explanation.

If you have any questions or need help, please reach out on Discord in the channel `#aloha-arm`.
---
**Bearings**
- https://amzn.eu/d/8Xz7m4C
- https://amzn.eu/d/1xOo8re
- https://amzn.eu/d/9LXO205 (17x)
- https://amzn.eu/d/eKGj9gf (2x)

**Bike Components**
- https://amzn.eu/d/cNiQi0O (1x)

**Accessories**
- https://amzn.eu/d/ipjCq1R (1x)
- https://amzn.eu/d/0ZMzC3G (1x)

**Screws**
- https://amzn.eu/d/dzNhSkJ
- https://amzn.eu/d/41AhVIU
- https://amzn.eu/d/8G91txy
- https://amzn.eu/d/9xu0pLa
- https://amzn.eu/d/c5xaClV
- https://amzn.eu/d/7kudpAo
- https://amzn.eu/d/2BEgJFc
- https://amzn.eu/d/4q9RNby
- https://amzn.eu/d/4RE2lPV
- https://amzn.eu/d/63YU0l1

**Inserts**
- https://amzn.eu/d/7fjOtOC
---
# Using the [SO-100](https://github.com/TheRobotStudio/SO-ARM100) with LeRobot

## Table of Contents

- [A. Source the parts](#a-source-the-parts)
- [B. Install LeRobot](#b-install-lerobot)
- [C. Configure the Motors](#c-configure-the-motors)
- [D. Step-by-Step Assembly Instructions](#d-step-by-step-assembly-instructions)
- [E. Calibrate](#e-calibrate)
- [F. Teleoperate](#f-teleoperate)
- [G. Record a dataset](#g-record-a-dataset)
- [H. Visualize a dataset](#h-visualize-a-dataset)
- [I. Replay an episode](#i-replay-an-episode)
- [J. Train a policy](#j-train-a-policy)
- [K. Evaluate your policy](#k-evaluate-your-policy)
- [L. More Information](#l-more-information)

## A. Source the parts

Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with a link to source the parts, as well as the instructions to 3D print the parts, and advice if it's your first time printing or if you don't own a 3D printer.

Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.

## B. Install LeRobot

> [!TIP]
> We use the Command Prompt (cmd) quite a lot. If you are not comfortable using the cmd or want to brush up on using the command line, you can have a look here: [Command line crash course](https://developer.mozilla.org/en-US/docs/Learn_web_development/Getting_started/Environment_setup/Command_line)

On your computer:

#### 1. [Install Miniconda](https://docs.anaconda.com/miniconda/install/#quick-command-line-install):

#### 2. Restart shell
Copy and paste into your shell: `source ~/.bashrc`, or for Mac: `source ~/.bash_profile`, or `source ~/.zshrc` if you're using zsh.

#### 3. Create and activate a fresh conda environment for lerobot

<details>
<summary><strong>Video install instructions</strong></summary>

<video src="https://github.com/user-attachments/assets/17172d3b-3b64-4b80-9cf1-b2b7c5cbd236"></video>

</details>

```bash
conda create -y -n lerobot python=3.10
```

Then activate your conda environment (do this each time you open a shell to use lerobot!):
```bash
conda activate lerobot
```

#### 4. Clone LeRobot:
```bash
git clone https://github.com/huggingface/lerobot.git ~/lerobot
```

#### 5. Install ffmpeg in your environment:
When using `miniconda`, install `ffmpeg` in your environment:
```bash
conda install ffmpeg -c conda-forge
```

#### 6. Install LeRobot with dependencies for the feetech motors:
```bash
cd ~/lerobot && pip install -e ".[feetech]"
```

Great :hugs:! You are now done installing LeRobot and we can begin assembling the SO100 arms :robot:.
Every time you want to use LeRobot, you can go to the `~/lerobot` folder where we installed it and run one of the commands.

## C. Configure the motors

> [!NOTE]
> Throughout this tutorial you will find videos on how to do the steps; the full video tutorial can be found here: [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I).

### 1. Find the USB ports associated with each arm

Designate one bus servo adapter and 6 motors for your leader arm, and similarly the other bus servo adapter and 6 motors for the follower arm. It's convenient to label them and write on each motor whether it's for the follower `F` or for the leader `L` and its ID from 1 to 6 (F1...F6 and L1...L6).

#### a. Run the script to find port

<details>
<summary><strong>Video finding port</strong></summary>
<video src="https://github.com/user-attachments/assets/4a21a14d-2046-4805-93c4-ee97a30ba33f"></video>
<video src="https://github.com/user-attachments/assets/1cc3aecf-c16d-4ff9-aec7-8c175afbbce2"></video>
</details>

To find the port for each bus servo adapter, run the utility script:
```bash
python lerobot/scripts/find_motors_bus_port.py
```

#### b. Example outputs

Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your MotorsBus and press Enter when done.

[...Disconnect leader arm and press Enter...]

The port of this MotorsBus is /dev/tty.usbmodem575E0031751
Reconnect the usb cable.
```
Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your MotorsBus and press Enter when done.

[...Disconnect follower arm and press Enter...]

The port of this MotorsBus is /dev/tty.usbmodem575E0032081
Reconnect the usb cable.
```

#### c. Troubleshooting
On Linux, you might need to give access to the USB ports by running:
```bash
sudo chmod 666 /dev/ttyACM0
sudo chmod 666 /dev/ttyACM1
```

#### d. Update config file

IMPORTANTLY: Now that you have your ports, update the **port** default values of [`SO100RobotConfig`](../lerobot/common/robot_devices/robots/configs.py). You will find something like:
```python
@RobotConfig.register_subclass("so100")
@dataclass
class So100RobotConfig(ManipulatorRobotConfig):
    calibration_dir: str = ".cache/calibration/so100"
    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
    # the number of motors in your follower arms.
    max_relative_target: int | None = None

    leader_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
                port="/dev/tty.usbmodem58760431091",  # <-- UPDATE HERE
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                },
            ),
        }
    )

    follower_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
                port="/dev/tty.usbmodem585A0076891",  # <-- UPDATE HERE
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                },
            ),
        }
    )
```

### 2. Assembling the Base
Let's begin with assembling the follower arm base.

#### a. Set IDs for all 12 motors

<details>
<summary><strong>Video configuring motor</strong></summary>
<video src="https://github.com/user-attachments/assets/ef9b3317-2e11-4858-b9d3-f0a02fb48ecf"></video>
<video src="https://github.com/user-attachments/assets/f36b5ed5-c803-4ebe-8947-b39278776a0d"></video>
</details>

Plug in your first motor F1 and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate. Replace the text after `--port` with the corresponding follower control board port and run this command in cmd:
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand feetech \
  --model sts3215 \
  --baudrate 1000000 \
  --ID 1
```

> [!NOTE]
> These motors are currently limited. They can take values between 0 and 4096 only, which corresponds to a full turn. They can't turn more than that. 2048 is at the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) and reach the maximum range, or take +2048 steps (180 degrees clockwise) and reach the maximum range. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift of up to ± 2048 steps (± 180 degrees).
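To make the step/degree arithmetic concrete, here is a small sanity check in plain Python (the function and constant names are mine, not part of the LeRobot scripts):

```python
STEPS_PER_TURN = 4096            # these motors resolve one full turn into 4096 steps
STEPS_PER_DEG = STEPS_PER_TURN / 360.0

def steps_to_degrees(steps: int) -> float:
    """Convert a signed step offset from the 2048 midpoint into degrees."""
    return steps / STEPS_PER_DEG

print(steps_to_degrees(2048))    # 180.0  -> clockwise limit
print(steps_to_degrees(-2048))   # -180.0 -> anticlockwise limit
```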
Then unplug your motor, plug in the second motor, and set its ID to 2.
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand feetech \
  --model sts3215 \
  --baudrate 1000000 \
  --ID 2
```

Redo the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm.

#### b. Remove the gears of the 6 leader motors

<details>
<summary><strong>Video removing gears</strong></summary>

<video src="https://github.com/user-attachments/assets/0c95b88c-5b85-413d-ba19-aee2f864f2a7"></video>

</details>

Follow the video for removing gears. You need to remove the gears from the motors of the leader arm. As a result, you will only use the position encoding of the motor, and the reduced friction makes the leader arm easier to operate.

## D. Step-by-Step Assembly Instructions

**Step 1: Clean Parts**
- Remove all support material from the 3D-printed parts.
---

### Additional Guidance

<details>
<summary><strong>Video assembling arms</strong></summary>

<video src="https://github.com/user-attachments/assets/488a39de-0189-4461-9de3-05b015f90cca"></video>

</details>

**Note:**
This video provides visual guidance for assembling the arms, but it doesn't specify when or how to do the wiring. Inserting the cables beforehand is much easier than doing it afterward. The first arm may take a bit more than 1 hour to assemble, but once you get used to it, you can assemble the second arm in under 1 hour.

---

### First Motor

**Step 2: Insert Wires**
- Insert two wires into the first motor.

<img src="../media/tutorial/img1.jpg" style="height:300px;">

**Step 3: Install in Base**
- Place the first motor into the base.

<img src="../media/tutorial/img2.jpg" style="height:300px;">

**Step 4: Secure Motor**
- Fasten the motor with 4 screws: two from the bottom and two from the top.

**Step 5: Attach Motor Holder**
- Slide over the first motor holder and fasten it using two screws (one on each side).

<img src="../media/tutorial/img4.jpg" style="height:300px;">

**Step 6: Attach Motor Horns**
- Install both motor horns, securing the top horn with a screw. Try not to move the motor position when attaching the motor horn, especially for the leader arms, where we removed the gears.

<img src="../media/tutorial/img5.jpg" style="height:300px;">
<details>
<summary><strong>Video adding motor horn</strong></summary>
<video src="https://github.com/user-attachments/assets/ef3391a4-ad05-4100-b2bd-1699bf86c969"></video>
</details>

**Step 7: Attach Shoulder Part**
- Route one wire to the back of the robot and the other to the left, i.e. toward you in the photo (see photo).
- Attach the shoulder part.

<img src="../media/tutorial/img6.jpg" style="height:300px;">

**Step 8: Secure Shoulder**
- Tighten the shoulder part with 4 screws on top and 4 on the bottom
*(access the bottom holes by turning the shoulder).*

---

### Second Motor Assembly

**Step 9: Install Motor 2**
- Slide the second motor in from the top and link the wire from motor 1 to motor 2.

<img src="../media/tutorial/img8.jpg" style="height:300px;">

**Step 10: Attach Shoulder Holder**
- Add the shoulder motor holder.
- Ensure the wire from motor 1 to motor 2 goes behind the holder while the other wire is routed upward (see photo).
- This part can be tight to assemble; you can use a workbench as in the image or a similar setup to push the part around the motor.

<div style="display: flex;">
  <img src="../media/tutorial/img9.jpg" style="height:250px;">
  <img src="../media/tutorial/img10.jpg" style="height:250px;">
  <img src="../media/tutorial/img12.jpg" style="height:250px;">
</div>

**Step 11: Secure Motor 2**
- Fasten the second motor with 4 screws.

**Step 12: Attach Motor Horn**
- Attach both motor horns to motor 2, again using the horn screw.

**Step 13: Attach Base**
- Install the base attachment using 2 screws.

<img src="../media/tutorial/img11.jpg" style="height:300px;">

**Step 14: Attach Upper Arm**
- Attach the upper arm with 4 screws on each side.

<img src="../media/tutorial/img13.jpg" style="height:300px;">

---

### Third Motor Assembly

**Step 15: Install Motor 3**
- Route the motor cable from motor 2 through the cable holder to motor 3, then secure motor 3 with 4 screws.

**Step 16: Attach Motor Horn**
- Attach both motor horns to motor 3 and again secure one with a horn screw.

<img src="../media/tutorial/img14.jpg" style="height:300px;">

**Step 17: Attach Forearm**
- Connect the forearm to motor 3 using 4 screws on each side.

<img src="../media/tutorial/img15.jpg" style="height:300px;">

---

### Fourth Motor Assembly

**Step 18: Install Motor 4**
- Slide in motor 4, attach the cable from motor 3, and secure the cable in its holder with a screw.

<div style="display: flex;">
  <img src="../media/tutorial/img16.jpg" style="height:300px;">
  <img src="../media/tutorial/img19.jpg" style="height:300px;">
</div>

**Step 19: Attach Motor Holder 4**
- Install the fourth motor holder (a tight fit). Ensure one wire is routed upward and the wire from motor 3 is routed downward (see photo).

<img src="../media/tutorial/img17.jpg" style="height:300px;">

**Step 20: Secure Motor 4 & Attach Horn**
- Fasten motor 4 with 4 screws and attach its motor horns, using a horn screw for one of them.

<img src="../media/tutorial/img18.jpg" style="height:300px;">

---

### Wrist Assembly

**Step 21: Install Motor 5**
- Insert motor 5 into the wrist holder and secure it with 2 front screws.

<img src="../media/tutorial/img20.jpg" style="height:300px;">

**Step 22: Attach Wrist**
- Connect the wire from motor 4 to motor 5, and insert the other wire for the gripper now.
- Secure the wrist to motor 4 using 4 screws on both sides.

<img src="../media/tutorial/img22.jpg" style="height:300px;">

**Step 23: Attach Wrist Horn**
- Install only one motor horn on the wrist motor and secure it with a horn screw.

<img src="../media/tutorial/img23.jpg" style="height:300px;">

---

### Follower Configuration

**Step 24: Attach Gripper**
- Attach the gripper to motor 5.

<img src="../media/tutorial/img24.jpg" style="height:300px;">

**Step 25: Install Gripper Motor**
- Insert the gripper motor, connect the motor wire from motor 5 to motor 6, and secure it with 3 screws on each side.

<img src="../media/tutorial/img25.jpg" style="height:300px;">

**Step 26: Attach Gripper Horn & Claw**
- Attach the motor horns and again use a horn screw.
- Install the gripper claw and secure it with 4 screws on both sides.

<img src="../media/tutorial/img26.jpg" style="height:300px;">

**Step 27: Mount Controller**
- Attach the motor controller on the back.

<div style="display: flex;">
  <img src="../media/tutorial/img27.jpg" style="height:300px;">
  <img src="../media/tutorial/img28.jpg" style="height:300px;">
</div>

*Assembly complete – proceed to Leader arm assembly.*

---

### Leader Configuration

For the leader configuration, perform **Steps 1–23**. Make sure that you removed the motor gears from the motors.

**Step 24: Attach Leader Holder**
- Mount the leader holder onto the wrist and secure it with a screw.

<img src="../media/tutorial/img29.jpg" style="height:300px;">

**Step 25: Attach Handle**
- Attach the handle to motor 5 using 4 screws.

<img src="../media/tutorial/img30.jpg" style="height:300px;">

**Step 26: Install Gripper Motor**
- Insert the gripper motor, secure it with 3 screws on each side, attach a motor horn using a horn screw, and connect the motor wire.

<img src="../media/tutorial/img31.jpg" style="height:300px;">

**Step 27: Attach Trigger**
- Attach the follower trigger with 4 screws.

<img src="../media/tutorial/img32.jpg" style="height:300px;">

**Step 28: Mount Controller**
- Attach the motor controller on the back.

<div style="display: flex;">
  <img src="../media/tutorial/img27.jpg" style="height:300px;">
  <img src="../media/tutorial/img28.jpg" style="height:300px;">
</div>

*Assembly complete – proceed to calibration.*
## E. Calibrate

Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one SO-100 robot to work on another.

#### a. Manual calibration of follower arm

> [!IMPORTANT]
> Contrary to step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724), which illustrates the auto calibration, we will actually do manual calibration of the follower for now.

You will need to move the follower arm to these positions sequentially:

| 1. Zero position | 2. Rotated position | 3. Rest position |
| --- | --- | --- |
| <img src="../media/so100/follower_zero.webp?raw=true" alt="SO-100 follower arm zero position" title="SO-100 follower arm zero position" style="width:100%;"> | <img src="../media/so100/follower_rotated.webp?raw=true" alt="SO-100 follower arm rotated position" title="SO-100 follower arm rotated position" style="width:100%;"> | <img src="../media/so100/follower_rest.webp?raw=true" alt="SO-100 follower arm rest position" title="SO-100 follower arm rest position" style="width:100%;"> |

Make sure both arms are connected and run this script to launch manual calibration:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --robot.cameras='{}' \
  --control.type=calibrate \
  --control.arms='["main_follower"]'
```

#### b. Manual calibration of leader arm
Follow step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724), which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:

| 1. Zero position | 2. Rotated position | 3. Rest position |
| --- | --- | --- |
| <img src="../media/so100/leader_zero.webp?raw=true" alt="SO-100 leader arm zero position" title="SO-100 leader arm zero position" style="width:100%;"> | <img src="../media/so100/leader_rotated.webp?raw=true" alt="SO-100 leader arm rotated position" title="SO-100 leader arm rotated position" style="width:100%;"> | <img src="../media/so100/leader_rest.webp?raw=true" alt="SO-100 leader arm rest position" title="SO-100 leader arm rest position" style="width:100%;"> |

Run this script to launch manual calibration:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --robot.cameras='{}' \
  --control.type=calibrate \
  --control.arms='["main_leader"]'
```

## F. Teleoperate

**Simple teleop**
Then you are ready to teleoperate your robot! Run this simple script (it won't connect to or display the cameras):
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --robot.cameras='{}' \
  --control.type=teleoperate
```

#### a. Teleop while displaying cameras
Follow [this guide to set up your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset.

> **NOTE:** To visualize the data, enable `--control.display_data=true`. This streams the data using `rerun`.

```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --control.type=teleoperate
```

## G. Record a dataset

Once you're familiar with teleoperation, you can record your first dataset with SO-100.

If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```

Store your Hugging Face repository name in a variable to run these commands:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```

Record 2 episodes and upload your dataset to the hub:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --control.type=record \
  --control.fps=30 \
  --control.single_task="Grasp a lego block and put it in the bin." \
  --control.repo_id=${HF_USER}/so100_test \
  --control.tags='["so100","tutorial"]' \
  --control.warmup_time_s=5 \
  --control.episode_time_s=30 \
  --control.reset_time_s=30 \
  --control.num_episodes=2 \
  --control.push_to_hub=true
```

Note: You can resume recording by adding `--control.resume=true`.

## H. Visualize a dataset

If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
```bash
echo ${HF_USER}/so100_test
```

If you didn't upload it (`--control.push_to_hub=false`), you can also visualize it locally (the visualization tool opens a window in your browser at `http://127.0.0.1:9090`):
```bash
python lerobot/scripts/visualize_dataset_html.py \
  --repo-id ${HF_USER}/so100_test \
  --local-files-only 1
```

## I. Replay an episode

Now try to replay the first episode on your robot:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --control.type=replay \
  --control.fps=30 \
  --control.repo_id=${HF_USER}/so100_test \
  --control.episode=0
```

## J. Train a policy

To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
python lerobot/scripts/train.py \
  --dataset.repo_id=${HF_USER}/so100_test \
  --policy.type=act \
  --output_dir=outputs/train/act_so100_test \
  --job_name=act_so100_test \
  --policy.device=cuda \
  --wandb.enable=true
```

Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so100_test`.
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
3. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.

Training should take several hours. You will find checkpoints in `outputs/train/act_so100_test/checkpoints`.

To resume training from a checkpoint, below is an example command to resume from the `last` checkpoint of the `act_so100_test` policy:
```bash
python lerobot/scripts/train.py \
  --config_path=outputs/train/act_so100_test/checkpoints/last/pretrained_model/train_config.json \
  --resume=true
```

## K. Evaluate your policy

You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --control.type=record \
  --control.fps=30 \
  --control.single_task="Grasp a lego block and put it in the bin." \
  --control.repo_id=${HF_USER}/eval_act_so100_test \
  --control.tags='["tutorial"]' \
  --control.warmup_time_s=5 \
  --control.episode_time_s=30 \
  --control.reset_time_s=30 \
  --control.num_episodes=10 \
  --control.push_to_hub=true \
  --control.policy.path=outputs/train/act_so100_test/checkpoints/last/pretrained_model
```

As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/act_so100_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so100_test`).
2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so100_test`).

## L. More Information

Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth tutorial on controlling real robots with LeRobot.

> [!TIP]
> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb) in the channel [`#so100-arm`](https://discord.com/channels/1216765309076115607/1237741463832363039).
---
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus


def degps_to_raw(degps: float) -> int:
    """Convert a signed speed in degrees/second to the motor's raw 16-bit format."""
    steps_per_deg = 4096.0 / 360.0
    speed_in_steps = abs(degps) * steps_per_deg
    speed_int = int(round(speed_in_steps))
    if speed_int > 0x7FFF:
        speed_int = 0x7FFF  # clamp to the 15-bit magnitude field
    if degps < 0:
        return speed_int | 0x8000  # bit 15 encodes the direction
    else:
        return speed_int & 0x7FFF


def raw_to_degps(raw_speed: int) -> float:
    """Inverse of degps_to_raw: decode a raw 16-bit speed into degrees/second."""
    steps_per_deg = 4096.0 / 360.0
    magnitude = raw_speed & 0x7FFF
    degps = magnitude / steps_per_deg
    if raw_speed & 0x8000:
        degps = -degps
    return degps


def main():
    # Instantiate the bus for a single motor on port /dev/ttyACM0.
    arm_bus = FeetechMotorsBus(
        port="/dev/ttyACM0",
        motors={"wrist_pitch": [1, "scs0009"]},
        protocol_version=1,
        group_sync_read=False,  # using individual read calls
    )
    arm_bus.connect()
    # Read the current raw motor position.
    # Note that "Present_Position" is in raw units.
    current_raw = arm_bus.read("Present_Position", ["wrist_pitch"])[0]
    print("Current raw position:", current_raw)
    # Command a new goal position (raw units).
    arm_bus.write("Goal_Position", 1000)
    arm_bus.disconnect()


if __name__ == "__main__":
    main()
---
import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from collections import deque

# Adjust this to match your actual serial port and baud rate
SERIAL_PORT = '/dev/ttyACM0'  # or COM3 on Windows
BAUD_RATE = 115200

# Set up serial connection
ser = serial.Serial(SERIAL_PORT, BAUD_RATE)

# Buffers for real-time plot
buffer_len = 200
val1_buffer = deque([0]*buffer_len, maxlen=buffer_len)
val2_buffer = deque([0]*buffer_len, maxlen=buffer_len)

# Setup the plot
fig, ax = plt.subplots()
line1, = ax.plot([], [], label='Sensor 0')
line2, = ax.plot([], [], label='Sensor 1')
ax.set_ylim(0, 4096)
ax.set_xlim(0, buffer_len)
ax.legend()

def update(frame):
    while ser.in_waiting:
        line = ser.readline().decode('utf-8').strip()
        parts = line.split()
        if len(parts) >= 2:
            try:
                val1 = int(parts[0])
                val2 = int(parts[1])
                val1_buffer.append(val1)
                val2_buffer.append(val2)
            except ValueError:
                pass  # skip malformed lines

    line1.set_ydata(val1_buffer)
    line1.set_xdata(range(len(val1_buffer)))
    line2.set_ydata(val2_buffer)
    line2.set_xdata(range(len(val2_buffer)))
    return line1, line2

ani = animation.FuncAnimation(fig, update, interval=50)
plt.show()
---
import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from collections import deque

# Config
SERIAL_PORT = '/dev/ttyACM1'  # Change as needed
BAUD_RATE = 115200
BUFFER_LEN = 200

# Sensor names in order
sensor_names = [
    "wrist_roll",
    "wrist_pitch",
    "wrist_yaw",
    "elbow_flex",
    "shoulder_roll",
    "shoulder_yaw",
    "shoulder_pitch"
]

# Initialize buffers
sensor_data = {
    name: deque([0]*BUFFER_LEN, maxlen=BUFFER_LEN)
    for name in sensor_names
}

# Setup plot
fig, axes = plt.subplots(len(sensor_names), 1, figsize=(8, 12), sharex=True)
fig.tight_layout(pad=3.0)

lines = {}
for i, name in enumerate(sensor_names):
    axes[i].set_title(name)
    axes[i].set_xlim(0, BUFFER_LEN)
    axes[i].set_ylim(0, 4096)
    line, = axes[i].plot([], [], label=name)
    axes[i].legend()
    lines[name] = line

# Connect to serial
ser = serial.Serial(SERIAL_PORT, BAUD_RATE)

# Update function
def update(frame):
    while ser.in_waiting:
        line = ser.readline().decode().strip()
        parts = line.split()
        if len(parts) != 7:
            continue
        try:
            values = list(map(int, parts))
        except ValueError:
            continue
        for i, name in enumerate(sensor_names):
            sensor_data[name].append(values[i])
    for name in sensor_names:
        x = range(len(sensor_data[name]))
        lines[name].set_data(x, sensor_data[name])
    return lines.values()

# Animate
ani = animation.FuncAnimation(fig, update, interval=50, blit=False)
plt.show()
---
import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from collections import deque

# Adjust this to match your actual serial port and baud rate
SERIAL_PORT = '/dev/ttyACM0'  # or COM3 on Windows
BAUD_RATE = 115200

# Set up serial connection
ser = serial.Serial(SERIAL_PORT, BAUD_RATE)

# How many data points to keep in the scrolling buffer
buffer_len = 200

# -------------------------------------------------------------------
# 1) Sensor buffers for existing sensors + new wrist_pitch, wrist_yaw
# -------------------------------------------------------------------
sensor_buffers = {
    'wrist_roll': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len),
        'val2': deque([0]*buffer_len, maxlen=buffer_len)
    },
    'elbow_pitch': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len),
        'val2': deque([0]*buffer_len, maxlen=buffer_len)
    },
    'shoulder_pitch': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len),
        'val2': deque([0]*buffer_len, maxlen=buffer_len)
    },
    'shoulder_yaw': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len),
        'val2': deque([0]*buffer_len, maxlen=buffer_len)
    },
    'shoulder_roll': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len),
        'val2': deque([0]*buffer_len, maxlen=buffer_len)
    },
    # --- New single-valued sensors ---
    'wrist_pitch': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len)  # Only one line
    },
    'wrist_yaw': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len)  # Only one line
    },
}

# -------------------------------------------------------------------
# 2) Figure with 7 subplots (was 5). We keep the original 5 + 2 new.
# -------------------------------------------------------------------
fig, axes = plt.subplots(7, 1, figsize=(8, 14), sharex=True)
fig.tight_layout(pad=3.0)

# We'll store line references in a dict so we can update them in update().
lines = {}

# -------------------------------------------------------------------
# 3) Define each subplot, including new ones at the end.
# -------------------------------------------------------------------
subplot_info = [
    ('wrist_roll', 'Wrist Roll (2,3)', axes[0]),
    ('elbow_pitch', 'Elbow Pitch (0,1)', axes[1]),
    ('shoulder_pitch', 'Shoulder Pitch (10,11)', axes[2]),
    ('shoulder_yaw', 'Shoulder Yaw (12,13)', axes[3]),
    ('shoulder_roll', 'Shoulder Roll (14,15)', axes[4]),
    ('wrist_pitch', 'Wrist Pitch (0)', axes[5]),  # new
    ('wrist_yaw', 'Wrist Yaw (1)', axes[6]),      # new
]

# Set up each subplot
for (sensor_name, label, ax) in subplot_info:
    ax.set_title(label)
    ax.set_xlim(0, buffer_len)
    ax.set_ylim(0, 4096)  # adjust if needed

    # For existing sensors, plot 2 lines (val1, val2)
    # For the new single-line sensors, plot just 1 line
    if sensor_name in ['wrist_pitch', 'wrist_yaw']:
        # Single-valued
        line, = ax.plot([], [], label=f"{sensor_name}")
        lines[sensor_name] = line
    else:
        # Pair of values
        line1, = ax.plot([], [], label=f"{sensor_name} - val1")
        line2, = ax.plot([], [], label=f"{sensor_name} - val2")
        lines[sensor_name] = [line1, line2]

    ax.legend()

def update(frame):
    # Read all available lines from the serial buffer
    while ser.in_waiting:
        raw_line = ser.readline().decode('utf-8').strip()
        parts = raw_line.split()

        # We expect at least 7 values per line (the full 16-value format,
        # when all sensors are present, also passes this check)
        if len(parts) < 7:
            continue

        try:
            values = list(map(int, parts))
        except ValueError:
            # If there's a parsing error, skip this line
            continue

        # Extract the relevant values and append to the correct buffer.
        # Note: val1 and val2 currently mirror the same channel for each
        # paired sensor, so both plotted lines overlap.
        sensor_buffers['elbow_pitch']['val1'].append(values[13])
        sensor_buffers['elbow_pitch']['val2'].append(values[13])

        sensor_buffers['wrist_roll']['val1'].append(values[3])
        sensor_buffers['wrist_roll']['val2'].append(values[3])

        sensor_buffers['shoulder_pitch']['val1'].append(values[14])
        sensor_buffers['shoulder_pitch']['val2'].append(values[14])

        sensor_buffers['shoulder_yaw']['val1'].append(values[8])
        sensor_buffers['shoulder_yaw']['val2'].append(values[8])

        sensor_buffers['shoulder_roll']['val1'].append(values[10])
        sensor_buffers['shoulder_roll']['val2'].append(values[10])

        # -------------------------------------------------------------------
        # 4) New code: also read wrist_yaw (index 0) and wrist_pitch (index 1)
        # -------------------------------------------------------------------
        sensor_buffers['wrist_yaw']['val1'].append(values[0])
        sensor_buffers['wrist_pitch']['val1'].append(values[1])

    # Update each line's data in each subplot
    all_lines = []
    for (sensor_name, _, ax) in subplot_info:
        # x-values are just the index range of the buffer for val1
        x_data = range(len(sensor_buffers[sensor_name]['val1']))

        # If this sensor has two lines
        if isinstance(lines[sensor_name], list):
            # First line
            lines[sensor_name][0].set_data(
                x_data,
                sensor_buffers[sensor_name]['val1']
            )
            # Second line
            lines[sensor_name][1].set_data(
                x_data,
                sensor_buffers[sensor_name]['val2']
            )
            all_lines.extend(lines[sensor_name])
        else:
            # Single line only (wrist_pitch, wrist_yaw)
            lines[sensor_name].set_data(
                x_data,
                sensor_buffers[sensor_name]['val1']
|
||||
)
|
||||
all_lines.append(lines[sensor_name])
|
||||
|
||||
return all_lines
|
||||
|
||||
# Create the animation
|
||||
ani = animation.FuncAnimation(fig, update, interval=50, blit=False)
|
||||
|
||||
plt.show()
|
||||
@@ -1,186 +0,0 @@
from lerobot.common.robot_devices.motors.feetech import (
    CalibrationMode,
    FeetechMotorsBus,
)
import yaml

class HopeJuniorRobot:
    def __init__(self):
        self.arm_port = "/dev/tty.usbserial-140"
        self.hand_port = "/dev/tty.usbmodem58760436961"
        self.arm_bus = FeetechMotorsBus(
            port=self.arm_port,
            motors={
                # "motor1": (1, "sts3250"),
                # "motor2": (2, "sts3250"),
                # "motor3": (3, "sts3250"),

                # "shoulder_pitch": [1, "sts3215"],
                "shoulder_pitch": [1, "sm8512bl"],
                "shoulder_yaw": [2, "sts3250"],  # TODO: sts3250
                "shoulder_roll": [3, "sts3250"],  # TODO: sts3250
                "elbow_flex": [4, "sts3250"],
                "wrist_roll": [5, "sts3215"],
                "wrist_yaw": [6, "sts3215"],
                "wrist_pitch": [7, "sts3215"],
            },
            protocol_version=0,
        )
        self.hand_bus = FeetechMotorsBus(
            port=self.hand_port,
            motors={
                # Thumb
                "thumb_basel_rotation": [1, "scs0009"],
                "thumb_mcp": [3, "scs0009"],
                "thumb_pip": [4, "scs0009"],
                "thumb_dip": [13, "scs0009"],

                # Index
                "index_thumb_side": [5, "scs0009"],
                "index_pinky_side": [6, "scs0009"],
                "index_flexor": [16, "scs0009"],

                # Middle
                "middle_thumb_side": [8, "scs0009"],
                "middle_pinky_side": [9, "scs0009"],
                "middle_flexor": [2, "scs0009"],

                # Ring
                "ring_thumb_side": [11, "scs0009"],
                "ring_pinky_side": [12, "scs0009"],
                "ring_flexor": [7, "scs0009"],

                # Pinky
                "pinky_thumb_side": [14, "scs0009"],
                "pinky_pinky_side": [15, "scs0009"],
                "pinky_flexor": [10, "scs0009"],
            },
            protocol_version=1,
            group_sync_read=False,
        )

        self.arm_calib_dict = self.get_arm_calibration()
        self.hand_calib_dict = self.get_hand_calibration()

    def apply_arm_config(self, config_file):
        with open(config_file, "r") as file:
            config = yaml.safe_load(file)
        for param, value in config.get("robot", {}).get("arm_bus", {}).items():
            self.arm_bus.write(param, value)

    def apply_hand_config(self, config_file):
        with open(config_file, "r") as file:
            config = yaml.safe_load(file)
        # Write hand parameters to the hand bus (the original wrote to arm_bus,
        # which looks like a copy-paste slip).
        for param, value in config.get("robot", {}).get("hand_bus", {}).items():
            self.hand_bus.write(param, value)

    def get_hand_calibration(self):
        homing_offset = [0] * len(self.hand_bus.motor_names)
        drive_mode = [0] * len(self.hand_bus.motor_names)

        start_pos = [
            750,  # thumb_basel_rotation
            100,  # thumb_mcp
            700,  # thumb_pip
            100,  # thumb_dip

            800,  # index_thumb_side
            950,  # index_pinky_side
            0,    # index_flexor

            250,  # middle_thumb_side
            850,  # middle_pinky_side
            0,    # middle_flexor

            850,  # ring_thumb_side
            900,  # ring_pinky_side
            0,    # ring_flexor

            0,    # pinky_thumb_side
            950,  # pinky_pinky_side
            0,    # pinky_flexor
        ]

        end_pos = [
            start_pos[0] - 550,  # thumb_basel_rotation
            start_pos[1] + 400,  # thumb_mcp
            start_pos[2] + 300,  # thumb_pip
            start_pos[3] + 200,  # thumb_dip

            start_pos[4] - 700,  # index_thumb_side
            start_pos[5] - 300,  # index_pinky_side
            start_pos[6] + 600,  # index_flexor

            start_pos[7] + 700,  # middle_thumb_side
            start_pos[8] - 400,  # middle_pinky_side
            start_pos[9] + 600,  # middle_flexor

            start_pos[10] - 600,  # ring_thumb_side
            start_pos[11] - 400,  # ring_pinky_side
            start_pos[12] + 600,  # ring_flexor

            start_pos[13] + 400,  # pinky_thumb_side
            start_pos[14] - 450,  # pinky_pinky_side
            start_pos[15] + 600,  # pinky_flexor
        ]

        calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "calib_mode": calib_modes,
            "motor_names": self.hand_bus.motor_names,
        }
        return calib_dict

    def get_arm_calibration(self):
        homing_offset = [0] * len(self.arm_bus.motor_names)
        drive_mode = [0] * len(self.arm_bus.motor_names)

        start_pos = [
            1800,  # shoulder_up
            2800,  # shoulder_forward
            1800,  # shoulder_roll
            1200,  # bend_elbow
            700,   # wrist_roll
            1850,  # wrist_yaw
            1700,  # wrist_pitch
        ]

        end_pos = [
            2800,  # shoulder_up
            3150,  # shoulder_forward
            400,   # shoulder_roll
            2300,  # bend_elbow
            2300,  # wrist_roll
            2150,  # wrist_yaw
            2300,  # wrist_pitch
        ]

        calib_modes = [CalibrationMode.LINEAR.name] * len(self.arm_bus.motor_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "calib_mode": calib_modes,
            "motor_names": self.arm_bus.motor_names,
        }
        return calib_dict

    def connect_arm(self):
        self.arm_bus.connect()

    def connect_hand(self):
        self.hand_bus.connect()
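
A minimal usage sketch for this class; the call sequence mirrors main.py later in this diff, and the config path is the one used there:

# Hypothetical usage of HopeJuniorRobot:
robot = HopeJuniorRobot()
robot.connect_arm()
robot.connect_hand()
robot.apply_arm_config("examples/hopejr/settings/config.yaml")  # YAML shown later in this diff
print(robot.arm_bus.read("Present_Position"))   # raw arm positions
print(robot.hand_bus.read("Present_Position"))  # raw hand positions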
@@ -1,730 +0,0 @@
from lerobot.common.robot_devices.motors.feetech import (
    CalibrationMode,
    FeetechMotorsBus,
)
import serial
import threading
import time
import pickle
import numpy as np
from collections import deque
import os

LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200

class HomonculusArm:
    def __init__(self, serial_port: str = "/dev/ttyACM1", baud_rate: int = 115200):
        self.serial_port = serial_port
        self.baud_rate = baud_rate  # honor the constructor argument
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)

        # Number of past values to keep in memory
        self.buffer_size = 10

        # Initialize a buffer (deque) for each joint
        self.joint_buffer = {
            "wrist_roll": deque(maxlen=self.buffer_size),
            "wrist_pitch": deque(maxlen=self.buffer_size),
            "wrist_yaw": deque(maxlen=self.buffer_size),
            "elbow_flex": deque(maxlen=self.buffer_size),
            "shoulder_roll": deque(maxlen=self.buffer_size),
            "shoulder_yaw": deque(maxlen=self.buffer_size),
            "shoulder_pitch": deque(maxlen=self.buffer_size),
        }

        # Start the reading thread
        self.thread = threading.Thread(target=self.async_read, daemon=True)
        self.thread.start()

        # Last read dictionary
        self.last_d = {
            "wrist_roll": 100,
            "wrist_pitch": 100,
            "wrist_yaw": 100,
            "elbow_flex": 100,
            "shoulder_roll": 100,
            "shoulder_yaw": 100,
            "shoulder_pitch": 100,
        }
        self.calibration = None

        # For adaptive EMA, we store a "previous smoothed" state per joint
        self.adaptive_ema_state = {
            "wrist_roll": None,
            "wrist_pitch": None,
            "wrist_yaw": None,
            "elbow_flex": None,
            "shoulder_roll": None,
            "shoulder_yaw": None,
            "shoulder_pitch": None,
        }

        self.kalman_state = {
            joint: {"x": None, "P": None} for joint in self.joint_buffer.keys()
        }
    @property
    def joint_names(self):
        return list(self.last_d.keys())

    def read(self, motor_names: list[str] | None = None):
        """
        Return the most recent (single) values from self.last_d,
        optionally applying calibration.
        """
        if motor_names is None:
            motor_names = self.joint_names

        # Get raw (last) values
        values = np.array([self.last_d[k] for k in motor_names])
        print(values)

        # Apply calibration if available
        if self.calibration is not None:
            values = self.apply_calibration(values, motor_names)
            print(values)
        return values

    def read_running_average(self, motor_names: list[str] | None = None, linearize=False):
        """
        Return the AVERAGE of the most recent self.buffer_size (or fewer, if not enough
        data) readings for each joint, optionally applying calibration.
        """
        if motor_names is None:
            motor_names = self.joint_names

        # Gather averaged readings from buffers
        smoothed_vals = []
        for name in motor_names:
            buf = self.joint_buffer[name]
            if len(buf) == 0:
                # If no data has been read yet, fall back to last_d
                smoothed_vals.append(self.last_d[name])
            else:
                # Otherwise, average over the existing buffer
                smoothed_vals.append(np.mean(buf))

        smoothed_vals = np.array(smoothed_vals, dtype=np.float32)

        # Apply calibration if available
        if self.calibration is not None:
            if linearize:
                # Experimental arcsin linearization of the elbow reading,
                # wired to the (previously unused) `linearize` flag.
                for i, joint_name in enumerate(motor_names):
                    if joint_name != "elbow_flex":
                        continue
                    # Re-use the same raw min/max from the calibration
                    calib_idx = self.calibration["motor_names"].index(joint_name)
                    min_reading = self.calibration["start_pos"][calib_idx]
                    max_reading = self.calibration["end_pos"][calib_idx]
                    B_value = smoothed_vals[i]
                    try:
                        smoothed_vals[i] = int(
                            min_reading
                            + (max_reading - min_reading)
                            * np.arcsin((B_value - min_reading) / (max_reading - min_reading))
                            / (np.pi / 2)
                        )
                    except ValueError:
                        print("linearization failed", smoothed_vals)
            smoothed_vals = self.apply_calibration(smoothed_vals, motor_names)
        return smoothed_vals
    def read_kalman_filter(
        self,
        Q: float = 1.0,
        R: float = 100.0,
        motor_names: list[str] | None = None,
    ) -> np.ndarray:
        """
        Return a Kalman-filtered reading for each requested joint.

        We store a separate Kalman filter (x, P) per joint. For each new measurement Z:
          1) Predict:
             x_pred = x  (assuming no motion model)
             P_pred = P + Q
          2) Update:
             K = P_pred / (P_pred + R)
             x = x_pred + K * (Z - x_pred)
             P = (1 - K) * P_pred

        :param Q: Process noise. Larger Q means the estimate can change more freely.
        :param R: Measurement noise. Larger R means we trust our sensor less.
        :param motor_names: If not specified, all joints are filtered.
        :return: Kalman-filtered positions as a numpy array.
        """
        if motor_names is None:
            motor_names = self.joint_names

        current_vals = np.array([self.last_d[name] for name in motor_names], dtype=np.float32)
        filtered_vals = np.zeros_like(current_vals)

        for i, name in enumerate(motor_names):
            # Retrieve the filter state for this joint
            x = self.kalman_state[name]["x"]
            P = self.kalman_state[name]["P"]
            Z = current_vals[i]

            # If this is the first reading, initialize
            if x is None or P is None:
                x = Z
                P = 1.0  # or some large initial uncertainty

            # 1) Predict step
            x_pred = x  # no velocity model, so x_pred = x
            P_pred = P + Q

            # 2) Update step
            K = P_pred / (P_pred + R)          # Kalman gain
            x_new = x_pred + K * (Z - x_pred)  # new state estimate
            P_new = (1 - K) * P_pred           # new covariance

            # Save back
            self.kalman_state[name]["x"] = x_new
            self.kalman_state[name]["P"] = P_new

            filtered_vals[i] = x_new

        if self.calibration is not None:
            filtered_vals = self.apply_calibration(filtered_vals, motor_names)

        return filtered_vals
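
    # Worked example (hypothetical numbers): with Q = 1 and R = 100, a first
    # reading Z = 2000 initializes x = 2000, P = 1. On the next reading
    # Z = 2010: P_pred = 1 + 1 = 2, K = 2 / (2 + 100) ≈ 0.0196,
    # x_new ≈ 2000 + 0.0196 * 10 ≈ 2000.2 and P_new ≈ (1 - 0.0196) * 2 ≈ 1.96,
    # so the estimate moves only about 2 % of the way toward a noisy measurement.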
    def async_read(self):
        """
        Continuously read from the serial buffer in its own thread,
        store into `self.last_d` and also append to the rolling buffer (joint_buffer).
        """
        while True:
            if self.serial.in_waiting > 0:
                self.serial.flush()
                vals = self.serial.readline().decode("utf-8").strip()
                vals = vals.split(" ")

                if len(vals) != 7:
                    continue
                try:
                    vals = [int(val) for val in vals]
                except ValueError:
                    # Malformed line (e.g. a torn read); skip it rather than
                    # retrying with an unguarded re-read that could crash the thread.
                    continue
                d = {
                    "wrist_roll": vals[0],
                    "wrist_yaw": vals[1],
                    "wrist_pitch": vals[2],
                    "elbow_flex": vals[3],
                    "shoulder_roll": vals[4],
                    "shoulder_yaw": vals[5],
                    "shoulder_pitch": vals[6],
                }

                # Update the last_d dictionary
                self.last_d = d

                # Also push these new values into the rolling buffers
                for joint_name, joint_val in d.items():
                    self.joint_buffer[joint_name].append(joint_val)

            # Optional: short sleep to avoid busy-loop
            # time.sleep(0.001)
    def run_calibration(self, robot):
        robot.arm_bus.write("Acceleration", 50)
        n_joints = len(self.joint_names)

        max_open_all = np.zeros(n_joints, dtype=np.float32)
        min_open_all = np.zeros(n_joints, dtype=np.float32)
        max_closed_all = np.zeros(n_joints, dtype=np.float32)
        min_closed_all = np.zeros(n_joints, dtype=np.float32)

        for i, jname in enumerate(self.joint_names):
            print(f"\n--- Calibrating joint '{jname}' ---")

            joint_idx = robot.arm_calib_dict["motor_names"].index(jname)
            open_val = robot.arm_calib_dict["start_pos"][joint_idx]
            print(f"Commanding {jname} to OPEN position {open_val}...")
            robot.arm_bus.write("Goal_Position", [open_val], [jname])

            input("Physically verify or adjust the joint. Press Enter when ready to capture...")

            open_pos_list = []
            for _ in range(100):
                all_joints_vals = self.read()  # read entire arm
                open_pos_list.append(all_joints_vals[i])  # store only this joint
                time.sleep(0.01)

            # Convert to numpy and track min/max
            open_array = np.array(open_pos_list, dtype=np.float32)
            max_open_all[i] = open_array.max()
            min_open_all[i] = open_array.min()

            closed_val = robot.arm_calib_dict["end_pos"][joint_idx]
            # Optional tweak (disabled): close the elbow a bit less.
            # if jname == "elbow_flex":
            #     closed_val -= 700
            print(f"Commanding {jname} to CLOSED position {closed_val}...")
            robot.arm_bus.write("Goal_Position", [closed_val], [jname])

            input("Physically verify or adjust the joint. Press Enter when ready to capture...")

            closed_pos_list = []
            for _ in range(100):
                all_joints_vals = self.read()
                closed_pos_list.append(all_joints_vals[i])
                time.sleep(0.01)

            closed_array = np.array(closed_pos_list, dtype=np.float32)
            # Some thresholding for closed positions
            # closed_array[closed_array < 1000] = 60000

            max_closed_all[i] = closed_array.max()
            min_closed_all[i] = closed_array.min()

            robot.arm_bus.write("Goal_Position", [int((closed_val + open_val) / 2)], [jname])

        open_pos = np.maximum(max_open_all, max_closed_all)
        closed_pos = np.minimum(min_open_all, min_closed_all)

        for i, jname in enumerate(self.joint_names):
            if jname not in ["wrist_pitch", "shoulder_pitch"]:
                # Swap open/closed for these joints
                tmp_pos = open_pos[i]
                open_pos[i] = closed_pos[i]
                closed_pos[i] = tmp_pos

        # Debug prints
        print("\nFinal open/closed arrays after any swaps/inversions:")
        print(f"open_pos={open_pos}")
        print(f"closed_pos={closed_pos}")

        homing_offset = [0] * n_joints
        drive_mode = [0] * n_joints
        calib_modes = [CalibrationMode.LINEAR.name] * n_joints

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": open_pos,
            "end_pos": closed_pos,
            "calib_mode": calib_modes,
            "motor_names": self.joint_names,
        }
        file_path = "examples/hopejr/settings/arm_calib.pkl"

        if not os.path.exists(file_path):
            with open(file_path, "wb") as f:
                pickle.dump(calib_dict, f)
            print(f"Dictionary saved to {file_path}")

        self.set_calibration(calib_dict)
    def set_calibration(self, calibration: dict[str, list]):
        self.calibration = calibration

    def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """
        Example calibration that linearly maps [start_pos, end_pos] to [0, 100].
        Extend or modify for your needs.
        """
        if motor_names is None:
            motor_names = self.joint_names

        values = values.astype(np.float32)

        for i, name in enumerate(motor_names):
            calib_idx = self.calibration["motor_names"].index(name)
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
                end_pos = self.calibration["end_pos"][calib_idx]

                # Rescale the present position to [0, 100]
                values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100

                # Check boundaries
                if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
                    # If you want to handle out-of-range differently:
                    # raise JointOutOfRangeError(msg)
                    msg = (
                        f"Wrong motor position range detected for {name}. "
                        f"Value = {values[i]} %, expected within [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}]"
                    )
                    print(msg)

        return values
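
# Worked example for the linear mapping above (hypothetical numbers): with
# start_pos = 1000 and end_pos = 3000, a raw reading of 2000 rescales to
# (2000 - 1000) / (3000 - 1000) * 100 = 50 %. Readings outside
# [LOWER_BOUND_LINEAR, UPPER_BOUND_LINEAR] = [-100, 200] % only print a warning.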
class HomonculusGlove:
    def __init__(self, serial_port: str = "/dev/ttyACM1", baud_rate: int = 115200):
        self.serial_port = serial_port
        self.baud_rate = baud_rate
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)

        # Number of past values to keep in memory
        self.buffer_size = 10

        # Initialize a buffer (deque) for each joint
        self.joint_buffer = {
            "thumb_0": deque(maxlen=self.buffer_size),
            "thumb_1": deque(maxlen=self.buffer_size),
            "thumb_2": deque(maxlen=self.buffer_size),
            "thumb_3": deque(maxlen=self.buffer_size),
            "index_0": deque(maxlen=self.buffer_size),
            "index_1": deque(maxlen=self.buffer_size),
            "index_2": deque(maxlen=self.buffer_size),
            "middle_0": deque(maxlen=self.buffer_size),
            "middle_1": deque(maxlen=self.buffer_size),
            "middle_2": deque(maxlen=self.buffer_size),
            "ring_0": deque(maxlen=self.buffer_size),
            "ring_1": deque(maxlen=self.buffer_size),
            "ring_2": deque(maxlen=self.buffer_size),
            "pinky_0": deque(maxlen=self.buffer_size),
            "pinky_1": deque(maxlen=self.buffer_size),
            "pinky_2": deque(maxlen=self.buffer_size),
            "battery_voltage": deque(maxlen=self.buffer_size),
        }

        # Start the reading thread
        self.thread = threading.Thread(target=self.async_read, daemon=True)
        self.thread.start()

        # Last read dictionary
        self.last_d = {
            "thumb_0": 100,
            "thumb_1": 100,
            "thumb_2": 100,
            "thumb_3": 100,
            "index_0": 100,
            "index_1": 100,
            "index_2": 100,
            "middle_0": 100,
            "middle_1": 100,
            "middle_2": 100,
            "ring_0": 100,
            "ring_1": 100,
            "ring_2": 100,
            "pinky_0": 100,
            "pinky_1": 100,
            "pinky_2": 100,
            "battery_voltage": 100,
        }
        self.calibration = None

    @property
    def joint_names(self):
        return list(self.last_d.keys())
    def read(self, motor_names: list[str] | None = None):
        """
        Return the most recent (single) values from self.last_d,
        optionally applying calibration.
        """
        if motor_names is None:
            motor_names = self.joint_names

        # Get raw (last) values
        values = np.array([self.last_d[k] for k in motor_names])
        print(values)

        # Apply calibration if available
        if self.calibration is not None:
            values = self.apply_calibration(values, motor_names)
            print(values)
        return values

    def read_running_average(self, motor_names: list[str] | None = None, linearize=False):
        """
        Return the AVERAGE of the most recent self.buffer_size (or fewer, if not enough
        data) readings for each joint, optionally applying calibration.
        """
        if motor_names is None:
            motor_names = self.joint_names

        # Gather averaged readings from buffers
        smoothed_vals = []
        for name in motor_names:
            buf = self.joint_buffer[name]
            if len(buf) == 0:
                # If no data has been read yet, fall back to last_d
                smoothed_vals.append(self.last_d[name])
            else:
                # Otherwise, average over the existing buffer
                smoothed_vals.append(np.mean(buf))

        smoothed_vals = np.array(smoothed_vals, dtype=np.float32)

        # Apply calibration if available
        if self.calibration is not None:
            smoothed_vals = self.apply_calibration(smoothed_vals, motor_names)

        return smoothed_vals
    def async_read(self):
        """
        Continuously read from the serial buffer in its own thread,
        store into `self.last_d` and also append to the rolling buffer (joint_buffer).
        """
        while True:
            if self.serial.in_waiting > 0:
                self.serial.flush()
                vals = self.serial.readline().decode("utf-8").strip()
                vals = vals.split(" ")
                if len(vals) != 17:
                    continue
                try:
                    vals = [int(val) for val in vals]
                except ValueError:
                    # Malformed line; skip it so the reader thread survives.
                    continue

                d = {
                    "thumb_0": vals[0],
                    "thumb_1": vals[1],
                    "thumb_2": vals[2],
                    "thumb_3": vals[3],
                    "index_0": vals[4],
                    "index_1": vals[5],
                    "index_2": vals[6],
                    "middle_0": vals[7],
                    "middle_1": vals[8],
                    "middle_2": vals[9],
                    "ring_0": vals[10],
                    "ring_1": vals[11],
                    "ring_2": vals[12],
                    "pinky_0": vals[13],
                    "pinky_1": vals[14],
                    "pinky_2": vals[15],
                    "battery_voltage": vals[16],
                }

                # Update the last_d dictionary
                self.last_d = d

                # Also push these new values into the rolling buffers
                for joint_name, joint_val in d.items():
                    self.joint_buffer[joint_name].append(joint_val)
    def run_calibration(self):
        print("\nMove hand to open position")
        input("Press Enter to continue...")
        open_pos_list = []
        for _ in range(100):
            open_pos = self.read()
            open_pos_list.append(open_pos)
            time.sleep(0.01)
        open_pos = np.array(open_pos_list)
        max_open_pos = open_pos.max(axis=0)
        min_open_pos = open_pos.min(axis=0)

        print(f"{max_open_pos=}")
        print(f"{min_open_pos=}")

        print("\nMove hand to closed position")
        input("Press Enter to continue...")
        closed_pos_list = []
        for _ in range(100):
            closed_pos = self.read()
            closed_pos_list.append(closed_pos)
            time.sleep(0.01)
        closed_pos = np.array(closed_pos_list)
        max_closed_pos = closed_pos.max(axis=0)
        closed_pos[closed_pos < 1000] = 60000
        min_closed_pos = closed_pos.min(axis=0)

        print(f"{max_closed_pos=}")
        print(f"{min_closed_pos=}")

        open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
        closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)

        # INVERSION: swap open/closed for joints whose raw direction is reversed.
        for i, jname in enumerate(self.joint_names):
            if jname in [
                "thumb_0",
                "thumb_3",
                "index_2",
                "middle_2",
                "ring_2",
                "pinky_2",
                "index_0",
            ]:
                tmp_pos = open_pos[i]
                open_pos[i] = closed_pos[i]
                closed_pos[i] = tmp_pos

        print()
        print(f"{open_pos=}")
        print(f"{closed_pos=}")

        homing_offset = [0] * len(self.joint_names)
        drive_mode = [0] * len(self.joint_names)
        calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": open_pos,
            "end_pos": closed_pos,
            "calib_mode": calib_modes,
            "motor_names": self.joint_names,
        }

        file_path = "examples/hopejr/settings/hand_calib.pkl"

        if not os.path.exists(file_path):
            with open(file_path, "wb") as f:
                pickle.dump(calib_dict, f)
            print(f"Dictionary saved to {file_path}")

        # return calib_dict
        self.set_calibration(calib_dict)
    def set_calibration(self, calibration: dict[str, list]):
        self.calibration = calibration

    def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """Convert from unsigned int32 joint position range [0, 2**32[ to the universal
        float32 nominal degree range ]-180.0, 180.0[ with a "zero position" at 0 degrees.

        Note: We say "nominal degree range" since the motors can take values outside this
        range, for instance 190 degrees if the motor rotates more than half a turn from
        the zero position. However, most motors can't rotate more than 180 degrees and
        will stay in this range.

        Joint values are originally in [0, 2**32[ (unsigned int32). Each motor is
        expected to complete a full rotation when given a goal position that is + or -
        its resolution. For instance, feetech xl330-m077 motors have a resolution of
        4096, and at any position in their original range, say 56734, they complete a
        full rotation clockwise by moving to 60830, or anticlockwise by moving to 52638.
        The position in the original range is arbitrary and might vary a lot between
        motors. To harmonize between motors of the same model, different robots, or even
        models of different brands, we propose to work in the centered nominal degree
        range ]-180, 180[.
        """
        if motor_names is None:
            motor_names = self.joint_names

        # Convert from unsigned int32 original range [0, 2**32] to signed float32 range
        values = values.astype(np.float32)

        for i, name in enumerate(motor_names):
            calib_idx = self.calibration["motor_names"].index(name)
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
                end_pos = self.calibration["end_pos"][calib_idx]

                # Rescale the present position to a nominal range [0, 100] %,
                # useful for joints with linear motions like the Aloha gripper
                values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100

                if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
                    if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
                        values[i] = end_pos
                    else:
                        msg = (
                            f"Wrong motor position range detected for {name}. "
                            f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
                            f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
                            f"but present value is {values[i]} %. "
                            "This might be due to a cable connection issue creating an artificial jump in motor values. "
                            "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
                        )
                        print(msg)
                        # raise JointOutOfRangeError(msg)

        return values

    # def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
    #     """Inverse of `apply_calibration`."""
    #     if motor_names is None:
    #         motor_names = self.joint_names
    #
    #     for i, name in enumerate(motor_names):
    #         calib_idx = self.calibration["motor_names"].index(name)
    #         calib_mode = self.calibration["calib_mode"][calib_idx]
    #
    #         if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
    #             start_pos = self.calibration["start_pos"][calib_idx]
    #             end_pos = self.calibration["end_pos"][calib_idx]
    #
    #             # Convert from the nominal linear range of [0, 100] % to
    #             # the actual motor range of values, which can be arbitrary.
    #             values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
    #
    #     values = np.round(values).astype(np.int32)
    #     return values
class EncoderReader:
    def __init__(self, serial_port="/dev/ttyUSB1", baud_rate=115200):
        self.serial_port = serial_port
        self.baud_rate = baud_rate
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)

        # Start a background thread to continuously read from the serial port
        self.thread = threading.Thread(target=self.async_read, daemon=True)
        self.thread.start()

        # Store the latest encoder reading in this dictionary
        self.last_d = {"encoder": 500}

    def async_read(self):
        while True:
            # Read one line from serial
            line = self.serial.readline().decode("utf-8").strip()
            if line:
                try:
                    val = int(line)  # Parse the incoming line as an integer
                    self.last_d["encoder"] = val
                except ValueError:
                    # If we couldn't parse it as an integer, just skip
                    pass

    def read(self):
        """
        Returns the last encoder value that was read.
        """
        return self.last_d["encoder"]

class Tac_Man:
    def __init__(self, serial_port="/dev/ttyUSB1", baud_rate=115200):
        self.serial_port = serial_port
        self.baud_rate = baud_rate
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)

        # Start a background thread to continuously read from the serial port
        self.thread = threading.Thread(target=self.async_read, daemon=True)
        self.thread.start()

        # Store the latest readings in this list
        self.last_d = [0, 0, 0]  # Default values for three readings

    def async_read(self):
        while True:
            # Read one line from serial
            line = self.serial.readline().decode("utf-8").strip()
            if line:
                try:
                    # Parse the incoming line as three comma-separated integers
                    values = [int(val) for val in line.split(",")]
                    if len(values) == 3:  # Ensure we have exactly three values
                        self.last_d = values
                except ValueError:
                    # If parsing fails, skip this line
                    pass

    def read(self):
        """
        Returns the last values that were read, as a list of three integers.
        """
        return self.last_d
@@ -1,111 +0,0 @@
test and test4
installed serial and opencv
after pip install -e .
pip install -e ".[feetech]"

robot.hand_bus.read("Present_Position")
array([ 349,  799, 1000, 1004,  508,  503,  673,  608,  791,  390,  552,
        506,  600,  565,  428,  379], dtype=int32)

robot.hand_bus.write("Goal_Position", [349, 799, 500, 500, 508, 503, 673, 608, 791, 390, 552, 506, 600, 565, 428, 379])

robot.arm_bus.write("Goal_Position", [1825, 2045, 2010, 2035, 1414, 1800, 1615])
robot.arm_bus.read("Present_Position")

robot.arm_bus.write("Goal_Position", [1500], ["elbow_flex"])
robot.arm_bus.write("Goal_Position", [2000], ["wrist_yaw"])

ranges: [600-2300, 1500-2300, 1300-2800, 1000-2500, 600-2800, 400-1700, 1300-2300]
for, in order: shoulder_up, shoulder_forward, shoulder_yaw, elbow_flex, wrist_yaw, wrist_pitch, wrist_roll
(the same mapping as a dict is sketched below)
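
The ranges above as a dict (a sketch; the name/range pairing simply follows the list order):

arm_ranges = {
    "shoulder_up": (600, 2300),
    "shoulder_forward": (1500, 2300),
    "shoulder_yaw": (1300, 2800),
    "elbow_flex": (1000, 2500),
    "wrist_yaw": (600, 2800),
    "wrist_pitch": (400, 1700),
    "wrist_roll": (1300, 2300),
}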

COM18

C:/Users/Lenovo/AppData/Local/Programs/Python/Python310/python.exe c:/Users/Lenovo/Documents/HuggingFace/lerobot/examples/test4.py

the wrist pitch motor is broken

so the wrist motor was broken,
and we didn't know which one it was, because
if the chain has an issue we don't know how to locate which motor is at fault (cables are hard to remove)

to calibrate:

python lerobot/scripts/configure_motor.py \
  --port /dev/ttyACM1 \
  --brand feetech \
  --model sts3250 \
  --baudrate 1000000 \
  --ID 2

python lerobot/scripts/configure_motor.py \
  --port /dev/ttyACM0 \
  --brand feetech \
  --model sm8512bl \
  --baudrate 115200 \
  --ID 1

python lerobot/scripts/configure_motor.py \
  --port /dev/ttyACM1 \
  --brand feetech \
  --model scs0009 \
  --baudrate 1000000 \
  --ID 30

why are the motors beeping?

# interpolate between start and end pos (expanded into a helper below)
robot.arm_bus.write("Goal_Position", [int((i*interp + j*(1-interp))) for i, j in zip(arm_calibration["start_pos"], arm_calibration["end_pos"])])
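
The same interpolation as a small helper (a sketch; interp = 1 gives start_pos, interp = 0 gives end_pos):

def interp_goal(calib, interp):
    # Linear blend between the calibrated start and end positions.
    return [int(s * interp + e * (1 - interp))
            for s, e in zip(calib["start_pos"], calib["end_pos"])]

# e.g. move the arm to the midpoint of its calibrated range:
# robot.arm_bus.write("Goal_Position", interp_goal(arm_calibration, 0.5))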

ctrl+shift+M to look for stuff

set_calibration is useless

move the joints to that position too

/home/nepyope/Desktop/HuggingFace/lerobot/lerobot/common/robot_devices/motors/feetech.py

there's clearly some lag, and it's probably because of an out-of-range issue

# hand_calibration = robot.get_hand_calibration()
# joint = input("Enter joint name: ")
# j1 = f"{joint}_pinky_side"
# j2 = f"{joint}_thumb_side"
# encoder = EncoderReader("/dev/ttyUSB0", 115200)
# start_angle1 = hand_calibration['start_pos'][hand_calibration['motor_names'].index(j1)]
# end_angle1 = hand_calibration['end_pos'][hand_calibration['motor_names'].index(j1)]
# start_angle2 = hand_calibration['start_pos'][hand_calibration['motor_names'].index(j2)]
# end_angle2 = hand_calibration['end_pos'][hand_calibration['motor_names'].index(j2)]
# # start_angle = shoulder_calibration['start_pos'][shoulder_calibration['motor_names'].index(joint)]
# # end_angle = shoulder_calibration['end_pos'][shoulder_calibration['motor_names'].index(joint)]
# while True:
#     angle1 = int(start_angle1 + (end_angle1 - start_angle1) * encoder.read() / 1000)
#     angle2 = int(start_angle2 + (end_angle2 - start_angle2) * encoder.read() / 1000)
#
#     robot.hand_bus.write("Goal_Position", angle1, [j1])
#     robot.hand_bus.write("Goal_Position", angle2, [j2])
#     print(angle1, angle2)
#     time.sleep(0.1)

# print(robot.hand_bus.find_motor_indices())
# exit()

maybe divide the 3.3 V by 2 and use that as a reference

https://jlcpcb.com/partdetail/23831236-OPA340UA_UMW/C22365307

-90 is good for the op amp
@@ -1,52 +0,0 @@
#include <Arduino.h>

// Define multiplexer input pins
#define S0 5
#define S1 6
#define S2 8
#define S3 7
#define SENSOR_INPUT 4

#define SENSOR_COUNT 16

int rawVals[SENSOR_COUNT];

void measureRawValues() {
  for (uint8_t i = 0; i < SENSOR_COUNT; i++) {
    // Select channel i; S0-S2 are driven inverted, S3 directly.
    digitalWrite(S0, (i & 0b1) ^ 0b1);
    digitalWrite(S1, (i >> 1 & 0b1) ^ 0b1);
    digitalWrite(S2, (i >> 2 & 0b1) ^ 0b1);
    digitalWrite(S3, i >> 3 & 0b1);
    delay(1);

    rawVals[i] = analogRead(SENSOR_INPUT);
  }
}

void printRawValues() {
  for (uint8_t i = 0; i < SENSOR_COUNT; i++) {
    Serial.print(rawVals[i]);
    if (i < SENSOR_COUNT - 1) Serial.print(" ");
  }
  Serial.println();
}

void setup() {
  Serial.begin(115200);

  pinMode(S0, OUTPUT);
  pinMode(S1, OUTPUT);
  pinMode(S2, OUTPUT);
  pinMode(S3, OUTPUT);

  digitalWrite(S0, LOW);
  digitalWrite(S1, LOW);
  digitalWrite(S2, LOW);
  digitalWrite(S3, LOW);
}

void loop() {
  measureRawValues();
  printRawValues();
  delay(1);
}
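
// Worked example for the channel select above (hypothetical wiring): for
// i = 5 (0b0101), S0 = (1 ^ 1) = 0, S1 = (0 ^ 1) = 1, S2 = (1 ^ 1) = 0 and
// S3 = 0; bits 0-2 are driven inverted while S3 is driven directly, consistent
// with a 16-channel analog mux such as a CD74HC4067 wired with inverters on
// the three low select lines.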
Before Width: | Height: | Size: 75 KiB
Before Width: | Height: | Size: 3.2 KiB
@@ -1,74 +0,0 @@
import numpy as np
from PIL import Image, ImageSequence

def coalesce_gif(im):
    """
    Attempt to coalesce frames so each one is a full image.
    This handles many (though not all) partial-frame GIFs.
    """
    # Convert mode to RGBA
    im = im.convert("RGBA")

    # Prepare an accumulator the same size as the base frame
    base = Image.new("RGBA", im.size)
    frames = []

    # Go through each frame, compositing partial frames onto the accumulator
    for frame in ImageSequence.Iterator(im):
        base.alpha_composite(frame.convert("RGBA"))
        frames.append(base.copy())
    return frames

def remove_white_make_black(arr, threshold=250):
    """
    For each pixel in arr (H,W,3), if R,G,B >= threshold, set it to black (0,0,0).
    This effectively 'removes' white so it won't affect the sum.
    """
    mask = (arr[..., 0] >= threshold) & \
           (arr[..., 1] >= threshold) & \
           (arr[..., 2] >= threshold)
    arr[mask] = 0  # set to black

def main():
    # Load the animated GIF
    gif = Image.open("input.gif")

    # Coalesce frames so each is full-size
    frames = coalesce_gif(gif)
    if not frames:
        print("No frames found!")
        return

    # Convert first frame to RGB array, initialize sum array
    w, h = frames[0].size
    sum_array = np.zeros((h, w, 3), dtype=np.uint16)  # 16-bit to avoid overflow

    # For each frame:
    for f in frames:
        # Convert to RGB
        rgb = f.convert("RGB")
        arr = np.array(rgb, dtype=np.uint16)  # shape (H, W, 3)

        # Remove near-white by setting it to black
        remove_white_make_black(arr, threshold=250)

        # Add to sum_array, then clamp to 255
        sum_array += arr
        np.clip(sum_array, 0, 255, out=sum_array)

    # Convert sum_array back to 8-bit
    sum_array = sum_array.astype(np.uint8)

    # Finally, any pixel that stayed black is presumably "empty," so we set it to white
    black_mask = (sum_array[..., 0] == 0) & \
                 (sum_array[..., 1] == 0) & \
                 (sum_array[..., 2] == 0)
    sum_array[black_mask] = [255, 255, 255]

    # Create final Pillow image
    final_img = Image.fromarray(sum_array, mode="RGB")
    final_img.save("result.png")
    print("Done! Wrote result.png.")

if __name__ == "__main__":
    main()
@@ -1,31 +0,0 @@
import time

# Import the motor bus (adjust the import path as needed)
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus

def main():
    bus = FeetechMotorsBus(
        port="/dev/ttyACM0",
        motors={
            "leader": [1, "scs0009"],
            "follower": [2, "scs0009"],
        },
        protocol_version=1,
        group_sync_read=False,
    )
    bus.connect()
    print(bus.read("Present_Position", "leader"))
    bus.write("Torque_Enable", 0, ["leader"])    # leader moves freely by hand
    bus.write("Torque_Enable", 1, ["follower"])  # follower holds torque
    while True:
        time.sleep(0.01)
        pos = bus.read("Present_Position", "leader")
        # Ignore readings at the extremes of the scs0009 range (0 / 1023)
        if pos[0] > 1 and pos[0] < 1022:
            bus.write("Goal_Position", pos, ["follower"])

if __name__ == "__main__":
    main()
@@ -1,15 +0,0 @@
robot:
  arm_bus:
    Lock: 0
    Torque_Limit: 1000
    Protection_Current: 500
    Over_Current_Protection_Time: 10
    Max_Torque_Limit: 1000
    Overload_Torque: 40  # Play around with this
    Protection_Time: 1000  # When does it kick in?
    Protective_Torque: 1
    Maximum_Acceleration: 100
    Torque_Enable: 1
    Acceleration: 30
  hand_bus:
    Acceleration: 100
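
These keys are consumed by HopeJuniorRobot.apply_arm_config / apply_hand_config above, which write each parameter to the corresponding bus in file order; a sketch of the equivalent manual calls (assuming a connected HopeJuniorRobot instance named robot, values taken from this file):

# robot.apply_arm_config("examples/hopejr/settings/config.yaml") is roughly:
robot.arm_bus.write("Lock", 0)
robot.arm_bus.write("Torque_Limit", 1000)
# ... one write per arm_bus key above, and for the hand:
robot.hand_bus.write("Acceleration", 100)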
@@ -1,61 +0,0 @@
import time
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus

def main():
    # Instantiate the bus for a single motor on port /dev/ttyACM0.
    arm_bus = FeetechMotorsBus(
        port="/dev/ttyACM0",
        motors={"wrist_pitch": [1, "scs0009"]},
        protocol_version=1,
        group_sync_read=False,  # using individual read calls
    )
    arm_bus.connect()

    # Configure continuous rotation mode.
    arm_bus.write("Min_Angle_Limit", 0)
    arm_bus.write("Max_Angle_Limit", 1024)

    # For model "scs0009", the raw reading runs from 0 to ~1022.
    resolution_max = 1022  # use 1022 as the effective maximum raw value

    # Read initial raw motor position.
    prev_raw = arm_bus.read("Present_Position", ["wrist_pitch"])[0]
    print("Initial raw position:", prev_raw)

    # Command continuous rotation.
    arm_bus.write("Goal_Position", 1024)

    # Initialize loop counter.
    loops_count = 0
    target_effective = 1780
    tolerance = 50  # stop when effective position is within ±50 of target

    while True:
        current_raw = arm_bus.read("Present_Position", ["wrist_pitch"])[0]

        # Detect wrap-around: if the previous reading was near the top (>= 1020)
        # and the current reading is low (< 100), count that as one full loop.
        if prev_raw >= 1020 and current_raw < 100:
            loops_count += 1
            print(f"Wrap detected! loops_count increased to {loops_count}")

        # Compute the effective position, e.g. after one wrap a raw reading of
        # 758 gives 1 * 1022 + 758 = 1780, which is exactly the target below.
        effective_position = loops_count * resolution_max + current_raw
        print(f"Raw position: {current_raw} | loops_count: {loops_count} | Effective position: {effective_position}")

        # Check if the effective position is within tolerance of the target.
        if abs(effective_position - target_effective) <= tolerance:
            # Command the motor to stop by setting the current raw position as goal.
            arm_bus.write("Goal_Position", current_raw)
            print(f"Target reached (effective position: {effective_position}). Stopping motor at raw position {current_raw}.")
            break

        prev_raw = current_raw
        time.sleep(0.01)  # 10 ms delay

    time.sleep(1)
    arm_bus.disconnect()

if __name__ == "__main__":
    main()
@@ -1,226 +0,0 @@
from follower import HopeJuniorRobot
from leader import (
    HomonculusArm,
    HomonculusGlove,
    EncoderReader
)
from visualizer import value_to_color

import itertools
import time
import numpy as np

import pickle
import pygame
import typer

def main(
    calibrate_glove: bool = typer.Option(False, "--calibrate-glove", help="Calibrate the glove"),
    calibrate_exoskeleton: bool = typer.Option(False, "--calibrate-exoskeleton", help="Calibrate the exoskeleton"),
    freeze_fingers: bool = typer.Option(False, "--freeze-fingers", help="Freeze the fingers"),
    freeze_arm: bool = typer.Option(False, "--freeze-arm", help="Freeze the arm"),
    show_loads: bool = typer.Option(False, "--show-loads", help="Show the loads in a GUI"),
):
    robot = HopeJuniorRobot()

    robot.connect_hand()
    robot.connect_arm()
    # Read positions
    print(robot.hand_bus.read("Present_Position"))
    print(robot.arm_bus.read("Present_Position", "shoulder_pitch"))
    print(robot.arm_bus.read("Present_Position", ["shoulder_yaw", "shoulder_roll", "elbow_flex", "wrist_roll", "wrist_yaw", "wrist_pitch"]))
    # robot.arm_bus.write("Goal_Position", robot.arm_calib_dict["start_pos"][0]*1 + robot.arm_calib_dict["end_pos"][0]*0, ["wrist_roll"])
    for i in range(10):
        time.sleep(0.1)
        robot.apply_arm_config('examples/hopejr/settings/config.yaml')

    # Calibrate the arm
    arm_calibration = robot.get_arm_calibration()
    exoskeleton = HomonculusArm(serial_port="/dev/tty.usbmodem1201")

    if calibrate_exoskeleton:
        exoskeleton.run_calibration(robot)

    file_path = "examples/hopejr/settings/arm_calib.pkl"
    with open(file_path, "rb") as f:
        calib_dict = pickle.load(f)
    print("Loaded dictionary:", calib_dict)
    exoskeleton.set_calibration(calib_dict)

    # Calibrate the hand
    hand_calibration = robot.get_hand_calibration()
    glove = HomonculusGlove(serial_port="/dev/tty.usbmodem1101")

    if calibrate_glove:
        glove.run_calibration()

    file_path = "examples/hopejr/settings/hand_calib.pkl"
    with open(file_path, "rb") as f:
        calib_dict = pickle.load(f)
    print("Loaded dictionary:", calib_dict)
    glove.set_calibration(calib_dict)

    robot.hand_bus.set_calibration(hand_calibration)
    robot.arm_bus.set_calibration(arm_calibration)

    # Initialize Pygame
    # pygame.init()

    # # Set up the display
    # screen = pygame.display.set_mode((800, 600))
    # pygame.display.set_caption("Robot Hand Visualization")

    # # Create hand structure with 16 squares and initial values
    # hand_components = []

    # # Add thumb (4 squares in diamond shape)
    # thumb_positions = [
    #     (150, 300), (125, 350),
    #     (175, 350), (150, 400)
    # ]
    # for pos in thumb_positions:
    #     hand_components.append({"pos": pos, "value": 0})

    # # Add fingers (4 fingers with 3 squares each in vertical lines)
    # finger_positions = [
    #     (200, 100),  # Index
    #     (250, 100),  # Middle
    #     (300, 100),  # Ring
    #     (350, 100),  # Pinky
    # ]

    # for x, y in finger_positions:
    #     for i in range(3):
    #         hand_components.append({"pos": (x, y + i * 50), "value": 0})

    for i in itertools.count():
        robot.apply_arm_config('examples/hopejr/settings/config.yaml')
        # robot.arm_bus.write("Acceleration", 50, "shoulder_yaw")
        joint_names = ["shoulder_pitch", "shoulder_yaw", "shoulder_roll", "elbow_flex", "wrist_roll", "wrist_yaw", "wrist_pitch"]
        # only wrist roll:
        # joint_names = ["shoulder_pitch"]
        joint_values = exoskeleton.read(motor_names=joint_names)

        # joint_values = joint_values.round().astype(int)
        joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}

        motor_names = ["shoulder_pitch", "shoulder_yaw", "shoulder_roll", "elbow_flex", "wrist_roll", "wrist_yaw", "wrist_pitch"]
        # remove 50 from shoulder_roll
        motor_values = [joint_dict[name] for name in motor_names]

        motor_values = np.array(motor_values)
        motor_values = np.clip(motor_values, 0, 100)

        print(motor_names, motor_values)
        if not freeze_arm:
            robot.arm_bus.write("Goal_Position", motor_values, motor_names)

        if not freeze_fingers:  # include the hand
            hand_joint_names = []
            hand_joint_names += ["thumb_3", "thumb_2", "thumb_1", "thumb_0"]
            hand_joint_names += ["index_0", "index_1", "index_2"]
            hand_joint_names += ["middle_0", "middle_1", "middle_2"]
            hand_joint_names += ["ring_0", "ring_1", "ring_2"]
            hand_joint_names += ["pinky_0", "pinky_1", "pinky_2"]
            hand_joint_values = glove.read(hand_joint_names)
            hand_joint_values = hand_joint_values.round().astype(int)
            hand_joint_dict = {k: v for k, v in zip(hand_joint_names, hand_joint_values, strict=False)}

            hand_motor_values = []
            hand_motor_names = []

            # Thumb
            hand_motor_names += ["thumb_basel_rotation", "thumb_mcp", "thumb_pip", "thumb_dip"]
            hand_motor_values += [
                hand_joint_dict["thumb_3"],
                hand_joint_dict["thumb_2"],
                hand_joint_dict["thumb_1"],
                hand_joint_dict["thumb_0"],
            ]

            # Index finger (see the worked splay example below)
            index_splay = 0.1
            hand_motor_names += ["index_flexor", "index_pinky_side", "index_thumb_side"]
            hand_motor_values += [
                hand_joint_dict["index_2"],
                (100 - hand_joint_dict["index_0"]) * index_splay + hand_joint_dict["index_1"] * (1 - index_splay),
                hand_joint_dict["index_0"] * index_splay + hand_joint_dict["index_1"] * (1 - index_splay),
            ]
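
            # Worked example (hypothetical numbers): with index_splay = 0.1,
            # index_0 = 20 and index_1 = 60, the two side motors receive
            # pinky_side = (100 - 20) * 0.1 + 60 * 0.9 = 62 and
            # thumb_side = 20 * 0.1 + 60 * 0.9 = 56, so the splay signal pushes
            # the pair apart while both still track the flexion value index_1.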
|
||||
# Middle finger
|
||||
middle_splay = 0.1
|
||||
hand_motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
hand_motor_values += [
|
||||
hand_joint_dict["middle_2"],
|
||||
hand_joint_dict["middle_0"] * middle_splay + hand_joint_dict["middle_1"] * (1 - middle_splay),
|
||||
(100 - hand_joint_dict["middle_0"]) * middle_splay + hand_joint_dict["middle_1"] * (1 - middle_splay),
|
||||
]
|
||||
|
||||
# # Ring finger
|
||||
ring_splay = 0.1
|
||||
hand_motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
hand_motor_values += [
|
||||
hand_joint_dict["ring_2"],
|
||||
(100 - hand_joint_dict["ring_0"]) * ring_splay + hand_joint_dict["ring_1"] * (1 - ring_splay),
|
||||
hand_joint_dict["ring_0"] * ring_splay + hand_joint_dict["ring_1"] * (1 - ring_splay),
|
||||
]
|
||||
|
||||
# # Pinky finger
|
||||
pinky_splay = -.1
|
||||
hand_motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
hand_motor_values += [
|
||||
hand_joint_dict["pinky_2"],
|
||||
hand_joint_dict["pinky_0"] * pinky_splay + hand_joint_dict["pinky_1"] * (1 - pinky_splay),
|
||||
(100 - hand_joint_dict["pinky_0"]) * pinky_splay + hand_joint_dict["pinky_1"] * (1 - pinky_splay),
|
||||
]
|
||||
|
||||
hand_motor_values = np.array(hand_motor_values)
|
||||
hand_motor_values = np.clip(hand_motor_values, 0, 100)
|
||||
robot.hand_bus.write("Acceleration", 255, hand_motor_names)
|
||||
robot.hand_bus.write("Goal_Position", hand_motor_values, hand_motor_names)
|
||||
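# Aside: the two side-tendon targets above all follow one blend rule. Written as
# a standalone helper for clarity (illustrative only, not part of the original loop):
#
#     def blend_splay(abduction, flexion, splay):
#         """Return (pinky_side, thumb_side) tendon targets in the 0-100 range."""
#         pinky_side = (100 - abduction) * splay + flexion * (1 - splay)
#         thumb_side = abduction * splay + flexion * (1 - splay)
#         return pinky_side, thumb_side
#
# With splay = 0.1, abduction = 50 and flexion = 80 both tendons land on 77.0,
# while full abduction (100) splits them to 72.0 and 82.0: the splay factor only
# nudges the two tendons around the shared flexion command.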
# if i % 20 == 0 and i > 100:
#     try:
#         loads = robot.hand_bus.read("Present_Load")
#         for i, comp in enumerate(hand_components):
#             # Wave oscillates between 0 and 2024:
#             # Center (1012) +/- 1012 * sin(...)
#             comp["value"] = loads[i]
#     except:
#         pass

time.sleep(0.01)

# for event in pygame.event.get():
#     if event.type == pygame.QUIT:
#         robot.hand_bus.disconnect()
#         robot.arm_bus.disconnect()
#         exit()
#     # Check for user pressing 'q' to quit
#     if event.type == pygame.KEYDOWN:
#         if event.key == pygame.K_q:
#             robot.hand_bus.disconnect()
#             robot.arm_bus.disconnect()
#             exit()

# # Draw background
# screen.fill((0, 0, 0))  # Black background

# # Draw hand components
# for comp in hand_components:
#     x, y = comp["pos"]
#     color = value_to_color(comp["value"])
#     pygame.draw.rect(screen, color, (x, y, 30, 30))

# pygame.display.flip()


if __name__ == "__main__":
    typer.run(main)
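# Usage note (an assumption, not stated in the original file): with Typer, keyword
# arguments of `main` such as `freeze_arm` and `freeze_fingers` become CLI flags,
# so a run that moves only the arm would look roughly like:
#
#   python examples/hopejr/teleop.py --no-freeze-arm --freeze-fingers
#
# The script path and exact flag spelling depend on the actual signature of `main`,
# which is defined earlier in this file.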
@@ -1,135 +0,0 @@
import serial
import threading
import time

import matplotlib.pyplot as plt
import numpy as np


# Thread function to read from a serial port continuously until stop_event is set.
def read_serial(port, baudrate, stop_event, data_list):
    try:
        ser = serial.Serial(port, baudrate, timeout=1)
    except Exception as e:
        print(f"Error opening {port}: {e}")
        return

    while not stop_event.is_set():
        try:
            line = ser.readline().decode('utf-8').strip()
        except Exception as e:
            print(f"Decode error on {port}: {e}")
            continue

        if line:
            try:
                # Split the line into integer values.
                values = [int(x) for x in line.split()]
                # For ACM1, ignore the extra value if present.
                if len(values) >= 16:
                    if len(values) > 16:
                        values = values[:16]
                    # Save the absolute timestamp with the sensor readings
                    # (time relative to start is computed later).
                    timestamp = time.time()
                    data_list.append((timestamp, values))
            except Exception as e:
                print(f"Error parsing line from {port}: '{line}' -> {e}")
    ser.close()


def main():
    # --- Configuration ---
    # Set your serial port names here (adjust for your system)
    acm0_port = "/dev/ttyACM0"  # Example for Linux (or "COM3" on Windows)
    acm1_port = "/dev/ttyACM1"  # Example for Linux (or "COM4" on Windows)
    baudrate = 115200

    # Data storage for each device:
    data_acm0 = []  # Will hold tuples of (timestamp, [16 sensor values])
    data_acm1 = []

    # Event to signal threads to stop reading.
    stop_event = threading.Event()

    # Create and start reader threads.
    thread_acm0 = threading.Thread(target=read_serial, args=(acm0_port, baudrate, stop_event, data_acm0))
    thread_acm1 = threading.Thread(target=read_serial, args=(acm1_port, baudrate, stop_event, data_acm1))
    thread_acm0.start()
    thread_acm1.start()

    # Record data for 10 seconds.
    record_duration = 10  # seconds
    start_time = time.time()
    time.sleep(record_duration)
    stop_event.set()  # signal threads to stop

    # Wait for both threads to finish.
    thread_acm0.join()
    thread_acm1.join()
    print("Finished recording.")

    # --- Process the Data ---
    # Convert lists of (timestamp, values) to numpy arrays.
    # Compute time relative to the start of the recording.
    times_acm0 = np.array([t - start_time for t, _ in data_acm0])
    sensor_acm0 = np.array([vals for _, vals in data_acm0])  # shape (N0, 16)

    times_acm1 = np.array([t - start_time for t, _ in data_acm1])
    sensor_acm1 = np.array([vals for _, vals in data_acm1])  # shape (N1, 16)

    # --- Plot 1: Overlapping Time Series ---
    plt.figure(figsize=(12, 8))
    # Plot each sensor from ACM0 in red.
    for i in range(16):
        plt.plot(times_acm0, sensor_acm0[:, i], color='red', alpha=0.7,
                 label='ACM0 sensors' if i == 0 else None)
    # Plot each sensor from ACM1 in blue.
    for i in range(16):
        plt.plot(times_acm1, sensor_acm1[:, i], color='blue', alpha=0.7,
                 label='ACM1 sensors' if i == 0 else None)
    plt.xlabel("Time (s)")
    plt.ylabel("Sensor Reading")
    plt.title("Overlapping Sensor Readings (ACM0 in Red, ACM1 in Blue)")
    plt.legend()
    plt.tight_layout()
    plt.savefig("overlapping_sensor_readings.png", dpi=300)
    plt.close()
    print("Saved overlapping_sensor_readings.png")

    # --- Plot 2: Variance of Noise for Each Sensor ---
    # Compute variance (over time) for each sensor channel.
    variance_acm0 = np.var(sensor_acm0, axis=0)
    variance_acm1 = np.var(sensor_acm1, axis=0)
    sensor_numbers = np.arange(1, 17)
    bar_width = 0.35

    plt.figure(figsize=(12, 6))
    plt.bar(sensor_numbers - bar_width / 2, variance_acm0, bar_width, color='red', label='ACM0')
    plt.bar(sensor_numbers + bar_width / 2, variance_acm1, bar_width, color='blue', label='ACM1')
    plt.xlabel("Sensor Number")
    plt.ylabel("Variance")
    plt.title("Noise Variance per Sensor")
    plt.xticks(sensor_numbers)
    plt.legend()
    plt.tight_layout()
    plt.savefig("sensor_variance.png", dpi=300)
    plt.close()
    print("Saved sensor_variance.png")

    # --- Plot 3: Difference Between ACM0 and ACM1 Readings ---
    # Since the two devices may not sample at exactly the same time,
    # we interpolate ACM1's data onto ACM0's time base for each sensor.
    plt.figure(figsize=(12, 8))
    for i in range(16):
        if len(times_acm1) > 1 and len(times_acm0) > 1:
            interp_acm1 = np.interp(times_acm0, times_acm1, sensor_acm1[:, i])
            diff = sensor_acm0[:, i] - interp_acm1
            plt.plot(times_acm0, diff, label=f"Sensor {i + 1}")
    plt.xlabel("Time (s)")
    plt.ylabel("Difference (ACM0 - ACM1)")
    plt.title("Difference in Sensor Readings")
    plt.legend(fontsize='small', ncol=2)
    plt.tight_layout()
    plt.savefig("sensor_differences.png", dpi=300)
    plt.close()
    print("Saved sensor_differences.png")


if __name__ == "__main__":
    main()
Before Width: | Height: | Size: 1.5 MiB
Before Width: | Height: | Size: 1.3 MiB
Before Width: | Height: | Size: 87 KiB
@@ -1,84 +0,0 @@
import time

import matplotlib.pyplot as plt
import numpy as np
import serial

# Import the motor bus (adjust the import path as needed)
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus


def main():
    # -------------------------------
    # Setup the motor bus (ACM0)
    # -------------------------------
    arm_bus = FeetechMotorsBus(
        port="/dev/ttyACM0",
        motors={
            "wrist_pitch": [7, "sts3215"],
        },
        protocol_version=0,
    )
    arm_bus.connect()

    # -------------------------------
    # Setup the serial connection for sensor (ACM1)
    # -------------------------------
    try:
        ser = serial.Serial("/dev/ttyACM1", 115200, timeout=1)
    except Exception as e:
        print(f"Error opening serial port /dev/ttyACM1: {e}")
        return

    # Lists to store the motor positions and sensor values.
    positions = []
    sensor_values = []

    # -------------------------------
    # Loop: move motor and collect sensor data
    # -------------------------------
    # Sweep downward from 2800 in steps of 10; range() stops before 1500,
    # so the last commanded position is 1510.
    for pos in range(2800, 1500, -10):
        # Command the motor to go to position 'pos'
        arm_bus.write("Goal_Position", pos, ["wrist_pitch"])

        # Wait a short period for the motor to move and the sensor to update.
        time.sleep(0.01)

        # Read one line from the sensor device.
        sensor_val = np.nan  # default if reading fails
        try:
            line = ser.readline().decode('utf-8').strip()
            if line:
                # Split the line into parts and convert each part to int.
                parts = line.split()
                # Ensure there are enough values (we expect at least 15 values)
                if len(parts) >= 15:
                    values = [int(x) for x in parts]
                    # Use the 15th value (index 14)
                    sensor_val = values[14]
        except Exception as e:
            print(f"Error parsing sensor data: {e}")

        positions.append(pos)
        sensor_values.append(sensor_val)
        print(f"Motor pos: {pos} | Sensor 15th value: {sensor_val}")

    # Move the motor back to the start position.
    arm_bus.write("Goal_Position", 2800, ["wrist_pitch"])
    # -------------------------------
    # Plot the data: Motor Position vs. Sensor 15th Value
    # -------------------------------
    plt.figure(figsize=(10, 6))
    plt.plot(positions, sensor_values, marker='o', linestyle='-')
    plt.xlabel("Motor Position (ticks)")
    plt.ylabel("Sensor 15th Value")
    plt.title("Motor Position vs Sensor 15th Value")
    plt.grid(True)
    plt.savefig("asd.png", dpi=300)
    plt.close()
    print("Plot saved as asd.png")

    # Close the serial connection.
    ser.close()


if __name__ == "__main__":
    main()
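# Aside: the sweep above works in raw encoder ticks. Assuming the sts3215's
# 4096-ticks-per-revolution resolution (as stated in the calibration docstring
# elsewhere in this diff), a quick conversion for the plot axis could look like
# this (illustrative, not part of the original script):
TICKS_PER_REV = 4096

def ticks_to_degrees(ticks: int) -> float:
    """Map raw Feetech position ticks to degrees (360 deg per 4096 ticks)."""
    return ticks * 360.0 / TICKS_PER_REV

# e.g. the sweep range 2800 -> 1510 ticks corresponds to roughly 246 -> 133
# degrees, about 113 degrees of wrist travel.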
@@ -1,682 +0,0 @@
import threading
import time
from typing import Callable

import cv2
import numpy as np
import serial

# from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp
# from qai_hub_models.models.mediapipe_hand.model import (
#     MediaPipeHand,
# )
# from qai_hub_models.utils.image_processing import (
#     app_to_net_image_inputs,
# )
from lerobot.common.robot_devices.motors.feetech import (
    CalibrationMode,
    FeetechMotorsBus,
)

LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200


class HomonculusGlove:
    def __init__(self):
        self.serial_port = "COM10"
        self.baud_rate = 115200
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
        self.thread = threading.Thread(target=self.async_read)
        self.thread.start()
        self.last_d = {
            "thumb_0": 100,
            "thumb_1": 100,
            "thumb_2": 100,
            "thumb_3": 100,
            "index_0": 100,
            "index_1": 100,
            "index_2": 100,
            "middle_0": 100,
            "middle_1": 100,
            "middle_2": 100,
            "ring_0": 100,
            "ring_1": 100,
            "ring_2": 100,
            "pinky_0": 100,
            "pinky_1": 100,
            "pinky_2": 100,
            "battery_voltage": 100,
        }
        self.calibration = None

    @property
    def joint_names(self):
        return list(self.last_d.keys())

    def read(self, motor_names: list[str] | None = None):
        if motor_names is None:
            motor_names = self.joint_names

        values = np.array([self.last_d[k] for k in motor_names])

        print(motor_names)
        print(values)

        if self.calibration is not None:
            values = self.apply_calibration(values, motor_names)
            print(values)
        return values

    def async_read(self):
        while True:
            if self.serial.in_waiting > 0:
                self.serial.flush()
                vals = self.serial.readline().decode("utf-8").strip()
                vals = vals.split(" ")
                if len(vals) != 17:
                    continue
                vals = [int(val) for val in vals]

                d = {
                    "thumb_0": vals[0],
                    "thumb_1": vals[1],
                    "thumb_2": vals[2],
                    "thumb_3": vals[3],
                    "index_0": vals[4],
                    "index_1": vals[5],
                    "index_2": vals[6],
                    "middle_0": vals[7],
                    "middle_1": vals[8],
                    "middle_2": vals[9],
                    "ring_0": vals[10],
                    "ring_1": vals[11],
                    "ring_2": vals[12],
                    "pinky_0": vals[13],
                    "pinky_1": vals[14],
                    "pinky_2": vals[15],
                    "battery_voltage": vals[16],
                }
                self.last_d = d
                # print(d.values())

    def run_calibration(self):
        print("\nMove arm to open position")
        input("Press Enter to continue...")
        open_pos_list = []
        for _ in range(300):
            open_pos = self.read()
            open_pos_list.append(open_pos)
            time.sleep(0.01)
        open_pos = np.array(open_pos_list)
        max_open_pos = open_pos.max(axis=0)
        min_open_pos = open_pos.min(axis=0)

        print(f"{max_open_pos=}")
        print(f"{min_open_pos=}")

        print("\nMove arm to closed position")
        input("Press Enter to continue...")
        closed_pos_list = []
        for _ in range(300):
            closed_pos = self.read()
            closed_pos_list.append(closed_pos)
            time.sleep(0.01)
        closed_pos = np.array(closed_pos_list)
        max_closed_pos = closed_pos.max(axis=0)
        closed_pos[closed_pos < 1000] = 60000
        min_closed_pos = closed_pos.min(axis=0)

        print(f"{max_closed_pos=}")
        print(f"{min_closed_pos=}")

        open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
        closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)

        # INVERSION: some joints read lower raw values when flexed, so swap their
        # open/closed endpoints to keep the calibration direction consistent.
        for i, jname in enumerate(self.joint_names):
            if jname in ["thumb_0", "thumb_3", "index_2", "middle_2", "ring_2", "pinky_0", "pinky_2"]:
                open_pos[i], closed_pos[i] = closed_pos[i], open_pos[i]

        print()
        print(f"{open_pos=}")
        print(f"{closed_pos=}")

        homing_offset = [0] * len(self.joint_names)
        drive_mode = [0] * len(self.joint_names)
        calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": open_pos,
            "end_pos": closed_pos,
            "calib_mode": calib_modes,
            "motor_names": self.joint_names,
        }
        # return calib_dict
        self.set_calibration(calib_dict)

    def set_calibration(self, calibration: dict[str, list]):
        self.calibration = calibration

    def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """Convert raw sensor values to a universal float32 nominal range.

        For DEGREE mode this is the centered range ]-180.0, 180.0[ with a "zero position" at
        0 degrees; for the LINEAR mode used here it is [0, 100] %.

        Note: We say "nominal range" since motors can take values outside this range, for
        instance 190 degrees if a motor rotates more than half a turn from the zero position.
        However, most motors can't rotate more than 180 degrees and will stay in this range.

        Joint values originally lie in [0, 2**32) (unsigned int32). Each motor is expected to
        complete a full rotation when given a goal position that is + or - its resolution. For
        instance, feetech xl330-m077 motors have a resolution of 4096, so from any position in
        the original range, say 56734, they complete a full clockwise rotation by moving to
        60830, or an anticlockwise one by moving to 52638. The position in the original range
        is arbitrary and might change a lot between motors. To harmonize between motors of the
        same model, different robots, or even models of different brands, we propose to work
        in the centered nominal range.
        """
        if motor_names is None:
            motor_names = self.motor_names

        # Convert from the unsigned int32 original range to a signed float32 range
        values = values.astype(np.float32)

        for i, name in enumerate(motor_names):
            calib_idx = self.calibration["motor_names"].index(name)
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
                end_pos = self.calibration["end_pos"][calib_idx]

                # Rescale the present position to a nominal range [0, 100] %,
                # useful for joints with linear motions like the Aloha gripper
                values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100

                if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
                    if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
                        # Known-noisy channel: clamp to fully closed (100 %) rather than raising.
                        values[i] = 100
                    else:
                        msg = (
                            f"Wrong motor position range detected for {name}. "
                            f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
                            f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
                            f"but present value is {values[i]} %. "
                            "This might be due to a cable connection issue creating an artificial jump in motor values. "
                            "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
                        )
                        print(msg)
                        # raise JointOutOfRangeError(msg)

        return values
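    # Aside: a worked example of the LINEAR rescale above, using the hand-bus
    # calibration numbers defined later in this file (start_pos=100, end_pos=950
    # for a flexor channel): a raw reading of 525 maps to
    #     (525 - 100) / (950 - 100) * 100 = 50.0 %
    # i.e. the joint is halfway through its calibrated travel. Values slightly
    # outside [start_pos, end_pos] are tolerated up to the [-100, 200] % bounds
    # checked above.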
    # def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
    #     """Inverse of `apply_calibration`."""
    #     if motor_names is None:
    #         motor_names = self.motor_names

    #     for i, name in enumerate(motor_names):
    #         calib_idx = self.calibration["motor_names"].index(name)
    #         calib_mode = self.calibration["calib_mode"][calib_idx]

    #         if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
    #             start_pos = self.calibration["start_pos"][calib_idx]
    #             end_pos = self.calibration["end_pos"][calib_idx]

    #             # Convert from the nominal linear range of [0, 100] % to the
    #             # actual motor range of values, which can be arbitrary.
    #             values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos

    #     values = np.round(values).astype(np.int32)
    #     return values


class HopeJuniorRobot:
    def __init__(self):
        self.arm_bus = FeetechMotorsBus(
            port="COM14",
            motors={
                # "motor1": (2, "sts3250"),
                # "motor2": (1, "scs0009"),
                "shoulder_pitch": [1, "sts3250"],
                "shoulder_yaw": [2, "sts3215"],  # TODO: sts3250
                "shoulder_roll": [3, "sts3215"],  # TODO: sts3250
                "elbow_flex": [4, "sts3250"],
                "wrist_roll": [5, "sts3215"],
                "wrist_yaw": [6, "sts3215"],
                "wrist_pitch": [7, "sts3215"],
            },
            protocol_version=0,
        )
        self.hand_bus = FeetechMotorsBus(
            port="COM15",
            motors={
                "thumb_basel_rotation": [30, "scs0009"],
                "thumb_flexor": [27, "scs0009"],
                "thumb_pinky_side": [26, "scs0009"],
                "thumb_thumb_side": [28, "scs0009"],
                "index_flexor": [25, "scs0009"],
                "index_pinky_side": [31, "scs0009"],
                "index_thumb_side": [32, "scs0009"],
                "middle_flexor": [24, "scs0009"],
                "middle_pinky_side": [33, "scs0009"],
                "middle_thumb_side": [34, "scs0009"],
                "ring_flexor": [21, "scs0009"],
                "ring_pinky_side": [36, "scs0009"],
                "ring_thumb_side": [35, "scs0009"],
                "pinky_flexor": [23, "scs0009"],
                "pinky_pinky_side": [38, "scs0009"],
                "pinky_thumb_side": [37, "scs0009"],
            },
            protocol_version=1,
            group_sync_read=False,
        )

    def get_hand_calibration(self):
        homing_offset = [0] * len(self.hand_bus.motor_names)
        drive_mode = [0] * len(self.hand_bus.motor_names)

        start_pos = [
            500,
            900,
            1000,
            0,
            100,
            250,
            750,
            100,
            400,
            150,
            100,
            120,
            980,
            100,
            950,
            750,
        ]

        end_pos = [
            500 - 250,
            900 - 300,
            1000 - 550,
            0 + 550,
            1000,
            250 + 700,
            750 - 700,
            1000,
            400 + 700,
            150 + 700,
            1000,
            120 + 700,
            980 - 700,
            1000,
            950 - 700,
            750 - 700,
        ]

        calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "calib_mode": calib_modes,
            "motor_names": self.hand_bus.motor_names,
        }
        return calib_dict

    def connect(self):
        self.arm_bus.connect()
        self.hand_bus.connect()  # needed: main() below writes to the hand bus


ESCAPE_KEY_ID = 27


def capture_and_display_processed_frames(
    frame_processor: Callable[[np.ndarray], np.ndarray],
    window_display_name: str,
    cap_device: int = 0,
) -> None:
    """
    Capture frames from the given input camera device, run them through
    the frame processor, and display the outputs in a window with the given name.

    The user should press Esc to exit.

    Inputs:
        frame_processor: Callable[[np.ndarray], np.ndarray]
            Processes frames.
            Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte.
        window_display_name: str
            Name of the window used to display frames.
        cap_device: int
            Identifier for the camera to use to capture frames.
    """
    cv2.namedWindow(window_display_name)
    capture = cv2.VideoCapture(cap_device)
    if not capture.isOpened():
        raise ValueError("Unable to open video capture.")

    frame_count = 0
    has_frame, frame = capture.read()
    while has_frame:
        assert isinstance(frame, np.ndarray)

        frame_count = frame_count + 1
        # mirror frame
        frame = np.ascontiguousarray(frame[:, ::-1, ::-1])

        # process & show frame
        processed_frame = frame_processor(frame)
        cv2.imshow(window_display_name, processed_frame[:, :, ::-1])

        has_frame, frame = capture.read()
        key = cv2.waitKey(1)
        if key == ESCAPE_KEY_ID:
            break

    capture.release()


def main():
    robot = HopeJuniorRobot()
    robot.connect()

    # robot.hand_bus.calibration = None

    # breakpoint()
    # print(robot.arm_bus.read("Present_Position"))
    robot.arm_bus.write("Torque_Enable", 1)
    robot.arm_bus.write("Acceleration", 20)
    robot.arm_bus.read("Acceleration")

    calibration = robot.get_hand_calibration()
    robot.hand_bus.write("Goal_Position", calibration["start_pos"])
    # robot.hand_bus.write("Goal_Position", calibration["end_pos"][:4], robot.hand_bus.motor_names[:4])
    robot.hand_bus.set_calibration(calibration)
    lol = 1
    # # print(motors_bus.write("Goal_Position", 500))
    # print(robot.hand_bus.read("Present_Position"))
    # # pos = hand_bus.read("Present_Position")
    # # hand_bus.write("Goal_Position", pos[0]+20, hand_bus.motor_names[0])
    # # hand_bus.write("Goal_Position", pos[i]+delta, hand_bus.motor_names[i])
    # robot.hand_bus.read("Acceleration")
    # robot.hand_bus.write("Acceleration", 10)

    # sleep = 1
    # # robot.hand_bus.write(
    # #     "Goal_Position", [glove.last_d['index_2']-1500, 300, 300], ["index_pinky_side", "index_flexor", "index_thumb_side"]
    # # )
    # # time.sleep(sleep)
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [100, 0, 0], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [200, 100, 600], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
    # )
    # time.sleep(sleep)

    # breakpoint()
    glove = HomonculusGlove()
    glove.run_calibration()
    # while True:
    #     joint_names = ["index_1", "index_2"]
    #     joint_values = glove.read(joint_names)
    #     print(joint_values)

    input()
    while True:  # NOTE: runs forever; the code below this loop is never reached
        joint_names = []
        joint_names += ["thumb_0", "thumb_2", "thumb_3"]
        joint_names += ["index_1", "index_2"]
        joint_names += ["middle_1", "middle_2"]
        joint_names += ["ring_1", "ring_2"]
        joint_names += ["pinky_1", "pinky_2"]
        joint_values = glove.read(joint_names)
        joint_values = joint_values.round().astype(int)
        joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}

        motor_values = []
        motor_names = []
        motor_names += ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"]
        motor_values += [
            joint_dict["thumb_3"],
            joint_dict["thumb_0"],
            joint_dict["thumb_2"],
            joint_dict["thumb_2"],
        ]
        motor_names += ["index_flexor", "index_pinky_side", "index_thumb_side"]
        motor_values += [joint_dict["index_2"], joint_dict["index_1"], joint_dict["index_1"]]
        motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
        motor_values += [joint_dict["middle_2"], joint_dict["middle_1"], joint_dict["middle_1"]]
        motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
        motor_values += [joint_dict["ring_2"], joint_dict["ring_1"], joint_dict["ring_1"]]
        motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
        motor_values += [joint_dict["pinky_2"], joint_dict["pinky_1"], joint_dict["pinky_1"]]

        motor_values = np.array(motor_values)
        motor_values = np.clip(motor_values, 0, 100)

        robot.hand_bus.write("Goal_Position", motor_values, motor_names)
        time.sleep(0.02)
    while True:  # unreachable alternative loop kept from an earlier iteration
        # glove.read() returns an array, so string-indexing its result would fail;
        # read the raw channel values from last_d instead.
        glove_index_flexor = glove.last_d["index_2"] - 1500
        glove_index_subflexor = glove.last_d["index_1"] - 1500
        glove_index_side = glove.last_d["index_0"] - 2100

        vals = [glove_index_flexor, 1000 - glove_index_subflexor, glove_index_subflexor]
        keys = ["index_flexor", "index_pinky_side", "index_thumb_side"]

        glove_middle_flexor = glove.last_d["middle_2"] - 1500
        glove_middle_subflexor = 1000 - (glove.last_d["middle_1"] - 1700)
        vals += [glove_middle_flexor, glove_middle_subflexor, glove_middle_subflexor - 200]
        keys += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]

        glove_ring_flexor = glove.last_d["ring_2"] - 1300
        print(glove_ring_flexor)
        glove_ring_subflexor = glove.last_d["ring_1"] - 1100

        vals += [glove_ring_flexor, 1000 - glove_ring_subflexor, glove_ring_subflexor]
        keys += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]

        glove_pinky_flexor = glove.last_d["pinky_2"] - 1500
        glove_pinky_subflexor = glove.last_d["pinky_1"] - 1300
        vals += [300 + glove_pinky_flexor, max(1000 - glove_pinky_subflexor - 100, 0), glove_pinky_subflexor]
        keys += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]

        robot.hand_bus.write("Goal_Position", vals, keys)
        time.sleep(0.1)

    time.sleep(3)
    def move_arm(loop=10):
        sleep = 1
        for i in range(loop):
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1195])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 2195])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1457, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 2357, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 974, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 2674, 1957, 1695])
            time.sleep(sleep + 2)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 1632, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 1369, 1632, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 1330, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [2381, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1681, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)

    def move_hand(loop=10):
        sleep = 0.5
        for i in range(loop):
            robot.hand_bus.write(
                "Goal_Position",
                [500, 1000, 0, 1000],
                ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [100, 1000, 150], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [200, 100, 700], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
            )
            time.sleep(sleep)

            robot.hand_bus.write(
                "Goal_Position",
                [500, 1000 - 250, 0 + 300, 1000 - 200],
                ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [100 + 450, 100 + 400, 100 + 400],
                ["index_flexor", "index_pinky_side", "index_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [100 + 350, 1000 - 450, 150 + 450],
                ["middle_flexor", "middle_pinky_side", "middle_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [200 + 650, 200 + 350, 0 + 350],
                ["ring_flexor", "ring_pinky_side", "ring_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [200 + 450, 100 + 400, 700 - 400],
                ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"],
            )
            time.sleep(sleep)

    move_hand(3)
    move_arm(1)

    from concurrent.futures import ThreadPoolExecutor

    with ThreadPoolExecutor() as executor:
        executor.submit(move_arm)
        executor.submit(move_hand)

    # initial position
    for i in range(3):
        robot.hand_bus.write(
            "Goal_Position", [500, 1000, 0, 1000, 100, 950, 100, 100, 1000, 150, 200, 200, 0, 200, 100, 700]
        )
        time.sleep(1)

    # for i in range(3):
    #     robot.hand_bus.write("Goal_Position", [500, 1000-150, 0+250, 1000-150,
    #                                            100+300, 950-250, 100+250,
    #                                            100+200, 1000-300, 150+300,
    #                                            200+500, 200+200, 0+200,
    #                                            200+300, 100+200, 700-200])
    #     time.sleep(1)

    # camera = 0
    # score_threshold = 0.95
    # iou_threshold = 0.3

    # app = MediaPipeHandApp(MediaPipeHand.from_pretrained(), score_threshold, iou_threshold)

    # def frame_processor(frame: np.ndarray) -> np.ndarray:
    #     # Input Prep
    #     NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs(frame)

    #     # Run Bounding Box & Keypoint Detector
    #     batched_selected_boxes, batched_selected_keypoints = app._run_box_detector(NCHW_fp32_torch_frames)

    #     # The region of interest (bounding box of 4 (x, y) corners).
    #     # list[torch.Tensor(shape=[Num Boxes, 4, 2])],
    #     # where 2 == (x, y)
    #     #
    #     # A list element will be None if there is no selected ROI.
    #     batched_roi_4corners = app._compute_object_roi(batched_selected_boxes, batched_selected_keypoints)

    #     # selected landmarks for the ROI (if any)
    #     # list[torch.Tensor(shape=[Num Selected Landmarks, K, 3])],
    #     # where K == number of landmark keypoints, 3 == (x, y, confidence)
    #     #
    #     # A list element will be None if there is no ROI.
    #     landmarks_out = app._run_landmark_detector(NHWC_int_numpy_frames, batched_roi_4corners)

    #     app._draw_predictions(
    #         NHWC_int_numpy_frames,
    #         batched_selected_boxes,
    #         batched_selected_keypoints,
    #         batched_roi_4corners,
    #         *landmarks_out,
    #     )

    #     return NHWC_int_numpy_frames[0]

    # capture_and_display_processed_frames(frame_processor, "QAIHM Mediapipe Hand Demo", camera)


if __name__ == "__main__":
    main()
@@ -1,231 +0,0 @@
#robot.arm_bus.write("Acceleration", [20], ["shoulder_pitch"])

####DEBUGGER####################
# joint = input("Enter joint name: ")
# encoder = EncoderReader("/dev/ttyUSB1", 115200)
# start_angle = arm_calibration['start_pos'][arm_calibration['motor_names'].index(joint)]
# end_angle = arm_calibration['end_pos'][arm_calibration['motor_names'].index(joint)]
# # start_angle = shoulder_calibration['start_pos'][shoulder_calibration['motor_names'].index(joint)]
# # end_angle = shoulder_calibration['end_pos'][shoulder_calibration['motor_names'].index(joint)]
# while True:
#     angle = int(start_angle + (end_angle - start_angle) * encoder.read() / 1000)
#     # robot.shoulder_bus.set_bus_baudrate(115200)
#     # robot.shoulder_bus.write("Goal_Position", angle, [joint])
#     robot.shoulder_bus.set_bus_baudrate(1000000)
#     robot.arm_bus.write("Goal_Position", angle, [joint])
#     print(angle)
#     time.sleep(0.1)


#####SAFETY CHECKS EXPLAINED#####
# There are two safety checks built in: one based on load and the other based on current.
# Current: if Present_Current exceeds Protection_Current for longer than
# Over_Current_Protection_Time (expressed in ms), Torque_Enable is set to 0.
# Load: if Present_Load exceeds Max_Torque_Limit * Overload_Torque (expressed as a
# percentage) for longer than Protection_Time (expressed in ms), Max_Torque_Limit is
# set to Protective_Torque.
# Though we can specify Min/Max_Angle_Limit, Max_Temperature_Limit and Min/Max_Voltage_Limit,
# no safety checks are implemented for these values.

#robot.arm_bus.set_calibration(arm_calibration)


#method 1
# robot.arm_bus.write("Overload_Torque", 80)
# robot.arm_bus.write("Protection_Time", 10)
# robot.arm_bus.write("Protective_Torque", 1)
# robot.arm_bus.write("Protection_Current", 200, ["shoulder_pitch"])
# robot.arm_bus.write("Over_Current_Protection_Time", 10)

#method 2
# robot.arm_bus.write("Protection_Current", 500, ["shoulder_pitch"])
# robot.arm_bus.write("Over_Current_Protection_Time", 10)
# robot.arm_bus.write("Max_Torque_Limit", 1000)
# robot.arm_bus.write("Overload_Torque", 40)
# robot.arm_bus.write("Protection_Time", 10)
# robot.arm_bus.write("Protective_Torque", 1)
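# A minimal sketch of wiring up both protections in one place, assuming the
# FeetechMotorsBus register names used above (the values are illustrative, not
# tuned; see "method 1" / "method 2" for the originals):
def configure_protections(bus, motor_names):
    # Current-based: cut torque if current stays above 500 (raw units) for 10 ms.
    bus.write("Protection_Current", 500, motor_names)
    bus.write("Over_Current_Protection_Time", 10, motor_names)
    # Load-based: if load exceeds 40% of Max_Torque_Limit for 10 ms,
    # clamp Max_Torque_Limit down to Protective_Torque.
    bus.write("Max_Torque_Limit", 1000, motor_names)
    bus.write("Overload_Torque", 40, motor_names)
    bus.write("Protection_Time", 10, motor_names)
    bus.write("Protective_Torque", 1, motor_names)

# e.g. configure_protections(robot.arm_bus, ["shoulder_pitch"])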
# robot.shoulder_bus.set_bus_baudrate(115200)
# robot.shoulder_bus.write("Goal_Position", 2500)
# exit()

######LOGGER####################
# from test_torque.log_and_plot_feetech import log_and_plot_params

# params_to_log = [
#     "Protection_Current",
#     "Present_Current",
#     "Max_Torque_Limit",
#     "Protection_Time",
#     "Overload_Torque",
#     "Present_Load",
#     "Present_Position",
# ]

# servo_names = ["shoulder_pitch"]

# servo_data, timestamps = log_and_plot_params(robot.shoulder_bus, params_to_log, servo_names, test_id="shoulder_pitch")
# exit()


#robot.arm_bus.write("Goal_Position", 2300, ["shoulder_pitch"])
# dt = 2
# steps = 4
# max_pos = 1500
# min_pos = 2300
# increment = (max_pos - min_pos) / steps
# # Move from min_pos to max_pos in steps
# for i in range(steps + 1):  # Include the last step
#     current_pos = min_pos + int(i * increment)
#     robot.arm_bus.write("Goal_Position", [current_pos], ["shoulder_pitch"])
#     time.sleep(dt)

# # Move back from max_pos to min_pos in steps
# for i in range(steps + 1):  # Include the last step
#     current_pos = max_pos - int(i * increment)
#     robot.arm_bus.write("Goal_Position", [current_pos], ["shoulder_pitch"])
#     time.sleep(dt)
# demo to show how sending a lot of values makes the robot shake


# # Step increment
#

# # Move from min_pos to max_pos in steps
# for i in range(steps + 1):  # Include the last step
#     current_pos = min_pos + int(i * increment)
#     robot.arm_bus.write("Goal_Position", [current_pos], ["elbow_flex"])
#     time.sleep(dt)

# # Move back from max_pos to min_pos in steps
# for i in range(steps + 1):  # Include the last step
#     current_pos = max_pos - int(i * increment)
#     robot.arm_bus.write("Goal_Position", [current_pos], ["elbow_flex"])
#     time.sleep(dt)
# exit()

# shoulder_calibration = robot.get_shoulder_calibration()
# print(shoulder_calibration)
#robot.arm_bus.write("Goal_Position", arm_calibration["start_pos"])
# robot.arm_bus.write("Over_Current_Protection_Time", 50)
# robot.arm_bus.write("Protection_Current", 310, ["shoulder_pitch"])
# robot.arm_bus.write("Overload_Torque", 80, ["shoulder_pitch"])
# robot.arm_bus.write("Protection_Time", 100, ["shoulder_pitch"])
# robot.arm_bus.write("Over_Current_Protection_Time", 50, ["shoulder_pitch"])

# robot.arm_bus.write("Protective_Torque", 20, ["shoulder_pitch"])


# robot.arm_bus.write("Goal_Position", [600], ["shoulder_pitch"])

# from test_torque.log_and_plot_feetech import log_and_plot_params

# params_to_log = [
#     "Present_Current",
#     "Protection_Current",
#     "Overload_Torque",
#     "Protection_Time",
#     "Protective_Torque",
#     "Present_Load",
#     "Present_Position",
# ]

# servo_names = ["shoulder_pitch"]

#

#robot.arm_bus.write("Goal_Position", arm_calibration["start_pos"])

#robot.hand_bus.set_calibration(hand_calibration)

#interp = 0.3

#robot.arm_bus.write("Goal_Position", [int((i*interp + j*(1-interp))) for i, j in zip(arm_calibration["start_pos"], arm_calibration["end_pos"])])
#exit()

# glove = HomonculusGlove()
# glove.run_calibration()


####GOOD FOR GRASPING
# start_pos = [
#     500,
#     900,
#     500,
#     1000,
#     100,
#     450,  # 250
#     950,  # 750
#     100,
#     300,  # 400
#     50,  # 150
#     100,
#     120,
#     980,
#     100,
#     950,
#     750,
# ]
# end_pos = [
#     start_pos[0] - 400,
#     start_pos[1] - 300,
#     start_pos[2] + 500,
#     start_pos[3] - 50,
#     start_pos[4] + 900,
#     start_pos[5] + 500,
#     start_pos[6] - 500,
#     start_pos[7] + 900,
#     start_pos[8] + 700,
#     start_pos[9] + 700,
#     start_pos[10] + 900,
#     start_pos[11] + 700,
#     start_pos[12] - 700,
#     start_pos[13] + 900,
#     start_pos[14] - 700,
#     start_pos[15] - 700,
# ]


SCS_SERIES_CONTROL_TABLE = {
    # "Max_Torque_Limit": (16, 2),
    # "Phase": (18, 1),
    # "Unloading_Condition": (19, 1),
    "Protective_Torque": (37, 1),
    "Protection_Time": (38, 1),
    # "Baud_Rate": (48, 1),
}

def read_and_print_scs_values(robot):
    for param_name in SCS_SERIES_CONTROL_TABLE:
        value = robot.hand_bus.read(param_name)
        print(f"{param_name}: {value}")

motor_1_values = {
    "Lock": 255,
    # "Protection_Time": 20  # writing to these resets them to 0 for some reason;
    # protection time was 100, protective to
}

# motor_1_values = {
#     "Lock": 1,
#     "Protection_Time": 100,
#     "Protective_Torque": 20,
#     "Phase": 1,  # this one is unreliable
#     "Unloading_Condition": 32,
# }
# There is a bug when writing to specific registers of the scs0009.

# Write values to motor 2; there is overload torque there.
# OK, so I can write; the jittering is because the overload torque is still being triggered.

# TODO: write a functioning version for the scs0009 (or not, low priority)
@@ -1,18 +0,0 @@
# Color gradient function (0-2024 scaled to 0-10)
def value_to_color(value):
    # Clamp the value between 0 and 2024
    value = max(0, min(2024, value))

    # Scale from [0..2024] to [0..10]
    scaled_value = (value / 2024) * 10

    # Green to yellow (scaled_value 0..5), then yellow to red (scaled_value 5..10)
    if scaled_value <= 5:
        r = int(255 * (scaled_value / 5))
        g = 255
    else:
        r = 255
        g = int(255 * (1 - (scaled_value - 5) / 5))
    b = 0

    return (r, g, b)
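# Usage sketch (illustrative): mapping a few Present_Load-style readings to colors.
# value_to_color(0)    -> (0, 255, 0)    pure green (no load)
# value_to_color(1012) -> (255, 255, 0)  yellow at the midpoint
# value_to_color(2024) -> (255, 0, 0)    red at full scale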
examples/test.py
@@ -1,681 +0,0 @@
import threading
import time
from typing import Callable

import cv2
import numpy as np
import serial

# from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp
# from qai_hub_models.models.mediapipe_hand.model import (
#     MediaPipeHand,
# )
# from qai_hub_models.utils.image_processing import (
#     app_to_net_image_inputs,
# )
from lerobot.common.robot_devices.motors.feetech import (
    CalibrationMode,
    FeetechMotorsBus,
)

LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200


class HomonculusGlove:
    def __init__(self):
        self.serial_port = "/dev/tty.usbmodem21401"
        self.baud_rate = 115200
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
        self.thread = threading.Thread(target=self.async_read)
        self.thread.start()
        self.last_d = {
            "thumb_0": 100,
            "thumb_1": 100,
            "thumb_2": 100,
            "thumb_3": 100,
            "index_0": 100,
            "index_1": 100,
            "index_2": 100,
            "middle_0": 100,
            "middle_1": 100,
            "middle_2": 100,
            "ring_0": 100,
            "ring_1": 100,
            "ring_2": 100,
            "pinky_0": 100,
            "pinky_1": 100,
            "pinky_2": 100,
            "battery_voltage": 100,
        }
        self.calibration = None

    @property
    def joint_names(self):
        return list(self.last_d.keys())

    def read(self, motor_names: list[str] | None = None):
        if motor_names is None:
            motor_names = self.joint_names

        values = np.array([self.last_d[k] for k in motor_names])

        print(motor_names)
        print(values)

        if self.calibration is not None:
            values = self.apply_calibration(values, motor_names)
            print(values)
        return values

    def async_read(self):
        while True:
            if self.serial.in_waiting > 0:
                self.serial.flush()
                vals = self.serial.readline().decode("utf-8").strip()
                vals = vals.split(" ")
                if len(vals) != 17:
                    continue
                vals = [int(val) for val in vals]

                d = {
                    "thumb_0": vals[0],
                    "thumb_1": vals[1],
                    "thumb_2": vals[2],
                    "thumb_3": vals[3],
                    "index_0": vals[4],
                    "index_1": vals[5],
                    "index_2": vals[6],
                    "middle_0": vals[7],
                    "middle_1": vals[8],
                    "middle_2": vals[9],
                    "ring_0": vals[10],
                    "ring_1": vals[11],
                    "ring_2": vals[12],
                    "pinky_0": vals[13],
                    "pinky_1": vals[14],
                    "pinky_2": vals[15],
                    "battery_voltage": vals[16],
                }
                self.last_d = d
                # print(d.values())

    def run_calibration(self):
        print("\nMove arm to open position")
        input("Press Enter to continue...")
        open_pos_list = []
        for _ in range(300):
            open_pos = self.read()
            open_pos_list.append(open_pos)
            time.sleep(0.01)
        open_pos = np.array(open_pos_list)
        max_open_pos = open_pos.max(axis=0)
        min_open_pos = open_pos.min(axis=0)

        print(f"{max_open_pos=}")
        print(f"{min_open_pos=}")

        print("\nMove arm to closed position")
        input("Press Enter to continue...")
        closed_pos_list = []
        for _ in range(300):
            closed_pos = self.read()
            closed_pos_list.append(closed_pos)
            time.sleep(0.01)
        closed_pos = np.array(closed_pos_list)
        max_closed_pos = closed_pos.max(axis=0)
        closed_pos[closed_pos < 1000] = 60000
        min_closed_pos = closed_pos.min(axis=0)

        print(f"{max_closed_pos=}")
        print(f"{min_closed_pos=}")

        open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
        closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)

        # INVERSION: some joints read lower raw values when flexed, so swap their
        # open/closed endpoints to keep the calibration direction consistent.
        for i, jname in enumerate(self.joint_names):
            if jname in ["thumb_0", "thumb_3", "index_2", "middle_2", "ring_2", "pinky_0", "pinky_2"]:
                open_pos[i], closed_pos[i] = closed_pos[i], open_pos[i]

        print()
        print(f"{open_pos=}")
        print(f"{closed_pos=}")

        homing_offset = [0] * len(self.joint_names)
        drive_mode = [0] * len(self.joint_names)
        calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": open_pos,
            "end_pos": closed_pos,
            "calib_mode": calib_modes,
            "motor_names": self.joint_names,
        }
        # return calib_dict
        self.set_calibration(calib_dict)

    def set_calibration(self, calibration: dict[str, list]):
        self.calibration = calibration

    def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """Convert raw sensor values to a universal float32 nominal range.

        For DEGREE mode this is the centered range ]-180.0, 180.0[ with a "zero position" at
        0 degrees; for the LINEAR mode used here it is [0, 100] %.

        Note: We say "nominal range" since motors can take values outside this range, for
        instance 190 degrees if a motor rotates more than half a turn from the zero position.
        However, most motors can't rotate more than 180 degrees and will stay in this range.

        Joint values originally lie in [0, 2**32) (unsigned int32). Each motor is expected to
        complete a full rotation when given a goal position that is + or - its resolution. For
        instance, feetech xl330-m077 motors have a resolution of 4096, so from any position in
        the original range, say 56734, they complete a full clockwise rotation by moving to
        60830, or an anticlockwise one by moving to 52638. The position in the original range
        is arbitrary and might change a lot between motors. To harmonize between motors of the
        same model, different robots, or even models of different brands, we propose to work
        in the centered nominal range.
        """
        if motor_names is None:
            motor_names = self.motor_names

        # Convert from the unsigned int32 original range to a signed float32 range
        values = values.astype(np.float32)

        for i, name in enumerate(motor_names):
            calib_idx = self.calibration["motor_names"].index(name)
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
                end_pos = self.calibration["end_pos"][calib_idx]

                # Rescale the present position to a nominal range [0, 100] %,
                # useful for joints with linear motions like the Aloha gripper
                values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100

                if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
                    if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
                        # Known-noisy channel: clamp to fully closed (100 %) rather than raising.
                        values[i] = 100
                    else:
                        msg = (
                            f"Wrong motor position range detected for {name}. "
                            f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
                            f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
                            f"but present value is {values[i]} %. "
                            "This might be due to a cable connection issue creating an artificial jump in motor values. "
                            "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
                        )
                        print(msg)
                        # raise JointOutOfRangeError(msg)

        return values

    # def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
    #     """Inverse of `apply_calibration`."""
    #     if motor_names is None:
    #         motor_names = self.motor_names

    #     for i, name in enumerate(motor_names):
    #         calib_idx = self.calibration["motor_names"].index(name)
    #         calib_mode = self.calibration["calib_mode"][calib_idx]

    #         if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
    #             start_pos = self.calibration["start_pos"][calib_idx]
    #             end_pos = self.calibration["end_pos"][calib_idx]

    #             # Convert from the nominal linear range of [0, 100] % to the
    #             # actual motor range of values, which can be arbitrary.
    #             values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos

    #     values = np.round(values).astype(np.int32)
    #     return values


class HopeJuniorRobot:
    def __init__(self):
        self.arm_bus = FeetechMotorsBus(
            port="/dev/tty.usbmodem58760429571",
            motors={
                # "motor1": (2, "sts3250"),
                # "motor2": (1, "scs0009"),
                "shoulder_pitch": [1, "sts3250"],
                "shoulder_yaw": [2, "sts3215"],  # TODO: sts3250
                "shoulder_roll": [3, "sts3215"],  # TODO: sts3250
                "elbow_flex": [4, "sts3250"],
                "wrist_roll": [5, "sts3215"],
                "wrist_yaw": [6, "sts3215"],
                "wrist_pitch": [7, "sts3215"],
            },
            protocol_version=0,
        )
        self.hand_bus = FeetechMotorsBus(
            port="/dev/tty.usbmodem585A0077581",
            motors={
                "thumb_basel_rotation": [30, "scs0009"],
                "thumb_flexor": [27, "scs0009"],
                "thumb_pinky_side": [26, "scs0009"],
                "thumb_thumb_side": [28, "scs0009"],
                "index_flexor": [25, "scs0009"],
                "index_pinky_side": [31, "scs0009"],
                "index_thumb_side": [32, "scs0009"],
                "middle_flexor": [24, "scs0009"],
                "middle_pinky_side": [33, "scs0009"],
                "middle_thumb_side": [34, "scs0009"],
                "ring_flexor": [21, "scs0009"],
                "ring_pinky_side": [36, "scs0009"],
                "ring_thumb_side": [35, "scs0009"],
                "pinky_flexor": [23, "scs0009"],
                "pinky_pinky_side": [38, "scs0009"],
                "pinky_thumb_side": [37, "scs0009"],
            },
            protocol_version=1,
            group_sync_read=False,
        )

    def get_hand_calibration(self):
        homing_offset = [0] * len(self.hand_bus.motor_names)
        drive_mode = [0] * len(self.hand_bus.motor_names)

        start_pos = [
            500,
            900,
            1000,
            0,
            100,
            250,
            750,
            100,
            400,
            150,
            100,
            120,
            980,
            100,
            950,
            750,
        ]

        end_pos = [
            500 - 250,
            900 - 300,
            1000 - 550,
            0 + 550,
            1000,
            250 + 700,
            750 - 700,
            1000,
            400 + 700,
            150 + 700,
            1000,
            120 + 700,
            980 - 700,
            1000,
            950 - 700,
            750 - 700,
        ]

        calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "calib_mode": calib_modes,
            "motor_names": self.hand_bus.motor_names,
        }
        return calib_dict

    def connect(self):
        self.arm_bus.connect()
        self.hand_bus.connect()


ESCAPE_KEY_ID = 27
|
||||
|
||||
|
||||
def capture_and_display_processed_frames(
|
||||
frame_processor: Callable[[np.ndarray], np.ndarray],
|
||||
window_display_name: str,
|
||||
cap_device: int = 0,
|
||||
) -> None:
|
||||
"""
|
||||
Capture frames from the given input camera device, run them through
|
||||
the frame processor, and display the outputs in a window with the given name.
|
||||
|
||||
User should press Esc to exit.
|
||||
|
||||
Inputs:
|
||||
frame_processor: Callable[[np.ndarray], np.ndarray]
|
||||
Processes frames.
|
||||
Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte.
|
||||
window_display_name: str
|
||||
Name of the window used to display frames.
|
||||
cap_device: int
|
||||
Identifier for the camera to use to capture frames.
|
||||
"""
|
||||
cv2.namedWindow(window_display_name)
|
||||
capture = cv2.VideoCapture(cap_device)
|
||||
if not capture.isOpened():
|
||||
raise ValueError("Unable to open video capture.")
|
||||
|
||||
frame_count = 0
|
||||
has_frame, frame = capture.read()
|
||||
while has_frame:
|
||||
assert isinstance(frame, np.ndarray)
|
||||
|
||||
frame_count = frame_count + 1
|
||||
# mirror frame
|
||||
frame = np.ascontiguousarray(frame[:, ::-1, ::-1])
|
||||
|
||||
# process & show frame
|
||||
processed_frame = frame_processor(frame)
|
||||
cv2.imshow(window_display_name, processed_frame[:, :, ::-1])
|
||||
|
||||
has_frame, frame = capture.read()
|
||||
key = cv2.waitKey(1)
|
||||
if key == ESCAPE_KEY_ID:
|
||||
break
|
||||
|
||||
capture.release()
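
# Example usage (editor's sketch; assumes a webcam at index 0 and an OpenCV
# build with GUI support):
#
#     def identity_processor(frame: np.ndarray) -> np.ndarray:
#         return frame
#
#     capture_and_display_processed_frames(identity_processor, "Preview", cap_device=0)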


def main():
    robot = HopeJuniorRobot()
    robot.connect()

    # robot.hand_bus.calibration = None

    # breakpoint()
    # print(robot.arm_bus.read("Present_Position"))
    robot.arm_bus.write("Torque_Enable", 1)
    robot.arm_bus.write("Acceleration", 20)
    robot.arm_bus.read("Acceleration")

    calibration = robot.get_hand_calibration()
    robot.hand_bus.write("Goal_Position", calibration["start_pos"])
    # robot.hand_bus.write("Goal_Position", calibration["end_pos"][:4], robot.hand_bus.motor_names[:4])
    robot.hand_bus.set_calibration(calibration)
    lol = 1

    # # print(motors_bus.write("Goal_Position", 500))
    # print(robot.hand_bus.read("Present_Position"))
    # # pos = hand_bus.read("Present_Position")
    # # hand_bus.write("Goal_Position", pos[0]+20, hand_bus.motor_names[0])
    # # hand_bus.write("Goal_Position", pos[i]+delta, hand_bus.motor_names[i])
    # robot.hand_bus.read("Acceleration")
    # robot.hand_bus.write("Acceleration", 10)

    # sleep = 1
    # # robot.hand_bus.write(
    # #     "Goal_Position", [glove.last_d['index_2']-1500, 300, 300], ["index_pinky_side", "index_flexor", "index_thumb_side"]
    # # )
    # # time.sleep(sleep)
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [100, 0, 0], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [200, 100, 600], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
    # )
    # time.sleep(sleep)

    # breakpoint()

    glove = HomonculusGlove()
    glove.run_calibration()
    # while True:
    #     joint_names = ["index_1", "index_2"]
    #     joint_values = glove.read(joint_names)
    #     print(joint_values)

    input()
    while True:
        joint_names = []
        joint_names += ["thumb_0", "thumb_2", "thumb_3"]
        joint_names += ["index_1", "index_2"]
        joint_names += ["middle_1", "middle_2"]
        joint_names += ["ring_1", "ring_2"]
        joint_names += ["pinky_1", "pinky_2"]
        joint_values = glove.read(joint_names)
        joint_values = joint_values.round().astype(int)
        joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}

        motor_values = []
        motor_names = []
        motor_names += ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"]
        motor_values += [joint_dict["thumb_3"], joint_dict["thumb_0"], joint_dict["thumb_2"], joint_dict["thumb_2"]]
        motor_names += ["index_flexor", "index_pinky_side", "index_thumb_side"]
        motor_values += [joint_dict["index_2"], joint_dict["index_1"], joint_dict["index_1"]]
        motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
        motor_values += [joint_dict["middle_2"], joint_dict["middle_1"], joint_dict["middle_1"]]
        motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
        motor_values += [joint_dict["ring_2"], joint_dict["ring_1"], joint_dict["ring_1"]]
        motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
        motor_values += [joint_dict["pinky_2"], joint_dict["pinky_1"], joint_dict["pinky_1"]]

        motor_values = np.array(motor_values)
        motor_values = np.clip(motor_values, 0, 100)

        robot.hand_bus.write("Goal_Position", motor_values, motor_names)
        time.sleep(0.02)

    # NOTE: the loop above never exits, so everything below is unreachable scratch
    # code kept from earlier experiments. The loop below also predates the
    # array-based glove.read() API: it indexes the result like a dict.
    while True:
        # print(glove.read()['index_2']-1500)
        glove_index_flexor = glove.read()["index_2"] - 1500
        glove_index_subflexor = glove.read()["index_1"] - 1500
        glove_index_side = glove.read()["index_0"] - 2100

        vals = [glove_index_flexor, 1000 - (glove_index_subflexor), glove_index_subflexor]

        keys = ["index_flexor", "index_pinky_side", "index_thumb_side"]

        glove_middle_flexor = glove.read()["middle_2"] - 1500
        glove_middle_subflexor = 1000 - (glove.read()["middle_1"] - 1700)
        vals += [glove_middle_flexor, glove_middle_subflexor, glove_middle_subflexor - 200]
        keys += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]

        glove_ring_flexor = glove.read()["ring_2"] - 1300
        print(glove_ring_flexor)
        glove_ring_subflexor = glove.read()["ring_1"] - 1100

        vals += [glove_ring_flexor, 1000 - glove_ring_subflexor, glove_ring_subflexor]
        keys += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]

        glove_pinky_flexor = glove.read()["pinky_2"] - 1500
        glove_pinky_subflexor = glove.read()["pinky_1"] - 1300
        vals += [300 + glove_pinky_flexor, max(1000 - glove_pinky_subflexor - 100, 0), glove_pinky_subflexor]
        keys += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]

        robot.hand_bus.write("Goal_Position", vals, keys)
        time.sleep(0.1)

    time.sleep(3)

    def move_arm(loop=10):
        sleep = 1
        # (pose, delay) pairs, replayed in order; same data as the original
        # write/sleep sequence, restructured as a table.
        poses_and_delays = [
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1195], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 2195], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1457, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 2357, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 974, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 2674, 1957, 1695], sleep + 2),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 1632, 1874, 1957, 1695], sleep),
            ([1981, 2030, 1369, 1632, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 1330, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([2381, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1681, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
        ]
        for _ in range(loop):
            for pose, delay in poses_and_delays:
                robot.arm_bus.write("Goal_Position", pose)
                time.sleep(delay)

    def move_hand(loop=10):
        sleep = 0.5
        for i in range(loop):
            robot.hand_bus.write(
                "Goal_Position",
                [500, 1000, 0, 1000],
                ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [100, 1000, 150], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [200, 100, 700], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
            )
            time.sleep(sleep)

            robot.hand_bus.write(
                "Goal_Position",
                [500, 1000 - 250, 0 + 300, 1000 - 200],
                ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [100 + 450, 100 + 400, 100 + 400],
                ["index_flexor", "index_pinky_side", "index_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [100 + 350, 1000 - 450, 150 + 450],
                ["middle_flexor", "middle_pinky_side", "middle_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [200 + 650, 200 + 350, 0 + 350],
                ["ring_flexor", "ring_pinky_side", "ring_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [200 + 450, 100 + 400, 700 - 400],
                ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"],
            )
            time.sleep(sleep)

    move_hand(3)

    move_arm(1)

    from concurrent.futures import ThreadPoolExecutor

    with ThreadPoolExecutor() as executor:
        executor.submit(move_arm)
        executor.submit(move_hand)

    # initial position
    for i in range(3):
        robot.hand_bus.write(
            "Goal_Position", [500, 1000, 0, 1000, 100, 950, 100, 100, 1000, 150, 200, 200, 0, 200, 100, 700]
        )
        time.sleep(1)

    # for i in range(3):
    #     robot.hand_bus.write("Goal_Position", [500, 1000-150, 0+250, 1000-150,
    #                                            100+300, 950-250, 100+250,
    #                                            100+200, 1000-300, 150+300,
    #                                            200+500, 200+200, 0+200,
    #                                            200+300, 100+200, 700-200])
    #     time.sleep(1)

    # camera = 0
    # score_threshold = 0.95
    # iou_threshold = 0.3

    # app = MediaPipeHandApp(MediaPipeHand.from_pretrained(), score_threshold, iou_threshold)

    # def frame_processor(frame: np.ndarray) -> np.ndarray:
    #     # Input Prep
    #     NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs(frame)

    #     # Run Bounding Box & Keypoint Detector
    #     batched_selected_boxes, batched_selected_keypoints = app._run_box_detector(NCHW_fp32_torch_frames)

    #     # The region of interest (bounding box of 4 (x, y) corners).
    #     # list[torch.Tensor(shape=[Num Boxes, 4, 2])],
    #     # where 2 == (x, y)
    #     #
    #     # A list element will be None if there is no selected ROI.
    #     batched_roi_4corners = app._compute_object_roi(batched_selected_boxes, batched_selected_keypoints)

    #     # selected landmarks for the ROI (if any)
    #     # list[torch.Tensor(shape=[Num Selected Landmarks, K, 3])],
    #     # where K == number of landmark keypoints, 3 == (x, y, confidence)
    #     #
    #     # A list element will be None if there is no ROI.
    #     landmarks_out = app._run_landmark_detector(NHWC_int_numpy_frames, batched_roi_4corners)

    #     app._draw_predictions(
    #         NHWC_int_numpy_frames,
    #         batched_selected_boxes,
    #         batched_selected_keypoints,
    #         batched_roi_4corners,
    #         *landmarks_out,
    #     )

    #     return NHWC_int_numpy_frames[0]

    # capture_and_display_processed_frames(frame_processor, "QAIHM Mediapipe Hand Demo", camera)


if __name__ == "__main__":
    main()
@@ -1,133 +0,0 @@
#!/usr/bin/env python
#
# ********* Ping Example *********
#
#
# Available SCServo models for this example: all models using Protocol SCS
# This example is tested with a SCServo(STS/SMS/SCS), and an URT
# Be sure that SCServo(STS/SMS/SCS) properties are already set as %% ID : 1 / Baudnum : 6 (Baudrate : 1000000)
#

import os

if os.name == "nt":
    import msvcrt

    def getch():
        return msvcrt.getch().decode()
else:
    import sys
    import termios
    import tty

    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)

    def getch():
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch


from scservo_sdk import *  # Uses SCServo SDK library

# Default setting
SCS_ID = 1  # SCServo ID : 1
BAUDRATE = 1000000  # SCServo default baudrate : 1000000
DEVICENAME = "/dev/tty.usbserial-2130"  # Check which port is being used on your controller
# ex) Windows: "COM1"  Linux: "/dev/ttyUSB0"  Mac: "/dev/tty.usbserial-*"

protocol_end = 1  # SCServo bit end(STS/SMS=0, SCS=1)

# Initialize PortHandler instance
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
portHandler = PortHandler(DEVICENAME)

# Initialize PacketHandler instance
# Get methods and members of Protocol
packetHandler = PacketHandler(protocol_end)

# Open port
if portHandler.openPort():
    print("Succeeded in opening the port")
else:
    print("Failed to open the port")
    print("Press any key to terminate...")
    getch()
    quit()


# Set port baudrate
if portHandler.setBaudRate(BAUDRATE):
    print("Succeeded in changing the baudrate")
else:
    print("Failed to change the baudrate")
    print("Press any key to terminate...")
    getch()
    quit()

# Try to ping the SCServo
# Get SCServo model number
scs_model_number, scs_comm_result, scs_error = packetHandler.ping(portHandler, SCS_ID)
if scs_comm_result != COMM_SUCCESS:
    print("%s" % packetHandler.getTxRxResult(scs_comm_result))
elif scs_error != 0:
    print("%s" % packetHandler.getRxPacketError(scs_error))
else:
    print("[ID:%03d] ping Succeeded. SCServo model number : %d" % (SCS_ID, scs_model_number))


ADDR_SCS_PRESENT_POSITION = 56
scs_present_position, scs_comm_result, scs_error = packetHandler.read2ByteTxRx(
    portHandler, SCS_ID, ADDR_SCS_PRESENT_POSITION
)
if scs_comm_result != COMM_SUCCESS:
    print(packetHandler.getTxRxResult(scs_comm_result))
elif scs_error != 0:
    print(packetHandler.getRxPacketError(scs_error))

breakpoint()  # NOTE: leftover debugging stop; remove to run unattended
scs_present_position = SCS_LOWORD(scs_present_position)
# scs_present_speed = SCS_HIWORD(scs_present_position_speed)
# print("[ID:%03d] PresPos:%03d PresSpd:%03d" % (SCS_ID, scs_present_position, SCS_TOHOST(scs_present_speed, 15)))
print("[ID:%03d] PresPos:%03d" % (SCS_ID, scs_present_position))

groupSyncRead = GroupSyncRead(portHandler, packetHandler, ADDR_SCS_PRESENT_POSITION, 2)

scs_addparam_result = groupSyncRead.addParam(SCS_ID)
if not scs_addparam_result:
    print("[ID:%03d] groupSyncRead addparam failed" % SCS_ID)
    quit()

# Syncread present position
scs_comm_result = groupSyncRead.txRxPacket()
if scs_comm_result != COMM_SUCCESS:
    print("%s" % packetHandler.getTxRxResult(scs_comm_result))

# Check if groupsyncread data of SCServo#1 is available
scs_getdata_result = groupSyncRead.isAvailable(SCS_ID, ADDR_SCS_PRESENT_POSITION, 2)
if scs_getdata_result:
    # Get SCServo#1 present position value
    scs_present_position = groupSyncRead.getData(SCS_ID, ADDR_SCS_PRESENT_POSITION, 2)
else:
    scs_present_position = 0
    print("[ID:%03d] groupSyncRead getdata failed" % SCS_ID)

# # Check if groupsyncread data of SCServo#2 is available
# scs_getdata_result = groupSyncRead.isAvailable(SCS2_ID, ADDR_SCS_PRESENT_POSITION, 2)
# if scs_getdata_result:
#     # Get SCServo#2 present position value
#     scs2_present_position_speed = groupSyncRead.getData(SCS2_ID, ADDR_SCS_PRESENT_POSITION, 2)
# else:
#     print("[ID:%03d] groupSyncRead getdata failed" % SCS2_ID)

scs_present_position = SCS_LOWORD(scs_present_position)
print("[ID:%03d] PresPos:%03d" % (SCS_ID, scs_present_position))


# Close port
portHandler.closePort()
@@ -1,45 +0,0 @@
import serial


class HomonculusGlove:
    def __init__(self):
        self.serial_port = "/dev/tty.usbmodem1101"
        self.baud_rate = 115200
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)

    def read(self):
        while True:
            if self.serial.in_waiting > 0:
                vals = self.serial.readline().decode("utf-8").strip()
                vals = vals.split(" ")
                vals = [int(val) for val in vals]

                d = {
                    "thumb_0": vals[0],
                    "thumb_1": vals[1],
                    "thumb_2": vals[2],
                    "thumb_3": vals[3],
                    "index_0": vals[4],
                    "index_1": vals[5],
                    "index_2": vals[6],
                    "middle_0": vals[7],
                    "middle_1": vals[8],
                    "middle_2": vals[9],
                    "ring_0": vals[10],
                    "ring_1": vals[11],
                    "ring_2": vals[12],
                    "pinky_0": vals[13],
                    "pinky_1": vals[14],
                    "pinky_2": vals[15],
                }
                return d

    # if ser.in_waiting > 0:
    #     line = ser.readline().decode('utf-8').strip()
    #     print(line)


if __name__ == "__main__":
    glove = HomonculusGlove()
    d = glove.read()
    lol = 1
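    # Editor's note: the parser above assumes one line per sample, sixteen
    # space-separated integers (thumb_0..3, then three channels each for index,
    # middle, ring and pinky). Illustrative check (sample values are made up):
    sample_line = "2048 2010 1990 2100 1800 1750 1700 1900 1850 1800 1750 1700 1650 1600 1550 1500"
    assert len(sample_line.split(" ")) == 16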
@@ -1,693 +0,0 @@
import threading
import time
from typing import Callable

import cv2
import numpy as np
import serial

# from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp
# from qai_hub_models.models.mediapipe_hand.model import (
#     MediaPipeHand,
# )
# from qai_hub_models.utils.image_processing import (
#     app_to_net_image_inputs,
# )
from lerobot.common.robot_devices.motors.feetech import (
    CalibrationMode,
    FeetechMotorsBus,
)

LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200


class HomonculusGlove:
    def __init__(self):
        self.serial_port = "/dev/tty.usbmodem1401"
        self.baud_rate = 115200
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
        self.thread = threading.Thread(target=self.async_read)
        self.thread.start()
        # Last reading per channel; 100 is a placeholder until the first serial line arrives.
        self.last_d = dict.fromkeys(
            [
                "thumb_0", "thumb_1", "thumb_2", "thumb_3",
                "index_0", "index_1", "index_2",
                "middle_0", "middle_1", "middle_2",
                "ring_0", "ring_1", "ring_2",
                "pinky_0", "pinky_1", "pinky_2",
                "battery_voltage",
            ],
            100,
        )
        self.calibration = None

    @property
    def joint_names(self):
        return list(self.last_d.keys())

    def read(self, motor_names: list[str] | None = None):
        if motor_names is None:
            motor_names = self.joint_names

        values = np.array([self.last_d[k] for k in motor_names])

        print(motor_names)
        print(values)

        if self.calibration is not None:
            values = self.apply_calibration(values, motor_names)
            print(values)
        return values

    def async_read(self):
        while True:
            if self.serial.in_waiting > 0:
                self.serial.flush()
                vals = self.serial.readline().decode("utf-8").strip()
                vals = vals.split(" ")
                if len(vals) != 17:
                    continue
                vals = [int(val) for val in vals]
                # 17 channels: 16 joints plus battery_voltage, in joint_names order.
                self.last_d = dict(zip(self.joint_names, vals, strict=True))

    def run_calibration(self):
        print("\nMove arm to open position")
        input("Press Enter to continue...")
        open_pos_list = []
        for _ in range(300):
            open_pos = self.read()
            open_pos_list.append(open_pos)
            time.sleep(0.01)
        open_pos = np.array(open_pos_list)
        max_open_pos = open_pos.max(axis=0)
        min_open_pos = open_pos.min(axis=0)

        print(f"{max_open_pos=}")
        print(f"{min_open_pos=}")

        print("\nMove arm to closed position")
        input("Press Enter to continue...")
        closed_pos_list = []
        for _ in range(300):
            closed_pos = self.read()
            closed_pos_list.append(closed_pos)
            time.sleep(0.01)
        closed_pos = np.array(closed_pos_list)
        max_closed_pos = closed_pos.max(axis=0)
        closed_pos[closed_pos < 1000] = 60000  # discard wrap-around glitches before taking the min
        min_closed_pos = closed_pos.min(axis=0)

        print(f"{max_closed_pos=}")
        print(f"{min_closed_pos=}")

        open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
        closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)

        # INVERSION: these joints read inverted, so swap their open/closed extremes.
        for i, jname in enumerate(self.joint_names):
            if jname in [
                "thumb_0",
                "thumb_3",
                "index_2",
                "middle_2",
                "ring_2",
                "pinky_0",
                "pinky_2",
                "index_0",
            ]:
                open_pos[i], closed_pos[i] = closed_pos[i], open_pos[i]

        print()
        print(f"{open_pos=}")
        print(f"{closed_pos=}")

        homing_offset = [0] * len(self.joint_names)
        drive_mode = [0] * len(self.joint_names)
        calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": open_pos,
            "end_pos": closed_pos,
            "calib_mode": calib_modes,
            "motor_names": self.joint_names,
        }
        # return calib_dict
        self.set_calibration(calib_dict)

    def set_calibration(self, calibration: dict[str, list]):
        self.calibration = calibration

    def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """Convert from the unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree
        range ]-180.0, 180.0[ with a "zero position" at 0 degrees.

        Note: We say "nominal degree range" since the motors can take values outside this range, e.g. 190 degrees if
        the motor rotates more than half a turn from the zero position. However, most motors can't rotate more than
        180 degrees and will stay in this range.

        Joint values are originally in [0, 2**32[ (unsigned int32). Each motor is expected to complete a full rotation
        when given a goal position that is + or - its resolution. For instance, feetech xl330-m077 have a resolution of 4096, and
        at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
        or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between each motor.
        To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
        in the centered nominal degree range ]-180, 180[.
        """
        if motor_names is None:
            motor_names = self.joint_names  # fixed: this class has joint_names, not motor_names

        # Convert from unsigned int32 original range [0, 2**32] to signed float32 range
        values = values.astype(np.float32)

        for i, name in enumerate(motor_names):
            calib_idx = self.calibration["motor_names"].index(name)
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
                end_pos = self.calibration["end_pos"][calib_idx]

                # Rescale the present position to a nominal range [0, 100] %,
                # useful for joints with linear motions like Aloha gripper
                values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100

                if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
                    if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
                        values[i] = end_pos
                    else:
                        msg = (
                            f"Wrong motor position range detected for {name}. "
                            f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
                            f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
                            f"but present value is {values[i]} %. "
                            "This might be due to a cable connection issue creating an artificial jump in motor values. "
                            "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
                        )
                        print(msg)
                        # raise JointOutOfRangeError(msg)

        return values
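    # Editor's worked example (illustrative numbers, not from a real calibration):
    # with start_pos = 1500 and end_pos = 2500, a raw reading of 2000 rescales to
    #     (2000 - 1500) / (2500 - 1500) * 100 = 50.0 %
    # and anything outside [-100, 200] % triggers the recalibration warning above.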

    # def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
    #     """Inverse of `apply_calibration`."""
    #     if motor_names is None:
    #         motor_names = self.joint_names

    #     for i, name in enumerate(motor_names):
    #         calib_idx = self.calibration["motor_names"].index(name)
    #         calib_mode = self.calibration["calib_mode"][calib_idx]

    #         if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
    #             start_pos = self.calibration["start_pos"][calib_idx]
    #             end_pos = self.calibration["end_pos"][calib_idx]

    #             # Convert from the nominal linear range of [0, 100] % to
    #             # the actual motor range of values, which can be arbitrary.
    #             values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos

    #     values = np.round(values).astype(np.int32)
    #     return values


class HopeJuniorRobot:
    def __init__(self):
        self.arm_bus = FeetechMotorsBus(
            port="/dev/tty.usbmodem58760429571",
            motors={
                # "motor1": (2, "sts3250"),
                # "motor2": (1, "scs0009"),
                "shoulder_pitch": [1, "sts3250"],
                "shoulder_yaw": [2, "sts3215"],  # TODO: sts3250
                "shoulder_roll": [3, "sts3215"],  # TODO: sts3250
                "elbow_flex": [4, "sts3250"],
                "wrist_roll": [5, "sts3215"],
                "wrist_yaw": [6, "sts3215"],
                "wrist_pitch": [7, "sts3215"],
            },
            protocol_version=0,
        )
        self.hand_bus = FeetechMotorsBus(
            port="/dev/tty.usbmodem585A0077581",
            motors={
                "thumb_basel_rotation": [30, "scs0009"],
                "thumb_flexor": [27, "scs0009"],
                "thumb_pinky_side": [26, "scs0009"],
                "thumb_thumb_side": [28, "scs0009"],
                "index_flexor": [25, "scs0009"],
                "index_pinky_side": [31, "scs0009"],
                "index_thumb_side": [32, "scs0009"],
                "middle_flexor": [24, "scs0009"],
                "middle_pinky_side": [33, "scs0009"],
                "middle_thumb_side": [34, "scs0009"],
                "ring_flexor": [21, "scs0009"],
                "ring_pinky_side": [36, "scs0009"],
                "ring_thumb_side": [35, "scs0009"],
                "pinky_flexor": [23, "scs0009"],
                "pinky_pinky_side": [38, "scs0009"],
                "pinky_thumb_side": [37, "scs0009"],
            },
            protocol_version=1,
            group_sync_read=False,
        )

    def get_hand_calibration(self):
        homing_offset = [0] * len(self.hand_bus.motor_names)
        drive_mode = [0] * len(self.hand_bus.motor_names)

        start_pos = [500, 900, 1000, 0, 100, 250, 750, 100, 400, 150, 100, 120, 980, 100, 950, 750]

        end_pos = [
            500 - 250, 900 - 300, 1000 - 550, 0 + 550,
            1000, start_pos[5] + 500, start_pos[6] - 500,
            1000, 400 + 700, 150 + 700,
            1000, 120 + 700, 980 - 700,
            1000, 950 - 700, 750 - 700,
        ]

        calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "calib_mode": calib_modes,
            "motor_names": self.hand_bus.motor_names,
        }
        return calib_dict

    def connect(self):
        self.arm_bus.connect()
        self.hand_bus.connect()


ESCAPE_KEY_ID = 27


def capture_and_display_processed_frames(
    frame_processor: Callable[[np.ndarray], np.ndarray],
    window_display_name: str,
    cap_device: int = 0,
) -> None:
    """
    Capture frames from the given input camera device, run them through
    the frame processor, and display the outputs in a window with the given name.

    The user should press Esc to exit.

    Inputs:
        frame_processor: Callable[[np.ndarray], np.ndarray]
            Processes frames.
            Input and output are numpy arrays of shape (H, W, C) with BGR channel layout and dtype uint8 / byte.
        window_display_name: str
            Name of the window used to display frames.
        cap_device: int
            Identifier for the camera to use to capture frames.
    """
    cv2.namedWindow(window_display_name)
    capture = cv2.VideoCapture(cap_device)
    if not capture.isOpened():
        raise ValueError("Unable to open video capture.")

    frame_count = 0
    has_frame, frame = capture.read()
    while has_frame:
        assert isinstance(frame, np.ndarray)

        frame_count = frame_count + 1
        # mirror frame
        frame = np.ascontiguousarray(frame[:, ::-1, ::-1])

        # process & show frame
        processed_frame = frame_processor(frame)
        cv2.imshow(window_display_name, processed_frame[:, :, ::-1])

        has_frame, frame = capture.read()
        key = cv2.waitKey(1)
        if key == ESCAPE_KEY_ID:
            break

    capture.release()


def main():
    robot = HopeJuniorRobot()
    robot.connect()

    # robot.hand_bus.calibration = None

    # breakpoint()
    # print(robot.arm_bus.read("Present_Position"))
    robot.arm_bus.write("Torque_Enable", 1)
    robot.arm_bus.write("Acceleration", 20)
    robot.arm_bus.read("Acceleration")

    calibration = robot.get_hand_calibration()
    robot.hand_bus.write("Goal_Position", calibration["start_pos"])
    # robot.hand_bus.write("Goal_Position", calibration["end_pos"][:4], robot.hand_bus.motor_names[:4])
    robot.hand_bus.set_calibration(calibration)
    lol = 1

    # # print(motors_bus.write("Goal_Position", 500))
    # print(robot.hand_bus.read("Present_Position"))
    # # pos = hand_bus.read("Present_Position")
    # # hand_bus.write("Goal_Position", pos[0]+20, hand_bus.motor_names[0])
    # # hand_bus.write("Goal_Position", pos[i]+delta, hand_bus.motor_names[i])
    # robot.hand_bus.read("Acceleration")
    # robot.hand_bus.write("Acceleration", 10)

    # sleep = 1
    # # robot.hand_bus.write(
    # #     "Goal_Position", [glove.last_d['index_2']-1500, 300, 300], ["index_pinky_side", "index_flexor", "index_thumb_side"]
    # # )
    # # time.sleep(sleep)
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [100, 0, 0], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [200, 100, 600], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
    # )
    # time.sleep(sleep)

    # breakpoint()

    glove = HomonculusGlove()
    glove.run_calibration()
    # while True:
    #     joint_names = ["index_1", "index_2"]
    #     joint_values = glove.read(joint_names)
    #     print(joint_values)

    input()
    while True:
        joint_names = []
        # joint_names += ["thumb_0", "thumb_2", "thumb_3"]
        joint_names += ["index_0", "index_1"]
        # joint_names += ["middle_1", "middle_2"]
        # joint_names += ["ring_1", "ring_2"]
        # joint_names += ["pinky_0", "pinky_2"]
        joint_values = glove.read(joint_names)
        joint_values = joint_values.round().astype(int)
        joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}

        motor_values = []
        motor_names = []
        # motor_names += ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"]
        # motor_values += [joint_dict["thumb_3"], joint_dict["thumb_0"], joint_dict["thumb_2"], joint_dict["thumb_2"]]
        motor_names += ["index_pinky_side", "index_thumb_side"]
        # if joint_dict["index_0"] - 2100 > 0:
        # Blend the splay channel (index_0) with the flexion channel (index_1):
        # each side motor gets `splayamount` of splay plus the remainder of flexion.
        splayamount = 0.5
        motor_values += [
            (100 - joint_dict["index_0"]) * splayamount + joint_dict["index_1"] * (1 - splayamount),
            (joint_dict["index_0"]) * splayamount + joint_dict["index_1"] * (1 - splayamount),
        ]
        # else:
        #     motor_values += [100 - joint_dict["index_0"], joint_dict["index_0"]]

        # motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
        # motor_values += [joint_dict["middle_2"], joint_dict["middle_1"], joint_dict["middle_1"]]
        # motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
        # motor_values += [joint_dict["ring_2"], joint_dict["ring_1"], joint_dict["ring_1"]]
        # motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]

        # motor_values += [joint_dict["pinky_2"], joint_dict["pinky_0"], joint_dict["pinky_0"]]

        motor_values = np.array(motor_values)
        motor_values = np.clip(motor_values, 0, 100)

        robot.hand_bus.write("Goal_Position", motor_values, motor_names)
        time.sleep(0.02)

    # NOTE: the loop above never exits; everything below is unreachable scratch
    # code kept from earlier experiments, and the loop below predates the
    # array-based glove.read() API (it indexes the result like a dict).
    while True:
        # print(glove.read()['index_2']-1500)
        glove_index_flexor = glove.read()["index_2"] - 1500
        glove_index_subflexor = glove.read()["index_1"] - 1500
        glove_index_side = glove.read()["index_0"] - 2100

        vals = [glove_index_flexor, 1000 - (glove_index_subflexor), glove_index_subflexor]

        keys = ["index_flexor", "index_pinky_side", "index_thumb_side"]

        glove_middle_flexor = glove.read()["middle_2"] - 1500
        glove_middle_subflexor = 1000 - (glove.read()["middle_1"] - 1700)
        vals += [glove_middle_flexor, glove_middle_subflexor, glove_middle_subflexor - 200]
        keys += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]

        glove_ring_flexor = glove.read()["ring_2"] - 1300
        print(glove_ring_flexor)
        glove_ring_subflexor = glove.read()["ring_1"] - 1100

        vals += [glove_ring_flexor, 1000 - glove_ring_subflexor, glove_ring_subflexor]
        keys += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]

        glove_pinky_flexor = glove.read()["pinky_2"] - 1500
        glove_pinky_subflexor = glove.read()["pinky_1"] - 1300
        vals += [300 + glove_pinky_flexor, max(1000 - glove_pinky_subflexor - 100, 0), glove_pinky_subflexor]
        keys += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]

        robot.hand_bus.write("Goal_Position", vals, keys)
        time.sleep(0.1)

    time.sleep(3)

    def move_arm(loop=10):
        sleep = 1
        # (pose, delay) pairs, replayed in order; same data as the original
        # write/sleep sequence, restructured as a table.
        poses_and_delays = [
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1195], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 2195], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1457, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 2357, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 974, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 2674, 1957, 1695], sleep + 2),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 1632, 1874, 1957, 1695], sleep),
            ([1981, 2030, 1369, 1632, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 1330, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([2381, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1681, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
            ([1981, 2030, 2069, 2032, 1874, 1957, 1695], sleep),
        ]
        for _ in range(loop):
            for pose, delay in poses_and_delays:
                robot.arm_bus.write("Goal_Position", pose)
                time.sleep(delay)

    def move_hand(loop=10):
        sleep = 0.5
        for i in range(loop):
            robot.hand_bus.write(
                "Goal_Position",
                [500, 1000, 0, 1000],
                ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [100, 1000, 150], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [200, 100, 700], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
            )
            time.sleep(sleep)

            robot.hand_bus.write(
                "Goal_Position",
                [500, 1000 - 250, 0 + 300, 1000 - 200],
                ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [100 + 450, 100 + 400, 100 + 400],
                ["index_flexor", "index_pinky_side", "index_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [100 + 350, 1000 - 450, 150 + 450],
                ["middle_flexor", "middle_pinky_side", "middle_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [200 + 650, 200 + 350, 0 + 350],
                ["ring_flexor", "ring_pinky_side", "ring_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [200 + 450, 100 + 400, 700 - 400],
                ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"],
            )
            time.sleep(sleep)

    move_hand(3)

    move_arm(1)

    from concurrent.futures import ThreadPoolExecutor

    with ThreadPoolExecutor() as executor:
        executor.submit(move_arm)
        executor.submit(move_hand)

    # initial position
    for i in range(3):
        robot.hand_bus.write(
            "Goal_Position", [500, 1000, 0, 1000, 100, 950, 100, 100, 1000, 150, 200, 200, 0, 200, 100, 700]
        )
        time.sleep(1)

    # for i in range(3):
    #     robot.hand_bus.write("Goal_Position", [500, 1000-150, 0+250, 1000-150,
    #                                            100+300, 950-250, 100+250,
    #                                            100+200, 1000-300, 150+300,
    #                                            200+500, 200+200, 0+200,
    #                                            200+300, 100+200, 700-200])
    #     time.sleep(1)

    # camera = 0
    # score_threshold = 0.95
    # iou_threshold = 0.3

    # app = MediaPipeHandApp(MediaPipeHand.from_pretrained(), score_threshold, iou_threshold)

    # def frame_processor(frame: np.ndarray) -> np.ndarray:
    #     # Input Prep
    #     NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs(frame)

    #     # Run Bounding Box & Keypoint Detector
    #     batched_selected_boxes, batched_selected_keypoints = app._run_box_detector(NCHW_fp32_torch_frames)

    #     # The region of interest (bounding box of 4 (x, y) corners).
    #     # list[torch.Tensor(shape=[Num Boxes, 4, 2])],
    #     # where 2 == (x, y)
    #     #
    #     # A list element will be None if there is no selected ROI.
    #     batched_roi_4corners = app._compute_object_roi(batched_selected_boxes, batched_selected_keypoints)

    #     # selected landmarks for the ROI (if any)
    #     # list[torch.Tensor(shape=[Num Selected Landmarks, K, 3])],
    #     # where K == number of landmark keypoints, 3 == (x, y, confidence)
    #     #
    #     # A list element will be None if there is no ROI.
    #     landmarks_out = app._run_landmark_detector(NHWC_int_numpy_frames, batched_roi_4corners)

    #     app._draw_predictions(
    #         NHWC_int_numpy_frames,
    #         batched_selected_boxes,
    #         batched_selected_keypoints,
    #         batched_roi_4corners,
    #         *landmarks_out,
    #     )

    #     return NHWC_int_numpy_frames[0]

    # capture_and_display_processed_frames(frame_processor, "QAIHM Mediapipe Hand Demo", camera)


if __name__ == "__main__":
    main()
@@ -1,97 +0,0 @@
# faulty servo
Model = [777]
ID = [7]
Baud_Rate = [0]
Return_Delay = [0]
Response_Status_Level = [1]
Min_Angle_Limit = [1140]
Max_Angle_Limit = [2750]
Max_Temperature_Limit = [70]
Max_Voltage_Limit = [140]
Min_Voltage_Limit = [40]
Max_Torque_Limit = [1000]
Phase = [12]
Unloading_Condition = [44]
LED_Alarm_Condition = [47]
P_Coefficient = [32]
D_Coefficient = [32]
I_Coefficient = [0]
Minimum_Startup_Force = [16]
CW_Dead_Zone = [1]
CCW_Dead_Zone = [1]
Protection_Current = [310]
Angular_Resolution = [1]
Offset = [1047]
Mode = [0]
Protective_Torque = [20]
Protection_Time = [200]
Overload_Torque = [80]
Speed_closed_loop_P_proportional_coefficient = [10]
Over_Current_Protection_Time = [200]
Velocity_closed_loop_I_integral_coefficient = [200]
Torque_Enable = [1]
Acceleration = [20]
Goal_Position = [0]
Goal_Time = [0]
Goal_Speed = [0]
Torque_Limit = [1000]
Lock = [1]
Present_Position = [1494]
Present_Speed = [0]
Present_Load = [0]
Present_Voltage = [123]
Present_Temperature = [24]
Status = [0]
Moving = [0]
Present_Current = [0]
Maximum_Acceleration = [306]


# all servos of hopejr
Model = [2825 777 777 2825 777 777 777]
ID = [1 2 3 4 5 6 7]
Baud_Rate = [0 0 0 0 0 0 0]
Return_Delay = [0 0 0 0 0 0 0]
Response_Status_Level = [1 1 1 1 1 1 1]
Min_Angle_Limit = [ 650 1300 1300 1200 600 1725 0]
Max_Angle_Limit = [2600 2050 2800 2500 4096 2250 4095]
Max_Temperature_Limit = [80 70 70 80 70 70 70]
Max_Voltage_Limit = [160 140 140 160 140 140 80]
Min_Voltage_Limit = [60 40 40 60 40 40 40]
Max_Torque_Limit = [1000 1000 1000 1000 1000 1000 1000]
Phase = [12 12 12 12 12 12 12]
Unloading_Condition = [45 44 44 45 44 44 44]
LED_Alarm_Condition = [45 47 47 45 47 47 47]
P_Coefficient = [32 32 32 32 32 32 32]
D_Coefficient = [32 32 32 32 32 32 32]
I_Coefficient = [0 0 0 0 0 0 0]
Minimum_Startup_Force = [15 16 16 12 16 16 16]
CW_Dead_Zone = [0 1 1 0 1 1 1]
CCW_Dead_Zone = [0 1 1 0 1 1 1]
Protection_Current = [310 310 310 310 310 310 500]
Angular_Resolution = [1 1 1 1 1 1 1]
Offset = [ 0 1047 1024 1047 1024 1024 0]
Mode = [0 0 0 0 0 0 0]
Protective_Torque = [20 20 20 20 20 20 20]
Protection_Time = [200 200 200 200 200 200 200]
Overload_Torque = [80 80 80 80 80 80 80]
Speed_closed_loop_P_proportional_coefficient = [10 10 10 10 10 10 10]
Over_Current_Protection_Time = [250 200 200 250 200 200 200]
Velocity_closed_loop_I_integral_coefficient = [200 200 200 200 200 200 200]
Torque_Enable = [1 1 1 1 1 1 1]
Acceleration = [20 20 20 20 20 20 20]
Goal_Position = [1909 1977 1820 1000 707 1941 1036]
Goal_Time = [0 0 0 0 0 0 0]
Goal_Speed = [0 0 0 0 0 0 0]
Torque_Limit = [1000 1000 1000 200 1000 1000 1000]
Lock = [1 1 1 1 1 1 1]
Present_Position = [1909 1982 1821 1200 710 1941 1036]
Present_Speed = [0 0 0 0 0 0 0]
Present_Load = [ 0 48 0 0 32 0 0]
Present_Voltage = [122 123 122 123 122 122 122]
Present_Temperature = [23 28 28 26 29 28 28]
Status = [0 0 0 0 0 0 1]
Moving = [0 0 0 0 0 0 0]
Present_Current = [0 1 0 1 1 0 1]
Maximum_Acceleration = [1530 306 306 1530 306 306 254]
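
# Editor's note: dumps like the above can be produced by reading every key of
# the STS control table over the bus (sketch; assumes the robot.arm_bus.read API
# used in the other scripts and the STS_SERIES_CONTROL_TABLE defined further below):
#     for param in STS_SERIES_CONTROL_TABLE:
#         print(f"{param} = {robot.arm_bus.read(param)}")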
@@ -1,192 +0,0 @@
import threading
import time
from typing import Callable

import cv2
import numpy as np
import serial

from lerobot.common.robot_devices.motors.feetech import (
    CalibrationMode,
    FeetechMotorsBus,
)

LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200

ESCAPE_KEY_ID = 27


class HopeJuniorRobot:
    def __init__(self):
        self.arm_bus = FeetechMotorsBus(
            port="/dev/ttyACM1",
            motors={
                # "motor1": (2, "sts3250"),
                # "motor2": (1, "scs0009"),
                # "shoulder_pitch": [1, "sts3250"],
                # "shoulder_yaw": [2, "sts3215"],  # TODO: sts3250
                # "shoulder_roll": [3, "sts3215"],  # TODO: sts3250
                # "elbow_flex": [4, "sts3250"],
                # "wrist_roll": [5, "sts3215"],
                # "wrist_yaw": [6, "sts3215"],
                "wrist_pitch": [7, "sts3215"],
            },
            protocol_version=0,
        )
        self.hand_bus = FeetechMotorsBus(
            port="/dev/ttyACM1",
            motors={
                "thumb_basel_rotation": [30, "scs0009"],
                "thumb_flexor": [27, "scs0009"],
                "thumb_pinky_side": [26, "scs0009"],
                "thumb_thumb_side": [28, "scs0009"],
                "index_flexor": [25, "scs0009"],
                "index_pinky_side": [31, "scs0009"],
                "index_thumb_side": [32, "scs0009"],
                "middle_flexor": [24, "scs0009"],
                "middle_pinky_side": [33, "scs0009"],
                "middle_thumb_side": [34, "scs0009"],
                "ring_flexor": [21, "scs0009"],
                "ring_pinky_side": [36, "scs0009"],
                "ring_thumb_side": [35, "scs0009"],
                "pinky_flexor": [23, "scs0009"],
                "pinky_pinky_side": [38, "scs0009"],
                "pinky_thumb_side": [37, "scs0009"],
            },
            protocol_version=1,
            group_sync_read=False,
        )

    def get_hand_calibration(self):
        """
        Returns a dictionary containing calibration settings for each motor
        on the hand bus.
        """
        homing_offset = [0] * len(self.hand_bus.motor_names)
        drive_mode = [0] * len(self.hand_bus.motor_names)

        start_pos = [500, 900, 0, 1000, 100, 250, 750, 100, 400, 150, 100, 120, 980, 100, 950, 750]

        end_pos = [
            start_pos[0] - 400,  # 500 - 400 = 100
            start_pos[1] - 300,  # 900 - 300 = 600
            start_pos[2] + 550,  # 0 + 550 = 550
            start_pos[3] - 550,  # 1000 - 550 = 450
            start_pos[4] + 900,  # 100 + 900 = 1000
            start_pos[5] + 500,  # 250 + 500 = 750
            start_pos[6] - 500,  # 750 - 500 = 250
            start_pos[7] + 900,  # 100 + 900 = 1000
            start_pos[8] + 700,  # 400 + 700 = 1100
            start_pos[9] + 700,  # 150 + 700 = 850
            start_pos[10] + 900,  # 100 + 900 = 1000
            start_pos[11] + 700,  # 120 + 700 = 820
            start_pos[12] - 700,  # 980 - 700 = 280
            start_pos[13] + 900,  # 100 + 900 = 1000
            start_pos[14] - 700,  # 950 - 700 = 250
            start_pos[15] - 700,  # 750 - 700 = 50
        ]

        calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "calib_mode": calib_modes,
            "motor_names": self.hand_bus.motor_names,
        }
        return calib_dict

    def get_arm_calibration(self):
        """
        Returns a dictionary containing calibration settings for each motor
        on the arm bus.
        """
        homing_offset = [0] * len(self.arm_bus.motor_names)
        drive_mode = [0] * len(self.arm_bus.motor_names)

        # Example placeholders
        start_pos = [
            600,  # shoulder_up
            1500,  # shoulder_forward
            1300,  # shoulder_yaw
            1000,  # bend_elbow
            1600,  # wrist_roll
            1700,  # wrist_yaw
            600,  # wrist_pitch
        ]

        end_pos = [
            2300,  # shoulder_up
            2300,  # shoulder_forward
            2800,  # shoulder_yaw
            2500,  # bend_elbow
            2800,  # wrist_roll
            2200,  # wrist_yaw
            1700,  # wrist_pitch
        ]

        calib_modes = [CalibrationMode.LINEAR.name] * len(self.arm_bus.motor_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "calib_mode": calib_modes,
            "motor_names": self.arm_bus.motor_names,
        }
        return calib_dict

    def connect(self):
        """Connect to the Feetech buses."""
        self.arm_bus.connect()
        # self.hand_bus.connect()


def capture_and_display_processed_frames(
    frame_processor: Callable[[np.ndarray], np.ndarray],
    window_display_name: str,
    cap_device: int = 0,
) -> None:
    """
    Capture frames from the given input camera device, run them through
    the frame processor, and display the outputs in a window with the given name.

    The user should press Esc to exit.

    Inputs:
        frame_processor: Callable[[np.ndarray], np.ndarray]
            Processes frames.
            Input and output are numpy arrays of shape (H, W, C) with BGR channel layout and dtype uint8 / byte.
        window_display_name: str
            Name of the window used to display frames.
        cap_device: int
            Identifier for the camera to use to capture frames.
    """
    cv2.namedWindow(window_display_name)
    capture = cv2.VideoCapture(cap_device)
    if not capture.isOpened():
        raise ValueError("Unable to open video capture.")

    frame_count = 0
    has_frame, frame = capture.read()
    while has_frame:
        frame_count = frame_count + 1
        # Mirror frame horizontally and flip color for demonstration
        frame = np.ascontiguousarray(frame[:, ::-1, ::-1])

        # process & show frame
        processed_frame = frame_processor(frame)
        cv2.imshow(window_display_name, processed_frame[:, :, ::-1])

        has_frame, frame = capture.read()
        key = cv2.waitKey(1)
        if key == ESCAPE_KEY_ID:
            break

    capture.release()
Before Width: | Height: | Size: 56 KiB |
Before Width: | Height: | Size: 54 KiB |
Before Width: | Height: | Size: 56 KiB |
@@ -1,44 +0,0 @@
import time
from typing import List, Tuple

import matplotlib.pyplot as plt


def log_and_plot_params(
    bus,
    params_to_log: list,
    servo_names: list,
    test_id="servo_log",
    interval=0.1,
    duration=5,
    save_plot=True,
) -> Tuple[dict, List[float]]:
    """
    Logs specific servo parameters for a given duration and generates a plot.
    """
    servo_data = {servo_name: {param: [] for param in params_to_log} for servo_name in servo_names}
    timestamps = []

    start_time = time.time()

    while time.time() - start_time < duration:
        timestamp = time.time() - start_time
        timestamps.append(timestamp)
        for param in params_to_log:
            values = bus.read(param, servo_names)
            for servo_name, value in zip(servo_names, values):
                servo_data[servo_name][param].append(value)

        time.sleep(interval)

    if save_plot:
        for servo_name, data in servo_data.items():
            plt.figure(figsize=(10, 6))
            for param in params_to_log:
                if all(v is not None for v in data[param]):
                    plt.plot(timestamps, data[param], label=param)
            plt.xlabel("Time (s)")
            plt.ylabel("Parameter Values")
            plt.title(f"Parameter Trends for Servo: {servo_name}")
            plt.legend()
            plt.grid(True)
            plt.tight_layout()
            plot_filename = f"{test_id}_{servo_name}.png"
            plt.savefig(plot_filename)
            print(f"Plot saved as {plot_filename}")

    return servo_data, timestamps
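
# Example usage (editor's sketch; assumes a connected FeetechMotorsBus such as
# robot.arm_bus from the other scripts, and STS control-table parameter names):
#
#     data, ts = log_and_plot_params(
#         robot.arm_bus,
#         params_to_log=["Present_Position", "Present_Load"],
#         servo_names=["elbow_flex"],
#         test_id="elbow_test",
#         interval=0.1,
#         duration=5,
#     )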
Before Width: | Height: | Size: 65 KiB |
Before Width: | Height: | Size: 60 KiB |
Before Width: | Height: | Size: 54 KiB |
@@ -1,68 +0,0 @@
STS_SERIES_CONTROL_TABLE = {
    "Model": (3, 2),
    "ID": (5, 1),
    "Baud_Rate": (6, 1),
    "Return_Delay": (7, 1),
    "Response_Status_Level": (8, 1),
    "Min_Angle_Limit": (9, 2),
    "Max_Angle_Limit": (11, 2),
    "Max_Temperature_Limit": (13, 1),
    "Max_Voltage_Limit": (14, 1),
    "Min_Voltage_Limit": (15, 1),
    "Max_Torque_Limit": (16, 2),
    "Phase": (18, 1),
    "Unloading_Condition": (19, 1),
    "LED_Alarm_Condition": (20, 1),
    "P_Coefficient": (21, 1),
    "D_Coefficient": (22, 1),
    "I_Coefficient": (23, 1),
    "Minimum_Startup_Force": (24, 2),
    "CW_Dead_Zone": (26, 1),
    "CCW_Dead_Zone": (27, 1),
    "Protection_Current": (28, 2),
    "Angular_Resolution": (30, 1),
    "Offset": (31, 2),
    "Mode": (33, 1),
    "Protective_Torque": (34, 1),
    "Protection_Time": (35, 1),
    "Overload_Torque": (36, 1),
    "Speed_closed_loop_P_proportional_coefficient": (37, 1),
    "Over_Current_Protection_Time": (38, 1),
    "Velocity_closed_loop_I_integral_coefficient": (39, 1),
    "Torque_Enable": (40, 1),
    "Acceleration": (41, 1),
    "Goal_Position": (42, 2),
    "Goal_Time": (44, 2),
    "Goal_Speed": (46, 2),
    "Torque_Limit": (48, 2),
    "Lock": (55, 1),
    "Present_Position": (56, 2),
    "Present_Speed": (58, 2),
    "Present_Load": (60, 2),
    "Present_Voltage": (62, 1),
    "Present_Temperature": (63, 1),
    "Status": (65, 1),
    "Moving": (66, 1),
    "Present_Current": (69, 2),
    # Not in the Memory Table
    "Maximum_Acceleration": (85, 2),
}

import time

# Assuming STS_SERIES_CONTROL_TABLE is defined globally


def print_all_params(robot):
    """
    Reads all parameters from the STS_SERIES_CONTROL_TABLE and prints their values.
    """
    for param in STS_SERIES_CONTROL_TABLE.keys():
        try:
            val = robot.arm_bus.read(param)
            print(f"{param} = {val}")
        except Exception as e:
            print(f"{param} read failed: {e}")


# Example usage (requires a connected HopeJuniorRobot instance):
# print_all_params(robot)
@@ -1,26 +0,0 @@
#include <DFRobot_VisualRotaryEncoder.h>

DFRobot_VisualRotaryEncoder_I2C sensor(0x54, &Wire);

void setup()
{
  Serial.begin(115200);

  // Attempt to initialize the sensor
  while (NO_ERR != sensor.begin()) {
    // Failed? Just wait a bit and try again
    delay(3000);
  }
}

void loop()
{
  // Read the encoder value
  uint16_t encoderValue = sensor.getEncoderValue();

  // Print it followed by a newline
  Serial.println(encoderValue);

  // Delay 10 ms between readings
  delay(10);
}
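For completeness, a host-side counterpart to the sketch above (not part of the repo): a minimal sketch assuming pyserial is installed and the board enumerates as `/dev/ttyUSB0`, at the 115200 baud set in `setup()`.

```
# Hypothetical host-side reader for the Arduino sketch above.
# Assumptions: pyserial installed, board on /dev/ttyUSB0.
import serial

with serial.Serial("/dev/ttyUSB0", 115200, timeout=1) as port:
    while True:
        line = port.readline().strip()
        if line:
            print(int(line))  # one encoder reading per printed line
```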
@@ -1,544 +0,0 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "what are the actual registers of interest on the hopejr? make something like a map"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "can we change these dynamically, so that if the arm is moving down we relax it instead of tensing it? for example, decreasing torque when the target position is lower than the actual position."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "arm_calibration = robot.get_arm_calibration()\n",
    "robot.arm_bus.write(\"Goal_Position\", arm_calibration[\"start_pos\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Present Position: [1494]\n",
      "Acceleration Read: [20]\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "from hopejr import HopeJuniorRobot\n",
    "\n",
    "\n",
    "robot = HopeJuniorRobot()\n",
    "robot.connect()\n",
    "\n",
    "# Example read of the current position\n",
    "print(\"Present Position:\", robot.arm_bus.read(\"Present_Position\"))\n",
    "\n",
    "# Enable torque and set acceleration\n",
    "robot.arm_bus.write(\"Torque_Enable\", 1)\n",
    "robot.arm_bus.write(\"Acceleration\", 20)\n",
    "print(\"Acceleration Read:\", robot.arm_bus.read(\"Acceleration\"))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "robot.arm_bus.write(\"Torque_Limit\", 100, [\"elbow_flex\"])\n",
    "robot.arm_bus.write(\"Protective_Torque\", 0, [\"elbow_flex\"])\n",
    "robot.arm_bus.write(\"Acceleration\", 20)\n",
    "robot.arm_bus.write(\"Goal_Position\", [2000], [\"elbow_flex\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([1000, 1000, 1000, 1000, 1000, 1000, 1000])"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "robot.arm_bus.read(\"Max_Torque_Limit\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "robot.arm_bus.write(\"Goal_Position\", [1909, 1977, 1820, 1000, 707, 1941, 1036])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "robot.arm_bus.write(\"Max_Voltage_Limit\", [160, 140, 140, 160, 140, 140, 140])  # so it's not torque limit nor max torque limit, no protective torque or overload torque\n",
    "# it's 1) max voltage limit; min-max angle limits are arbitrarily set for all the motors, max temp is only set for the shoulder\n",
    "# max acceleration is also set, we could lower that in the elbow to make it less responsive to commands basically\n",
    "# so we limit speed and temperature, maybe we should limit torque though; minimum startup force is also important, protection current as well\n",
    "# changed that to 310.\n",
    "# \"Max_Voltage_Limit\" also needs to be changed, different from torque_limit"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model = [777]\n",
      "ID = [7]\n",
      "Baud_Rate = [0]\n",
      "Return_Delay = [0]\n",
      "Response_Status_Level = [1]\n",
      "Min_Angle_Limit = [1140]\n",
      "Max_Angle_Limit = [2750]\n",
      "Max_Temperature_Limit = [70]\n",
      "Max_Voltage_Limit = [140]\n",
      "Min_Voltage_Limit = [40]\n",
      "Max_Torque_Limit = [1000]\n",
      "Phase = [12]\n",
      "Unloading_Condition = [44]\n",
      "LED_Alarm_Condition = [47]\n",
      "P_Coefficient = [32]\n",
      "D_Coefficient = [32]\n",
      "I_Coefficient = [0]\n",
      "Minimum_Startup_Force = [16]\n",
      "CW_Dead_Zone = [1]\n",
      "CCW_Dead_Zone = [1]\n",
      "Protection_Current = [310]\n",
      "Angular_Resolution = [1]\n",
      "Offset = [1047]\n",
      "Mode = [0]\n",
      "Protective_Torque = [20]\n",
      "Protection_Time = [200]\n",
      "Overload_Torque = [80]\n",
      "Speed_closed_loop_P_proportional_coefficient = [10]\n",
      "Over_Current_Protection_Time = [200]\n",
      "Velocity_closed_loop_I_integral_coefficient = [200]\n",
      "Torque_Enable = [1]\n",
      "Acceleration = [20]\n",
      "Goal_Position = [0]\n",
      "Goal_Time = [0]\n",
      "Goal_Speed = [0]\n",
      "Torque_Limit = [1000]\n",
      "Lock = [1]\n",
      "Present_Position = [1494]\n",
      "Present_Speed = [0]\n",
      "Present_Load = [0]\n",
      "Present_Voltage = [123]\n",
      "Present_Temperature = [24]\n",
      "Status = [0]\n",
      "Moving = [0]\n",
      "Present_Current = [0]\n",
      "Maximum_Acceleration = [306]\n"
     ]
    }
   ],
   "source": [
    "STS_SERIES_CONTROL_TABLE = {\n",
    "    \"Model\": (3, 2),\n",
    "    \"ID\": (5, 1),\n",
    "    \"Baud_Rate\": (6, 1),\n",
    "    \"Return_Delay\": (7, 1),\n",
    "    \"Response_Status_Level\": (8, 1),\n",
    "    \"Min_Angle_Limit\": (9, 2),\n",
    "    \"Max_Angle_Limit\": (11, 2),\n",
    "    \"Max_Temperature_Limit\": (13, 1),\n",
    "    \"Max_Voltage_Limit\": (14, 1),\n",
    "    \"Min_Voltage_Limit\": (15, 1),\n",
    "    \"Max_Torque_Limit\": (16, 2),\n",
    "    \"Phase\": (18, 1),\n",
    "    \"Unloading_Condition\": (19, 1),\n",
    "    \"LED_Alarm_Condition\": (20, 1),\n",
    "    \"P_Coefficient\": (21, 1),\n",
    "    \"D_Coefficient\": (22, 1),\n",
    "    \"I_Coefficient\": (23, 1),\n",
    "    \"Minimum_Startup_Force\": (24, 2),\n",
    "    \"CW_Dead_Zone\": (26, 1),\n",
    "    \"CCW_Dead_Zone\": (27, 1),\n",
    "    \"Protection_Current\": (28, 2),\n",
    "    \"Angular_Resolution\": (30, 1),\n",
    "    \"Offset\": (31, 2),\n",
    "    \"Mode\": (33, 1),\n",
    "    \"Protective_Torque\": (34, 1),\n",
    "    \"Protection_Time\": (35, 1),\n",
    "    \"Overload_Torque\": (36, 1),\n",
    "    \"Speed_closed_loop_P_proportional_coefficient\": (37, 1),\n",
    "    \"Over_Current_Protection_Time\": (38, 1),\n",
    "    \"Velocity_closed_loop_I_integral_coefficient\": (39, 1),\n",
    "    \"Torque_Enable\": (40, 1),\n",
    "    \"Acceleration\": (41, 1),\n",
    "    \"Goal_Position\": (42, 2),\n",
    "    \"Goal_Time\": (44, 2),\n",
    "    \"Goal_Speed\": (46, 2),\n",
    "    \"Torque_Limit\": (48, 2),\n",
    "    \"Lock\": (55, 1),\n",
    "    \"Present_Position\": (56, 2),\n",
    "    \"Present_Speed\": (58, 2),\n",
    "    \"Present_Load\": (60, 2),\n",
    "    \"Present_Voltage\": (62, 1),\n",
    "    \"Present_Temperature\": (63, 1),\n",
    "    \"Status\": (65, 1),\n",
    "    \"Moving\": (66, 1),\n",
    "    \"Present_Current\": (69, 2),\n",
    "    # Not in the Memory Table\n",
    "    \"Maximum_Acceleration\": (85, 2),\n",
    "}\n",
    "\n",
    "\n",
    "def print_all_params(robot):\n",
    "    \"\"\"\n",
    "    Reads all parameters from the STS_SERIES_CONTROL_TABLE and prints their values.\n",
    "    \"\"\"\n",
    "    for param in STS_SERIES_CONTROL_TABLE:\n",
    "        try:\n",
    "            val = robot.arm_bus.read(param)\n",
    "            print(f\"{param} = {val}\")\n",
    "        except Exception as e:\n",
    "            print(f\"{param} read failed: {e}\")\n",
    "\n",
    "\n",
    "# Example usage:\n",
    "print_all_params(robot)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "probably max input voltage, we can also look at temperature and "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Acceleration Read: [20 20]\n"
     ]
    }
   ],
   "source": [
    "print(\"Acceleration Read:\", robot.arm_bus.read(\"Acceleration\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
    "robot.arm_bus.write(\"LED_Alarm_Condition\", 2, [\"elbow_flex\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "robot.arm_bus.write(\"Acceleration\", 20, [\"elbow_flex\"])\n",
    "robot.arm_bus.write(\"Acceleration\", 100, [\"wrist_yaw\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "robot.arm_bus.write(\"Goal_Position\", [1000, 1000], [\"elbow_flex\", \"wrist_yaw\"])\n",
    "time.sleep(2)\n",
    "robot.arm_bus.write(\"Goal_Position\", [2000, 2000], [\"elbow_flex\", \"wrist_yaw\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Elbow Flex Current: [1]\n",
      "Elbow Flex Current: [0]\n",
      "Elbow Flex Current: [3]\n",
      "Elbow Flex Current: [1]\n",
      "Elbow Flex Current: [1]\n",
      "Elbow Flex Current: [2]\n",
      "Elbow Flex Current: [1]\n",
      "Elbow Flex Current: [1]\n",
      "Elbow Flex Current: [2]\n",
      "Elbow Flex Current: [1]\n",
      "Elbow Flex Current: [1]\n",
      "Elbow Flex Current: [0]\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[68], line 25\u001b[0m\n\u001b[1;32m 22\u001b[0m time\u001b[38;5;241m.\u001b[39msleep(\u001b[38;5;241m2\u001b[39m)\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 24\u001b[0m \u001b[38;5;66;03m# If current is zero, hold at pos_a for a bit\u001b[39;00m\n\u001b[0;32m---> 25\u001b[0m \u001b[43mtime\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msleep\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "import time\n",
    "\n",
    "# Enable torque on elbow_flex\n",
    "robot.arm_bus.write(\"Torque_Enable\", 1, [\"elbow_flex\"])\n",
    "\n",
    "pos_a = 2500\n",
    "pos_b = 1000\n",
    "\n",
    "robot.arm_bus.write(\"Goal_Position\", pos_a, [\"elbow_flex\"])\n",
    "time.sleep(2)\n",
    "\n",
    "while True:\n",
    "    current_val = robot.arm_bus.read(\"Present_Current\", \"elbow_flex\")\n",
    "    print(\"Elbow Flex Current:\", current_val)\n",
    "\n",
    "    # If the servo is under non-zero load/current, switch position\n",
    "    if current_val > 1:\n",
    "        robot.arm_bus.write(\"Goal_Position\", pos_b, [\"elbow_flex\"])\n",
    "        time.sleep(2)\n",
    "        # Go back to pos_a again\n",
    "        robot.arm_bus.write(\"Goal_Position\", pos_a, [\"elbow_flex\"])\n",
    "        time.sleep(2)\n",
    "    else:\n",
    "        # If current is zero, hold at pos_a for a bit\n",
    "        time.sleep(1)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\"Acceleration\" = 0: infinitely fast\n",
    "\"Acceleration\" = 20: slow\n",
    "elbow_flex is the LED one (4)\n",
    "\n",
    "robot.arm_bus.write(\"LED_Alarm_Condition\", 2, [\"elbow_flex\"])  # on constantly\n",
    "robot.arm_bus.write(\"LED_Alarm_Condition\", 1, [\"elbow_flex\"])  # beeping\n",
    "robot.arm_bus.write(\"LED_Alarm_Condition\", 0, [\"elbow_flex\"])  # off\n",
    "\n",
    "\"Max_Torque_Limit\": (16, 2) is what I have to play around with, or maybe \"Protective_Torque\": (34, 1)\n",
    "\n",
    "robot.arm_bus.write(\"Torque_Enable\", 1, [\"elbow_flex\"])  # 1 can move, 0 can't move\n",
    "\n",
    "robot.arm_bus.write(\"Torque_Limit\", 300, [\"elbow_flex\"])  # how strong/weak the servo is. 0 makes it so that it basically cannot move, but I'd like that value to change dynamically and for it to be weaker\n",
    "\n",
    "robot.arm_bus.write(\"Torque_Limit\", 20, [\"elbow_flex\"])  # 20 in order to get some motion\n",
    "\n",
    "default is 1000\n",
    "\n",
    "robot.arm_bus.write(\"Goal_Speed\", -s, [\"elbow_flex\"])  # changes how fast the servo moves when going to the target; it does not make it move at a specific speed"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "import time\n",
    "\n",
    "# Enable torque on elbow_flex\n",
    "robot.arm_bus.write(\"Torque_Enable\", 1, [\"elbow_flex\"])\n",
    "\n",
    "pos_a = 1000\n",
    "pos_b = 2500\n",
    "\n",
    "robot.arm_bus.write(\"Goal_Position\", pos_a, [\"elbow_flex\"])\n",
    "time.sleep(2)\n",
    "\n",
    "while True:\n",
    "    current_val = robot.arm_bus.read(\"Present_Current\", \"elbow_flex\")\n",
    "    print(\"Elbow Flex Current:\", current_val)\n",
    "\n",
    "    # If the servo is under non-zero load/current, switch position\n",
    "    if current_val > 1:\n",
    "        robot.arm_bus.write(\"Goal_Position\", pos_b, [\"elbow_flex\"])\n",
    "        time.sleep(2)\n",
    "        # Go back to pos_a again\n",
    "        robot.arm_bus.write(\"Goal_Position\", pos_a, [\"elbow_flex\"])\n",
    "        time.sleep(2)\n",
    "    else:\n",
    "        # If current is zero, hold at pos_a for a bit\n",
    "        time.sleep(1)\n",
    "\n",
    "\n",
    "so if current is larger than x then you disable it\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "robot.arm_bus.write(\"LED_Alarm_Condition\", 2, [\"elbow_flex\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0]\n",
      "[0]\n",
      "[2]\n",
      "[4]\n",
      "[0]\n",
      "[0]\n",
      "[0]\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[43], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28mprint\u001b[39m(robot\u001b[38;5;241m.\u001b[39marm_bus\u001b[38;5;241m.\u001b[39mread(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mPresent_Current\u001b[39m\u001b[38;5;124m\"\u001b[39m, [\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124melbow_flex\u001b[39m\u001b[38;5;124m\"\u001b[39m]))\n\u001b[0;32m----> 3\u001b[0m \u001b[43mtime\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msleep\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "while True:\n",
    "    print(robot.arm_bus.read(\"Present_Current\", [\"elbow_flex\"]))\n",
    "    time.sleep(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Max_Voltage_Limit = [160 140 140 160 140 140 80]\n",
      "Min_Angle_Limit = [ 650 1300 1300 1200 600 1725 0]\n",
      "Max_Angle_Limit = [2600 2050 2800 2500 4096 2250 4095]\n",
      "Max_Temperature_Limit = [80 70 70 80 70 70 70]\n",
      "Acceleration = [20 20 20 20 20 20 20]\n",
      "Torque_Limit = [1000 1000 1000 200 1000 1000 1000]\n",
      "Minimum_Startup_Force = [15 16 16 12 16 16 16]\n",
      "Protection_Current = [310 310 310 310 310 310 500]\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "\n",
    "def print_important_params(robot):\n",
    "\n",
    "    # Example parameters you mentioned; adjust as needed\n",
    "    param_list = [\n",
    "        \"Max_Voltage_Limit\",\n",
    "        \"Min_Angle_Limit\",\n",
    "        \"Max_Angle_Limit\",\n",
    "        \"Max_Temperature_Limit\",\n",
    "        \"Acceleration\",  # or \"Maximum_Acceleration\" if you prefer that register\n",
    "        \"Torque_Limit\",  # or \"Max_Torque_Limit\" if your table uses that\n",
    "        \"Minimum_Startup_Force\",\n",
    "        \"Protection_Current\",\n",
    "    ]\n",
    "\n",
    "    for param in param_list:\n",
    "        try:\n",
    "            val = robot.arm_bus.read(param)\n",
    "            print(f\"{param} = {val}\")\n",
    "        except Exception as e:\n",
    "            print(f\"{param} read failed: {e}\")\n",
    "\n",
    "# -------------------------------\n",
    "# Example usage\n",
    "\n",
    "print_important_params(robot)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "lerobot",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
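The notebook's opening markdown cell asks about relaxing the arm on downward motion. A minimal sketch of that idea (not from the repo), assuming the same `robot.arm_bus` read/write API and register names as the notebook, that lower encoder counts correspond to a lower arm pose, and illustrative torque values:

```
# Sketch of the "relax when moving down" idea from the notebook above.
# Assumptions: notebook's bus API; lower counts = lower pose; values illustrative.
def set_torque_for_direction(bus, servo, goal, weak=100, strong=1000):
    present = bus.read("Present_Position", [servo])[0]
    if goal < present:
        # Target is below the current position: gravity helps, so relax.
        bus.write("Torque_Limit", weak, [servo])
    else:
        # Target is above: the servo works against gravity, keep it strong.
        bus.write("Torque_Limit", strong, [servo])
    bus.write("Goal_Position", goal, [servo])
```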
@@ -1,27 +0,0 @@
import time

from hopejr import HopeJuniorRobot


def main():
    # Instantiate and connect to the robot
    robot = HopeJuniorRobot()
    robot.connect()

    # Example read of the current position
    print("Present Position:", robot.arm_bus.read("Present_Position"))

    # Enable torque and set acceleration
    robot.arm_bus.write("Torque_Enable", 1)
    robot.arm_bus.write("Acceleration", 20)
    print("Acceleration Read:", robot.arm_bus.read("Acceleration"))

    # Move elbow_flex and wrist_yaw a few times
    robot.arm_bus.write("Goal_Position", [1000, 1000], ["elbow_flex", "wrist_yaw"])
    time.sleep(2)
    robot.arm_bus.write("Goal_Position", [1500, 1500], ["elbow_flex", "wrist_yaw"])
    time.sleep(2)
    robot.arm_bus.write("Goal_Position", [1000, 1000], ["elbow_flex", "wrist_yaw"])


if __name__ == "__main__":
    main()
@@ -1,49 +0,0 @@
STS_SERIES_CONTROL_TABLE = {
    "Model": (3, 2),
    "ID": (5, 1),
    "Baud_Rate": (6, 1),
    "Return_Delay": (7, 1),
    "Response_Status_Level": (8, 1),
    "Min_Angle_Limit": (9, 2),
    "Max_Angle_Limit": (11, 2),
    "Max_Temperature_Limit": (13, 1),
    "Max_Voltage_Limit": (14, 1),
    "Min_Voltage_Limit": (15, 1),
    "Max_Torque_Limit": (16, 2),
    "Phase": (18, 1),
    "Unloading_Condition": (19, 1),
    "LED_Alarm_Condition": (20, 1),
    "P_Coefficient": (21, 1),
    "D_Coefficient": (22, 1),
    "I_Coefficient": (23, 1),
    "Minimum_Startup_Force": (24, 2),
    "CW_Dead_Zone": (26, 1),
    "CCW_Dead_Zone": (27, 1),
    "Protection_Current": (28, 2),
    "Angular_Resolution": (30, 1),
    "Offset": (31, 2),
    "Mode": (33, 1),
    "Protective_Torque": (34, 1),
    "Protection_Time": (35, 1),
    "Overload_Torque": (36, 1),
    "Speed_closed_loop_P_proportional_coefficient": (37, 1),
    "Over_Current_Protection_Time": (38, 1),
    "Velocity_closed_loop_I_integral_coefficient": (39, 1),
    "Torque_Enable": (40, 1),
    "Acceleration": (41, 1),
    "Goal_Position": (42, 2),
    "Goal_Time": (44, 2),
    "Goal_Speed": (46, 2),
    "Torque_Limit": (48, 2),
    "Lock": (55, 1),
    "Present_Position": (56, 2),
    "Present_Speed": (58, 2),
    "Present_Load": (60, 2),
    "Present_Voltage": (62, 1),
    "Present_Temperature": (63, 1),
    "Status": (65, 1),
    "Moving": (66, 1),
    "Present_Current": (69, 2),
    # Not in the Memory Table
    "Maximum_Acceleration": (85, 2),
}
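Each entry in the table above maps a register name to an `(address, byte size)` pair in the servo's memory map. A tiny sketch of how such pairs are typically consumed (illustrative only; the repo's bus classes wrap this differently):

```
# Illustrative helper over the (address, size) pairs above; not repo code.
def describe(param: str) -> str:
    addr, size = STS_SERIES_CONTROL_TABLE[param]
    return f"{param}: address={addr}, {size} byte(s)"

print(describe("Goal_Position"))  # Goal_Position: address=42, 2 byte(s)
```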
@@ -1,16 +0,0 @@

The first check that kicks in is current:
Protection_Current (310), amperes or something
Present_Current, compared against the protection current
Over_Current_Protection_Time, how long until you shut it down

make a quick update about this

The variables of interest are:
Max_Torque_Limit = 1000,
Present_Load = 1000-something, which triggered the overload torque mechanism
Overload_Torque = 80, how much of the max torque limit do we allow?
Protection_Time = 200, after how long do we set Torque_Enable to 1? *not true, actually
Protective_Torque = 20, after we trigger the safety mechanism, how much torque do we allow the motor to have?

There's actually no temperature or voltage check that the Feetechs perform; the only two are current and torque, which work as described above.
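A small sketch tying the registers from these notes together (not from the repo), assuming the `robot.arm_bus` read API used elsewhere in this diff; the percent interpretation of `Overload_Torque` follows the note above:

```
# Sketch of the two Feetech protections described above: current and torque.
# Assumes the robot.arm_bus read API used elsewhere in this diff.
def check_protections(bus, servo):
    current = bus.read("Present_Current", [servo])[0]
    protection_current = bus.read("Protection_Current", [servo])[0]
    load = bus.read("Present_Load", [servo])[0]
    max_torque = bus.read("Max_Torque_Limit", [servo])[0]
    overload_pct = bus.read("Overload_Torque", [servo])[0]

    # Current check: Present_Current vs Protection_Current, sustained for
    # Over_Current_Protection_Time before the servo shuts itself down.
    if current > protection_current:
        print(f"{servo}: over-current ({current} > {protection_current})")

    # Torque check: Present_Load vs the allowed fraction of Max_Torque_Limit.
    if load > max_torque * overload_pct / 100:
        print(f"{servo}: overload ({load} > {overload_pct}% of {max_torque})")
```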
@@ -27,9 +27,6 @@ Example:
    print(lerobot.available_real_world_datasets)
    print(lerobot.available_policies)
    print(lerobot.available_policies_per_env)
-    print(lerobot.available_robots)
-    print(lerobot.available_cameras)
-    print(lerobot.available_motors)
    ```

When implementing a new dataset loadable with LeRobotDataset follow these steps:
@@ -132,60 +129,13 @@ available_real_world_datasets = [
    "lerobot/unitreeh1_rearrange_objects",
    "lerobot/unitreeh1_two_robot_greeting",
    "lerobot/unitreeh1_warehouse",
-    "lerobot/nyu_rot_dataset",
-    "lerobot/utokyo_saytap",
-    "lerobot/imperialcollege_sawyer_wrist_cam",
-    "lerobot/utokyo_xarm_bimanual",
-    "lerobot/tokyo_u_lsmo",
-    "lerobot/utokyo_pr2_opening_fridge",
-    "lerobot/cmu_franka_exploration_dataset",
-    "lerobot/cmu_stretch",
-    "lerobot/asu_table_top",
-    "lerobot/utokyo_pr2_tabletop_manipulation",
-    "lerobot/utokyo_xarm_pick_and_place",
-    "lerobot/ucsd_kitchen_dataset",
-    "lerobot/austin_buds_dataset",
-    "lerobot/dlr_sara_grid_clamp",
-    "lerobot/conq_hose_manipulation",
-    "lerobot/columbia_cairlab_pusht_real",
-    "lerobot/dlr_sara_pour",
-    "lerobot/dlr_edan_shared_control",
-    "lerobot/ucsd_pick_and_place_dataset",
-    "lerobot/berkeley_cable_routing",
-    "lerobot/nyu_franka_play_dataset",
-    "lerobot/austin_sirius_dataset",
-    "lerobot/cmu_play_fusion",
-    "lerobot/berkeley_gnm_sac_son",
-    "lerobot/nyu_door_opening_surprising_effectiveness",
-    "lerobot/berkeley_fanuc_manipulation",
-    "lerobot/jaco_play",
-    "lerobot/viola",
-    "lerobot/kaist_nonprehensile",
-    "lerobot/berkeley_mvp",
-    "lerobot/uiuc_d3field",
-    "lerobot/berkeley_gnm_recon",
-    "lerobot/austin_sailor_dataset",
-    "lerobot/utaustin_mutex",
-    "lerobot/roboturk",
-    "lerobot/stanford_hydra_dataset",
-    "lerobot/berkeley_autolab_ur5",
-    "lerobot/stanford_robocook",
-    "lerobot/toto",
-    "lerobot/fmb",
-    "lerobot/droid_100",
-    "lerobot/berkeley_rpt",
-    "lerobot/stanford_kuka_multimodal_dataset",
-    "lerobot/iamlab_cmu_pickup_insert",
-    "lerobot/taco_play",
-    "lerobot/berkeley_gnm_cory_hall",
-    "lerobot/usc_cloth_sim",
]

available_datasets = list(
    itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets)
)

-# lists all available policies from `lerobot/common/policies`
+# lists all available policies from `lerobot/common/policies` by their class attribute: `name`.
available_policies = [
    "act",
    "diffusion",
@@ -193,35 +143,12 @@ available_policies = [
    "vqbet",
]

-# lists all available robots from `lerobot/common/robot_devices/robots`
-available_robots = [
-    "koch",
-    "koch_bimanual",
-    "aloha",
-    "so100",
-    "moss",
-]
-
-# lists all available cameras from `lerobot/common/robot_devices/cameras`
-available_cameras = [
-    "opencv",
-    "intelrealsense",
-]
-
-# lists all available motors from `lerobot/common/robot_devices/motors`
-available_motors = [
-    "dynamixel",
-    "feetech",
-]
-
# keys and values refer to yaml files
available_policies_per_env = {
    "aloha": ["act"],
    "pusht": ["diffusion", "vqbet"],
    "xarm": ["tdmpc"],
    "koch_real": ["act_koch_real"],
    "aloha_real": ["act_aloha_real"],
-    "dora_aloha_real": ["act_aloha_real"],
+    "dora_aloha_real": ["act_real"],
}

env_task_pairs = [(env, task) for env, tasks in available_tasks_per_env.items() for task in tasks]
@@ -40,10 +40,6 @@ def get_stats_einops_patterns(dataset, num_workers=0):

    stats_patterns = {}
    for key, feats_type in dataset.features.items():
-        # NOTE: skip language_instruction embedding in stats computation
-        if key == "language_instruction":
-            continue
-
        # sanity check that tensors are not float64
        assert batch[key].dtype != torch.float64

@@ -68,7 +64,7 @@ def get_stats_einops_patterns(dataset, num_workers=0):
    return stats_patterns


-def compute_stats(dataset, batch_size=8, num_workers=8, max_num_samples=None):
+def compute_stats(dataset, batch_size=32, num_workers=16, max_num_samples=None):
    """Compute mean/std and min/max statistics of all data keys in a LeRobotDataset."""
    if max_num_samples is None:
        max_num_samples = len(dataset)
@@ -1,468 +0,0 @@
"""Functions to create an empty dataset, and populate it with frames."""
# TODO(rcadene, aliberts): to adapt as class methods of next version of LeRobotDataset

import concurrent
import json
import logging
import multiprocessing
import shutil
from pathlib import Path

import torch
import tqdm
from PIL import Image

from lerobot.common.datasets.compute_stats import compute_stats
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
from lerobot.common.datasets.push_dataset_to_hub.aloha_hdf5_format import to_hf_dataset
from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, get_default_encoding
from lerobot.common.datasets.utils import calculate_episode_data_index, create_branch
from lerobot.common.datasets.video_utils import encode_video_frames
from lerobot.common.utils.utils import log_say
from lerobot.scripts.push_dataset_to_hub import (
    push_dataset_card_to_hub,
    push_meta_data_to_hub,
    push_videos_to_hub,
    save_meta_data,
)

########################################################################################
# Asynchronous saving of images on disk
########################################################################################


def safe_stop_image_writer(func):
    # TODO(aliberts): Allow to pass custom exceptions
    # (e.g. ThreadServiceExit, KeyboardInterrupt, SystemExit, UnpluggedError, DynamixelCommError)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            image_writer = kwargs.get("dataset", {}).get("image_writer")
            if image_writer is not None:
                print("Waiting for image writer to terminate...")
                stop_image_writer(image_writer, timeout=20)
            raise e

    return wrapper


def save_image(img_tensor, key, frame_index, episode_index, videos_dir: str):
    img = Image.fromarray(img_tensor.numpy())
    path = Path(videos_dir) / f"{key}_episode_{episode_index:06d}" / f"frame_{frame_index:06d}.png"
    path.parent.mkdir(parents=True, exist_ok=True)
    img.save(str(path), quality=100)


def loop_to_save_images_in_threads(image_queue, num_threads):
    if num_threads < 1:
        raise NotImplementedError(f"Only `num_threads>=1` is supported for now, but {num_threads=} given.")

    with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
        futures = []
        while True:
            # Blocks until a frame is available
            frame_data = image_queue.get()

            # As usually done, exit loop when receiving None to stop the worker
            if frame_data is None:
                break

            image, key, frame_index, episode_index, videos_dir = frame_data
            futures.append(executor.submit(save_image, image, key, frame_index, episode_index, videos_dir))

        # Before exiting function, wait for all threads to complete
        with tqdm.tqdm(total=len(futures), desc="Writing images") as progress_bar:
            concurrent.futures.wait(futures)
            progress_bar.update(len(futures))


def start_image_writer_processes(image_queue, num_processes, num_threads_per_process):
    if num_processes < 1:
        raise ValueError(f"Only `num_processes>=1` is supported, but {num_processes=} given.")

    if num_threads_per_process < 1:
        raise NotImplementedError(
            f"Only `num_threads_per_process>=1` is supported for now, but {num_threads_per_process=} given."
        )

    processes = []
    for _ in range(num_processes):
        process = multiprocessing.Process(
            target=loop_to_save_images_in_threads,
            args=(image_queue, num_threads_per_process),
        )
        process.start()
        processes.append(process)
    return processes


def stop_processes(processes, queue, timeout):
    # Send None to each process to signal them to stop
    for _ in processes:
        queue.put(None)

    # Wait at most `timeout` seconds for all processes to terminate
    for process in processes:
        process.join(timeout=timeout)

        # If still not terminated after the timeout, force termination
        if process.is_alive():
            process.terminate()

    # Close the queue, no more items can be put in the queue
    queue.close()

    # Ensure all background queue threads have finished
    queue.join_thread()


def start_image_writer(num_processes, num_threads):
    """This function abstracts away the initialisation of processes and/or threads used to
    save images on disk asynchronously, which is critical to control a robot and record data
    at a high frame rate.

    When `num_processes=0`, it returns a dictionary containing a threads pool of size `num_threads`.
    When `num_processes>0`, it returns a dictionary containing a processes pool of size `num_processes`,
    where each subprocess starts its own threads pool of size `num_threads`.

    The optimal number of processes and threads depends on your computer's capabilities.
    We advise using 4 threads per camera with 0 processes. If the fps is not stable, try increasing or lowering
    the number of threads. If it is still not stable, try using 1 subprocess, or more.
    """
    image_writer = {}

    if num_processes == 0:
        futures = []
        threads_pool = concurrent.futures.ThreadPoolExecutor(max_workers=num_threads)
        image_writer["threads_pool"], image_writer["futures"] = threads_pool, futures
    else:
        # TODO(rcadene): When using num_processes>1, `multiprocessing.Manager().Queue()`
        # might be better than `multiprocessing.Queue()`. Source: https://www.geeksforgeeks.org/python-multiprocessing-queue-vs-multiprocessing-manager-queue
        image_queue = multiprocessing.Queue()
        processes_pool = start_image_writer_processes(
            image_queue, num_processes=num_processes, num_threads_per_process=num_threads
        )
        image_writer["processes_pool"], image_writer["image_queue"] = processes_pool, image_queue

    return image_writer


def async_save_image(image_writer, image, key, frame_index, episode_index, videos_dir):
    """This function abstracts away the asynchronous saving of an image on disk. It uses a dictionary
    called image writer which contains either a pool of processes or a pool of threads.
    """
    if "threads_pool" in image_writer:
        threads_pool, futures = image_writer["threads_pool"], image_writer["futures"]
        futures.append(threads_pool.submit(save_image, image, key, frame_index, episode_index, videos_dir))
    else:
        image_queue = image_writer["image_queue"]
        image_queue.put((image, key, frame_index, episode_index, videos_dir))


def stop_image_writer(image_writer, timeout):
    if "threads_pool" in image_writer:
        futures = image_writer["futures"]
        # Before exiting function, wait for all threads to complete
        with tqdm.tqdm(total=len(futures), desc="Writing images") as progress_bar:
            concurrent.futures.wait(futures, timeout=timeout)
            progress_bar.update(len(futures))
    else:
        processes_pool, image_queue = image_writer["processes_pool"], image_writer["image_queue"]
        stop_processes(processes_pool, image_queue, timeout=timeout)


########################################################################################
# Functions to initialize, resume and populate a dataset
########################################################################################


def init_dataset(
    repo_id,
    root,
    force_override,
    fps,
    video,
    write_images,
    num_image_writer_processes,
    num_image_writer_threads,
):
    local_dir = Path(root) / repo_id
    if local_dir.exists() and force_override:
        shutil.rmtree(local_dir)

    episodes_dir = local_dir / "episodes"
    episodes_dir.mkdir(parents=True, exist_ok=True)

    videos_dir = local_dir / "videos"
    videos_dir.mkdir(parents=True, exist_ok=True)

    # Logic to resume data recording
    rec_info_path = episodes_dir / "data_recording_info.json"
    if rec_info_path.exists():
        with open(rec_info_path) as f:
            rec_info = json.load(f)
        num_episodes = rec_info["last_episode_index"] + 1
    else:
        num_episodes = 0

    dataset = {
        "repo_id": repo_id,
        "local_dir": local_dir,
        "videos_dir": videos_dir,
        "episodes_dir": episodes_dir,
        "fps": fps,
        "video": video,
        "rec_info_path": rec_info_path,
        "num_episodes": num_episodes,
    }

    if write_images:
        # Initialize processes and/or threads dedicated to saving images on disk asynchronously,
        # which is critical to control a robot and record data at a high frame rate.
        image_writer = start_image_writer(
            num_processes=num_image_writer_processes,
            num_threads=num_image_writer_threads,
        )
        dataset["image_writer"] = image_writer

    return dataset


def add_frame(dataset, observation, action):
    if "current_episode" not in dataset:
        # initialize episode dictionary
        ep_dict = {}
        for key in observation:
            if key not in ep_dict:
                ep_dict[key] = []
        for key in action:
            if key not in ep_dict:
                ep_dict[key] = []

        ep_dict["episode_index"] = []
        ep_dict["frame_index"] = []
        ep_dict["timestamp"] = []
        ep_dict["next.done"] = []

        dataset["current_episode"] = ep_dict
        dataset["current_frame_index"] = 0

    ep_dict = dataset["current_episode"]
    episode_index = dataset["num_episodes"]
    frame_index = dataset["current_frame_index"]
    videos_dir = dataset["videos_dir"]
    video = dataset["video"]
    fps = dataset["fps"]

    ep_dict["episode_index"].append(episode_index)
    ep_dict["frame_index"].append(frame_index)
    ep_dict["timestamp"].append(frame_index / fps)
    ep_dict["next.done"].append(False)

    img_keys = [key for key in observation if "image" in key]
    non_img_keys = [key for key in observation if "image" not in key]

    # Save all observed modalities except images
    for key in non_img_keys:
        ep_dict[key].append(observation[key])

    # Save actions
    for key in action:
        ep_dict[key].append(action[key])

    if "image_writer" not in dataset:
        dataset["current_frame_index"] += 1
        return

    # Save images
    image_writer = dataset["image_writer"]
    for key in img_keys:
        imgs_dir = videos_dir / f"{key}_episode_{episode_index:06d}"
        async_save_image(
            image_writer,
            image=observation[key],
            key=key,
            frame_index=frame_index,
            episode_index=episode_index,
            videos_dir=str(videos_dir),
        )

        if video:
            fname = f"{key}_episode_{episode_index:06d}.mp4"
            frame_info = {"path": f"videos/{fname}", "timestamp": frame_index / fps}
        else:
            frame_info = str(imgs_dir / f"frame_{frame_index:06d}.png")

        ep_dict[key].append(frame_info)

    dataset["current_frame_index"] += 1


def delete_current_episode(dataset):
    del dataset["current_episode"]
    del dataset["current_frame_index"]

    # delete temporary images
    episode_index = dataset["num_episodes"]
    videos_dir = dataset["videos_dir"]
    for tmp_imgs_dir in videos_dir.glob(f"*_episode_{episode_index:06d}"):
        shutil.rmtree(tmp_imgs_dir)


def save_current_episode(dataset):
    episode_index = dataset["num_episodes"]
    ep_dict = dataset["current_episode"]
    episodes_dir = dataset["episodes_dir"]
    rec_info_path = dataset["rec_info_path"]

    ep_dict["next.done"][-1] = True

    for key in ep_dict:
        if "observation" in key and "image" not in key:
            ep_dict[key] = torch.stack(ep_dict[key])

    ep_dict["action"] = torch.stack(ep_dict["action"])
    ep_dict["episode_index"] = torch.tensor(ep_dict["episode_index"])
    ep_dict["frame_index"] = torch.tensor(ep_dict["frame_index"])
    ep_dict["timestamp"] = torch.tensor(ep_dict["timestamp"])
    ep_dict["next.done"] = torch.tensor(ep_dict["next.done"])

    ep_path = episodes_dir / f"episode_{episode_index}.pth"
    torch.save(ep_dict, ep_path)

    rec_info = {
        "last_episode_index": episode_index,
    }
    with open(rec_info_path, "w") as f:
        json.dump(rec_info, f)

    # force re-initialization of the episode dictionary during add_frame
    del dataset["current_episode"]

    dataset["num_episodes"] += 1


def encode_videos(dataset, image_keys, play_sounds):
    log_say("Encoding videos", play_sounds)

    num_episodes = dataset["num_episodes"]
    videos_dir = dataset["videos_dir"]
    local_dir = dataset["local_dir"]
    fps = dataset["fps"]

    # Use ffmpeg to convert frames stored as png into mp4 videos
    for episode_index in tqdm.tqdm(range(num_episodes)):
        for key in image_keys:
            # key = f"observation.images.{name}"
            tmp_imgs_dir = videos_dir / f"{key}_episode_{episode_index:06d}"
            fname = f"{key}_episode_{episode_index:06d}.mp4"
            video_path = local_dir / "videos" / fname
            if video_path.exists():
                # Skip if video is already encoded. Could be the case when resuming data recording.
                continue
            # note: `encode_video_frames` is a blocking call. Making it asynchronous shouldn't speed up encoding,
            # since video encoding with ffmpeg already uses multithreading.
            encode_video_frames(tmp_imgs_dir, video_path, fps, overwrite=True)
            shutil.rmtree(tmp_imgs_dir)


def from_dataset_to_lerobot_dataset(dataset, play_sounds):
    log_say("Consolidate episodes", play_sounds)

    num_episodes = dataset["num_episodes"]
    episodes_dir = dataset["episodes_dir"]
    videos_dir = dataset["videos_dir"]
    video = dataset["video"]
    fps = dataset["fps"]
    repo_id = dataset["repo_id"]

    ep_dicts = []
    for episode_index in tqdm.tqdm(range(num_episodes)):
        ep_path = episodes_dir / f"episode_{episode_index}.pth"
        ep_dict = torch.load(ep_path)
        ep_dicts.append(ep_dict)
    data_dict = concatenate_episodes(ep_dicts)

    if video:
        image_keys = [key for key in data_dict if "image" in key]
        encode_videos(dataset, image_keys, play_sounds)

    hf_dataset = to_hf_dataset(data_dict, video)
    episode_data_index = calculate_episode_data_index(hf_dataset)

    info = {
        "codebase_version": CODEBASE_VERSION,
        "fps": fps,
        "video": video,
    }
    if video:
        info["encoding"] = get_default_encoding()

    lerobot_dataset = LeRobotDataset.from_preloaded(
        repo_id=repo_id,
        hf_dataset=hf_dataset,
        episode_data_index=episode_data_index,
        info=info,
        videos_dir=videos_dir,
    )

    return lerobot_dataset


def save_lerobot_dataset_on_disk(lerobot_dataset):
    hf_dataset = lerobot_dataset.hf_dataset
    info = lerobot_dataset.info
    stats = lerobot_dataset.stats
    episode_data_index = lerobot_dataset.episode_data_index
    local_dir = lerobot_dataset.videos_dir.parent
    meta_data_dir = local_dir / "meta_data"

    hf_dataset = hf_dataset.with_format(None)  # to remove transforms that can't be saved
    hf_dataset.save_to_disk(str(local_dir / "train"))

    save_meta_data(info, stats, episode_data_index, meta_data_dir)


def push_lerobot_dataset_to_hub(lerobot_dataset, tags):
    hf_dataset = lerobot_dataset.hf_dataset
    local_dir = lerobot_dataset.videos_dir.parent
    videos_dir = lerobot_dataset.videos_dir
    repo_id = lerobot_dataset.repo_id
    video = lerobot_dataset.video
    meta_data_dir = local_dir / "meta_data"

    if not (local_dir / "train").exists():
        raise ValueError(
            "You need to run `save_lerobot_dataset_on_disk(lerobot_dataset)` before pushing to the hub."
        )

    hf_dataset.push_to_hub(repo_id, revision="main")
    push_meta_data_to_hub(repo_id, meta_data_dir, revision="main")
    push_dataset_card_to_hub(repo_id, revision="main", tags=tags)
    if video:
        push_videos_to_hub(repo_id, videos_dir, revision="main")
    create_branch(repo_id, repo_type="dataset", branch=CODEBASE_VERSION)


def create_lerobot_dataset(dataset, run_compute_stats, push_to_hub, tags, play_sounds):
    if "image_writer" in dataset:
        logging.info("Waiting for image writer to terminate...")
        image_writer = dataset["image_writer"]
        stop_image_writer(image_writer, timeout=20)

    lerobot_dataset = from_dataset_to_lerobot_dataset(dataset, play_sounds)

    if run_compute_stats:
        log_say("Computing dataset statistics", play_sounds)
        lerobot_dataset.stats = compute_stats(lerobot_dataset)
    else:
        logging.info("Skipping computation of the dataset statistics")
        lerobot_dataset.stats = {}

    save_lerobot_dataset_on_disk(lerobot_dataset)

    if push_to_hub:
        push_lerobot_dataset_to_hub(lerobot_dataset, tags)

    return lerobot_dataset
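A minimal end-to-end recording sketch for the deleted file above (not from the repo): `get_observation_and_action()` is a hypothetical placeholder for your robot's capture code, and the repo id and loop sizes are illustrative.

```
# Hypothetical usage of the populate-dataset helpers above.
dataset = init_dataset(
    repo_id="user/my_dataset",  # illustrative repo id
    root="data",
    force_override=False,
    fps=30,
    video=True,
    write_images=True,
    num_image_writer_processes=0,  # 0 processes -> thread pool only
    num_image_writer_threads=4,    # ~4 threads per camera, per the docstring
)

for _ in range(2):  # record two episodes
    for _ in range(100):  # 100 frames per episode
        observation, action = get_observation_and_action()  # placeholder
        add_frame(dataset, observation, action)
    save_current_episode(dataset)

lerobot_dataset = create_lerobot_dataset(
    dataset, run_compute_stats=True, push_to_hub=False, tags=None, play_sounds=False
)
```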
@@ -60,8 +60,8 @@ AVAILABLE_RAW_REPO_IDS = {
    "lerobot-raw/aloha_static_vinh_cup_left_raw": "aloha_hdf5",
    "lerobot-raw/aloha_static_vinh_cup_raw": "aloha_hdf5",
    "lerobot-raw/aloha_static_ziploc_slide_raw": "aloha_hdf5",
-    "lerobot-raw/umi_cup_in_the_wild_raw": "umi_zarr",
    "lerobot-raw/pusht_raw": "pusht_zarr",
+    "lerobot-raw/umi_cup_in_the_wild_raw": "umi_zarr",
    "lerobot-raw/unitreeh1_fold_clothes_raw": "aloha_hdf5",
    "lerobot-raw/unitreeh1_rearrange_objects_raw": "aloha_hdf5",
    "lerobot-raw/unitreeh1_two_robot_greeting_raw": "aloha_hdf5",
@@ -70,74 +70,6 @@ AVAILABLE_RAW_REPO_IDS = {
    "lerobot-raw/xarm_lift_medium_replay_raw": "xarm_pkl",
    "lerobot-raw/xarm_push_medium_raw": "xarm_pkl",
    "lerobot-raw/xarm_push_medium_replay_raw": "xarm_pkl",
-    "lerobot-raw/fractal20220817_data_raw": "openx_rlds.fractal20220817_data",
-    "lerobot-raw/kuka_raw": "openx_rlds.kuka",
-    "lerobot-raw/bridge_openx_raw": "openx_rlds.bridge_openx",
-    "lerobot-raw/taco_play_raw": "openx_rlds.taco_play",
-    "lerobot-raw/jaco_play_raw": "openx_rlds.jaco_play",
-    "lerobot-raw/berkeley_cable_routing_raw": "openx_rlds.berkeley_cable_routing",
-    "lerobot-raw/roboturk_raw": "openx_rlds.roboturk",
-    "lerobot-raw/nyu_door_opening_surprising_effectiveness_raw": "openx_rlds.nyu_door_opening_surprising_effectiveness",
-    "lerobot-raw/viola_raw": "openx_rlds.viola",
-    "lerobot-raw/berkeley_autolab_ur5_raw": "openx_rlds.berkeley_autolab_ur5",
-    "lerobot-raw/toto_raw": "openx_rlds.toto",
-    "lerobot-raw/language_table_raw": "openx_rlds.language_table",
-    "lerobot-raw/columbia_cairlab_pusht_real_raw": "openx_rlds.columbia_cairlab_pusht_real",
-    "lerobot-raw/stanford_kuka_multimodal_dataset_raw": "openx_rlds.stanford_kuka_multimodal_dataset",
-    "lerobot-raw/nyu_rot_dataset_raw": "openx_rlds.nyu_rot_dataset",
-    "lerobot-raw/io_ai_tech_raw": "openx_rlds.io_ai_tech",
-    "lerobot-raw/stanford_hydra_dataset_raw": "openx_rlds.stanford_hydra_dataset",
-    "lerobot-raw/austin_buds_dataset_raw": "openx_rlds.austin_buds_dataset",
-    "lerobot-raw/nyu_franka_play_dataset_raw": "openx_rlds.nyu_franka_play_dataset",
-    "lerobot-raw/maniskill_dataset_raw": "openx_rlds.maniskill_dataset",
-    "lerobot-raw/furniture_bench_dataset_raw": "openx_rlds.furniture_bench_dataset",
-    "lerobot-raw/cmu_franka_exploration_dataset_raw": "openx_rlds.cmu_franka_exploration_dataset",
-    "lerobot-raw/ucsd_kitchen_dataset_raw": "openx_rlds.ucsd_kitchen_dataset",
-    "lerobot-raw/ucsd_pick_and_place_dataset_raw": "openx_rlds.ucsd_pick_and_place_dataset",
-    "lerobot-raw/spoc_raw": "openx_rlds.spoc",
-    "lerobot-raw/austin_sailor_dataset_raw": "openx_rlds.austin_sailor_dataset",
-    "lerobot-raw/austin_sirius_dataset_raw": "openx_rlds.austin_sirius_dataset",
-    "lerobot-raw/bc_z_raw": "openx_rlds.bc_z",
-    "lerobot-raw/utokyo_pr2_opening_fridge_raw": "openx_rlds.utokyo_pr2_opening_fridge",
-    "lerobot-raw/utokyo_pr2_tabletop_manipulation_raw": "openx_rlds.utokyo_pr2_tabletop_manipulation",
-    "lerobot-raw/utokyo_xarm_pick_and_place_raw": "openx_rlds.utokyo_xarm_pick_and_place",
-    "lerobot-raw/utokyo_xarm_bimanual_raw": "openx_rlds.utokyo_xarm_bimanual",
-    "lerobot-raw/utokyo_saytap_raw": "openx_rlds.utokyo_saytap",
-    "lerobot-raw/robo_net_raw": "openx_rlds.robo_net",
-    "lerobot-raw/robo_set_raw": "openx_rlds.robo_set",
-    "lerobot-raw/berkeley_mvp_raw": "openx_rlds.berkeley_mvp",
-    "lerobot-raw/berkeley_rpt_raw": "openx_rlds.berkeley_rpt",
-    "lerobot-raw/kaist_nonprehensile_raw": "openx_rlds.kaist_nonprehensile",
-    "lerobot-raw/stanford_mask_vit_raw": "openx_rlds.stanford_mask_vit",
-    "lerobot-raw/tokyo_u_lsmo_raw": "openx_rlds.tokyo_u_lsmo",
-    "lerobot-raw/dlr_sara_pour_raw": "openx_rlds.dlr_sara_pour",
-    "lerobot-raw/dlr_sara_grid_clamp_raw": "openx_rlds.dlr_sara_grid_clamp",
-    "lerobot-raw/dlr_edan_shared_control_raw": "openx_rlds.dlr_edan_shared_control",
-    "lerobot-raw/asu_table_top_raw": "openx_rlds.asu_table_top",
-    "lerobot-raw/stanford_robocook_raw": "openx_rlds.stanford_robocook",
-    "lerobot-raw/imperialcollege_sawyer_wrist_cam_raw": "openx_rlds.imperialcollege_sawyer_wrist_cam",
-    "lerobot-raw/iamlab_cmu_pickup_insert_raw": "openx_rlds.iamlab_cmu_pickup_insert",
-    "lerobot-raw/uiuc_d3field_raw": "openx_rlds.uiuc_d3field",
-    "lerobot-raw/utaustin_mutex_raw": "openx_rlds.utaustin_mutex",
-    "lerobot-raw/berkeley_fanuc_manipulation_raw": "openx_rlds.berkeley_fanuc_manipulation",
-    "lerobot-raw/cmu_playing_with_food_raw": "openx_rlds.cmu_playing_with_food",
-    "lerobot-raw/cmu_play_fusion_raw": "openx_rlds.cmu_play_fusion",
-    "lerobot-raw/cmu_stretch_raw": "openx_rlds.cmu_stretch",
-    "lerobot-raw/berkeley_gnm_recon_raw": "openx_rlds.berkeley_gnm_recon",
-    "lerobot-raw/berkeley_gnm_cory_hall_raw": "openx_rlds.berkeley_gnm_cory_hall",
-    "lerobot-raw/berkeley_gnm_sac_son_raw": "openx_rlds.berkeley_gnm_sac_son",
-    "lerobot-raw/droid_raw": "openx_rlds.droid",
-    "lerobot-raw/droid_100_raw": "openx_rlds.droid100",
-    "lerobot-raw/fmb_raw": "openx_rlds.fmb",
-    "lerobot-raw/dobbe_raw": "openx_rlds.dobbe",
-    "lerobot-raw/usc_cloth_sim_raw": "openx_rlds.usc_cloth_sim",
-    "lerobot-raw/plex_robosuite_raw": "openx_rlds.plex_robosuite",
-    "lerobot-raw/conq_hose_manipulation_raw": "openx_rlds.conq_hose_manipulation",
-    "lerobot-raw/vima_raw": "openx_rlds.vima",
-    "lerobot-raw/robot_vqa_raw": "openx_rlds.robot_vqa",
-    "lerobot-raw/mimic_play_raw": "openx_rlds.mimic_play",
-    "lerobot-raw/tidybot_raw": "openx_rlds.tidybot",
-    "lerobot-raw/eth_agent_affordances_raw": "openx_rlds.eth_agent_affordances",
}


@@ -178,7 +110,7 @@ def download_all_raw_datasets(data_dir: Path | None = None):
def main():
    parser = argparse.ArgumentParser(
        description=f"""A script to download raw datasets from Hugging Face hub to a local directory. Here is a
-        non exhaustive list of available repositories to use in `--repo-id`: {list(AVAILABLE_RAW_REPO_IDS.keys())}""",
+        non exhaustive list of available repositories to use in `--repo-id`: {AVAILABLE_RAW_REPO_IDS}""",
    )

    parser.add_argument(
@@ -1,639 +0,0 @@
OPENX_DATASET_CONFIGS:
  fractal20220817_data:
    image_obs_keys:
      - image
    depth_obs_keys:
      - null
    state_obs_keys:
      - base_pose_tool_reached
      - gripper_closed
    fps: 3

  kuka:
    image_obs_keys:
      - image
    depth_obs_keys:
      - null
    state_obs_keys:
      - clip_function_input/base_pose_tool_reached
      - gripper_closed
    fps: 10

  bridge_openx:
    image_obs_keys:
      - image
    depth_obs_keys:
      - null
    state_obs_keys:
      - EEF_state
      - gripper_state
    fps: 5

  taco_play:
    image_obs_keys:
      - rgb_static
      - rgb_gripper
    depth_obs_keys:
      - depth_static
      - depth_gripper
    state_obs_keys:
      - state_eef
      - state_gripper
    fps: 15

  jaco_play:
    image_obs_keys:
      - image
      - image_wrist
    depth_obs_keys:
      - null
    state_obs_keys:
      - state_eef
      - state_gripper
    fps: 10

  berkeley_cable_routing:
    image_obs_keys:
      - image
      - top_image
      - wrist45_image
      - wrist225_image
    depth_obs_keys:
      - null
    state_obs_keys:
      - robot_state
    fps: 10

  roboturk:
    image_obs_keys:
      - front_rgb
    depth_obs_keys:
      - null
    state_obs_keys:
      - null
    fps: 10

  nyu_door_opening_surprising_effectiveness:
    image_obs_keys:
      - image
    depth_obs_keys:
      - null
    state_obs_keys:
      - null
    fps: 3

  viola:
    image_obs_keys:
      - agentview_rgb
      - eye_in_hand_rgb
    depth_obs_keys:
      - null
    state_obs_keys:
      - joint_states
      - gripper_states
    fps: 20

  berkeley_autolab_ur5:
    image_obs_keys:
      - image
      - hand_image
    depth_obs_keys:
      - image_with_depth
    state_obs_keys:
      - state
    fps: 5

  toto:
    image_obs_keys:
      - image
    depth_obs_keys:
      - null
    state_obs_keys:
      - state
    fps: 30

  language_table:
    image_obs_keys:
      - rgb
    depth_obs_keys:
      - null
    state_obs_keys:
      - effector_translation
    fps: 10

  columbia_cairlab_pusht_real:
    image_obs_keys:
      - image
      - wrist_image
    depth_obs_keys:
      - null
    state_obs_keys:
      - robot_state
    fps: 10

  stanford_kuka_multimodal_dataset_converted_externally_to_rlds:
    image_obs_keys:
      - image
    depth_obs_keys:
      - depth_image
    state_obs_keys:
      - ee_position
      - ee_orientation
    fps: 20

  nyu_rot_dataset_converted_externally_to_rlds:
    image_obs_keys:
      - image
    depth_obs_keys:
      - null
    state_obs_keys:
      - eef_state
      - gripper_state
    fps: 3

  io_ai_tech:
    image_obs_keys:
      - image
      - image_fisheye
      - image_left_side
      - image_right_side
    depth_obs_keys:
      - null
    state_obs_keys:
      - state
    fps: 3

  stanford_hydra_dataset_converted_externally_to_rlds:
    image_obs_keys:
      - image
      - wrist_image
    depth_obs_keys:
      - null
    state_obs_keys:
      - eef_state
      - gripper_state
    fps: 10

  austin_buds_dataset_converted_externally_to_rlds:
    image_obs_keys:
      - image
      - wrist_image
    depth_obs_keys:
      - null
    state_obs_keys:
      - state
    fps: 20

  nyu_franka_play_dataset_converted_externally_to_rlds:
    image_obs_keys:
      - image
      - image_additional_view
    depth_obs_keys:
      - depth
      - depth_additional_view
    state_obs_keys:
      - eef_state
    fps: 3

  maniskill_dataset_converted_externally_to_rlds:
    image_obs_keys:
      - image
      - wrist_image
    depth_obs_keys:
      - depth
      - wrist_depth
    state_obs_keys:
      - tcp_pose
      - gripper_state
    fps: 20

  furniture_bench_dataset_converted_externally_to_rlds:
    image_obs_keys:
      - image
      - wrist_image
    depth_obs_keys:
      - null
    state_obs_keys:
      - state
    fps: 10

  cmu_franka_exploration_dataset_converted_externally_to_rlds:
    image_obs_keys:
      - highres_image
    depth_obs_keys:
      - null
    state_obs_keys:
      - null
    fps: 10

  ucsd_kitchen_dataset_converted_externally_to_rlds:
    image_obs_keys:
      - image
    depth_obs_keys:
      - null
    state_obs_keys:
      - joint_state
    fps: 2

  ucsd_pick_and_place_dataset_converted_externally_to_rlds:
    image_obs_keys:
      - image
    depth_obs_keys:
      - null
    state_obs_keys:
      - eef_state
      - gripper_state
    fps: 3

  spoc:
    image_obs_keys:
      - image
      - image_manipulation
    depth_obs_keys:
      - null
    state_obs_keys:
      - null
    fps: 3

  austin_sailor_dataset_converted_externally_to_rlds:
    image_obs_keys:
      - image
      - wrist_image
    depth_obs_keys:
      - null
    state_obs_keys:
      - state
    fps: 20

  austin_sirius_dataset_converted_externally_to_rlds:
    image_obs_keys:
      - image
      - wrist_image
    depth_obs_keys:
      - null
    state_obs_keys:
      - state
    fps: 20

  bc_z:
    image_obs_keys:
      - image
    depth_obs_keys:
      - null
    state_obs_keys:
      - present/xyz
      - present/axis_angle
      - present/sensed_close
    fps: 10

  utokyo_pr2_opening_fridge_converted_externally_to_rlds:
    image_obs_keys:
      - image
    depth_obs_keys:
      - null
    state_obs_keys:
      - eef_state
      - gripper_state
    fps: 10

  utokyo_pr2_tabletop_manipulation_converted_externally_to_rlds:
    image_obs_keys:
      - image
    depth_obs_keys:
      - null
    state_obs_keys:
      - eef_state
      - gripper_state
    fps: 10

  utokyo_xarm_pick_and_place_converted_externally_to_rlds:
    image_obs_keys:
      - image
      - image2
      - hand_image
    depth_obs_keys:
      - null
    state_obs_keys:
      - end_effector_pose
    fps: 10

  utokyo_xarm_bimanual_converted_externally_to_rlds:
    image_obs_keys:
      - image
    depth_obs_keys:
      - null
    state_obs_keys:
      - pose_r
    fps: 10

  robo_net:
    image_obs_keys:
      - image
      - image1
    depth_obs_keys:
      - null
    state_obs_keys:
      - eef_state
      - gripper_state
    fps: 1

  robo_set:
    image_obs_keys:
      - image_left
      - image_right
      - image_wrist
    depth_obs_keys:
      - null
    state_obs_keys:
      - state
      - state_velocity
    fps: 5

  berkeley_mvp_converted_externally_to_rlds:
    image_obs_keys:
      - hand_image
    depth_obs_keys:
      - null
    state_obs_keys:
      - gripper
      - pose
      - joint_pos
    fps: 5

  berkeley_rpt_converted_externally_to_rlds:
    image_obs_keys:
      - hand_image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- joint_pos
|
||||
- gripper
|
||||
fps: 30
|
||||
|
||||
kaist_nonprehensile_converted_externally_to_rlds:
|
||||
image_obs_keys:
|
||||
- image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- state
|
||||
fps: 10
|
||||
|
||||
stanford_mask_vit_converted_externally_to_rlds:
|
||||
image_obs_keys:
|
||||
- image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- eef_state
|
||||
- gripper_state
|
||||
|
||||
tokyo_u_lsmo_converted_externally_to_rlds:
|
||||
image_obs_keys:
|
||||
- image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- eef_state
|
||||
- gripper_state
|
||||
fps: 10
|
||||
|
||||
dlr_sara_pour_converted_externally_to_rlds:
|
||||
image_obs_keys:
|
||||
- image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- state
|
||||
fps: 10
|
||||
|
||||
dlr_sara_grid_clamp_converted_externally_to_rlds:
|
||||
image_obs_keys:
|
||||
- image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- state
|
||||
fps: 10
|
||||
|
||||
dlr_edan_shared_control_converted_externally_to_rlds:
|
||||
image_obs_keys:
|
||||
- image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- state
|
||||
fps: 5
|
||||
|
||||
asu_table_top_converted_externally_to_rlds:
|
||||
image_obs_keys:
|
||||
- image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- eef_state
|
||||
- gripper_state
|
||||
fps: 12.5
|
||||
|
||||
stanford_robocook_converted_externally_to_rlds:
|
||||
image_obs_keys:
|
||||
- image_1
|
||||
- image_2
|
||||
depth_obs_keys:
|
||||
- depth_1
|
||||
- depth_2
|
||||
state_obs_keys:
|
||||
- eef_state
|
||||
- gripper_state
|
||||
fps: 5
|
||||
|
||||
imperialcollege_sawyer_wrist_cam:
|
||||
image_obs_keys:
|
||||
- image
|
||||
- wrist_image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- state
|
||||
fps: 10
|
||||
|
||||
iamlab_cmu_pickup_insert_converted_externally_to_rlds:
|
||||
image_obs_keys:
|
||||
- image
|
||||
- wrist_image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- joint_state
|
||||
- gripper_state
|
||||
fps: 20
|
||||
|
||||
uiuc_d3field:
|
||||
image_obs_keys:
|
||||
- image_1
|
||||
- image_2
|
||||
depth_obs_keys:
|
||||
- depth_1
|
||||
- depth_2
|
||||
state_obs_keys:
|
||||
- null
|
||||
fps: 1
|
||||
|
||||
utaustin_mutex:
|
||||
image_obs_keys:
|
||||
- image
|
||||
- wrist_image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- state
|
||||
fps: 20
|
||||
|
||||
berkeley_fanuc_manipulation:
|
||||
image_obs_keys:
|
||||
- image
|
||||
- wrist_image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- joint_state
|
||||
- gripper_state
|
||||
fps: 10
|
||||
|
||||
cmu_playing_with_food:
|
||||
image_obs_keys:
|
||||
- image
|
||||
- finger_vision_1
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- state
|
||||
fps: 10
|
||||
|
||||
cmu_play_fusion:
|
||||
image_obs_keys:
|
||||
- image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- state
|
||||
fps: 5
|
||||
|
||||
cmu_stretch:
|
||||
image_obs_keys:
|
||||
- image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- eef_state
|
||||
- gripper_state
|
||||
fps: 10
|
||||
|
||||
berkeley_gnm_recon:
|
||||
image_obs_keys:
|
||||
- image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- state
|
||||
- position
|
||||
- yaw
|
||||
fps: 3
|
||||
|
||||
berkeley_gnm_cory_hall:
|
||||
image_obs_keys:
|
||||
- image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- state
|
||||
- position
|
||||
- yaw
|
||||
fps: 5
|
||||
|
||||
berkeley_gnm_sac_son:
|
||||
image_obs_keys:
|
||||
- image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- state
|
||||
- position
|
||||
- yaw
|
||||
fps: 10
|
||||
|
||||
droid:
|
||||
image_obs_keys:
|
||||
- exterior_image_1_left
|
||||
- exterior_image_2_left
|
||||
- wrist_image_left
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- proprio
|
||||
fps: 15
|
||||
|
||||
droid_100:
|
||||
image_obs_keys:
|
||||
- exterior_image_1_left
|
||||
- exterior_image_2_left
|
||||
- wrist_image_left
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- proprio
|
||||
fps: 15
|
||||
|
||||
fmb:
|
||||
image_obs_keys:
|
||||
- image_side_1
|
||||
- image_side_2
|
||||
- image_wrist_1
|
||||
- image_wrist_2
|
||||
depth_obs_keys:
|
||||
- image_side_1_depth
|
||||
- image_side_2_depth
|
||||
- image_wrist_1_depth
|
||||
- image_wrist_2_depth
|
||||
state_obs_keys:
|
||||
- proprio
|
||||
fps: 10
|
||||
|
||||
dobbe:
|
||||
image_obs_keys:
|
||||
- wrist_image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- proprio
|
||||
fps: 3.75
|
||||
|
||||
usc_cloth_sim_converted_externally_to_rlds:
|
||||
image_obs_keys:
|
||||
- image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- null
|
||||
fps: 10
|
||||
|
||||
plex_robosuite:
|
||||
image_obs_keys:
|
||||
- image
|
||||
- wrist_image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- state
|
||||
fps: 20
|
||||
|
||||
conq_hose_manipulation:
|
||||
image_obs_keys:
|
||||
- frontleft_fisheye_image
|
||||
- frontright_fisheye_image
|
||||
- hand_color_image
|
||||
depth_obs_keys:
|
||||
- null
|
||||
state_obs_keys:
|
||||
- state
|
||||
fps: 30
|
||||
@@ -1,106 +0,0 @@
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NOTE(YL): Adapted from:
    Octo: https://github.com/octo-models/octo/blob/main/octo/data/utils/data_utils.py

data_utils.py

Additional utils for data processing.
"""

from typing import Any, Dict, List

import tensorflow as tf


def binarize_gripper_actions(actions: tf.Tensor) -> tf.Tensor:
    """
    Converts gripper actions from continuous to binary values (0 and 1).

    We exploit the fact that most of the time, the gripper is fully open (near 1.0) or fully closed (near 0.0). As it
    transitions between the two, it sometimes passes through a few intermediate values. We relabel those intermediate
    values based on the state that is reached _after_ those intermediate values.

    In the edge case that the trajectory ends with an intermediate value, we give up on binarizing and relabel that
    chunk of intermediate values as the last action in the trajectory.

    The `scan_fn` implements the following logic:
        new_actions = np.empty_like(actions)
        carry = actions[-1]
        for i in reversed(range(actions.shape[0])):
            if in_between_mask[i]:
                carry = carry
            else:
                carry = float(open_mask[i])
            new_actions[i] = carry
    """
    open_mask, closed_mask = actions > 0.95, actions < 0.05
    in_between_mask = tf.logical_not(tf.logical_or(open_mask, closed_mask))
    is_open_float = tf.cast(open_mask, tf.float32)

    def scan_fn(carry, i):
        return tf.cond(in_between_mask[i], lambda: tf.cast(carry, tf.float32), lambda: is_open_float[i])

    return tf.scan(scan_fn, tf.range(tf.shape(actions)[0]), actions[-1], reverse=True)
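As a sanity check on the logic above, here is a hedged toy example (the sample values are made up for illustration); intermediate values inherit the next settled state reached after them:

import tensorflow as tf

# Open, two intermediate values, fully closed, one intermediate, open again.
actions = tf.constant([1.0, 0.9, 0.5, 0.0, 0.2, 1.0])
# Expected relabeled output: [1., 0., 0., 0., 1., 1.]
print(binarize_gripper_actions(actions))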


def invert_gripper_actions(actions: tf.Tensor) -> tf.Tensor:
    return 1 - actions


def rel2abs_gripper_actions(actions: tf.Tensor) -> tf.Tensor:
    """
    Converts relative gripper actions (+1 for closing, -1 for opening) to absolute actions (0 = closed; 1 = open).

    Assumes that the first relative gripper action is not redundant (i.e. close when already closed)!
    """
    # Note =>> `thresholded_actions` uses -1 for closing, +1 for opening, 0 for no change
    opening_mask, closing_mask = actions < -0.1, actions > 0.1
    thresholded_actions = tf.where(opening_mask, 1, tf.where(closing_mask, -1, 0))

    def scan_fn(carry, i):
        return tf.cond(thresholded_actions[i] == 0, lambda: carry, lambda: thresholded_actions[i])

    # If no relative grasp, assumes open for whole trajectory
    start = -1 * thresholded_actions[tf.argmax(thresholded_actions != 0, axis=0)]
    start = tf.cond(start == 0, lambda: 1, lambda: start)

    # Note =>> -1 for closed, 1 for open
    new_actions = tf.scan(scan_fn, tf.range(tf.shape(actions)[0]), start)
    new_actions = tf.cast(new_actions, tf.float32) / 2 + 0.5

    return new_actions
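A hedged toy example of this integration (values made up): a close command at step 2 and an open command at step 4 produce a piecewise-constant absolute signal:

import tensorflow as tf

rel = tf.constant([0.0, 0.0, 1.0, 0.0, -1.0, 0.0])  # +1 = close at t=2, -1 = open at t=4
# Expected absolute open/closed states: [1., 1., 0., 0., 1., 1.]
print(rel2abs_gripper_actions(rel))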


# === Bridge-V2 =>> Dataset-Specific Transform ===
def relabel_bridge_actions(traj: Dict[str, Any]) -> Dict[str, Any]:
    """Relabels actions to use reached proprioceptive state; discards last timestep (no-action)."""
    movement_actions = traj["observation"]["state"][1:, :6] - traj["observation"]["state"][:-1, :6]
    traj_truncated = tf.nest.map_structure(lambda x: x[:-1], traj)
    traj_truncated["action"] = tf.concat([movement_actions, traj["action"][:-1, -1:]], axis=1)

    return traj_truncated
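A minimal sketch of what the relabeling computes, on a made-up 3-step trajectory: state deltas become the first six action dimensions, and the final no-action timestep is dropped:

import tensorflow as tf

traj = {
    "observation": {"state": tf.constant([[0.0] * 7, [1.0] * 7, [3.0] * 7])},
    "action": tf.constant([[0.5] * 7] * 3),
}
out = relabel_bridge_actions(traj)
# out["action"] has shape (2, 7): rows [1., ..., 1., 0.5] and [2., ..., 2., 0.5]
print(out["action"])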


# === RLDS Dataset Initialization Utilities ===
def pprint_data_mixture(dataset_kwargs_list: List[Dict[str, Any]], dataset_weights: List[int]) -> None:
    print("\n######################################################################################")
    print(f"# Loading the following {len(dataset_kwargs_list)} datasets (incl. sampling weight):{'': >24} #")
    for dataset_kwargs, weight in zip(dataset_kwargs_list, dataset_weights, strict=False):
        pad = 80 - len(dataset_kwargs["name"])
        print(f"# {dataset_kwargs['name']}: {weight:=>{pad}f} #")
    print("######################################################################################\n")
@@ -1,200 +0,0 @@
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NOTE(YL): Adapted from:
    OpenVLA: https://github.com/openvla/openvla

Episode transforms for DROID dataset.
"""

from typing import Any, Dict

import tensorflow as tf
import tensorflow_graphics.geometry.transformation as tfg


def rmat_to_euler(rot_mat):
    return tfg.euler.from_rotation_matrix(rot_mat)


def euler_to_rmat(euler):
    return tfg.rotation_matrix_3d.from_euler(euler)


def invert_rmat(rot_mat):
    return tfg.rotation_matrix_3d.inverse(rot_mat)


def rotmat_to_rot6d(mat):
    """
    Converts rotation matrix to R6 rotation representation (first two rows in rotation matrix).
    Args:
        mat: rotation matrix

    Returns: 6d vector (first two rows of rotation matrix)

    """
    r6 = mat[..., :2, :]
    r6_0, r6_1 = r6[..., 0, :], r6[..., 1, :]
    r6_flat = tf.concat([r6_0, r6_1], axis=-1)
    return r6_flat
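A quick hedged check of this representation: the identity rotation maps to the canonical R6 vector (its first two rows, flattened):

import tensorflow as tf

eye = tf.eye(3, batch_shape=[1])
# Expected: [[1., 0., 0., 0., 1., 0.]]
print(rotmat_to_rot6d(eye))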


def velocity_act_to_wrist_frame(velocity, wrist_in_robot_frame):
    """
    Translates velocity actions (translation + rotation) from base frame of the robot to wrist frame.
    Args:
        velocity: 6d velocity action (3 x translation, 3 x rotation)
        wrist_in_robot_frame: 6d pose of the end-effector in robot base frame

    Returns: 9d velocity action in robot wrist frame (3 x translation, 6 x rotation as R6)

    """
    r_frame = euler_to_rmat(wrist_in_robot_frame[:, 3:6])
    r_frame_inv = invert_rmat(r_frame)

    # world to wrist: dT_pi = R^-1 dT_rbt
    vel_t = (r_frame_inv @ velocity[:, :3][..., None])[..., 0]

    # world to wrist: dR_pi = R^-1 dR_rbt R
    dr_ = euler_to_rmat(velocity[:, 3:6])
    dr_ = r_frame_inv @ (dr_ @ r_frame)
    dr_r6 = rotmat_to_rot6d(dr_)
    return tf.concat([vel_t, dr_r6], axis=-1)
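A hedged sanity check (requires tensorflow_graphics; values made up): with the wrist at zero Euler angles, the frame change is the identity, so the translation passes through and the rotation part is the R6 identity:

import tensorflow as tf

velocity = tf.constant([[0.1, 0.0, 0.0, 0.0, 0.0, 0.0]])
wrist_pose = tf.zeros((1, 6))
# Expected: [[0.1, 0., 0., 1., 0., 0., 0., 1., 0.]]
print(velocity_act_to_wrist_frame(velocity, wrist_pose))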


def rand_swap_exterior_images(img1, img2):
    """
    Randomly swaps the two exterior images (for training with single exterior input).
    """
    return tf.cond(tf.random.uniform(shape=[]) > 0.5, lambda: (img1, img2), lambda: (img2, img1))


def droid_baseact_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    """
    DROID dataset transformation for actions expressed in *base* frame of the robot.
    """
    dt = trajectory["action_dict"]["cartesian_velocity"][:, :3]
    dr_ = trajectory["action_dict"]["cartesian_velocity"][:, 3:6]

    trajectory["action"] = tf.concat(
        (
            dt,
            dr_,
            1 - trajectory["action_dict"]["gripper_position"],
        ),
        axis=-1,
    )
    trajectory["observation"]["exterior_image_1_left"], trajectory["observation"]["exterior_image_2_left"] = (
        rand_swap_exterior_images(
            trajectory["observation"]["exterior_image_1_left"],
            trajectory["observation"]["exterior_image_2_left"],
        )
    )
    trajectory["observation"]["proprio"] = tf.concat(
        (
            trajectory["observation"]["cartesian_position"],
            trajectory["observation"]["gripper_position"],
        ),
        axis=-1,
    )
    return trajectory


def droid_wristact_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    """
    DROID dataset transformation for actions expressed in *wrist* frame of the robot.
    """
    wrist_act = velocity_act_to_wrist_frame(
        trajectory["action_dict"]["cartesian_velocity"], trajectory["observation"]["cartesian_position"]
    )
    trajectory["action"] = tf.concat(
        (
            wrist_act,
            trajectory["action_dict"]["gripper_position"],
        ),
        axis=-1,
    )
    trajectory["observation"]["exterior_image_1_left"], trajectory["observation"]["exterior_image_2_left"] = (
        rand_swap_exterior_images(
            trajectory["observation"]["exterior_image_1_left"],
            trajectory["observation"]["exterior_image_2_left"],
        )
    )
    trajectory["observation"]["proprio"] = tf.concat(
        (
            trajectory["observation"]["cartesian_position"],
            trajectory["observation"]["gripper_position"],
        ),
        axis=-1,
    )
    return trajectory


def droid_finetuning_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    """
    DROID dataset transformation for actions expressed in *base* frame of the robot.
    """
    dt = trajectory["action_dict"]["cartesian_velocity"][:, :3]
    dr_ = trajectory["action_dict"]["cartesian_velocity"][:, 3:6]
    trajectory["action"] = tf.concat(
        (
            dt,
            dr_,
            1 - trajectory["action_dict"]["gripper_position"],
        ),
        axis=-1,
    )
    trajectory["observation"]["proprio"] = tf.concat(
        (
            trajectory["observation"]["cartesian_position"],
            trajectory["observation"]["gripper_position"],
        ),
        axis=-1,
    )
    return trajectory


def zero_action_filter(traj: Dict) -> bool:
    """
    Filters transitions whose actions are all-0 (only relative actions, no gripper action).
    Note: this filter is applied *after* action normalization, so we need to compare to "normalized 0".
    """
    droid_q01 = tf.convert_to_tensor(
        [
            -0.7776297926902771,
            -0.5803514122962952,
            -0.5795090794563293,
            -0.6464047729969025,
            -0.7041108310222626,
            -0.8895104378461838,
        ]
    )
    droid_q99 = tf.convert_to_tensor(
        [
            0.7597932070493698,
            0.5726242214441299,
            0.7351000607013702,
            0.6705610305070877,
            0.6464948207139969,
            0.8897542208433151,
        ]
    )
    droid_norm_0_act = (
        2 * (tf.zeros_like(traj["action"][:, :6]) - droid_q01) / (droid_q99 - droid_q01 + 1e-8) - 1
    )

    return tf.reduce_any(tf.math.abs(traj["action"][:, :6] - droid_norm_0_act) > 1e-5)
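A hedged usage sketch: in a tf.data pipeline of trajectory dicts (the `dataset` variable here is hypothetical), the filter drops trajectories whose normalized actions are all zero:

dataset = dataset.filter(zero_action_filter)  # keep only trajectories with non-zero actions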
@@ -1,859 +0,0 @@
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NOTE(YL): Adapted from:
    OpenVLA: https://github.com/openvla/openvla
    Octo: https://github.com/octo-models/octo

transforms.py

Defines a registry of per-dataset standardization transforms for each dataset in Open-X Embodiment.

Transforms adopt the following structure:
    Input: Dictionary of *batched* features (i.e., has leading time dimension)
    Output: Dictionary `step` =>> {
        "observation": {
            <image_keys, depth_image_keys>
            State (in chosen state representation)
        },
        "action": Action (in chosen action representation),
        "language_instruction": str
    }
"""

from typing import Any, Dict

import tensorflow as tf

from lerobot.common.datasets.push_dataset_to_hub.openx.data_utils import (
    binarize_gripper_actions,
    invert_gripper_actions,
    rel2abs_gripper_actions,
    relabel_bridge_actions,
)


def droid_baseact_transform_fn():
    from lerobot.common.datasets.push_dataset_to_hub.openx.droid_utils import droid_baseact_transform

    return droid_baseact_transform


def bridge_openx_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    """
    Applies to the version of Bridge V2 in the Open X-Embodiment mixture.

    Note =>> In original Bridge V2 dataset, the first timestep has an all-zero action, so we remove it!
    """
    for key in trajectory:
        if key == "traj_metadata":
            continue
        elif key in ["observation", "action"]:
            for key2 in trajectory[key]:
                trajectory[key][key2] = trajectory[key][key2][1:]
        else:
            trajectory[key] = trajectory[key][1:]

    trajectory["action"] = tf.concat(
        (
            trajectory["action"]["world_vector"],
            trajectory["action"]["rotation_delta"],
            tf.cast(trajectory["action"]["open_gripper"][:, None], tf.float32),
        ),
        axis=-1,
    )
    trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
    trajectory = relabel_bridge_actions(trajectory)
    trajectory["observation"]["EEF_state"] = trajectory["observation"]["state"][:, :6]
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
    return trajectory


def bridge_orig_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    """
    Applies to the original version of Bridge V2 from the official project website.

    Note =>> In original Bridge V2 dataset, the first timestep has an all-zero action, so we remove it!
    """
    for key in trajectory:
        if key == "traj_metadata":
            continue
        elif key == "observation":
            for key2 in trajectory[key]:
                trajectory[key][key2] = trajectory[key][key2][1:]
        else:
            trajectory[key] = trajectory[key][1:]

    trajectory["action"] = tf.concat(
        [
            trajectory["action"][:, :6],
            binarize_gripper_actions(trajectory["action"][:, -1])[:, None],
        ],
        axis=1,
    )
    trajectory = relabel_bridge_actions(trajectory)
    trajectory["observation"]["EEF_state"] = trajectory["observation"]["state"][:, :6]
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
    return trajectory


def ppgm_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["action"] = tf.concat(
        [
            trajectory["action"][:, :6],
            binarize_gripper_actions(trajectory["action"][:, -1])[:, None],
        ],
        axis=1,
    )
    trajectory["observation"]["EEF_state"] = trajectory["observation"]["cartesian_position"][:, :6]
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["gripper_position"][:, -1:]
    return trajectory


def rt1_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # make gripper action absolute action, +1 = open, 0 = close
    gripper_action = trajectory["action"]["gripper_closedness_action"][:, 0]
    gripper_action = rel2abs_gripper_actions(gripper_action)

    trajectory["action"] = tf.concat(
        (
            trajectory["action"]["world_vector"],
            trajectory["action"]["rotation_delta"],
            gripper_action[:, None],
        ),
        axis=-1,
    )
    trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
    return trajectory


def kuka_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # make gripper action absolute action, +1 = open, 0 = close
    gripper_action = trajectory["action"]["gripper_closedness_action"][:, 0]
    gripper_action = rel2abs_gripper_actions(gripper_action)

    trajectory["action"] = tf.concat(
        (
            trajectory["action"]["world_vector"],
            trajectory["action"]["rotation_delta"],
            gripper_action[:, None],
        ),
        axis=-1,
    )
    # decode compressed state
    eef_value = tf.io.decode_compressed(
        trajectory["observation"]["clip_function_input/base_pose_tool_reached"],
        compression_type="ZLIB",
    )
    eef_value = tf.io.decode_raw(eef_value, tf.float32)
    trajectory["observation"]["clip_function_input/base_pose_tool_reached"] = tf.reshape(eef_value, (-1, 7))
    gripper_value = tf.io.decode_compressed(
        trajectory["observation"]["gripper_closed"], compression_type="ZLIB"
    )
    gripper_value = tf.io.decode_raw(gripper_value, tf.float32)
    trajectory["observation"]["gripper_closed"] = tf.reshape(gripper_value, (-1, 1))
    trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
    return trajectory


def taco_play_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["state_eef"] = trajectory["observation"]["robot_obs"][:, :6]
    trajectory["observation"]["state_gripper"] = trajectory["observation"]["robot_obs"][:, 7:8]
    trajectory["action"] = trajectory["action"]["rel_actions_world"]

    # invert gripper action + clip, +1 = open, 0 = close
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :6],
            tf.clip_by_value(trajectory["action"][:, -1:], 0, 1),
        ),
        axis=-1,
    )

    trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
    return trajectory


def jaco_play_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["state_eef"] = trajectory["observation"]["end_effector_cartesian_pos"][:, :6]
    trajectory["observation"]["state_gripper"] = trajectory["observation"]["end_effector_cartesian_pos"][
        :, -1:
    ]

    # make gripper action absolute action, +1 = open, 0 = close
    gripper_action = trajectory["action"]["gripper_closedness_action"][:, 0]
    gripper_action = rel2abs_gripper_actions(gripper_action)

    trajectory["action"] = tf.concat(
        (
            trajectory["action"]["world_vector"],
            tf.zeros_like(trajectory["action"]["world_vector"]),
            gripper_action[:, None],
        ),
        axis=-1,
    )
    trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
    return trajectory


def berkeley_cable_routing_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["action"] = tf.concat(
        (
            trajectory["action"]["world_vector"],
            trajectory["action"]["rotation_delta"],
            tf.zeros_like(trajectory["action"]["world_vector"][:, :1]),
        ),
        axis=-1,
    )
    trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
    return trajectory


def roboturk_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # invert absolute gripper action, +1 = open, 0 = close
    gripper_action = invert_gripper_actions(
        tf.clip_by_value(trajectory["action"]["gripper_closedness_action"], 0, 1)
    )

    trajectory["action"] = tf.concat(
        (
            trajectory["action"]["world_vector"],
            trajectory["action"]["rotation_delta"],
            gripper_action,
        ),
        axis=-1,
    )
    trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
    trajectory["language_embedding"] = trajectory["observation"]["natural_language_embedding"]
    return trajectory


def nyu_door_opening_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # make gripper action absolute action, +1 = open, 0 = close
    gripper_action = trajectory["action"]["gripper_closedness_action"][:, 0]
    gripper_action = rel2abs_gripper_actions(gripper_action)

    trajectory["action"] = tf.concat(
        (
            trajectory["action"]["world_vector"],
            trajectory["action"]["rotation_delta"],
            gripper_action[:, None],
        ),
        axis=-1,
    )
    trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
    return trajectory


def viola_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # make gripper action, +1 = open, 0 = close
    gripper_action = trajectory["action"]["gripper_closedness_action"][:, None]
    gripper_action = tf.clip_by_value(gripper_action, 0, 1)
    gripper_action = invert_gripper_actions(gripper_action)

    trajectory["action"] = tf.concat(
        (
            trajectory["action"]["world_vector"],
            trajectory["action"]["rotation_delta"],
            gripper_action,
        ),
        axis=-1,
    )
    trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
    return trajectory


def berkeley_autolab_ur5_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["state"] = trajectory["observation"]["robot_state"][:, 6:14]

    # make gripper action absolute action, +1 = open, 0 = close
    gripper_action = trajectory["action"]["gripper_closedness_action"]
    gripper_action = rel2abs_gripper_actions(gripper_action)

    trajectory["action"] = tf.concat(
        (
            trajectory["action"]["world_vector"],
            trajectory["action"]["rotation_delta"],
            gripper_action[:, None],
        ),
        axis=-1,
    )
    trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
    return trajectory


def toto_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["action"] = tf.concat(
        (
            trajectory["action"]["world_vector"],
            trajectory["action"]["rotation_delta"],
            tf.cast(trajectory["action"]["open_gripper"][:, None], tf.float32),
        ),
        axis=-1,
    )
    trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
    return trajectory


def language_table_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # default to "open" gripper
    trajectory["action"] = tf.concat(
        (
            trajectory["action"],
            tf.zeros_like(trajectory["action"]),
            tf.zeros_like(trajectory["action"]),
            tf.ones_like(trajectory["action"][:, :1]),
        ),
        axis=-1,
    )

    # decode language instruction
    instruction_bytes = trajectory["observation"]["instruction"]
    instruction_encoded = tf.strings.unicode_encode(instruction_bytes, output_encoding="UTF-8")
    # Remove trailing padding --> convert RaggedTensor to regular Tensor.
    trajectory["language_instruction"] = tf.strings.split(instruction_encoded, "\x00")[:, :1].to_tensor()[
        :, 0
    ]
    return trajectory


def pusht_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["action"] = tf.concat(
        (
            trajectory["action"]["world_vector"],
            trajectory["action"]["rotation_delta"],
            trajectory["action"]["gripper_closedness_action"][:, None],
        ),
        axis=-1,
    )
    trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
    return trajectory


def stanford_kuka_multimodal_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["depth_image"] = trajectory["observation"]["depth_image"][..., 0]
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :3],
            tf.zeros_like(trajectory["action"][:, :3]),
            trajectory["action"][:, -1:],
        ),
        axis=-1,
    )
    return trajectory


def nyu_rot_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][..., :6]
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][..., -1:]
    trajectory["action"] = trajectory["action"][..., :7]
    return trajectory


def stanford_hydra_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # invert gripper action, +1 = open, 0 = close
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :6],
            invert_gripper_actions(trajectory["action"][:, -1:]),
        ),
        axis=-1,
    )

    trajectory["observation"]["eef_state"] = tf.concat(
        (
            trajectory["observation"]["state"][:, :3],
            trajectory["observation"]["state"][:, 7:10],
        ),
        axis=-1,
    )
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -3:-2]
    return trajectory


def austin_buds_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # invert gripper action + clip, +1 = open, 0 = close
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :6],
            invert_gripper_actions(tf.clip_by_value(trajectory["action"][:, -1:], 0, 1)),
        ),
        axis=-1,
    )

    trajectory["observation"]["state"] = trajectory["observation"]["state"][:, :8]
    return trajectory


def nyu_franka_play_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["depth"] = tf.cast(trajectory["observation"]["depth"][..., 0], tf.float32)
    trajectory["observation"]["depth_additional_view"] = tf.cast(
        trajectory["observation"]["depth_additional_view"][..., 0], tf.float32
    )
    trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][:, -6:]

    # clip gripper action, +1 = open, 0 = close
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, -8:-2],
            tf.clip_by_value(trajectory["action"][:, -2:-1], 0, 1),
        ),
        axis=-1,
    )
    return trajectory


def maniskill_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][..., 7:8]
    return trajectory


def furniture_bench_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    import tensorflow_graphics.geometry.transformation as tft

    trajectory["observation"]["state"] = tf.concat(
        (
            trajectory["observation"]["state"][:, :7],
            trajectory["observation"]["state"][:, -1:],
        ),
        axis=-1,
    )

    # invert gripper action + clip, +1 = open, 0 = close
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :3],
            tft.euler.from_quaternion(trajectory["action"][:, 3:7]),
            invert_gripper_actions(tf.clip_by_value(trajectory["action"][:, -1:], 0, 1)),
        ),
        axis=-1,
    )
    return trajectory


def cmu_franka_exploration_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["action"] = trajectory["action"][..., :-1]
    return trajectory


def ucsd_kitchen_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["joint_state"] = trajectory["observation"]["state"][:, :7]
    trajectory["action"] = trajectory["action"][..., :-1]
    return trajectory


def ucsd_pick_place_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][:, :6]
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :3],
            tf.zeros_like(trajectory["action"][:, :3]),
            trajectory["action"][:, -1:],
        ),
        axis=-1,
    )
    return trajectory


def austin_sailor_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # invert gripper action + clip, +1 = open, 0 = close
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :6],
            invert_gripper_actions(tf.clip_by_value(trajectory["action"][:, -1:], 0, 1)),
        ),
        axis=-1,
    )
    return trajectory


def austin_sirius_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # invert gripper action + clip, +1 = open, 0 = close
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :6],
            invert_gripper_actions(tf.clip_by_value(trajectory["action"][:, -1:], 0, 1)),
        ),
        axis=-1,
    )
    return trajectory


def bc_z_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["action"] = tf.concat(
        (
            trajectory["action"]["future/xyz_residual"][:, :3],
            trajectory["action"]["future/axis_angle_residual"][:, :3],
            invert_gripper_actions(tf.cast(trajectory["action"]["future/target_close"][:, :1], tf.float32)),
        ),
        axis=-1,
    )
    trajectory["language_instruction"] = trajectory["observation"]["natural_language_instruction"]
    return trajectory


def tokyo_pr2_opening_fridge_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][:, :6]
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
    trajectory["action"] = trajectory["action"][..., :-1]
    return trajectory


def tokyo_pr2_tabletop_manipulation_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][:, :6]
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
    trajectory["action"] = trajectory["action"][..., :-1]
    return trajectory


def utokyo_xarm_bimanual_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["action"] = trajectory["action"][..., -7:]
    return trajectory


def robo_net_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["eef_state"] = tf.concat(
        (
            trajectory["observation"]["state"][:, :4],
            tf.zeros_like(trajectory["observation"]["state"][:, :2]),
        ),
        axis=-1,
    )
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :4],
            tf.zeros_like(trajectory["action"][:, :2]),
            trajectory["action"][:, -1:],
        ),
        axis=-1,
    )
    return trajectory


def berkeley_mvp_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    """
    trajectory["observation"]["state"] = tf.concat((
        tf.cast(trajectory["observation"]["gripper"][:, None], tf.float32),
        trajectory["observation"]["pose"],
        trajectory["observation"]["joint_pos"],),
        axis=-1,)
    """
    trajectory["observation"]["gripper"] = tf.cast(trajectory["observation"]["gripper"][:, None], tf.float32)
    return trajectory


def berkeley_rpt_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["gripper"] = tf.cast(trajectory["observation"]["gripper"][:, None], tf.float32)
    return trajectory


def kaist_nonprehensible_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["state"] = trajectory["observation"]["state"][:, -7:]
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :6],
            tf.zeros_like(trajectory["action"][:, :1]),
        ),
        axis=-1,
    )
    return trajectory


def stanford_mask_vit_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["eef_state"] = tf.concat(
        (
            trajectory["observation"]["end_effector_pose"][:, :4],
            tf.zeros_like(trajectory["observation"]["end_effector_pose"][:, :2]),
        ),
        axis=-1,
    )
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["end_effector_pose"][:, -1:]
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :4],
            tf.zeros_like(trajectory["action"][:, :2]),
            trajectory["action"][:, -1:],
        ),
        axis=-1,
    )
    return trajectory


def tokyo_lsmo_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][:, :6]
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
    return trajectory


def dlr_sara_grid_clamp_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["state"] = trajectory["observation"]["state"][:, :6]
    return trajectory


def dlr_edan_shared_control_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # invert gripper action, +1 = open, 0 = close
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :6],
            invert_gripper_actions(trajectory["action"][:, -1:]),
        ),
        axis=-1,
    )
    return trajectory


def asu_table_top_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["eef_state"] = trajectory["ground_truth_states"]["EE"]
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
    return trajectory


def robocook_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["eef_state"] = trajectory["observation"]["state"][:, :6]
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
    return trajectory


def imperial_wristcam_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["action"] = trajectory["action"][..., :-1]
    return trajectory


def iamlab_pick_insert_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    import tensorflow_graphics.geometry.transformation as tft

    trajectory["observation"]["joint_state"] = trajectory["observation"]["state"][:, :7]
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, 7:8]
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :3],
            tft.euler.from_quaternion(trajectory["action"][:, 3:7]),
            trajectory["action"][:, 7:8],
        ),
        axis=-1,
    )
    return trajectory


def uiuc_d3field_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["action"] = tf.concat(
        (
            trajectory["action"],
            tf.zeros_like(trajectory["action"]),
            tf.zeros_like(trajectory["action"][:, :1]),
        ),
        axis=-1,
    )
    return trajectory


def utaustin_mutex_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["state"] = trajectory["observation"]["state"][:, :8]

    # invert gripper action + clip, +1 = open, 0 = close
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :6],
            invert_gripper_actions(tf.clip_by_value(trajectory["action"][:, -1:], 0, 1)),
        ),
        axis=-1,
    )
    return trajectory


def berkeley_fanuc_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["joint_state"] = trajectory["observation"]["state"][:, :6]
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, 6:7]

    # dataset does not store gripper actions, so use gripper state info, invert so +1 = open, 0 = close
    trajectory["action"] = tf.concat(
        (
            trajectory["action"],
            invert_gripper_actions(trajectory["observation"]["gripper_state"]),
        ),
        axis=-1,
    )
    return trajectory


def cmu_playing_with_food_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    import tensorflow_graphics.geometry.transformation as tft

    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :3],
            tft.euler.from_quaternion(trajectory["action"][:, 3:7]),
            trajectory["action"][:, -1:],
        ),
        axis=-1,
    )
    return trajectory


def playfusion_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :3],
            trajectory["action"][:, -4:],
        ),
        axis=-1,
    )
    return trajectory


def cmu_stretch_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["eef_state"] = tf.concat(
        (
            trajectory["observation"]["state"][:, :3],
            tf.zeros_like(trajectory["observation"]["state"][:, :3]),
        ),
        axis=-1,
    )
    trajectory["observation"]["gripper_state"] = trajectory["observation"]["state"][:, -1:]
    trajectory["action"] = trajectory["action"][..., :-1]
    return trajectory


def gnm_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    trajectory["observation"]["state"] = tf.concat(
        (
            trajectory["observation"]["position"],
            tf.zeros_like(trajectory["observation"]["state"][:, :3]),
            trajectory["observation"]["yaw"],
        ),
        axis=-1,
    )
    trajectory["action"] = tf.concat(
        (
            trajectory["action"],
            tf.zeros_like(trajectory["action"]),
            tf.zeros_like(trajectory["action"]),
            tf.zeros_like(trajectory["action"][:, :1]),
        ),
        axis=-1,
    )
    return trajectory


def fmb_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # every input feature is batched, ie has leading batch dimension
    trajectory["observation"]["proprio"] = tf.concat(
        (
            trajectory["observation"]["eef_pose"],
            trajectory["observation"]["state_gripper_pose"][..., None],
        ),
        axis=-1,
    )
    return trajectory


def dobbe_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # every input feature is batched, ie has leading batch dimension
    trajectory["observation"]["proprio"] = trajectory["observation"]["state"]
    return trajectory


def robo_set_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    # gripper action is in -1...1 --> clip to 0...1, flip
    gripper_action = trajectory["action"][:, -1:]
    gripper_action = invert_gripper_actions(tf.clip_by_value(gripper_action, 0, 1))

    trajectory["action"] = tf.concat(
        (
            trajectory["action"][:, :7],
            gripper_action,
        ),
        axis=-1,
    )
    return trajectory


def identity_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
    return trajectory


# === Registry ===
OPENX_STANDARDIZATION_TRANSFORMS = {
    "bridge_openx": bridge_openx_dataset_transform,
    "bridge_orig": bridge_orig_dataset_transform,
    "bridge_dataset": bridge_orig_dataset_transform,
    "ppgm": ppgm_dataset_transform,
    "ppgm_static": ppgm_dataset_transform,
    "ppgm_wrist": ppgm_dataset_transform,
    "fractal20220817_data": rt1_dataset_transform,
    "kuka": kuka_dataset_transform,
    "taco_play": taco_play_dataset_transform,
    "jaco_play": jaco_play_dataset_transform,
    "berkeley_cable_routing": berkeley_cable_routing_dataset_transform,
    "roboturk": roboturk_dataset_transform,
    "nyu_door_opening_surprising_effectiveness": nyu_door_opening_dataset_transform,
    "viola": viola_dataset_transform,
    "berkeley_autolab_ur5": berkeley_autolab_ur5_dataset_transform,
    "toto": toto_dataset_transform,
    "language_table": language_table_dataset_transform,
    "columbia_cairlab_pusht_real": pusht_dataset_transform,
    "stanford_kuka_multimodal_dataset_converted_externally_to_rlds": stanford_kuka_multimodal_dataset_transform,
    "nyu_rot_dataset_converted_externally_to_rlds": nyu_rot_dataset_transform,
    "stanford_hydra_dataset_converted_externally_to_rlds": stanford_hydra_dataset_transform,
    "austin_buds_dataset_converted_externally_to_rlds": austin_buds_dataset_transform,
    "nyu_franka_play_dataset_converted_externally_to_rlds": nyu_franka_play_dataset_transform,
    "maniskill_dataset_converted_externally_to_rlds": maniskill_dataset_transform,
    "furniture_bench_dataset_converted_externally_to_rlds": furniture_bench_dataset_transform,
    "cmu_franka_exploration_dataset_converted_externally_to_rlds": cmu_franka_exploration_dataset_transform,
    "ucsd_kitchen_dataset_converted_externally_to_rlds": ucsd_kitchen_dataset_transform,
    "ucsd_pick_and_place_dataset_converted_externally_to_rlds": ucsd_pick_place_dataset_transform,
    "austin_sailor_dataset_converted_externally_to_rlds": austin_sailor_dataset_transform,
    "austin_sirius_dataset_converted_externally_to_rlds": austin_sirius_dataset_transform,
    "bc_z": bc_z_dataset_transform,
    "utokyo_pr2_opening_fridge_converted_externally_to_rlds": tokyo_pr2_opening_fridge_dataset_transform,
    "utokyo_pr2_tabletop_manipulation_converted_externally_to_rlds": tokyo_pr2_tabletop_manipulation_dataset_transform,
    "utokyo_xarm_pick_and_place_converted_externally_to_rlds": identity_transform,
    "utokyo_xarm_bimanual_converted_externally_to_rlds": utokyo_xarm_bimanual_dataset_transform,
    "robo_net": robo_net_dataset_transform,
    "berkeley_mvp_converted_externally_to_rlds": berkeley_mvp_dataset_transform,
    "berkeley_rpt_converted_externally_to_rlds": berkeley_rpt_dataset_transform,
    "kaist_nonprehensile_converted_externally_to_rlds": kaist_nonprehensible_dataset_transform,
    "stanford_mask_vit_converted_externally_to_rlds": stanford_mask_vit_dataset_transform,
    "tokyo_u_lsmo_converted_externally_to_rlds": tokyo_lsmo_dataset_transform,
    "dlr_sara_pour_converted_externally_to_rlds": identity_transform,
    "dlr_sara_grid_clamp_converted_externally_to_rlds": dlr_sara_grid_clamp_dataset_transform,
    "dlr_edan_shared_control_converted_externally_to_rlds": dlr_edan_shared_control_dataset_transform,
    "asu_table_top_converted_externally_to_rlds": asu_table_top_dataset_transform,
    "stanford_robocook_converted_externally_to_rlds": robocook_dataset_transform,
    "imperialcollege_sawyer_wrist_cam": imperial_wristcam_dataset_transform,
    "iamlab_cmu_pickup_insert_converted_externally_to_rlds": iamlab_pick_insert_dataset_transform,
    "uiuc_d3field": uiuc_d3field_dataset_transform,
    "utaustin_mutex": utaustin_mutex_dataset_transform,
    "berkeley_fanuc_manipulation": berkeley_fanuc_dataset_transform,
    "cmu_playing_with_food": cmu_playing_with_food_dataset_transform,
    "cmu_play_fusion": playfusion_dataset_transform,
    "cmu_stretch": cmu_stretch_dataset_transform,
    "berkeley_gnm_recon": gnm_dataset_transform,
    "berkeley_gnm_cory_hall": gnm_dataset_transform,
    "berkeley_gnm_sac_son": gnm_dataset_transform,
    "droid": droid_baseact_transform_fn(),
    "droid_100": droid_baseact_transform_fn(),  # first 100 episodes of droid
    "fmb": fmb_transform,
    "dobbe": dobbe_dataset_transform,
    "robo_set": robo_set_dataset_transform,
    "usc_cloth_sim_converted_externally_to_rlds": identity_transform,
    "plex_robosuite": identity_transform,
    "conq_hose_manipulation": identity_transform,
    "io_ai_tech": identity_transform,
    "spoc": identity_transform,
}
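A hedged usage sketch: transforms are looked up by dataset name and mapped over a tf.data dataset of trajectories (`trajectory_dataset` is hypothetical):

transform = OPENX_STANDARDIZATION_TRANSFORMS["bridge_orig"]
trajectory_dataset = trajectory_dataset.map(transform)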
@@ -1,359 +0,0 @@
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
For https://github.com/google-deepmind/open_x_embodiment (OPENX) datasets.

Example:
    python lerobot/scripts/push_dataset_to_hub.py \
    --raw-dir /hdd/tensorflow_datasets/bridge_dataset/1.0.0/ \
    --repo-id youliangtan/sampled_bridge_data_v2 \
    --raw-format openx_rlds.bridge_orig \
    --episodes 3 4 5 8 9

Exact dataset fps defined in openx/configs.yaml, obtained from:
    https://docs.google.com/spreadsheets/d/1rPBD77tk60AEIGZrGSODwyyzs5FgCU9Uz3h-3_t2A9g/edit?gid=0#gid=0&range=R:R
"""

import shutil
from pathlib import Path

import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import torch
import tqdm
import yaml
from datasets import Dataset, Features, Image, Sequence, Value
from PIL import Image as PILImage

from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub.openx.transforms import OPENX_STANDARDIZATION_TRANSFORMS
from lerobot.common.datasets.push_dataset_to_hub.utils import (
    concatenate_episodes,
    get_default_encoding,
    save_images_concurrently,
)
from lerobot.common.datasets.utils import (
    calculate_episode_data_index,
    hf_transform_to_torch,
)
from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames

with open("lerobot/common/datasets/push_dataset_to_hub/openx/configs.yaml") as f:
    _openx_list = yaml.safe_load(f)

OPENX_DATASET_CONFIGS = _openx_list["OPENX_DATASET_CONFIGS"]
|
||||
|
||||
np.set_printoptions(precision=2)
|
||||
|
||||
|
||||
def tf_to_torch(data):
|
||||
return torch.from_numpy(data.numpy())
|
||||
|
||||
|
||||
def tf_img_convert(img):
|
||||
if img.dtype == tf.string:
|
||||
img = tf.io.decode_image(img, expand_animations=False, dtype=tf.uint8)
|
||||
elif img.dtype != tf.uint8:
|
||||
raise ValueError(f"Unsupported image dtype: found with dtype {img.dtype}")
|
||||
return img.numpy()
|
||||
|
||||
|
||||
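# Illustrative check (not in the original file): `tf_img_convert` accepts both
# raw uint8 image tensors and encoded image bytes (a scalar tf.string tensor).
raw = tf.zeros((64, 64, 3), dtype=tf.uint8)
encoded = tf.io.encode_png(raw)
assert tf_img_convert(raw).shape == (64, 64, 3)
assert tf_img_convert(encoded).shape == (64, 64, 3)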
def _broadcast_metadata_rlds(i: tf.Tensor, traj: dict) -> dict:
    """
    In the RLDS format, each trajectory has some top-level metadata that is explicitly separated out, and a "steps"
    entry. This function moves the "steps" entry to the top level, broadcasting any metadata to the length of the
    trajectory. This function also adds the extra metadata fields `_len`, `_traj_index`, and `_frame_index`.

    NOTE: adapted from DLimp library https://github.com/kvablack/dlimp/
    """
    steps = traj.pop("steps")

    traj_len = tf.shape(tf.nest.flatten(steps)[0])[0]

    # broadcast metadata to the length of the trajectory
    metadata = tf.nest.map_structure(lambda x: tf.repeat(x, traj_len), traj)

    # put steps back in
    assert "traj_metadata" not in steps
    traj = {**steps, "traj_metadata": metadata}

    assert "_len" not in traj
    assert "_traj_index" not in traj
    assert "_frame_index" not in traj
    traj["_len"] = tf.repeat(traj_len, traj_len)
    traj["_traj_index"] = tf.repeat(i, traj_len)
    traj["_frame_index"] = tf.range(traj_len)

    return traj

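# Illustrative example (not in the original file): a 3-step trajectory with
# scalar metadata gets the metadata repeated per step and index fields added.
example = _broadcast_metadata_rlds(
    tf.constant(0),
    {"steps": {"action": tf.zeros((3, 7))}, "episode_id": tf.constant(42)},
)
assert example["_frame_index"].numpy().tolist() == [0, 1, 2]
assert example["traj_metadata"]["episode_id"].numpy().tolist() == [42, 42, 42]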
def load_from_raw(
    raw_dir: Path,
    videos_dir: Path,
    fps: int,
    video: bool,
    episodes: list[int] | None = None,
    encoding: dict | None = None,
    openx_dataset_name: str | None = None,
):
    """
    Args:
        raw_dir (Path): _description_
        videos_dir (Path): _description_
        fps (int): _description_
        video (bool): _description_
        episodes (list[int] | None, optional): _description_. Defaults to None.
    """
    ds_builder = tfds.builder_from_directory(str(raw_dir))
    dataset = ds_builder.as_dataset(
        split="all",
        decoders={"steps": tfds.decode.SkipDecoding()},
    )

    dataset_info = ds_builder.info
    print("dataset_info: ", dataset_info)

    ds_length = len(dataset)
    dataset = dataset.take(ds_length)
    # "flatten" the dataset so that we can apply trajectory-level map() easily
    # each [obs][key] has a shape of (frame_size, ...)
    dataset = dataset.enumerate().map(_broadcast_metadata_rlds)

    # we will apply the standardization transform if the dataset_name is provided
    # if the dataset name is not provided and the goal is to convert any rlds formatted dataset,
    # search for 'image' keys in the observations
    if openx_dataset_name is not None:
        print(" - applying standardization transform for dataset: ", openx_dataset_name)
        assert openx_dataset_name in OPENX_STANDARDIZATION_TRANSFORMS
        transform_fn = OPENX_STANDARDIZATION_TRANSFORMS[openx_dataset_name]
        dataset = dataset.map(transform_fn)

        image_keys = OPENX_DATASET_CONFIGS[openx_dataset_name]["image_obs_keys"]
    else:
        obs_keys = dataset_info.features["steps"]["observation"].keys()
        image_keys = [key for key in obs_keys if "image" in key]

    lang_key = "language_instruction" if "language_instruction" in dataset.element_spec else None

    print(" - image_keys: ", image_keys)
    print(" - lang_key: ", lang_key)

    it = iter(dataset)

    ep_dicts = []
    # Init temp path to save ep_dicts in case of crash
    tmp_ep_dicts_dir = videos_dir.parent.joinpath("ep_dicts")
    tmp_ep_dicts_dir.mkdir(parents=True, exist_ok=True)

    # check if ep_dicts have already been saved in /tmp
    starting_ep_idx = 0
    saved_ep_dicts = [ep.__str__() for ep in tmp_ep_dicts_dir.iterdir()]
    if len(saved_ep_dicts) > 0:
        saved_ep_dicts.sort()
        # get last ep_idx number
        starting_ep_idx = int(saved_ep_dicts[-1][-13:-3]) + 1
        for i in range(starting_ep_idx):
            episode = next(it)
            ep_dicts.append(torch.load(saved_ep_dicts[i]))

    # if the user specified episodes, skip the ones not in the list
    if episodes is not None:
        if ds_length == 0:
            raise ValueError("No episodes found.")
        # convert episodes index to sorted list
        episodes = sorted(episodes)

    for ep_idx in tqdm.tqdm(range(starting_ep_idx, ds_length)):
        episode = next(it)

        # if the user specified episodes, skip the ones not in the list
        if episodes is not None:
            if len(episodes) == 0:
                break
            if ep_idx == episodes[0]:
                # process this episode
                print(" selecting episode idx: ", ep_idx)
                episodes.pop(0)
            else:
                continue  # skip

        num_frames = episode["action"].shape[0]

        ###########################################################
        # Handle the episodic data

        # last step of demonstration is considered done
        done = torch.zeros(num_frames, dtype=torch.bool)
        done[-1] = True
        ep_dict = {}
        langs = []  # TODO: might be located in "observation"

        image_array_dict = {key: [] for key in image_keys}

        # We will create the state observation tensor by stacking the state
        # obs keys defined in the openx/configs.py
        if openx_dataset_name is not None:
            state_obs_keys = OPENX_DATASET_CONFIGS[openx_dataset_name]["state_obs_keys"]
            # stack the state observations; if a key is missing, pad with zeros
            states = []
            for key in state_obs_keys:
                if key in episode["observation"]:
                    states.append(tf_to_torch(episode["observation"][key]))
                else:
                    states.append(torch.zeros(num_frames, 1))  # pad with zeros
            states = torch.cat(states, dim=1)
            # assert states.shape == (num_frames, 8), f"states shape: {states.shape}"
        else:
            states = tf_to_torch(episode["observation"]["state"])

        actions = tf_to_torch(episode["action"])
        rewards = tf_to_torch(episode["reward"]).float()

        # If lang_key is present, convert the entire tensor at once
        if lang_key is not None:
            langs = [str(x) for x in episode[lang_key]]

        for im_key in image_keys:
            imgs = episode["observation"][im_key]
            image_array_dict[im_key] = [tf_img_convert(img) for img in imgs]

        # simple assertions
        for item in [states, actions, rewards, done]:
            assert len(item) == num_frames

        ###########################################################

        # loop through all cameras
        for im_key in image_keys:
            img_key = f"observation.images.{im_key}"
            imgs_array = image_array_dict[im_key]
            imgs_array = np.array(imgs_array)
            if video:
                # save png images in temporary directory
                tmp_imgs_dir = videos_dir / "tmp_images"
                save_images_concurrently(imgs_array, tmp_imgs_dir)

                # encode images to a mp4 video
                fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
                video_path = videos_dir / fname
                encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))

                # clean temporary images directory
                shutil.rmtree(tmp_imgs_dir)

                # store the reference to the video frame
                ep_dict[img_key] = [
                    {"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)
                ]
            else:
                ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]

        if lang_key is not None:
            ep_dict["language_instruction"] = langs

        ep_dict["observation.state"] = states
        ep_dict["action"] = actions
        ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
        ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames)
        ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
        ep_dict["next.reward"] = rewards
        ep_dict["next.done"] = done

        path_ep_dict = tmp_ep_dicts_dir.joinpath(
            "ep_dict_" + "0" * (10 - len(str(ep_idx))) + str(ep_idx) + ".pt"
        )
        torch.save(ep_dict, path_ep_dict)

        ep_dicts.append(ep_dict)

    data_dict = concatenate_episodes(ep_dicts)

    total_frames = data_dict["frame_index"].shape[0]
    data_dict["index"] = torch.arange(0, total_frames, 1)
    return data_dict

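# Side note on the resume logic above (illustrative, not in the original file):
# episode files are named "ep_dict_" + a 10-digit zero-padded index + ".pt",
# so slicing [-13:-3] recovers the episode index from the path string.
assert int("/tmp/ep_dicts/ep_dict_0000000042.pt"[-13:-3]) == 42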
def to_hf_dataset(data_dict, video) -> Dataset:
    features = {}

    keys = [key for key in data_dict if "observation.images." in key]
    for key in keys:
        if video:
            features[key] = VideoFrame()
        else:
            features[key] = Image()

    features["observation.state"] = Sequence(
        length=data_dict["observation.state"].shape[1], feature=Value(dtype="float32", id=None)
    )
    if "observation.velocity" in data_dict:
        features["observation.velocity"] = Sequence(
            length=data_dict["observation.velocity"].shape[1], feature=Value(dtype="float32", id=None)
        )
    if "observation.effort" in data_dict:
        features["observation.effort"] = Sequence(
            length=data_dict["observation.effort"].shape[1], feature=Value(dtype="float32", id=None)
        )
    if "language_instruction" in data_dict:
        features["language_instruction"] = Value(dtype="string", id=None)

    features["action"] = Sequence(
        length=data_dict["action"].shape[1], feature=Value(dtype="float32", id=None)
    )
    features["episode_index"] = Value(dtype="int64", id=None)
    features["frame_index"] = Value(dtype="int64", id=None)
    features["timestamp"] = Value(dtype="float32", id=None)
    features["next.reward"] = Value(dtype="float32", id=None)
    features["next.done"] = Value(dtype="bool", id=None)
    features["index"] = Value(dtype="int64", id=None)

    hf_dataset = Dataset.from_dict(data_dict, features=Features(features))
    hf_dataset.set_transform(hf_transform_to_torch)
    return hf_dataset

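# Hedged inspection sketch (shapes are assumptions for illustration): with a
# 7-dim state and action, the resulting schema exposes fixed-length float32
# sequences plus scalar index/timestamp columns.
hf_dataset = to_hf_dataset(data_dict, video=False)
print(hf_dataset.features["observation.state"])  # Sequence(float32, length=7)
print(hf_dataset.features["action"])  # Sequence(float32, length=7)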
def from_raw_to_lerobot_format(
    raw_dir: Path,
    videos_dir: Path,
    fps: int | None = None,
    video: bool = True,
    episodes: list[int] | None = None,
    encoding: dict | None = None,
    openx_dataset_name: str | None = None,
):
    """This is a test impl for rlds conversion"""
    if openx_dataset_name is None:
        # set a default rlds frame rate if the dataset is not from openx
        fps = 30
    elif "fps" not in OPENX_DATASET_CONFIGS[openx_dataset_name]:
        raise ValueError(
            "fps for this dataset is not specified in openx/configs.py yet, which means it is not yet tested"
        )
    else:
        fps = OPENX_DATASET_CONFIGS[openx_dataset_name]["fps"]

    data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding, openx_dataset_name)
    hf_dataset = to_hf_dataset(data_dict, video)
    episode_data_index = calculate_episode_data_index(hf_dataset)
    info = {
        "codebase_version": CODEBASE_VERSION,
        "fps": fps,
        "video": video,
    }
    if video:
        info["encoding"] = get_default_encoding()

    return hf_dataset, episode_data_index, info
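# Hedged end-to-end sketch (paths are placeholder assumptions; the dataset name
# comes from the module docstring's example):
hf_dataset, episode_data_index, info = from_raw_to_lerobot_format(
    raw_dir=Path("/hdd/tensorflow_datasets/bridge_dataset/1.0.0"),
    videos_dir=Path("outputs/videos"),
    openx_dataset_name="bridge_orig",
)
print(info["fps"], len(hf_dataset))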
@@ -32,7 +32,7 @@ DATASET_CARD_TEMPLATE = """
|
||||
---
|
||||
# Metadata will go there
|
||||
---
|
||||
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
|
||||
This dataset was created using [🤗 LeRobot](https://github.com/huggingface/lerobot).
|
||||
|
||||
"""
|
||||
|
||||
@@ -80,11 +80,6 @@ def hf_transform_to_torch(items_dict: dict[torch.Tensor | None]):
|
||||
if isinstance(first_item, PILImage.Image):
|
||||
to_tensor = transforms.ToTensor()
|
||||
items_dict[key] = [to_tensor(img) for img in items_dict[key]]
|
||||
elif isinstance(first_item, str):
|
||||
# TODO (michel-aractingi): add str2embedding via language tokenizer
|
||||
# For now we leave this part up to the user to choose how to address
|
||||
# language conditioned tasks
|
||||
pass
|
||||
elif isinstance(first_item, dict) and "path" in first_item and "timestamp" in first_item:
|
||||
# video frame will be processed downstream
|
||||
pass
|
||||
|
||||
@@ -39,7 +39,7 @@ def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Ten

        # sanity check that images are channel last
        _, h, w, c = img.shape
        assert c < h and c < w, f"expect channel last images, but instead got {img.shape=}"
        assert c < h and c < w, f"expect channel first images, but instead {img.shape}"

        # sanity check that images are uint8
        assert img.dtype == torch.uint8, f"expect torch.uint8, but instead {img.dtype=}"

@@ -189,7 +189,7 @@ class Logger:
            training_state["scheduler"] = scheduler.state_dict()
        torch.save(training_state, save_dir / self.training_state_file_name)

    def save_checkpoint(
    def save_checkpont(
        self,
        train_step: int,
        policy: Policy,

@@ -296,7 +296,7 @@ class ACT(nn.Module):
        self.use_images = any(k.startswith("observation.image") for k in config.input_shapes)
        self.use_env_state = "observation.environment_state" in config.input_shapes
        if self.config.use_vae:
            self.vae_encoder = ACTEncoder(config, is_vae_encoder=True)
            self.vae_encoder = ACTEncoder(config)
            self.vae_encoder_cls_embed = nn.Embedding(1, config.dim_model)
            # Projection layer for joint-space configuration to hidden dimension.
            if self.use_robot_state:
@@ -521,11 +521,9 @@ class ACT(nn.Module):
|
||||
class ACTEncoder(nn.Module):
|
||||
"""Convenience module for running multiple encoder layers, maybe followed by normalization."""
|
||||
|
||||
def __init__(self, config: ACTConfig, is_vae_encoder: bool = False):
|
||||
def __init__(self, config: ACTConfig):
|
||||
super().__init__()
|
||||
self.is_vae_encoder = is_vae_encoder
|
||||
num_layers = config.n_vae_encoder_layers if self.is_vae_encoder else config.n_encoder_layers
|
||||
self.layers = nn.ModuleList([ACTEncoderLayer(config) for _ in range(num_layers)])
|
||||
self.layers = nn.ModuleList([ACTEncoderLayer(config) for _ in range(config.n_encoder_layers)])
|
||||
self.norm = nn.LayerNorm(config.dim_model) if config.pre_norm else nn.Identity()
|
||||
|
||||
def forward(
|
||||
|
||||
@@ -196,12 +196,3 @@ class DiffusionConfig:
|
||||
f"`noise_scheduler_type` must be one of {supported_noise_schedulers}. "
|
||||
f"Got {self.noise_scheduler_type}."
|
||||
)
|
||||
|
||||
# Check that the horizon size and U-Net downsampling is compatible.
|
||||
# U-Net downsamples by 2 with each stage.
|
||||
downsampling_factor = 2 ** len(self.down_dims)
|
||||
if self.horizon % downsampling_factor != 0:
|
||||
raise ValueError(
|
||||
"The horizon should be an integer multiple of the downsampling factor (which is determined "
|
||||
f"by `len(down_dims)`). Got {self.horizon=} and {self.down_dims=}"
|
||||
)
|
||||
|
||||
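# Worked example of the check above (values are assumptions for illustration):
# each U-Net stage halves the temporal resolution, so with three stages the
# horizon must be divisible by 2 ** 3 = 8.
down_dims = (512, 1024, 2048)
downsampling_factor = 2 ** len(down_dims)  # 8
assert 16 % downsampling_factor == 0  # horizon=16 is compatible
assert 12 % downsampling_factor != 0  # horizon=12 would raise ValueError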
@@ -106,6 +106,6 @@ def make_policy(
        policy = policy_cls(policy_cfg)
        policy.load_state_dict(policy_cls.from_pretrained(pretrained_policy_name_or_path).state_dict())

    #policy.to(get_safe_torch_device(hydra_cfg.device))
    policy.to(get_safe_torch_device(hydra_cfg.device))

    return policy

@@ -350,22 +350,17 @@ class VQBeTModel(nn.Module):
|
||||
|
||||
# get action features (pass through GPT)
|
||||
features = self.policy(input_tokens)
|
||||
# len(self.config.input_shapes) is the number of different observation modes.
|
||||
# this line gets the index of action prompt tokens.
|
||||
# len(self.config.input_shapes) is the number of different observation modes. this line gets the index of action prompt tokens.
|
||||
historical_act_pred_index = np.arange(0, n_obs_steps) * (len(self.config.input_shapes) + 1) + len(
|
||||
self.config.input_shapes
|
||||
)
|
||||
|
||||
# only extract the output tokens at the position of action query:
|
||||
# Behavior Transformer (BeT), and VQ-BeT are both sequence-to-sequence prediction models,
|
||||
# mapping sequential observation to sequential action (please refer to section 2.2 in BeT paper https://arxiv.org/pdf/2206.11251).
|
||||
# Thus, it predicts a historical action sequence, in addition to current and future actions (predicting future actions : optional).
|
||||
if len_additional_action_token > 0:
|
||||
features = torch.cat(
|
||||
[features[:, historical_act_pred_index], features[:, -len_additional_action_token:]], dim=1
|
||||
)
|
||||
else:
|
||||
features = features[:, historical_act_pred_index]
|
||||
# Behavior Transformer (BeT), and VQ-BeT are both sequence-to-sequence prediction models, mapping sequential observation to sequential action (please refer to section 2.2 in BeT paper https://arxiv.org/pdf/2206.11251).
|
||||
# Thus, it predict historical action sequence, in addition to current and future actions (predicting future actions : optional).
|
||||
features = torch.cat(
|
||||
[features[:, historical_act_pred_index], features[:, -len_additional_action_token:]], dim=1
|
||||
)
|
||||
# pass through action head
|
||||
action_head_output = self.action_head(features)
|
||||
# if rollout, VQ-BeT don't calculate loss
|
||||
|
||||
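# Worked example of the token-index arithmetic above (sizes are assumptions):
# with n_obs_steps=2 and 3 observation modalities, each step contributes the
# tokens [obs0, obs1, obs2, act], so the action prompt token of step k sits at
# index k * 4 + 3.
import numpy as np

n_obs_steps, n_modalities = 2, 3
historical_act_pred_index = np.arange(0, n_obs_steps) * (n_modalities + 1) + n_modalities
assert historical_act_pred_index.tolist() == [3, 7]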
@@ -1,557 +0,0 @@
|
||||
"""
|
||||
This file contains utilities for recording frames from Intel Realsense cameras.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import concurrent.futures
|
||||
import logging
|
||||
import math
|
||||
import shutil
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from collections import Counter
|
||||
from dataclasses import dataclass, replace
|
||||
from pathlib import Path
|
||||
from threading import Thread
|
||||
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
|
||||
from lerobot.common.robot_devices.utils import (
|
||||
RobotDeviceAlreadyConnectedError,
|
||||
RobotDeviceNotConnectedError,
|
||||
busy_wait,
|
||||
)
|
||||
from lerobot.common.utils.utils import capture_timestamp_utc
|
||||
|
||||
SERIAL_NUMBER_INDEX = 1
|
||||
|
||||
|
||||
def find_cameras(raise_when_empty=True, mock=False) -> list[dict]:
|
||||
"""
|
||||
Find the names and the serial numbers of the Intel RealSense cameras
|
||||
connected to the computer.
|
||||
"""
|
||||
if mock:
|
||||
import tests.mock_pyrealsense2 as rs
|
||||
else:
|
||||
import pyrealsense2 as rs
|
||||
|
||||
cameras = []
|
||||
for device in rs.context().query_devices():
|
||||
serial_number = int(device.get_info(rs.camera_info(SERIAL_NUMBER_INDEX)))
|
||||
name = device.get_info(rs.camera_info.name)
|
||||
cameras.append(
|
||||
{
|
||||
"serial_number": serial_number,
|
||||
"name": name,
|
||||
}
|
||||
)
|
||||
|
||||
if raise_when_empty and len(cameras) == 0:
|
||||
raise OSError(
|
||||
"Not a single camera was detected. Try re-plugging, or re-installing `librealsense` and its python wrapper `pyrealsense2`, or updating the firmware."
|
||||
)
|
||||
|
||||
return cameras
|
||||
|
||||
|
||||
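# Minimal usage sketch (assumes at least one RealSense camera is plugged in):
# list the detected devices before picking a serial number.
for cam in find_cameras():
    print(cam["serial_number"], cam["name"])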
def save_image(img_array, serial_number, frame_index, images_dir):
    try:
        img = Image.fromarray(img_array)
        path = images_dir / f"camera_{serial_number}_frame_{frame_index:06d}.png"
        path.parent.mkdir(parents=True, exist_ok=True)
        img.save(str(path), quality=100)
        logging.info(f"Saved image: {path}")
    except Exception as e:
        logging.error(f"Failed to save image for camera {serial_number} frame {frame_index}: {e}")


def save_images_from_cameras(
    images_dir: Path,
    serial_numbers: list[int] | None = None,
    fps=None,
    width=None,
    height=None,
    record_time_s=2,
    mock=False,
):
    """
    Initializes all the cameras and saves images to the directory. Useful to visually identify the camera
    associated to a given serial number.
    """
    if serial_numbers is None or len(serial_numbers) == 0:
        camera_infos = find_cameras(mock=mock)
        serial_numbers = [cam["serial_number"] for cam in camera_infos]

    if mock:
        import tests.mock_cv2 as cv2
    else:
        import cv2

    print("Connecting cameras")
    cameras = []
    for cam_sn in serial_numbers:
        print(f"{cam_sn=}")
        camera = IntelRealSenseCamera(cam_sn, fps=fps, width=width, height=height, mock=mock)
        camera.connect()
        print(
            f"IntelRealSenseCamera({camera.serial_number}, fps={camera.fps}, width={camera.width}, height={camera.height}, color_mode={camera.color_mode})"
        )
        cameras.append(camera)

    images_dir = Path(images_dir)
    if images_dir.exists():
        shutil.rmtree(
            images_dir,
        )
    images_dir.mkdir(parents=True, exist_ok=True)

    print(f"Saving images to {images_dir}")
    frame_index = 0
    start_time = time.perf_counter()
    try:
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
            while True:
                now = time.perf_counter()

                for camera in cameras:
                    # If we use async_read when fps is None, the loop will go full speed, and we will end up
                    # saving the same images from the cameras multiple times until the RAM/disk is full.
                    image = camera.read() if fps is None else camera.async_read()
                    if image is None:
                        print("No Frame")

                    bgr_converted_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

                    executor.submit(
                        save_image,
                        bgr_converted_image,
                        camera.serial_number,
                        frame_index,
                        images_dir,
                    )

                if fps is not None:
                    dt_s = time.perf_counter() - now
                    busy_wait(1 / fps - dt_s)

                if time.perf_counter() - start_time > record_time_s:
                    break

                print(f"Frame: {frame_index:04d}\tLatency (ms): {(time.perf_counter() - now) * 1000:.2f}")

                frame_index += 1
    finally:
        print(f"Images have been saved to {images_dir}")
        for camera in cameras:
            camera.disconnect()


@dataclass
class IntelRealSenseCameraConfig:
    """
    Example of tested options for Intel Real Sense D405:

    ```python
    IntelRealSenseCameraConfig(30, 640, 480)
    IntelRealSenseCameraConfig(60, 640, 480)
    IntelRealSenseCameraConfig(90, 640, 480)
    IntelRealSenseCameraConfig(30, 1280, 720)
    IntelRealSenseCameraConfig(30, 640, 480, use_depth=True)
    IntelRealSenseCameraConfig(30, 640, 480, rotation=90)
    ```
    """

    fps: int | None = None
    width: int | None = None
    height: int | None = None
    color_mode: str = "rgb"
    use_depth: bool = False
    force_hardware_reset: bool = True
    rotation: int | None = None
    mock: bool = False

    def __post_init__(self):
        if self.color_mode not in ["rgb", "bgr"]:
            raise ValueError(
                f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided."
            )

        at_least_one_is_not_none = self.fps is not None or self.width is not None or self.height is not None
        at_least_one_is_none = self.fps is None or self.width is None or self.height is None
        if at_least_one_is_not_none and at_least_one_is_none:
            raise ValueError(
                "For `fps`, `width` and `height`, either all of them need to be set, or none of them, "
                f"but {self.fps=}, {self.width=}, {self.height=} were provided."
            )

        if self.rotation not in [-90, None, 90, 180]:
            raise ValueError(f"`rotation` must be in [-90, None, 90, 180] (got {self.rotation})")

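# Hedged sketch of the all-or-none rule enforced in `__post_init__` above:
# providing only `fps` (without `width` and `height`) raises a ValueError.
try:
    IntelRealSenseCameraConfig(fps=30)
except ValueError as e:
    print(e)

# Valid: all three set together, with an optional rotation.
config = IntelRealSenseCameraConfig(fps=30, width=640, height=480, rotation=90)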
class IntelRealSenseCamera:
    """
    The IntelRealSenseCamera class is similar to the OpenCVCamera class but adds additional features for Intel RealSense cameras:
    - it is instantiated with the serial number of the camera, which won't randomly change as can be the case for OpenCVCamera on Linux,
    - it can also be instantiated with the camera's name — if it's unique — using IntelRealSenseCamera.init_from_name(),
    - a depth map can be returned.

    To find the camera indices of your cameras, you can run our utility script that will save a few frames for each camera:
    ```bash
    python lerobot/common/robot_devices/cameras/intelrealsense.py --images-dir outputs/images_from_intelrealsense_cameras
    ```

    When an IntelRealSenseCamera is instantiated, if no specific config is provided, the default fps, width, height and color_mode
    of the given camera will be used.

    Example of usage:
    ```python
    # Instantiate with its serial number
    camera = IntelRealSenseCamera(128422271347)
    # Or by its name if it's unique
    camera = IntelRealSenseCamera.init_from_name("Intel RealSense D405")
    camera.connect()
    color_image = camera.read()
    # when done using the camera, consider disconnecting
    camera.disconnect()
    ```

    Example of changing default fps, width, height and color_mode:
    ```python
    camera = IntelRealSenseCamera(serial_number, fps=30, width=1280, height=720)
    camera.connect()  # applies the settings, might error out if these settings are not compatible with the camera

    camera = IntelRealSenseCamera(serial_number, fps=90, width=640, height=480)
    camera.connect()

    camera = IntelRealSenseCamera(serial_number, fps=90, width=640, height=480, color_mode="bgr")
    camera.connect()
    ```

    Example of returning depth:
    ```python
    camera = IntelRealSenseCamera(serial_number, use_depth=True)
    camera.connect()
    color_image, depth_map = camera.read()
    ```
    """

    def __init__(
        self,
        serial_number: int,
        config: IntelRealSenseCameraConfig | None = None,
        **kwargs,
    ):
        if config is None:
            config = IntelRealSenseCameraConfig()

        # Overwrite the config arguments using kwargs
        config = replace(config, **kwargs)

        self.serial_number = serial_number
        self.fps = config.fps
        self.width = config.width
        self.height = config.height
        self.color_mode = config.color_mode
        self.use_depth = config.use_depth
        self.force_hardware_reset = config.force_hardware_reset
        self.mock = config.mock

        self.camera = None
        self.is_connected = False
        self.thread = None
        self.stop_event = None
        self.color_image = None
        self.depth_map = None
        self.logs = {}

        if self.mock:
            import tests.mock_cv2 as cv2
        else:
            import cv2

        # TODO(aliberts): Do we keep original width/height or do we define them after rotation?
        self.rotation = None
        if config.rotation == -90:
            self.rotation = cv2.ROTATE_90_COUNTERCLOCKWISE
        elif config.rotation == 90:
            self.rotation = cv2.ROTATE_90_CLOCKWISE
        elif config.rotation == 180:
            self.rotation = cv2.ROTATE_180

    @classmethod
    def init_from_name(cls, name: str, config: IntelRealSenseCameraConfig | None = None, **kwargs):
        camera_infos = find_cameras()
        camera_names = [cam["name"] for cam in camera_infos]
        this_name_count = Counter(camera_names)[name]
        if this_name_count > 1:
            # TODO(aliberts): Test this with multiple identical cameras (Aloha)
            raise ValueError(
                f"Multiple {name} cameras have been detected. Please use their serial number to instantiate them."
            )

        name_to_serial_dict = {cam["name"]: cam["serial_number"] for cam in camera_infos}
        cam_sn = name_to_serial_dict[name]

        if config is None:
            config = IntelRealSenseCameraConfig()

        # Overwrite the config arguments using kwargs
        config = replace(config, **kwargs)

        return cls(serial_number=cam_sn, config=config, **kwargs)

    def connect(self):
        if self.is_connected:
            raise RobotDeviceAlreadyConnectedError(
                f"IntelRealSenseCamera({self.serial_number}) is already connected."
            )

        if self.mock:
            import tests.mock_pyrealsense2 as rs
        else:
            import pyrealsense2 as rs

        config = rs.config()
        config.enable_device(str(self.serial_number))

        if self.fps and self.width and self.height:
            # TODO(rcadene): can we set rgb8 directly?
            config.enable_stream(rs.stream.color, self.width, self.height, rs.format.rgb8, self.fps)
        else:
            config.enable_stream(rs.stream.color)

        if self.use_depth:
            if self.fps and self.width and self.height:
                config.enable_stream(rs.stream.depth, self.width, self.height, rs.format.z16, self.fps)
            else:
                config.enable_stream(rs.stream.depth)

        self.camera = rs.pipeline()
        try:
            profile = self.camera.start(config)
            is_camera_open = True
        except RuntimeError:
            is_camera_open = False
            traceback.print_exc()

        # If the camera doesn't work, display the camera indices corresponding to
        # valid cameras.
        if not is_camera_open:
            # Verify that the provided `serial_number` is valid before printing the traceback
            camera_infos = find_cameras()
            serial_numbers = [cam["serial_number"] for cam in camera_infos]
            if self.serial_number not in serial_numbers:
                raise ValueError(
                    f"`serial_number` is expected to be one of these available cameras {serial_numbers}, but {self.serial_number} is provided instead. "
                    "To find the serial number you should use, run `python lerobot/common/robot_devices/cameras/intelrealsense.py`."
                )

            raise OSError(f"Can't access IntelRealSenseCamera({self.serial_number}).")

        color_stream = profile.get_stream(rs.stream.color)
        color_profile = color_stream.as_video_stream_profile()
        actual_fps = color_profile.fps()
        actual_width = color_profile.width()
        actual_height = color_profile.height()

        # Using `math.isclose` since actual fps can be a float (e.g. 29.9 instead of 30)
        if self.fps is not None and not math.isclose(self.fps, actual_fps, rel_tol=1e-3):
            # Using `OSError` since it's a broad error that encompasses issues related to device communication
            raise OSError(
                f"Can't set {self.fps=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_fps}."
            )
        if self.width is not None and self.width != actual_width:
            raise OSError(
                f"Can't set {self.width=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_width}."
            )
        if self.height is not None and self.height != actual_height:
            raise OSError(
                f"Can't set {self.height=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_height}."
            )

        self.fps = round(actual_fps)
        self.width = round(actual_width)
        self.height = round(actual_height)

        self.is_connected = True

    def read(self, temporary_color: str | None = None) -> np.ndarray | tuple[np.ndarray, np.ndarray]:
        """Read a frame from the camera returned in the format height x width x channels (e.g. 480 x 640 x 3)
        of type `np.uint8`, unlike the pytorch format, which is float channel first.

        When `use_depth=True`, returns a tuple `(color_image, depth_map)` with a depth map in the format
        height x width (e.g. 480 x 640) of type np.uint16.

        Note: Reading a frame is done every `camera.fps` times per second, and it is blocking.
        If you are reading data from other sensors, we advise using `camera.async_read()`, which is the non-blocking version of `camera.read()`.
        """
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
            )

        if self.mock:
            import tests.mock_cv2 as cv2
        else:
            import cv2

        start_time = time.perf_counter()

        frame = self.camera.wait_for_frames(timeout_ms=5000)

        color_frame = frame.get_color_frame()

        if not color_frame:
            raise OSError(f"Can't capture color image from IntelRealSenseCamera({self.serial_number}).")

        color_image = np.asanyarray(color_frame.get_data())

        requested_color_mode = self.color_mode if temporary_color is None else temporary_color
        if requested_color_mode not in ["rgb", "bgr"]:
            raise ValueError(
                f"Expected color values are 'rgb' or 'bgr', but {requested_color_mode} is provided."
            )

        # IntelRealSense uses RGB format as default (red, green, blue).
        if requested_color_mode == "bgr":
            color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)

        h, w, _ = color_image.shape
        if h != self.height or w != self.width:
            raise OSError(
                f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
            )

        if self.rotation is not None:
            color_image = cv2.rotate(color_image, self.rotation)

        # log the number of seconds it took to read the image
        self.logs["delta_timestamp_s"] = time.perf_counter() - start_time

        # log the utc time at which the image was received
        self.logs["timestamp_utc"] = capture_timestamp_utc()

        if self.use_depth:
            depth_frame = frame.get_depth_frame()
            if not depth_frame:
                raise OSError(f"Can't capture depth image from IntelRealSenseCamera({self.serial_number}).")

            depth_map = np.asanyarray(depth_frame.get_data())

            h, w = depth_map.shape
            if h != self.height or w != self.width:
                raise OSError(
                    f"Can't capture depth map with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
                )

            if self.rotation is not None:
                depth_map = cv2.rotate(depth_map, self.rotation)

            return color_image, depth_map
        else:
            return color_image

    def read_loop(self):
        while not self.stop_event.is_set():
            if self.use_depth:
                self.color_image, self.depth_map = self.read()
            else:
                self.color_image = self.read()

    def async_read(self):
        """Access the latest color image"""
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
            )

        if self.thread is None:
            self.stop_event = threading.Event()
            self.thread = Thread(target=self.read_loop, args=())
            self.thread.daemon = True
            self.thread.start()

        num_tries = 0
        while self.color_image is None:
            # TODO(rcadene, aliberts): intelrealsense has diverged compared to opencv over here
            num_tries += 1
            time.sleep(1 / self.fps)
            if num_tries > self.fps and (self.thread.ident is None or not self.thread.is_alive()):
                raise Exception(
                    "The thread responsible for `self.async_read()` took too much time to start. There might be an issue. Verify that `self.thread.start()` has been called."
                )

        if self.use_depth:
            return self.color_image, self.depth_map
        else:
            return self.color_image

    def disconnect(self):
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
            )

        if self.thread is not None and self.thread.is_alive():
            # wait for the thread to finish
            self.stop_event.set()
            self.thread.join()
            self.thread = None
            self.stop_event = None

        self.camera.stop()
        self.camera = None

        self.is_connected = False

    def __del__(self):
        if getattr(self, "is_connected", False):
            self.disconnect()

if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Save a few frames using `IntelRealSenseCamera` for all cameras connected to the computer, or a selected subset."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--serial-numbers",
|
||||
type=int,
|
||||
nargs="*",
|
||||
default=None,
|
||||
help="List of serial numbers used to instantiate the `IntelRealSenseCamera`. If not provided, find and use all available camera indices.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--fps",
|
||||
type=int,
|
||||
default=30,
|
||||
help="Set the number of frames recorded per seconds for all cameras. If not provided, use the default fps of each camera.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--width",
|
||||
type=str,
|
||||
default=640,
|
||||
help="Set the width for all cameras. If not provided, use the default width of each camera.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--height",
|
||||
type=str,
|
||||
default=480,
|
||||
help="Set the height for all cameras. If not provided, use the default height of each camera.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--images-dir",
|
||||
type=Path,
|
||||
default="outputs/images_from_intelrealsense_cameras",
|
||||
help="Set directory to save a few frames for each camera.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--record-time-s",
|
||||
type=float,
|
||||
default=2.0,
|
||||
help="Set the number of seconds used to record the frames. By default, 2 seconds.",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
save_images_from_cameras(**vars(args))
|
||||
@@ -13,15 +13,17 @@ from dataclasses import dataclass, replace
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
from PIL import Image

from lerobot.common.robot_devices.utils import (
    RobotDeviceAlreadyConnectedError,
    RobotDeviceNotConnectedError,
    busy_wait,
)
from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
from lerobot.common.utils.utils import capture_timestamp_utc
from lerobot.scripts.control_robot import busy_wait

# Use 1 thread to avoid blocking the main thread. Especially useful during data collection
# when other threads are used to save the images.
cv2.setNumThreads(1)

# The maximum opencv device index depends on your operating system. For instance,
# if you have 3 cameras, they should be associated to index 0, 1, and 2. This is the case
@@ -31,44 +33,20 @@ from lerobot.common.utils.utils import capture_timestamp_utc
MAX_OPENCV_INDEX = 60


def find_cameras(raise_when_empty=False, max_index_search_range=MAX_OPENCV_INDEX, mock=False) -> list[dict]:
    cameras = []
def find_camera_indices(raise_when_empty=False, max_index_search_range=MAX_OPENCV_INDEX):
    if platform.system() == "Linux":
        # Linux uses camera ports
        print("Linux detected. Finding available camera indices through scanning '/dev/video*' ports")
        possible_ports = [str(port) for port in Path("/dev").glob("video*")]
        ports = _find_cameras(possible_ports, mock=mock)
        for port in ports:
            cameras.append(
                {
                    "port": port,
                    "index": int(port.removeprefix("/dev/video")),
                }
            )
        possible_camera_ids = []
        for port in Path("/dev").glob("video*"):
            camera_idx = int(str(port).replace("/dev/video", ""))
            possible_camera_ids.append(camera_idx)
    else:
        print(
            "Mac or Windows detected. Finding available camera indices through "
            f"scanning all indices from 0 to {MAX_OPENCV_INDEX}"
        )
        possible_indices = range(max_index_search_range)
        indices = _find_cameras(possible_indices, mock=mock)
        for index in indices:
            cameras.append(
                {
                    "port": None,
                    "index": index,
                }
            )

    return cameras


def _find_cameras(
    possible_camera_ids: list[int | str], raise_when_empty=False, mock=False
) -> list[int | str]:
    if mock:
        import tests.mock_cv2 as cv2
    else:
        import cv2
    possible_camera_ids = range(max_index_search_range)

    camera_ids = []
    for camera_idx in possible_camera_ids:
@@ -89,16 +67,6 @@ def _find_cameras(
    return camera_ids


def is_valid_unix_path(path: str) -> bool:
    """Note: if 'path' points to a symlink, this will return True only if the target exists"""
    p = Path(path)
    return p.is_absolute() and p.exists()


def get_camera_index_from_unix_port(port: Path) -> int:
    return int(str(port.resolve()).removeprefix("/dev/video"))


def save_image(img_array, camera_index, frame_index, images_dir):
    img = Image.fromarray(img_array)
    path = images_dir / f"camera_{camera_index:02d}_frame_{frame_index:06d}.png"
@@ -107,26 +75,15 @@ def save_image(img_array, camera_index, frame_index, images_dir):


def save_images_from_cameras(
    images_dir: Path,
    camera_ids: list | None = None,
    fps=None,
    width=None,
    height=None,
    record_time_s=2,
    mock=False,
    images_dir: Path, camera_ids: list[int] | None = None, fps=None, width=None, height=None, record_time_s=2
):
    """
    Initializes all the cameras and saves images to the directory. Useful to visually identify the camera
    associated to a given camera index.
    """
    if camera_ids is None or len(camera_ids) == 0:
        camera_infos = find_cameras(mock=mock)
        camera_ids = [cam["index"] for cam in camera_infos]
    if camera_ids is None:
        camera_ids = find_camera_indices()

    print("Connecting cameras")
    cameras = []
    for cam_idx in camera_ids:
        camera = OpenCVCamera(cam_idx, fps=fps, width=width, height=height, mock=mock)
        camera = OpenCVCamera(cam_idx, fps=fps, width=width, height=height)
        camera.connect()
        print(
            f"OpenCVCamera({camera.camera_index}, fps={camera.fps}, width={camera.width}, "
@@ -144,7 +101,7 @@ def save_images_from_cameras(
    print(f"Saving images to {images_dir}")
    frame_index = 0
    start_time = time.perf_counter()
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        while True:
            now = time.perf_counter()

@@ -165,11 +122,11 @@ def save_images_from_cameras(
                dt_s = time.perf_counter() - now
                busy_wait(1 / fps - dt_s)

            print(f"Frame: {frame_index:04d}\tLatency (ms): {(time.perf_counter() - now) * 1000:.2f}")

            if time.perf_counter() - start_time > record_time_s:
                break

            print(f"Frame: {frame_index:04d}\tLatency (ms): {(time.perf_counter() - now) * 1000:.2f}")

            frame_index += 1

    print(f"Images have been saved to {images_dir}")
@@ -192,18 +149,13 @@ class OpenCVCameraConfig:
    width: int | None = None
    height: int | None = None
    color_mode: str = "rgb"
    rotation: int | None = None
    mock: bool = False

    def __post_init__(self):
        if self.color_mode not in ["rgb", "bgr"]:
            raise ValueError(
                f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided."
                f"Expected color_mode values are 'rgb' or 'bgr', but {self.color_mode} is provided."
            )

        if self.rotation not in [-90, None, 90, 180]:
            raise ValueError(f"`rotation` must be in [-90, None, 90, 180] (got {self.rotation})")


class OpenCVCamera:
    """
@@ -244,32 +196,17 @@ class OpenCVCamera:
    ```
    """

    def __init__(self, camera_index: int | str, config: OpenCVCameraConfig | None = None, **kwargs):
    def __init__(self, camera_index: int, config: OpenCVCameraConfig | None = None, **kwargs):
        if config is None:
            config = OpenCVCameraConfig()

        # Overwrite config arguments using kwargs
        config = replace(config, **kwargs)

        self.camera_index = camera_index
        self.port = None

        # Linux uses ports for connecting to cameras
        if platform.system() == "Linux":
            if isinstance(self.camera_index, int):
                self.port = Path(f"/dev/video{self.camera_index}")
            elif isinstance(self.camera_index, str) and is_valid_unix_path(self.camera_index):
                self.port = Path(self.camera_index)
                # Retrieve the camera index from a potentially symlinked path
                self.camera_index = get_camera_index_from_unix_port(self.port)
            else:
                raise ValueError(f"Please check the provided camera_index: {camera_index}")

        self.fps = config.fps
        self.width = config.width
        self.height = config.height
        self.color_mode = config.color_mode
        self.mock = config.mock

        self.camera = None
        self.is_connected = False
@@ -278,60 +215,43 @@ class OpenCVCamera:
        self.color_image = None
        self.logs = {}

        if self.mock:
            import tests.mock_cv2 as cv2
        else:
            import cv2

        # TODO(aliberts): Do we keep original width/height or do we define them after rotation?
        self.rotation = None
        if config.rotation == -90:
            self.rotation = cv2.ROTATE_90_COUNTERCLOCKWISE
        elif config.rotation == 90:
            self.rotation = cv2.ROTATE_90_CLOCKWISE
        elif config.rotation == 180:
            self.rotation = cv2.ROTATE_180

    def connect(self):
        if self.is_connected:
            raise RobotDeviceAlreadyConnectedError(f"OpenCVCamera({self.camera_index}) is already connected.")
            raise RobotDeviceAlreadyConnectedError(f"Camera {self.camera_index} is already connected.")

        if self.mock:
            import tests.mock_cv2 as cv2
        else:
            import cv2

        # Use 1 thread to avoid blocking the main thread. Especially useful during data collection
        # when other threads are used to save the images.
        cv2.setNumThreads(1)

        camera_idx = f"/dev/video{self.camera_index}" if platform.system() == "Linux" else self.camera_index
        # First create a temporary camera trying to access `camera_index`,
        # and verify it is a valid camera by calling `isOpened`.
        tmp_camera = cv2.VideoCapture(camera_idx)

        if platform.system() == "Linux":
            # Linux uses ports for connecting to cameras
            tmp_camera = cv2.VideoCapture(f"/dev/video{self.camera_index}")
        else:
            tmp_camera = cv2.VideoCapture(self.camera_index)

        is_camera_open = tmp_camera.isOpened()
        # Release camera to make it accessible for `find_camera_indices`
        tmp_camera.release()
        del tmp_camera

        # If the camera doesn't work, display the camera indices corresponding to
        # valid cameras.
        if not is_camera_open:
            # Verify that the provided `camera_index` is valid before printing the traceback
            cameras_info = find_cameras()
            available_cam_ids = [cam["index"] for cam in cameras_info]
            available_cam_ids = find_camera_indices()
            if self.camera_index not in available_cam_ids:
                raise ValueError(
                    f"`camera_index` is expected to be one of these available cameras {available_cam_ids}, but {self.camera_index} is provided instead. "
                    "To find the camera index you should use, run `python lerobot/common/robot_devices/cameras/opencv.py`."
                )

            raise OSError(f"Can't access OpenCVCamera({camera_idx}).")
            raise OSError(f"Can't access camera {self.camera_index}.")

        # Secondly, create the camera that will be used downstream.
        # Note: For some unknown reason, calling `isOpened` blocks the camera which then
        # needs to be re-created.
        self.camera = cv2.VideoCapture(camera_idx)
        if platform.system() == "Linux":
            self.camera = cv2.VideoCapture(f"/dev/video{self.camera_index}")
        else:
            self.camera = cv2.VideoCapture(self.camera_index)

        if self.fps is not None:
            self.camera.set(cv2.CAP_PROP_FPS, self.fps)
@@ -344,30 +264,28 @@ class OpenCVCamera:
        actual_width = self.camera.get(cv2.CAP_PROP_FRAME_WIDTH)
        actual_height = self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT)

        # Using `math.isclose` since actual fps can be a float (e.g. 29.9 instead of 30)
        if self.fps is not None and not math.isclose(self.fps, actual_fps, rel_tol=1e-3):
            # Using `OSError` since it's a broad error that encompasses issues related to device communication
            raise OSError(
                f"Can't set {self.fps=} for OpenCVCamera({self.camera_index}). Actual value is {actual_fps}."
                f"Can't set {self.fps=} for camera {self.camera_index}. Actual value is {actual_fps}."
            )
        if self.width is not None and not math.isclose(self.width, actual_width, rel_tol=1e-3):
        if self.width is not None and self.width != actual_width:
            raise OSError(
                f"Can't set {self.width=} for OpenCVCamera({self.camera_index}). Actual value is {actual_width}."
                f"Can't set {self.width=} for camera {self.camera_index}. Actual value is {actual_width}."
            )
        if self.height is not None and not math.isclose(self.height, actual_height, rel_tol=1e-3):
        if self.height is not None and self.height != actual_height:
            raise OSError(
                f"Can't set {self.height=} for OpenCVCamera({self.camera_index}). Actual value is {actual_height}."
                f"Can't set {self.height=} for camera {self.camera_index}. Actual value is {actual_height}."
            )

        self.fps = round(actual_fps)
        self.width = round(actual_width)
        self.height = round(actual_height)
        self.fps = actual_fps
        self.width = actual_width
        self.height = actual_height

        self.is_connected = True

    def read(self, temporary_color_mode: str | None = None) -> np.ndarray:
        """Read a frame from the camera returned in the format (height, width, channels)
        (e.g. 480 x 640 x 3), contrarily to the pytorch format which is channel first.
        (e.g. (640, 480, 3)), contrarily to the pytorch format which is channel first.

        Note: Reading a frame is done every `camera.fps` times per second, and it is blocking.
        If you are reading data from other sensors, we advise to use `camera.async_read()` which is non blocking version of `camera.read()`.
@@ -380,7 +298,6 @@ class OpenCVCamera:
        start_time = time.perf_counter()

        ret, color_image = self.camera.read()

        if not ret:
            raise OSError(f"Can't capture color image from camera {self.camera_index}.")

@@ -391,23 +308,17 @@ class OpenCVCamera:
                f"Expected color values are 'rgb' or 'bgr', but {requested_color_mode} is provided."
            )

        # OpenCV uses BGR format as default (blue, green, red) for all operations, including displaying images.
        # OpenCV uses BGR format as default (blue, green red) for all operations, including displaying images.
        # However, Deep Learning framework such as LeRobot uses RGB format as default to train neural networks,
        # so we convert the image color from BGR to RGB.
        if requested_color_mode == "rgb":
            if self.mock:
                import tests.mock_cv2 as cv2
            else:
                import cv2

            color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)

        h, w, _ = color_image.shape
        if h != self.height or w != self.width:
            color_image = cv2.resize(color_image, (640, 480))

        if self.rotation is not None:
            color_image = cv2.rotate(color_image, self.rotation)
            raise OSError(
                f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
            )

        # log the number of seconds it took to read the image
        self.logs["delta_timestamp_s"] = time.perf_counter() - start_time
@@ -415,16 +326,11 @@ class OpenCVCamera:
        # log the utc time at which the image was received
        self.logs["timestamp_utc"] = capture_timestamp_utc()

        self.color_image = color_image

        return color_image

    def read_loop(self):
        while not self.stop_event.is_set():
            try:
                self.color_image = self.read()
            except Exception as e:
                print(f"Error reading in thread: {e}")
        while self.stop_event is None or not self.stop_event.is_set():
            self.color_image = self.read()

    def async_read(self):
        if not self.is_connected:
@@ -439,14 +345,15 @@ class OpenCVCamera:
            self.thread.start()

        num_tries = 0
        while True:
            if self.color_image is not None:
                return self.color_image

            time.sleep(1 / self.fps)
        while self.color_image is None:
            num_tries += 1
            if num_tries > self.fps * 2:
                raise TimeoutError("Timed out waiting for async_read() to start.")
            time.sleep(1 / self.fps)
            if num_tries > self.fps and (self.thread.ident is None or not self.thread.is_alive()):
                raise Exception(
                    "The thread responsible for `self.async_read()` took too much time to start. There might be an issue. Verify that `self.thread.start()` has been called."
                )

        return self.color_image

    def disconnect(self):
        if not self.is_connected:
@@ -454,14 +361,16 @@ class OpenCVCamera:
                f"OpenCVCamera({self.camera_index}) is not connected. Try running `camera.connect()` first."
            )

        if self.thread is not None:
        if self.thread is not None and self.thread.is_alive():
            # wait for the thread to finish
            self.stop_event.set()
            self.thread.join()  # wait for the thread to finish
            self.thread.join()
            self.thread = None
            self.stop_event = None

        self.camera.release()
        self.camera = None

        self.is_connected = False

    def __del__(self):
@@ -507,7 +416,7 @@ if __name__ == "__main__":
    parser.add_argument(
        "--record-time-s",
        type=float,
        default=4.0,
        default=2.0,
        help="Set the number of seconds used to record the frames. By default, 2 seconds.",
    )
    args = parser.parse_args()

@@ -1,8 +1,55 @@
|
||||
from pathlib import Path
|
||||
from typing import Protocol
|
||||
|
||||
import cv2
|
||||
import einops
|
||||
import numpy as np
|
||||
|
||||
|
||||
def write_shape_on_image_inplace(image):
|
||||
height, width = image.shape[:2]
|
||||
text = f"Width: {width} Height: {height}"
|
||||
|
||||
# Define the font, scale, color, and thickness
|
||||
font = cv2.FONT_HERSHEY_SIMPLEX
|
||||
font_scale = 1
|
||||
color = (255, 0, 0) # Blue in BGR
|
||||
thickness = 2
|
||||
|
||||
position = (10, height - 10) # 10 pixels from the bottom-left corner
|
||||
cv2.putText(image, text, position, font, font_scale, color, thickness)
|
||||
|
||||
|
||||
def save_color_image(image, path, write_shape=False):
|
||||
path = Path(path)
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
if write_shape:
|
||||
write_shape_on_image_inplace(image)
|
||||
cv2.imwrite(str(path), image)
|
||||
|
||||
|
||||
def save_depth_image(depth, path, write_shape=False):
|
||||
path = Path(path)
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Apply colormap on depth image (image must be converted to 8-bit per pixel first)
|
||||
depth_image = cv2.applyColorMap(cv2.convertScaleAbs(depth, alpha=0.03), cv2.COLORMAP_JET)
|
||||
|
||||
if write_shape:
|
||||
write_shape_on_image_inplace(depth_image)
|
||||
cv2.imwrite(str(path), depth_image)
|
||||
|
||||
|
||||
def convert_torch_image_to_cv2(tensor, rgb_to_bgr=True):
|
||||
assert tensor.ndim == 3
|
||||
c, h, w = tensor.shape
|
||||
assert c < h and c < w
|
||||
color_image = einops.rearrange(tensor, "c h w -> h w c").numpy()
|
||||
if rgb_to_bgr:
|
||||
color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
|
||||
return color_image
|
||||
|
||||
|
||||
# Defines a camera type
|
||||
class Camera(Protocol):
|
||||
def connect(self): ...
|
||||
|
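Since `Camera` is a `typing.Protocol`, conformance is structural: any class with matching method shapes is accepted, with no inheritance required. A small sketch with a hypothetical `FakeCamera` (not part of the diff):

```python
class FakeCamera:
    def connect(self):
        print("connected")


def start(camera: Camera) -> None:
    camera.connect()


start(FakeCamera())  # accepted by static type checkers: FakeCamera matches the protocol
```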
@@ -1,330 +0,0 @@
########################################################################################
# Utilities
########################################################################################


import logging
import time
import traceback
from contextlib import nullcontext
from copy import copy
from functools import cache

import cv2
import torch
import tqdm
from termcolor import colored

from lerobot.common.datasets.populate_dataset import add_frame, safe_stop_image_writer
from lerobot.common.policies.factory import make_policy
from lerobot.common.robot_devices.robots.utils import Robot
from lerobot.common.robot_devices.utils import busy_wait
from lerobot.common.utils.utils import get_safe_torch_device, init_hydra_config, set_global_seed
from lerobot.scripts.eval import get_pretrained_policy_path


def log_control_info(robot: Robot, dt_s, episode_index=None, frame_index=None, fps=None):
    log_items = []
    if episode_index is not None:
        log_items.append(f"ep:{episode_index}")
    if frame_index is not None:
        log_items.append(f"frame:{frame_index}")

    def log_dt(shortname, dt_val_s):
        nonlocal log_items, fps
        info_str = f"{shortname}:{dt_val_s * 1000:5.2f} ({1/ dt_val_s:3.1f}hz)"
        if fps is not None:
            actual_fps = 1 / dt_val_s
            if actual_fps < fps - 1:
                info_str = colored(info_str, "yellow")
        log_items.append(info_str)

    # total step time displayed in milliseconds and its frequency
    log_dt("dt", dt_s)
    return
    # TODO(aliberts): move robot-specific logs logic in robot.print_logs()
    if not robot.robot_type.startswith("stretch"):
        for name in robot.leader_arms:
            key = f"read_leader_{name}_pos_dt_s"
            if key in robot.logs:
                log_dt("dtRlead", robot.logs[key])

        for name in robot.follower_arms:
            key = f"write_follower_{name}_goal_pos_dt_s"
            if key in robot.logs:
                log_dt("dtWfoll", robot.logs[key])

            key = f"read_follower_{name}_pos_dt_s"
            if key in robot.logs:
                log_dt("dtRfoll", robot.logs[key])

    for name in robot.cameras:
        key = f"read_camera_{name}_dt_s"
        if key in robot.logs:
            log_dt(f"dtR{name}", robot.logs[key])

    info_str = " ".join(log_items)
    logging.info(info_str)


@cache
def is_headless():
    """Detects if python is running without a monitor."""
    try:
        import pynput  # noqa

        return False
    except Exception:
        print(
            "Error trying to import pynput. Switching to headless mode. "
            "As a result, the video stream from the cameras won't be shown, "
            "and you won't be able to change the control flow with keyboards. "
            "For more info, see traceback below.\n"
        )
        traceback.print_exc()
        print()
        return True


def has_method(_object: object, method_name: str):
    return hasattr(_object, method_name) and callable(getattr(_object, method_name))


def predict_action(observation, policy, device, use_amp):
    observation = copy(observation)
    with (
        torch.inference_mode(),
        torch.autocast(device_type=device),
    ):
        # Convert to pytorch format: channel first and float32 in [0,1] with batch dimension
        for name in observation:
            if "image" in name:
                observation[name] = observation[name].type(torch.float32) / 255
                observation[name] = observation[name].permute(2, 0, 1).contiguous()
            observation[name] = observation[name].unsqueeze(0)
            observation[name] = observation[name].to(device)

        # Compute the next action with the policy
        # based on the current observation
        action = policy.select_action(observation)

        # Remove batch dimension
        action = action.squeeze(0)

        # Move to cpu, if not already the case
        action = action.to("cpu")

    return action

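To make the preprocessing in `predict_action` concrete, here is the same image conversion applied to a dummy HWC uint8 frame (shapes only; the real observation comes from the robot):

```python
import torch

obs = {"observation.images.cam": torch.randint(0, 256, (480, 640, 3), dtype=torch.uint8)}

img = obs["observation.images.cam"].type(torch.float32) / 255  # uint8 [0, 255] -> float32 [0, 1]
img = img.permute(2, 0, 1).contiguous()                        # HWC -> CHW
img = img.unsqueeze(0)                                         # add batch dimension
print(img.shape, img.dtype)  # torch.Size([1, 3, 480, 640]) torch.float32
```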
def init_keyboard_listener():
    # Allow to exit early while recording an episode or resetting the environment,
    # by tapping the right arrow key '->'. This might require a sudo permission
    # to allow your terminal to monitor keyboard events.
    events = {}
    events["exit_early"] = False
    events["rerecord_episode"] = False
    events["stop_recording"] = False

    if is_headless():
        logging.warning(
            "Headless environment detected. On-screen cameras display and keyboard inputs will not be available."
        )
        listener = None
        return listener, events

    # Only import pynput if not in a headless environment
    from pynput import keyboard

    def on_press(key):
        try:
            if key == keyboard.Key.right:
                print("Right arrow key pressed. Exiting loop...")
                events["exit_early"] = True
            elif key == keyboard.Key.left:
                print("Left arrow key pressed. Exiting loop and re-recording the last episode...")
                events["rerecord_episode"] = True
                events["exit_early"] = True
            elif key == keyboard.Key.esc:
                print("Escape key pressed. Stopping data recording...")
                events["stop_recording"] = True
                events["exit_early"] = True
        except Exception as e:
            print(f"Error handling key press: {e}")

    listener = keyboard.Listener(on_press=on_press)
    listener.start()

    return listener, events


def init_policy(pretrained_policy_name_or_path, policy_overrides):
    """Instantiate the policy and load fps, device and use_amp from config yaml"""
    pretrained_policy_path = get_pretrained_policy_path(pretrained_policy_name_or_path)
    hydra_cfg = init_hydra_config(pretrained_policy_path / "config.yaml", policy_overrides)
    policy = make_policy(hydra_cfg=hydra_cfg, pretrained_policy_name_or_path=pretrained_policy_path)

    # Check device is available
    # device = get_safe_torch_device(hydra_cfg.device, log=True)
    device = "mps"
    use_amp = hydra_cfg.use_amp
    policy_fps = hydra_cfg.env.fps

    policy.eval()
    policy.to(device)

    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True
    set_global_seed(hydra_cfg.seed)
    return policy, policy_fps, device, use_amp


def warmup_record(
    robot,
    events,
    enable_teloperation,
    warmup_time_s,
    display_cameras,
    fps,
):
    control_loop(
        robot=robot,
        control_time_s=warmup_time_s,
        display_cameras=display_cameras,
        events=events,
        fps=fps,
        teleoperate=enable_teloperation,
    )


def record_episode(
    robot,
    dataset,
    events,
    episode_time_s,
    display_cameras,
    policy,
    device,
    use_amp,
    fps,
):
    control_loop(
        robot=robot,
        control_time_s=episode_time_s,
        display_cameras=display_cameras,
        dataset=dataset,
        events=events,
        policy=policy,
        device=device,
        use_amp=use_amp,
        fps=fps,
        teleoperate=policy is None,
    )


@safe_stop_image_writer
def control_loop(
    robot,
    control_time_s=None,
    teleoperate=False,
    display_cameras=False,
    dataset=None,
    events=None,
    policy=None,
    device=None,
    use_amp=None,
    fps=None,
):
    # TODO(rcadene): Add option to record logs
    if not robot.is_connected:
        robot.connect()

    if events is None:
        events = {"exit_early": False}

    if control_time_s is None:
        control_time_s = float("inf")

    if teleoperate and policy is not None:
        raise ValueError("When `teleoperate` is True, `policy` should be None.")

    if dataset is not None and fps is not None and dataset["fps"] != fps:
        raise ValueError(f"The dataset fps should be equal to requested fps ({dataset['fps']} != {fps}).")

    timestamp = 0
    start_episode_t = time.perf_counter()
    while timestamp < control_time_s:
        start_loop_t = time.perf_counter()
        if teleoperate:
            observation, action = robot.teleop_step(record_data=True)
        else:
            observation = robot.capture_observation()

            if policy is not None:
                pred_action = predict_action(observation, policy, device, use_amp)
                # Action can eventually be clipped using `max_relative_target`,
                # so the action actually sent is saved in the dataset.
                action = robot.send_action(pred_action)
                action = {"action": action}

        if dataset is not None:
            add_frame(dataset, observation, action)

        if display_cameras and not is_headless():
            image_keys = [key for key in observation if "image" in key]
            for key in image_keys:
                cv2.imshow(key, cv2.cvtColor(observation[key].numpy(), cv2.COLOR_RGB2BGR))
            cv2.waitKey(1)

        if fps is not None:
            dt_s = time.perf_counter() - start_loop_t
            busy_wait(1 / fps - dt_s)

        dt_s = time.perf_counter() - start_loop_t
        # log_control_info(robot, dt_s, fps=fps)

        timestamp = time.perf_counter() - start_episode_t
        if events["exit_early"]:
            events["exit_early"] = False
            break

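The timing pattern in `control_loop` (measure the work time, then wait out the rest of the 1/fps budget) can be illustrated standalone; a sketch with a simplified stand-in for lerobot's `busy_wait`:

```python
import time


def busy_wait(seconds):
    # Simplified stand-in for lerobot's busy_wait: spin until the deadline to avoid sleep() jitter.
    end = time.perf_counter() + seconds
    while time.perf_counter() < end:
        pass


fps = 30
for _ in range(3):
    start_loop_t = time.perf_counter()
    time.sleep(0.01)  # placeholder for one step of real work
    busy_wait(1 / fps - (time.perf_counter() - start_loop_t))
    print(f"{time.perf_counter() - start_loop_t:.4f}s")  # ~0.0333s per iteration
```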
def reset_environment(robot, events, reset_time_s):
    # TODO(rcadene): refactor warmup_record and reset_environment
    # TODO(alibets): allow for teleop during reset
    if has_method(robot, "teleop_safety_stop"):
        robot.teleop_safety_stop()

    timestamp = 0
    start_vencod_t = time.perf_counter()

    # Wait if necessary
    with tqdm.tqdm(total=reset_time_s, desc="Waiting") as pbar:
        while timestamp < reset_time_s:
            time.sleep(1)
            timestamp = time.perf_counter() - start_vencod_t
            pbar.update(1)
            if events["exit_early"]:
                events["exit_early"] = False
                break


def stop_recording(robot, listener, display_cameras):
    robot.disconnect()

    if not is_headless():
        if listener is not None:
            listener.stop()

        if display_cameras:
            cv2.destroyAllWindows()


def sanity_check_dataset_name(repo_id, policy):
    _, dataset_name = repo_id.split("/")
    # either repo_id doesn't start with "eval_" and there is no policy,
    # or repo_id starts with "eval_" and there is a policy
    if dataset_name.startswith("eval_") == (policy is None):
        raise ValueError(
            f"Your dataset name begins by 'eval_' ({dataset_name}) but no policy is provided ({policy})."
        )
@@ -1,12 +1,22 @@
import enum
import logging
import math
import time
import traceback
from copy import deepcopy
from pathlib import Path

import numpy as np
import tqdm
from dynamixel_sdk import (
    COMM_SUCCESS,
    DXL_HIBYTE,
    DXL_HIWORD,
    DXL_LOBYTE,
    DXL_LOWORD,
    GroupSyncRead,
    GroupSyncWrite,
    PacketHandler,
    PortHandler,
)

from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
from lerobot.common.utils.utils import capture_timestamp_utc
@@ -17,28 +27,11 @@ TIMEOUT_MS = 1000

MAX_ID_RANGE = 252

# The following bounds define the lower and upper joints range (after calibration).
# For joints in degree (i.e. revolute joints), their nominal range is [-180, 180] degrees
# which corresponds to a half rotation on the left and half rotation on the right.
# Some joints might require a higher range, so we allow up to [-270, 270] degrees until
# an error is raised.
LOWER_BOUND_DEGREE = -270
UPPER_BOUND_DEGREE = 270
# For joints in percentage (i.e. joints that move linearly like the prismatic joint of a gripper),
# their nominal range is [0, 100] %. For instance, for the Aloha gripper, 0% is fully
# closed, and 100% is fully open. To account for slight calibration issues, we allow up to
# [-10, 110] until an error is raised.
LOWER_BOUND_LINEAR = -10
UPPER_BOUND_LINEAR = 110

HALF_TURN_DEGREE = 180

# https://emanual.robotis.com/docs/en/dxl/x/xl330-m077
# https://emanual.robotis.com/docs/en/dxl/x/xl330-m288
# https://emanual.robotis.com/docs/en/dxl/x/xl430-w250
# https://emanual.robotis.com/docs/en/dxl/x/xm430-w350
# https://emanual.robotis.com/docs/en/dxl/x/xm540-w270
# https://emanual.robotis.com/docs/en/dxl/x/xc430-w150

# data_name: (address, size_byte)
X_SERIES_CONTROL_TABLE = {
@@ -116,7 +109,6 @@ MODEL_CONTROL_TABLE = {
    "xl430-w250": X_SERIES_CONTROL_TABLE,
    "xm430-w350": X_SERIES_CONTROL_TABLE,
    "xm540-w270": X_SERIES_CONTROL_TABLE,
    "xc430-w150": X_SERIES_CONTROL_TABLE,
}

MODEL_RESOLUTION = {
@@ -126,7 +118,6 @@ MODEL_RESOLUTION = {
    "xl430-w250": 4096,
    "xm430-w350": 4096,
    "xm540-w270": 4096,
    "xc430-w150": 4096,
}

MODEL_BAUDRATE_TABLE = {
@@ -136,47 +127,44 @@ MODEL_BAUDRATE_TABLE = {
    "xl430-w250": X_SERIES_BAUDRATE_TABLE,
    "xm430-w350": X_SERIES_BAUDRATE_TABLE,
    "xm540-w270": X_SERIES_BAUDRATE_TABLE,
    "xc430-w150": X_SERIES_BAUDRATE_TABLE,
}

NUM_READ_RETRY = 10
NUM_WRITE_RETRY = 10


def convert_degrees_to_steps(degrees: float | np.ndarray, models: str | list[str]) -> np.ndarray:
    """This function converts the degree range to the step range for indicating motors rotation.
    It assumes a motor achieves a full rotation by going from -180 degree position to +180.
def convert_degrees_to_steps(degrees: float | np.ndarray, models: str | list[str]):
    """This function converts the degree range to the step range for indicating motors rotation.
    It assumes a motor achieves a full rotation by going from -180 degree position to +180.
    The motor resolution (e.g. 4096) corresponds to the number of steps needed to achieve a full rotation.
    """
    if isinstance(degrees, float):
        degrees = np.array(degrees)

    resolutions = [MODEL_RESOLUTION[model] for model in models]
    steps = degrees / 180 * np.array(resolutions) / 2
    steps = steps.astype(int)
    return steps

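A worked example of the conversion above: a quarter turn (90 degrees) on a 4096-step motor maps to 90 / 180 * 4096 / 2 = 1024 steps.

```python
import numpy as np

resolutions = np.array([4096, 4096])  # two motors with 4096 steps per full turn
steps = (90 / 180 * resolutions / 2).astype(int)
print(steps)  # [1024 1024]
```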
def convert_to_bytes(value, bytes, mock=False):
    if mock:
        return value

    import dynamixel_sdk as dxl

def convert_to_bytes(value, bytes):
    # Note: No need to convert back into unsigned int, since this byte preprocessing
    # already handles it for us.
    if bytes == 1:
        data = [
            dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)),
            DXL_LOBYTE(DXL_LOWORD(value)),
        ]
    elif bytes == 2:
        data = [
            dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)),
            dxl.DXL_HIBYTE(dxl.DXL_LOWORD(value)),
            DXL_LOBYTE(DXL_LOWORD(value)),
            DXL_HIBYTE(DXL_LOWORD(value)),
        ]
    elif bytes == 4:
        data = [
            dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)),
            dxl.DXL_HIBYTE(dxl.DXL_LOWORD(value)),
            dxl.DXL_LOBYTE(dxl.DXL_HIWORD(value)),
            dxl.DXL_HIBYTE(dxl.DXL_HIWORD(value)),
            DXL_LOBYTE(DXL_LOWORD(value)),
            DXL_HIBYTE(DXL_LOWORD(value)),
            DXL_LOBYTE(DXL_HIWORD(value)),
            DXL_HIBYTE(DXL_HIWORD(value)),
        ]
    else:
        raise NotImplementedError(
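The `DXL_LOBYTE`/`DXL_HIBYTE`/`DXL_LOWORD`/`DXL_HIWORD` helpers perform a little-endian split; the 4-byte case is equivalent to the following plain bit operations:

```python
value = 0x12345678
data = [
    value & 0xFF,          # DXL_LOBYTE(DXL_LOWORD(value)) -> 0x78
    (value >> 8) & 0xFF,   # DXL_HIBYTE(DXL_LOWORD(value)) -> 0x56
    (value >> 16) & 0xFF,  # DXL_LOBYTE(DXL_HIWORD(value)) -> 0x34
    (value >> 24) & 0xFF,  # DXL_HIBYTE(DXL_HIWORD(value)) -> 0x12
]
print([hex(b) for b in data])  # ['0x78', '0x56', '0x34', '0x12']
```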
@@ -228,29 +216,54 @@ def assert_same_address(model_ctrl_table, motor_models, data_name):
    )


def find_available_ports():
    ports = []
    for path in Path("/dev").glob("tty*"):
        ports.append(str(path))
    return ports


def find_port():
    print("Finding all available ports for the DynamixelMotorsBus.")
    ports_before = find_available_ports()
    print(ports_before)

    print("Remove the usb cable from your DynamixelMotorsBus and press Enter when done.")
    input()

    time.sleep(0.5)
    ports_after = find_available_ports()
    ports_diff = list(set(ports_before) - set(ports_after))

    if len(ports_diff) == 1:
        port = ports_diff[0]
        print(f"The port of this DynamixelMotorsBus is '{port}'")
        print("Reconnect the usb cable.")
    elif len(ports_diff) == 0:
        raise OSError(f"Could not detect the port. No difference was found ({ports_diff}).")
    else:
        raise OSError(f"Could not detect the port. More than one port was found ({ports_diff}).")


class TorqueMode(enum.Enum):
    ENABLED = 1
    DISABLED = 0


class OperatingMode(enum.Enum):
    VELOCITY = 1
    POSITION = 3
    EXTENDED_POSITION = 4
    CURRENT_CONTROLLED_POSITION = 5
    PWM = 16
    UNKNOWN = -1


class DriveMode(enum.Enum):
    NON_INVERTED = 0
    INVERTED = 1


class CalibrationMode(enum.Enum):
    # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180]
    DEGREE = 0
    # Joints with linear motions (like the gripper of Aloha) are expressed in nominal range of [0, 100]
    LINEAR = 1


class JointOutOfRangeError(Exception):
    def __init__(self, message="Joint is out of range"):
        self.message = message
        super().__init__(self.message)


class DynamixelMotorsBus:
    # TODO(rcadene): Add a script to find the motor indices without DynamixelWizard2
    """
@@ -260,8 +273,8 @@ class DynamixelMotorsBus:
    A DynamixelMotorsBus instance requires a port (e.g. `DynamixelMotorsBus(port="/dev/tty.usbmodem575E0031751")`).
    To find the port, you can run our utility script:
    ```bash
    python lerobot/scripts/find_motors_bus_port.py
    >>> Finding all available ports for the MotorBus.
    python lerobot/common/robot_devices/motors/dynamixel.py
    >>> Finding all available ports for the DynamixelMotorsBus.
    >>> ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
    >>> Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
    >>> The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751.
@@ -297,11 +310,9 @@ class DynamixelMotorsBus:
        motors: dict[str, tuple[int, str]],
        extra_model_control_table: dict[str, list[tuple]] | None = None,
        extra_model_resolution: dict[str, int] | None = None,
        mock=False,
    ):
        self.port = port
        self.motors = motors
        self.mock = mock

        self.model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE)
        if extra_model_control_table:
@@ -325,13 +336,8 @@ class DynamixelMotorsBus:
                f"DynamixelMotorsBus({self.port}) is already connected. Do not call `motors_bus.connect()` twice."
            )

        if self.mock:
            import tests.mock_dynamixel_sdk as dxl
        else:
            import dynamixel_sdk as dxl

        self.port_handler = dxl.PortHandler(self.port)
        self.packet_handler = dxl.PacketHandler(PROTOCOL_VERSION)
        self.port_handler = PortHandler(self.port)
        self.packet_handler = PacketHandler(PROTOCOL_VERSION)

        try:
            if not self.port_handler.openPort():
@@ -339,7 +345,7 @@ class DynamixelMotorsBus:
        except Exception:
            traceback.print_exc()
            print(
                "\nTry running `python lerobot/scripts/find_motors_bus_port.py` to make sure you are using the correct port.\n"
                "\nTry running `python lerobot/common/robot_devices/motors/dynamixel.py` to make sure you are using the correct port.\n"
            )
            raise

@@ -348,18 +354,25 @@ class DynamixelMotorsBus:

        self.port_handler.setPacketTimeoutMillis(TIMEOUT_MS)

        # Set expected baudrate for the bus
        self.set_bus_baudrate(BAUDRATE)

        if not self.are_motors_configured():
            input(
                "\n/!\\ A configuration issue has been detected with your motors: \n"
                "If it's the first time that you use these motors, press enter to configure your motors... but before, "
                "verify that all the cables are connected the proper way. If you find an issue, before making a modification, "
                "kill the python process, unplug the power cord to not damage the motors, rewire correctly, then plug the power "
                "again and relaunch the script.\n"
            )
            print()
            self.configure_motors()

    def reconnect(self):
        if self.mock:
            import tests.mock_dynamixel_sdk as dxl
        else:
            import dynamixel_sdk as dxl

        self.port_handler = dxl.PortHandler(self.port)
        self.packet_handler = dxl.PacketHandler(PROTOCOL_VERSION)

        self.port_handler = PortHandler(self.port)
        self.packet_handler = PacketHandler(PROTOCOL_VERSION)
        if not self.port_handler.openPort():
            raise OSError(f"Failed to open port '{self.port}'.")

        self.is_connected = True

    def are_motors_configured(self):
@@ -371,14 +384,120 @@ class DynamixelMotorsBus:
            print(e)
            return False

    def find_motor_indices(self, possible_ids=None, num_retry=2):
    def configure_motors(self):
        # TODO(rcadene): This script assumes motors follow the X_SERIES baudrates
        # TODO(rcadene): Refactor this function with intermediate high-level functions

        print("Scanning all baudrates and motor indices")
        all_baudrates = set(X_SERIES_BAUDRATE_TABLE.values())
        ids_per_baudrate = {}
        for baudrate in all_baudrates:
            self.set_bus_baudrate(baudrate)
            present_ids = self.find_motor_indices()
            if len(present_ids) > 0:
                ids_per_baudrate[baudrate] = present_ids
        print(f"Motor indices detected: {ids_per_baudrate}")
        print()

        possible_baudrates = list(ids_per_baudrate.keys())
        possible_ids = list({idx for sublist in ids_per_baudrate.values() for idx in sublist})
        untaken_ids = list(set(range(MAX_ID_RANGE)) - set(possible_ids) - set(self.motor_indices))

        # Connect successively one motor to the chain and write a unique random index for each
        for i in range(len(self.motors)):
            self.disconnect()
            input(
                "1. Unplug the power cord\n"
                "2. Plug/unplug minimal number of cables to only have the first "
                f"{i+1} motor(s) ({self.motor_names[:i+1]}) connected.\n"
                "3. Re-plug the power cord\n"
                "Press Enter to continue..."
            )
            print()
            self.reconnect()

            if i > 0:
                try:
                    self._read_with_motor_ids(self.motor_models, untaken_ids[:i], "ID")
                except ConnectionError:
                    print(f"Failed to read from {untaken_ids[:i+1]}. Make sure the power cord is plugged in.")
                    input("Press Enter to continue...")
                    print()
                    self.reconnect()

            print("Scanning possible baudrates and motor indices")
            motor_found = False
            for baudrate in possible_baudrates:
                self.set_bus_baudrate(baudrate)
                present_ids = self.find_motor_indices(possible_ids)
                if len(present_ids) == 1:
                    present_idx = present_ids[0]
                    print(f"Detected motor with index {present_idx}")

                    if baudrate != BAUDRATE:
                        print(f"Setting its baudrate to {BAUDRATE}")
                        baudrate_idx = list(X_SERIES_BAUDRATE_TABLE.values()).index(BAUDRATE)

                        # The write can fail, so we allow retries
                        for _ in range(NUM_WRITE_RETRY):
                            self._write_with_motor_ids(
                                self.motor_models, present_idx, "Baud_Rate", baudrate_idx
                            )
                            time.sleep(0.5)
                            self.set_bus_baudrate(BAUDRATE)
                            try:
                                present_baudrate_idx = self._read_with_motor_ids(
                                    self.motor_models, present_idx, "Baud_Rate"
                                )
                            except ConnectionError:
                                print("Failed to write baudrate. Retrying.")
                                self.set_bus_baudrate(baudrate)
                                continue
                            break
                        else:
                            raise

                        if present_baudrate_idx != baudrate_idx:
                            raise OSError("Failed to write baudrate.")

                    print(f"Setting its index to a temporary untaken index ({untaken_ids[i]})")
                    self._write_with_motor_ids(self.motor_models, present_idx, "ID", untaken_ids[i])

                    present_idx = self._read_with_motor_ids(self.motor_models, untaken_ids[i], "ID")
                    if present_idx != untaken_ids[i]:
                        raise OSError("Failed to write index.")

                    motor_found = True
                    break
                elif len(present_ids) > 1:
                    raise OSError(f"More than one motor detected ({present_ids}), but only one was expected.")

            if not motor_found:
                raise OSError(
                    "No motor found, but one new motor expected. Verify power cord is plugged in and retry."
                )
            print()

        print(f"Setting expected motor indices: {self.motor_indices}")
        self.set_bus_baudrate(BAUDRATE)
        self._write_with_motor_ids(
            self.motor_models, untaken_ids[: len(self.motors)], "ID", self.motor_indices
        )
        print()

        if (self.read("ID") != self.motor_indices).any():
            raise OSError("Failed to write motors indices.")

        print("Configuration is done!")

    def find_motor_indices(self, possible_ids=None):
        if possible_ids is None:
            possible_ids = range(MAX_ID_RANGE)

        indices = []
        for idx in tqdm.tqdm(possible_ids):
            try:
                present_idx = self.read_with_motor_ids(self.motor_models, [idx], "ID", num_retry=num_retry)[0]
                present_idx = self._read_with_motor_ids(self.motor_models, [idx], "ID")[0]
            except ConnectionError:
                continue

@@ -412,22 +531,9 @@ class DynamixelMotorsBus:
    def motor_indices(self) -> list[int]:
        return [idx for idx, _ in self.motors.values()]

    def set_calibration(self, calibration: dict[str, list]):
    def set_calibration(self, calibration: dict[str, tuple[int, bool]]):
        self.calibration = calibration

    def apply_calibration_autocorrect(self, values: np.ndarray | list, motor_names: list[str] | None):
        """This function applies the calibration, automatically detects out-of-range errors for motor values, and attempts to correct them.

        For more info, see the docstrings of `apply_calibration` and `autocorrect_calibration`.
        """
        try:
            values = self.apply_calibration(values, motor_names)
        except JointOutOfRangeError as e:
            print(e)
            self.autocorrect_calibration(values, motor_names)
            values = self.apply_calibration(values, motor_names)
        return values

    def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """Convert from the unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
        a "zero position" at 0 degree.
@@ -445,205 +551,56 @@ class DynamixelMotorsBus:
        if motor_names is None:
            motor_names = self.motor_names

        # Convert from unsigned int32 original range [0, 2**32] to signed float32 range
        values = values.astype(np.float32)
        # Convert from unsigned int32 original range [0, 2**32[ to centered signed int32 range [-2**31, 2**31[
        values = values.astype(np.int32)

        for i, name in enumerate(motor_names):
            calib_idx = self.calibration["motor_names"].index(name)
            calib_mode = self.calibration["calib_mode"][calib_idx]
            homing_offset, drive_mode = self.calibration[name]

            if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
                drive_mode = self.calibration["drive_mode"][calib_idx]
                homing_offset = self.calibration["homing_offset"][calib_idx]
                _, model = self.motors[name]
                resolution = self.model_resolution[model]
            # Update direction of rotation of the motor to match between leader and follower. In fact, the motor of the leader for a given joint
            # can be assembled in an opposite direction in terms of rotation than the motor of the follower on the same joint.
            if drive_mode:
                values[i] *= -1

                # Update direction of rotation of the motor to match between leader and follower.
                # In fact, the motor of the leader for a given joint can be assembled in an
                # opposite direction in terms of rotation than the motor of the follower on the same joint.
                if drive_mode:
                    values[i] *= -1
            # Convert from range [-2**31, 2**31[ to nominal range ]-resolution, resolution[ (e.g. ]-2048, 2048[)
            values[i] += homing_offset

                # Convert from range [-2**31, 2**31] to
                # nominal range [-resolution//2, resolution//2] (e.g. [-2048, 2048])
                values[i] += homing_offset

                # Convert from range [-resolution//2, resolution//2] to
                # universal float32 centered degree range [-180, 180]
                # (e.g. 2048 / (4096 // 2) * 180 = 180)
                values[i] = values[i] / (resolution // 2) * HALF_TURN_DEGREE

                if (values[i] < LOWER_BOUND_DEGREE) or (values[i] > UPPER_BOUND_DEGREE):
                    raise JointOutOfRangeError(
                        f"Wrong motor position range detected for {name}. "
                        f"Expected to be in nominal range of [-{HALF_TURN_DEGREE}, {HALF_TURN_DEGREE}] degrees (a full rotation), "
                        f"with a maximum range of [{LOWER_BOUND_DEGREE}, {UPPER_BOUND_DEGREE}] degrees to account for joints that can rotate a bit more, "
                        f"but present value is {values[i]} degree. "
                        "This might be due to a cable connection issue creating an artificial 360 degrees jump in motor values. "
                        "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
                    )

            elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
                end_pos = self.calibration["end_pos"][calib_idx]

                # Rescale the present position to a nominal range [0, 100] %,
                # useful for joints with linear motions like the Aloha gripper
                values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100

                if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
                    raise JointOutOfRangeError(
                        f"Wrong motor position range detected for {name}. "
                        f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
                        f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
                        f"but present value is {values[i]} %. "
                        "This might be due to a cable connection issue creating an artificial jump in motor values. "
                        "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
                    )
        # Convert from range ]-resolution, resolution[ to the universal float32 centered degree range ]-180, 180[
        values = values.astype(np.float32)
        for i, name in enumerate(motor_names):
            _, model = self.motors[name]
            resolution = self.model_resolution[model]
            values[i] = values[i] / (resolution // 2) * 180

        return values

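A worked example of the degree branch above, with hypothetical numbers: a raw reading of 3072 steps, a homing offset of -2048, and a 4096-step motor land at +90 degrees.

```python
resolution = 4096
raw, homing_offset = 3072, -2048
degrees = (raw + homing_offset) / (resolution // 2) * 180
print(degrees)  # 90.0, well inside the nominal [-180, 180] range
```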
    def autocorrect_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """This function automatically detects issues with the values of motors after calibration, and corrects for these issues.

        Some motors might have values outside of the expected maximum bounds after calibration.
        For instance, for a joint in degree, its value can be outside [-270, 270] degrees, which is totally unexpected given
        a nominal range of [-180, 180] degrees, which represents half a turn to the left or right starting from zero position.

        Known issues:
        #1: Motor value randomly shifts by a full turn, caused by hardware/connection errors.
        #2: Motor internal homing offset is shifted by a full turn, caused by using default calibration (e.g. Aloha).
        #3: Motor internal homing offset is shifted by less or more than a full turn, caused by using default calibration
            or by human error during manual calibration.

        Issues #1 and #2 can be solved by shifting the calibration homing offset by a full turn.
        Issue #3 will be visually detected by the user and potentially captured by the safety feature `max_relative_target`,
        which will slow down the motor and raise an error asking to recalibrate. Manual recalibration will solve the issue.

        Note: A full turn corresponds to 360 degrees but also to 4096 steps for a motor resolution of 4096.
        """
        if motor_names is None:
            motor_names = self.motor_names

        # Convert from unsigned int32 original range [0, 2**32] to signed float32 range
        values = values.astype(np.float32)

        for i, name in enumerate(motor_names):
            calib_idx = self.calibration["motor_names"].index(name)
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
                drive_mode = self.calibration["drive_mode"][calib_idx]
                homing_offset = self.calibration["homing_offset"][calib_idx]
                _, model = self.motors[name]
                resolution = self.model_resolution[model]

                # Update direction of rotation of the motor to match between leader and follower.
                # In fact, the motor of the leader for a given joint can be assembled in an
                # opposite direction in terms of rotation than the motor of the follower on the same joint.
                if drive_mode:
                    values[i] *= -1

                # Convert from initial range to range [-180, 180] degrees
                calib_val = (values[i] + homing_offset) / (resolution // 2) * HALF_TURN_DEGREE
                in_range = (calib_val > LOWER_BOUND_DEGREE) and (calib_val < UPPER_BOUND_DEGREE)

                # Solve this inequality to find the factor to shift the range into [-180, 180] degrees
                # values[i] = (values[i] + homing_offset + resolution * factor) / (resolution // 2) * HALF_TURN_DEGREE
                # - HALF_TURN_DEGREE <= (values[i] + homing_offset + resolution * factor) / (resolution // 2) * HALF_TURN_DEGREE <= HALF_TURN_DEGREE
                # (- (resolution // 2) - values[i] - homing_offset) / resolution <= factor <= ((resolution // 2) - values[i] - homing_offset) / resolution
                low_factor = (-(resolution // 2) - values[i] - homing_offset) / resolution
                upp_factor = ((resolution // 2) - values[i] - homing_offset) / resolution

            elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
                end_pos = self.calibration["end_pos"][calib_idx]

                # Convert from initial range to range [0, 100] in %
                calib_val = (values[i] - start_pos) / (end_pos - start_pos) * 100
                in_range = (calib_val > LOWER_BOUND_LINEAR) and (calib_val < UPPER_BOUND_LINEAR)

                # Solve this inequality to find the factor to shift the range into [0, 100] %
                # values[i] = (values[i] - start_pos + resolution * factor) / (end_pos + resolution * factor - start_pos - resolution * factor) * 100
                # values[i] = (values[i] - start_pos + resolution * factor) / (end_pos - start_pos) * 100
                # 0 <= (values[i] - start_pos + resolution * factor) / (end_pos - start_pos) * 100 <= 100
                # (start_pos - values[i]) / resolution <= factor <= (end_pos - values[i]) / resolution
                low_factor = (start_pos - values[i]) / resolution
                upp_factor = (end_pos - values[i]) / resolution

            if not in_range:
                # Get the first integer between the two bounds
                if low_factor < upp_factor:
                    factor = math.ceil(low_factor)

                    if factor > upp_factor:
                        raise ValueError(f"No integer found between bounds [{low_factor=}, {upp_factor=}]")
                else:
                    factor = math.ceil(upp_factor)

                    if factor > low_factor:
                        raise ValueError(f"No integer found between bounds [{low_factor=}, {upp_factor=}]")

                if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
                    out_of_range_str = f"{LOWER_BOUND_DEGREE} < {calib_val} < {UPPER_BOUND_DEGREE} degrees"
                    in_range_str = f"{LOWER_BOUND_DEGREE} < {calib_val} < {UPPER_BOUND_DEGREE} degrees"
                elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                    out_of_range_str = f"{LOWER_BOUND_LINEAR} < {calib_val} < {UPPER_BOUND_LINEAR} %"
                    in_range_str = f"{LOWER_BOUND_LINEAR} < {calib_val} < {UPPER_BOUND_LINEAR} %"

                logging.warning(
                    f"Auto-correct calibration of motor '{name}' by shifting value by {abs(factor)} full turns, "
                    f"from '{out_of_range_str}' to '{in_range_str}'."
                )

                # A full turn corresponds to 360 degrees but also to 4096 steps for a motor resolution of 4096.
                self.calibration["homing_offset"][calib_idx] += resolution * factor

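To see the inequality at work, take a hypothetical reading that calibrates to 270 degrees (outside the allowed open interval ]-270, 270[): the factor comes out as -1, i.e. one full turn down, which brings the value back to -90 degrees.

```python
import math

resolution = 4096
value, homing_offset = 5120, -2048  # calibrates to (5120 - 2048) / 2048 * 180 = 270 degrees
low_factor = (-(resolution // 2) - value - homing_offset) / resolution  # -1.25
upp_factor = ((resolution // 2) - value - homing_offset) / resolution   # -0.25
factor = math.ceil(low_factor) if low_factor < upp_factor else math.ceil(upp_factor)
corrected = (value + homing_offset + resolution * factor) / (resolution // 2) * 180
print(factor, corrected)  # -1 -90.0
```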
    def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """Inverse of `apply_calibration`."""
        if motor_names is None:
            motor_names = self.motor_names

        # Convert from the universal float32 centered degree range ]-180, 180[ to resolution range ]-resolution, resolution[
        for i, name in enumerate(motor_names):
            calib_idx = self.calibration["motor_names"].index(name)
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
                drive_mode = self.calibration["drive_mode"][calib_idx]
                homing_offset = self.calibration["homing_offset"][calib_idx]
                _, model = self.motors[name]
                resolution = self.model_resolution[model]

                # Convert from nominal 0-centered degree range [-180, 180] to
                # 0-centered resolution range (e.g. [-2048, 2048] for resolution=4096)
                values[i] = values[i] / HALF_TURN_DEGREE * (resolution // 2)

                # Subtract the homing offsets to come back to the actual motor range of values
                # which can be arbitrary.
                values[i] -= homing_offset

                # Remove drive mode, which is the rotation direction of the motor, to come back to
                # the actual motor rotation direction which can be arbitrary.
                if drive_mode:
                    values[i] *= -1

            elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
                end_pos = self.calibration["end_pos"][calib_idx]

                # Convert from nominal linear range of [0, 100] % to
                # the actual motor range of values which can be arbitrary.
                values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
            _, model = self.motors[name]
            resolution = self.model_resolution[model]
            values[i] = values[i] / 180 * (resolution // 2)

        values = np.round(values).astype(np.int32)

        # Convert from nominal range ]-resolution, resolution[ to centered signed int32 range [-2**31, 2**31[
        for i, name in enumerate(motor_names):
            homing_offset, drive_mode = self.calibration[name]
            values[i] -= homing_offset

            # Update direction of rotation of the motor that was matching between leader and follower to their original direction.
            # In fact, the motor of the leader for a given joint can be assembled in an opposite direction in terms of rotation
            # than the motor of the follower on the same joint.
            if drive_mode:
                values[i] *= -1

        return values

    def read_with_motor_ids(self, motor_models, motor_ids, data_name, num_retry=NUM_READ_RETRY):
        if self.mock:
            import tests.mock_dynamixel_sdk as dxl
        else:
            import dynamixel_sdk as dxl

    def _read_with_motor_ids(self, motor_models, motor_ids, data_name):
        return_list = True
        if not isinstance(motor_ids, list):
            return_list = False
@@ -651,16 +608,12 @@ class DynamixelMotorsBus:

        assert_same_address(self.model_ctrl_table, self.motor_models, data_name)
        addr, bytes = self.model_ctrl_table[motor_models[0]][data_name]
        group = dxl.GroupSyncRead(self.port_handler, self.packet_handler, addr, bytes)
        group = GroupSyncRead(self.port_handler, self.packet_handler, addr, bytes)
        for idx in motor_ids:
            group.addParam(idx)

        for _ in range(num_retry):
            comm = group.txRxPacket()
            if comm == dxl.COMM_SUCCESS:
                break

        if comm != dxl.COMM_SUCCESS:
        comm = group.txRxPacket()
        if comm != COMM_SUCCESS:
            raise ConnectionError(
                f"Read failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: "
                f"{self.packet_handler.getTxRxResult(comm)}"
@@ -684,11 +637,6 @@ class DynamixelMotorsBus:

        start_time = time.perf_counter()

        if self.mock:
            import tests.mock_dynamixel_sdk as dxl
        else:
            import dynamixel_sdk as dxl

        if motor_names is None:
            motor_names = self.motor_names

@@ -708,18 +656,16 @@ class DynamixelMotorsBus:

        if data_name not in self.group_readers:
            # create new group reader
            self.group_readers[group_key] = dxl.GroupSyncRead(
                self.port_handler, self.packet_handler, addr, bytes
            )
            self.group_readers[group_key] = GroupSyncRead(self.port_handler, self.packet_handler, addr, bytes)
            for idx in motor_ids:
                self.group_readers[group_key].addParam(idx)

        for _ in range(NUM_READ_RETRY):
            comm = self.group_readers[group_key].txRxPacket()
            if comm == dxl.COMM_SUCCESS:
            if comm == COMM_SUCCESS:
                break

        if comm != dxl.COMM_SUCCESS:
        if comm != COMM_SUCCESS:
            raise ConnectionError(
                f"Read failed due to communication error on port {self.port} for group_key {group_key}: "
                f"{self.packet_handler.getTxRxResult(comm)}"
@@ -737,7 +683,19 @@ class DynamixelMotorsBus:
        values = values.astype(np.int32)

        if data_name in CALIBRATION_REQUIRED and self.calibration is not None:
            values = self.apply_calibration_autocorrect(values, motor_names)
            values = self.apply_calibration(values, motor_names)

            # We expect our motors to stay in a nominal range of [-180, 180] degrees
            # which corresponds to a half turn rotation.
            # However, some motors can turn a bit more, hence we extend the nominal range to [-270, 270]
            # which is less than a full 360 degree rotation.
            if not np.all((values > -270) & (values < 270)):
                raise ValueError(
                    f"Wrong motor position range detected. "
                    f"Expected to be in [-270, +270] but in [{values.min()}, {values.max()}]. "
                    "This might be due to a cable connection issue creating an artificial 360 degrees jump in motor values. "
                    "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
                )

        # log the number of seconds it took to read the data from the motors
        delta_ts_name = get_log_name("delta_timestamp_s", "read", data_name, motor_names)
@@ -749,12 +707,7 @@ class DynamixelMotorsBus:

        return values

    def write_with_motor_ids(self, motor_models, motor_ids, data_name, values, num_retry=NUM_WRITE_RETRY):
        if self.mock:
            import tests.mock_dynamixel_sdk as dxl
        else:
            import dynamixel_sdk as dxl

    def _write_with_motor_ids(self, motor_models, motor_ids, data_name, values):
        if not isinstance(motor_ids, list):
            motor_ids = [motor_ids]
        if not isinstance(values, list):
@@ -762,17 +715,13 @@ class DynamixelMotorsBus:

        assert_same_address(self.model_ctrl_table, motor_models, data_name)
        addr, bytes = self.model_ctrl_table[motor_models[0]][data_name]
        group = dxl.GroupSyncWrite(self.port_handler, self.packet_handler, addr, bytes)
        group = GroupSyncWrite(self.port_handler, self.packet_handler, addr, bytes)
        for idx, value in zip(motor_ids, values, strict=True):
            data = convert_to_bytes(value, bytes, self.mock)
            data = convert_to_bytes(value, bytes)
            group.addParam(idx, data)

        for _ in range(num_retry):
            comm = group.txPacket()
            if comm == dxl.COMM_SUCCESS:
                break

        if comm != dxl.COMM_SUCCESS:
        comm = group.txPacket()
        if comm != COMM_SUCCESS:
            raise ConnectionError(
                f"Write failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: "
                f"{self.packet_handler.getTxRxResult(comm)}"
@@ -786,11 +735,6 @@ class DynamixelMotorsBus:

        start_time = time.perf_counter()

        if self.mock:
            import tests.mock_dynamixel_sdk as dxl
        else:
            import dynamixel_sdk as dxl

        if motor_names is None:
            motor_names = self.motor_names

@@ -820,19 +764,19 @@ class DynamixelMotorsBus:

        init_group = data_name not in self.group_readers
        if init_group:
            self.group_writers[group_key] = dxl.GroupSyncWrite(
            self.group_writers[group_key] = GroupSyncWrite(
                self.port_handler, self.packet_handler, addr, bytes
            )

        for idx, value in zip(motor_ids, values, strict=True):
            data = convert_to_bytes(value, bytes, self.mock)
            data = convert_to_bytes(value, bytes)
            if init_group:
                self.group_writers[group_key].addParam(idx, data)
            else:
                self.group_writers[group_key].changeParam(idx, data)

        comm = self.group_writers[group_key].txPacket()
        if comm != dxl.COMM_SUCCESS:
        if comm != COMM_SUCCESS:
            raise ConnectionError(
                f"Write failed due to communication error on port {self.port} for group_key {group_key}: "
                f"{self.packet_handler.getTxRxResult(comm)}"
@@ -865,3 +809,8 @@ class DynamixelMotorsBus:
    def __del__(self):
        if getattr(self, "is_connected", False):
            self.disconnect()


if __name__ == "__main__":
    # Helper to find the usb port associated to all your DynamixelMotorsBus.
    find_port()

@@ -1,130 +0,0 @@
"""Logic to calibrate a robot arm built with dynamixel motors"""
# TODO(rcadene, aliberts): move this logic into the robot code when refactoring

import numpy as np

from lerobot.common.robot_devices.motors.dynamixel import (
    CalibrationMode,
    TorqueMode,
    convert_degrees_to_steps,
)
from lerobot.common.robot_devices.motors.utils import MotorsBus

URL_TEMPLATE = (
    "https://raw.githubusercontent.com/huggingface/lerobot/main/media/{robot}/{arm}_{position}.webp"
)

# The following positions are provided in nominal degree range ]-180, +180[
# For more info on these constants, see comments in the code where they get used.
ZERO_POSITION_DEGREE = 0
ROTATED_POSITION_DEGREE = 90


def assert_drive_mode(drive_mode):
    # `drive_mode` is in [0, 1], where 0 means the original rotation direction for the motor and 1 means inverted.
    if not np.all(np.isin(drive_mode, [0, 1])):
        raise ValueError(f"`drive_mode` contains values other than 0 or 1: ({drive_mode})")


def apply_drive_mode(position, drive_mode):
    assert_drive_mode(drive_mode)
    # Convert `drive_mode` from [0, 1], where 0 indicates the original rotation direction and 1 inverted,
    # to [-1, 1], where 1 indicates the original rotation direction and -1 inverted.
    signed_drive_mode = -(drive_mode * 2 - 1)
    position *= signed_drive_mode
    return position

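A quick check of the [0, 1] to [+1, -1] mapping used by `apply_drive_mode`: drive mode 0 keeps the sign of a position, drive mode 1 flips it.

```python
import numpy as np

position = np.array([100, 100])
drive_mode = np.array([0, 1])
signed_drive_mode = -(drive_mode * 2 - 1)  # [ 1, -1]
print(position * signed_drive_mode)        # [ 100 -100]
```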
def compute_nearest_rounded_position(position, models):
    delta_turn = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, models)
    nearest_pos = np.round(position.astype(float) / delta_turn) * delta_turn
    return nearest_pos.astype(position.dtype)

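For a 4096-step motor, `delta_turn` is 1024 steps (a quarter turn), so a reading of 2060 snaps to 2048; a worked example with hypothetical readings:

```python
import numpy as np

delta_turn = 1024  # what convert_degrees_to_steps(90, [model]) returns for a 4096-step motor
position = np.array([2060, 990])
nearest_pos = np.round(position.astype(float) / delta_turn) * delta_turn
print(nearest_pos.astype(int))  # [2048 1024]
```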
def run_arm_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
    """This function ensures that a neural network trained on data collected on a given robot
    can work on another robot. For instance, before calibration, setting the same goal position
    for each motor of two different robots will get two very different positions. But after calibration,
    the two robots will move to the same position. To this end, this function computes the homing offset
    and the drive mode for each motor of a given robot.

    Homing offset is used to shift the motor position to a ]-2048, +2048[ nominal range (when the motor uses 2048 steps
    to complete a half a turn). This range is set around an arbitrary "zero position" corresponding to all motor positions
    being 0. During the calibration process, you will need to manually move the robot to this "zero position".

    Drive mode is used to invert the rotation direction of the motor. This is useful when some motors have been assembled
    in the opposite orientation for some robots. During the calibration process, you will need to manually move the robot
    to the "rotated position".

    After calibration, the homing offsets and drive modes are stored in a cache.

    Example of usage:
    ```python
    run_arm_calibration(arm, "koch", "left", "follower")
    ```
    """
    if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
        raise ValueError("To run calibration, the torque must be disabled on all motors.")

    print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")

    print("\nMove arm to zero position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="zero"))
    input("Press Enter to continue...")

    # We arbitrarily chose our zero target position to be a straight horizontal position with gripper upwards and closed.
    # It is easy to identify and all motors are in a "quarter turn" position. Once calibration is done, this position will
    # correspond to every motor angle being 0. If you set all 0 as Goal Position, the arm will move to this position.
    zero_target_pos = convert_degrees_to_steps(ZERO_POSITION_DEGREE, arm.motor_models)

    # Compute homing offset so that `present_position + homing_offset ~= target_position`.
    zero_pos = arm.read("Present_Position")
    zero_nearest_pos = compute_nearest_rounded_position(zero_pos, arm.motor_models)
    homing_offset = zero_target_pos - zero_nearest_pos

    # The rotated target position corresponds to a rotation of a quarter turn from the zero position.
    # This allows to identify the rotation direction of each motor.
    # For instance, if the motor rotates 90 degrees, and its value is -90 after applying the homing offset, then we know its rotation direction
    # is inverted. However, for the calibration to be successful, we need everyone to follow the same target position.
    # Sometimes, there is only one possible rotation direction. For instance, if the gripper is closed, there is only one direction which
    # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarily rotate clockwise from the point of view
    # of the previous motor in the kinematic chain.
    print("\nMove arm to rotated target position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rotated"))
    input("Press Enter to continue...")

    rotated_target_pos = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, arm.motor_models)

    # Find drive mode by rotating each motor by a quarter of a turn.
    # Drive mode indicates if the motor rotation direction should be inverted (=1) or not (=0).
    rotated_pos = arm.read("Present_Position")
    drive_mode = (rotated_pos < zero_pos).astype(np.int32)

    # Re-compute homing offset to take into account drive mode
    rotated_drived_pos = apply_drive_mode(rotated_pos, drive_mode)
    rotated_nearest_pos = compute_nearest_rounded_position(rotated_drived_pos, arm.motor_models)
    homing_offset = rotated_target_pos - rotated_nearest_pos

    print("\nMove arm to rest position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rest"))
    input("Press Enter to continue...")
    print()

    # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180]
    calib_mode = [CalibrationMode.DEGREE.name] * len(arm.motor_names)

    # TODO(rcadene): make type of joints (DEGREE or LINEAR) configurable from yaml?
    if robot_type in ["aloha"] and "gripper" in arm.motor_names:
        # Joints with linear motions (like the gripper of Aloha) are expressed in nominal range of [0, 100]
        calib_idx = arm.motor_names.index("gripper")
        calib_mode[calib_idx] = CalibrationMode.LINEAR.name

    calib_data = {
        "homing_offset": homing_offset.tolist(),
        "drive_mode": drive_mode.tolist(),
        "start_pos": zero_pos.tolist(),
        "end_pos": rotated_pos.tolist(),
        "calib_mode": calib_mode,
        "motor_names": arm.motor_names,
    }
    return calib_data
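Illustrative numbers for the procedure above (simplified: the snap to the nearest quarter turn is omitted): a motor reading 2048 steps at the zero pose gets homing_offset = -2048, and a motor whose value decreased after the quarter turn gets drive_mode = 1.

```python
import numpy as np

zero_target_pos = np.array([0, 0])
zero_pos = np.array([2048, 1024])
homing_offset = zero_target_pos - zero_pos  # [-2048 -1024]

rotated_pos = np.array([3072, 0])  # first motor moved up, second moved down
drive_mode = (rotated_pos < zero_pos).astype(np.int32)
print(homing_offset, drive_mode)  # [-2048 -1024] [0 1]
```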
||||
@@ -1,9 +1,7 @@
|
||||
import hydra
|
||||
from omegaconf import DictConfig
|
||||
|
||||
from lerobot.common.robot_devices.robots.utils import Robot
|
||||
|
||||
|
||||
def make_robot(cfg: DictConfig) -> Robot:
|
||||
def make_robot(cfg: DictConfig):
|
||||
robot = hydra.utils.instantiate(cfg)
|
||||
return robot
|
||||
|
||||
@@ -1,484 +0,0 @@
|
||||
"""Logic to calibrate a robot arm built with feetech motors"""
|
||||
# TODO(rcadene, aliberts): move this logic into the robot code when refactoring
|
||||
|
||||
import time
|
||||
|
||||
import numpy as np
|
||||
|
||||
from lerobot.common.robot_devices.motors.feetech import (
|
||||
CalibrationMode,
|
||||
TorqueMode,
|
||||
convert_degrees_to_steps,
|
||||
)
|
||||
from lerobot.common.robot_devices.motors.utils import MotorsBus
|
||||
|
||||
URL_TEMPLATE = (
|
||||
"https://raw.githubusercontent.com/huggingface/lerobot/main/media/{robot}/{arm}_{position}.webp"
|
||||
)
|
||||
|
||||
# The following positions are provided in nominal degree range ]-180, +180[
|
||||
# For more info on these constants, see comments in the code where they get used.
|
||||
ZERO_POSITION_DEGREE = 0
|
||||
ROTATED_POSITION_DEGREE = 90
|
||||
|
||||
|
||||
def assert_drive_mode(drive_mode):
    # `drive_mode` is in [0, 1], where 0 means the original rotation direction for the motor and 1 means inverted.
    if not np.all(np.isin(drive_mode, [0, 1])):
        raise ValueError(f"`drive_mode` contains values other than 0 or 1: ({drive_mode})")


def apply_drive_mode(position, drive_mode):
    assert_drive_mode(drive_mode)
    # Convert `drive_mode` from [0, 1], where 0 indicates the original rotation direction and 1 inverted,
    # to [-1, 1], where 1 indicates the original rotation direction and -1 inverted.
    signed_drive_mode = -(drive_mode * 2 - 1)
    position *= signed_drive_mode
    return position
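For instance, with two motors where only the second has an inverted drive mode:

```python
import numpy as np

position = np.array([1000, 1000])
drive_mode = np.array([0, 1])
# `signed_drive_mode` is [1, -1], so only the second position flips sign.
print(apply_drive_mode(position, drive_mode))  # -> [ 1000 -1000]
```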
def move_until_block(arm, motor_name, positive_direction=True, while_move_hook=None):
    count = 0
    while True:
        present_pos = arm.read("Present_Position", motor_name)
        if positive_direction:
            # Move +100 steps each time. Lower the step size to lower the speed at which the arm moves.
            arm.write("Goal_Position", present_pos + 100, motor_name)
        else:
            arm.write("Goal_Position", present_pos - 100, motor_name)

        if while_move_hook is not None:
            while_move_hook()

        present_pos = arm.read("Present_Position", motor_name).item()
        present_speed = arm.read("Present_Speed", motor_name).item()
        present_current = arm.read("Present_Current", motor_name).item()
        # present_load = arm.read("Present_Load", motor_name).item()
        # present_voltage = arm.read("Present_Voltage", motor_name).item()
        # present_temperature = arm.read("Present_Temperature", motor_name).item()

        # print(f"{present_pos=}")
        # print(f"{present_speed=}")
        # print(f"{present_current=}")
        # print(f"{present_load=}")
        # print(f"{present_voltage=}")
        # print(f"{present_temperature=}")

        # The motor is considered blocked when it has stopped moving (zero speed) while still
        # drawing current: either a sustained moderate draw (>50 over 100 iterations) or an
        # immediate high spike (>300).
        if present_speed == 0 and present_current > 50:
            count += 1
            if count > 100 or present_current > 300:
                return present_pos
        else:
            count = 0
def move_to_calibrate(
    arm,
    motor_name,
    invert_drive_mode=False,
    positive_first=True,
    in_between_move_hook=None,
    while_move_hook=None,
):
    initial_pos = arm.read("Present_Position", motor_name)

    if positive_first:
        p_present_pos = move_until_block(
            arm, motor_name, positive_direction=True, while_move_hook=while_move_hook
        )
    else:
        n_present_pos = move_until_block(
            arm, motor_name, positive_direction=False, while_move_hook=while_move_hook
        )

    if in_between_move_hook is not None:
        in_between_move_hook()

    if positive_first:
        n_present_pos = move_until_block(
            arm, motor_name, positive_direction=False, while_move_hook=while_move_hook
        )
    else:
        p_present_pos = move_until_block(
            arm, motor_name, positive_direction=True, while_move_hook=while_move_hook
        )

    zero_pos = (n_present_pos + p_present_pos) / 2

    calib_data = {
        "initial_pos": initial_pos,
        "homing_offset": zero_pos if invert_drive_mode else -zero_pos,
        "invert_drive_mode": invert_drive_mode,
        "drive_mode": -1 if invert_drive_mode else 0,
        "zero_pos": zero_pos,
        "start_pos": n_present_pos if invert_drive_mode else p_present_pos,
        "end_pos": p_present_pos if invert_drive_mode else n_present_pos,
    }
    return calib_data
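In other words, `move_to_calibrate` pushes a joint into its mechanical stop in each direction and takes the midpoint of the two blocked positions as the joint's zero. A hedged numeric sketch with made-up stop positions:

```python
# Hypothetical positions returned by move_until_block for one joint
p_present_pos = 3050  # blocked while moving in the positive direction
n_present_pos = 1000  # blocked while moving in the negative direction

zero_pos = (n_present_pos + p_present_pos) / 2  # -> 2025.0, middle of the usable range
homing_offset = -zero_pos                       # sign as used when invert_drive_mode=False
```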
def apply_offset(calib, offset):
    calib["zero_pos"] += offset
    if calib["drive_mode"]:
        calib["homing_offset"] += offset
    else:
        calib["homing_offset"] -= offset
    return calib
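`apply_offset` nudges a joint's zero after the fact; note that the homing offset moves in the opposite direction when the drive mode is not inverted. For example:

```python
calib = {"zero_pos": 2025.0, "homing_offset": -2025.0, "drive_mode": 0}
calib = apply_offset(calib, offset=80)
# -> calib["zero_pos"] == 2105.0 and calib["homing_offset"] == -2105.0
```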
def run_arm_auto_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
    if robot_type == "so100":
        return run_arm_auto_calibration_so100(arm, robot_type, arm_name, arm_type)
    elif robot_type == "moss":
        return run_arm_auto_calibration_moss(arm, robot_type, arm_name, arm_type)
    else:
        raise ValueError(robot_type)
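This dispatcher is what gets called for follower arms during `ManipulatorRobot.activate_calibration` (see further below). A minimal usage sketch, assuming `arm` is an already connected `MotorsBus` with torque disabled:

```python
calib_dict = run_arm_auto_calibration(arm, robot_type="so100", arm_name="main", arm_type="follower")
```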
def run_arm_auto_calibration_so100(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
    """All the offsets and magic numbers are hand-tuned, and are unique to SO-100 follower arms"""
    if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
        raise ValueError("To run calibration, the torque must be disabled on all motors.")

    if not (robot_type == "so100" and arm_type == "follower"):
        raise NotImplementedError("Auto calibration only supports the follower arm of so100 for now.")

    print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")

    print("\nMove arm to initial position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="initial"))
    input("Press Enter to continue...")

    # Lower the acceleration of the motors (in [0,254])
    initial_acceleration = arm.read("Acceleration")
    arm.write("Lock", 0)
    arm.write("Acceleration", 10)
    time.sleep(1)

    arm.write("Torque_Enable", TorqueMode.ENABLED.value)

    print(f'{arm.read("Present_Position", "elbow_flex")=}')

    calib = {}

    init_wf_pos = arm.read("Present_Position", "wrist_flex")
    init_sl_pos = arm.read("Present_Position", "shoulder_lift")
    init_ef_pos = arm.read("Present_Position", "elbow_flex")
    arm.write("Goal_Position", init_wf_pos - 800, "wrist_flex")
    arm.write("Goal_Position", init_sl_pos + 150 + 1024, "shoulder_lift")
    arm.write("Goal_Position", init_ef_pos - 2048, "elbow_flex")
    time.sleep(2)

    print("Calibrate shoulder_pan")
    calib["shoulder_pan"] = move_to_calibrate(arm, "shoulder_pan")
    arm.write("Goal_Position", calib["shoulder_pan"]["zero_pos"], "shoulder_pan")
    time.sleep(1)

    print("Calibrate gripper")
    calib["gripper"] = move_to_calibrate(arm, "gripper", invert_drive_mode=True)
    time.sleep(1)

    print("Calibrate wrist_flex")
    calib["wrist_flex"] = move_to_calibrate(arm, "wrist_flex")
    calib["wrist_flex"] = apply_offset(calib["wrist_flex"], offset=80)

    def in_between_move_hook():
        nonlocal arm, calib
        time.sleep(2)
        ef_pos = arm.read("Present_Position", "elbow_flex")
        sl_pos = arm.read("Present_Position", "shoulder_lift")
        arm.write("Goal_Position", ef_pos + 1024, "elbow_flex")
        arm.write("Goal_Position", sl_pos - 1024, "shoulder_lift")
        time.sleep(2)

    print("Calibrate elbow_flex")
    calib["elbow_flex"] = move_to_calibrate(
        arm, "elbow_flex", positive_first=False, in_between_move_hook=in_between_move_hook
    )
    calib["elbow_flex"] = apply_offset(calib["elbow_flex"], offset=80 - 1024)

    arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] + 1024 + 512, "elbow_flex")
    time.sleep(1)

    def in_between_move_hook():
        nonlocal arm, calib
        arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"], "elbow_flex")

    print("Calibrate shoulder_lift")
    calib["shoulder_lift"] = move_to_calibrate(
        arm,
        "shoulder_lift",
        invert_drive_mode=True,
        positive_first=False,
        in_between_move_hook=in_between_move_hook,
    )
    # Add a small offset to align with the body
    calib["shoulder_lift"] = apply_offset(calib["shoulder_lift"], offset=1024 - 50)

    def while_move_hook():
        nonlocal arm, calib
        positions = {
            "shoulder_lift": round(calib["shoulder_lift"]["zero_pos"] - 1600),
            "elbow_flex": round(calib["elbow_flex"]["zero_pos"] + 1700),
            "wrist_flex": round(calib["wrist_flex"]["zero_pos"] + 800),
            "gripper": round(calib["gripper"]["end_pos"]),
        }
        arm.write("Goal_Position", list(positions.values()), list(positions.keys()))

    arm.write("Goal_Position", round(calib["shoulder_lift"]["zero_pos"] - 1600), "shoulder_lift")
    time.sleep(2)
    arm.write("Goal_Position", round(calib["elbow_flex"]["zero_pos"] + 1700), "elbow_flex")
    time.sleep(2)
    arm.write("Goal_Position", round(calib["wrist_flex"]["zero_pos"] + 800), "wrist_flex")
    time.sleep(2)
    arm.write("Goal_Position", round(calib["gripper"]["end_pos"]), "gripper")
    time.sleep(2)

    print("Calibrate wrist_roll")
    calib["wrist_roll"] = move_to_calibrate(
        arm, "wrist_roll", invert_drive_mode=True, positive_first=False, while_move_hook=while_move_hook
    )

    arm.write("Goal_Position", calib["wrist_roll"]["zero_pos"], "wrist_roll")
    time.sleep(1)
    arm.write("Goal_Position", calib["gripper"]["start_pos"], "gripper")
    time.sleep(1)
    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"], "wrist_flex")
    time.sleep(1)
    arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] + 2048, "elbow_flex")
    arm.write("Goal_Position", calib["shoulder_lift"]["zero_pos"] - 2048, "shoulder_lift")
    time.sleep(1)
    arm.write("Goal_Position", calib["shoulder_pan"]["zero_pos"], "shoulder_pan")
    time.sleep(1)

    calib_modes = []
    for name in arm.motor_names:
        if name == "gripper":
            calib_modes.append(CalibrationMode.LINEAR.name)
        else:
            calib_modes.append(CalibrationMode.DEGREE.name)

    calib_dict = {
        "homing_offset": [calib[name]["homing_offset"] for name in arm.motor_names],
        "drive_mode": [calib[name]["drive_mode"] for name in arm.motor_names],
        "start_pos": [calib[name]["start_pos"] for name in arm.motor_names],
        "end_pos": [calib[name]["end_pos"] for name in arm.motor_names],
        "calib_mode": calib_modes,
        "motor_names": arm.motor_names,
    }

    # Restore the original acceleration
    arm.write("Lock", 0)
    arm.write("Acceleration", initial_acceleration)
    time.sleep(1)

    return calib_dict
def run_arm_auto_calibration_moss(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
    """All the offsets and magic numbers are hand-tuned, and are unique to Moss follower arms"""
    if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
        raise ValueError("To run calibration, the torque must be disabled on all motors.")

    if not (robot_type == "moss" and arm_type == "follower"):
        raise NotImplementedError("Auto calibration only supports the follower arm of moss for now.")

    print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")

    print("\nMove arm to initial position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="initial"))
    input("Press Enter to continue...")

    # Lower the acceleration of the motors (in [0,254])
    initial_acceleration = arm.read("Acceleration")
    arm.write("Lock", 0)
    arm.write("Acceleration", 10)
    time.sleep(1)

    arm.write("Torque_Enable", TorqueMode.ENABLED.value)

    sl_pos = arm.read("Present_Position", "shoulder_lift")
    arm.write("Goal_Position", sl_pos - 1024 - 450, "shoulder_lift")
    ef_pos = arm.read("Present_Position", "elbow_flex")
    arm.write("Goal_Position", ef_pos + 1024 + 450, "elbow_flex")
    time.sleep(2)

    calib = {}

    print("Calibrate shoulder_pan")
    calib["shoulder_pan"] = move_to_calibrate(arm, "shoulder_pan")
    arm.write("Goal_Position", calib["shoulder_pan"]["zero_pos"], "shoulder_pan")
    time.sleep(1)

    print("Calibrate gripper")
    calib["gripper"] = move_to_calibrate(arm, "gripper", invert_drive_mode=True)
    time.sleep(1)

    print("Calibrate wrist_flex")
    calib["wrist_flex"] = move_to_calibrate(arm, "wrist_flex", invert_drive_mode=True)
    calib["wrist_flex"] = apply_offset(calib["wrist_flex"], offset=-210 + 1024)

    wr_pos = arm.read("Present_Position", "wrist_roll")
    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex")
    time.sleep(1)
    arm.write("Goal_Position", wr_pos - 1024, "wrist_roll")
    time.sleep(1)
    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 2048, "wrist_flex")
    time.sleep(1)
    arm.write("Goal_Position", calib["gripper"]["end_pos"], "gripper")
    time.sleep(1)

    print("Calibrate wrist_roll")
    calib["wrist_roll"] = move_to_calibrate(arm, "wrist_roll", invert_drive_mode=True)
    calib["wrist_roll"] = apply_offset(calib["wrist_roll"], offset=790)

    arm.write("Goal_Position", calib["wrist_roll"]["zero_pos"] - 1024, "wrist_roll")
    arm.write("Goal_Position", calib["gripper"]["start_pos"], "gripper")
    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex")
    time.sleep(1)
    arm.write("Goal_Position", calib["wrist_roll"]["zero_pos"], "wrist_roll")
    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 2048, "wrist_flex")

    def in_between_move_elbow_flex_hook():
        nonlocal arm, calib
        arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"], "wrist_flex")

    print("Calibrate elbow_flex")
    calib["elbow_flex"] = move_to_calibrate(
        arm,
        "elbow_flex",
        invert_drive_mode=True,
        in_between_move_hook=in_between_move_elbow_flex_hook,
    )
    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex")

    def in_between_move_shoulder_lift_hook():
        nonlocal arm, calib
        sl = arm.read("Present_Position", "shoulder_lift")
        arm.write("Goal_Position", sl - 1500, "shoulder_lift")
        time.sleep(1)
        arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] + 1536, "elbow_flex")
        time.sleep(1)
        arm.write("Goal_Position", calib["wrist_flex"]["start_pos"], "wrist_flex")
        time.sleep(1)

    print("Calibrate shoulder_lift")
    calib["shoulder_lift"] = move_to_calibrate(
        arm, "shoulder_lift", in_between_move_hook=in_between_move_shoulder_lift_hook
    )
    calib["shoulder_lift"] = apply_offset(calib["shoulder_lift"], offset=-1024)

    arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex")
    time.sleep(1)
    arm.write("Goal_Position", calib["shoulder_lift"]["zero_pos"] + 2048, "shoulder_lift")
    arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] - 1024 - 400, "elbow_flex")
    time.sleep(2)

    calib_modes = []
    for name in arm.motor_names:
        if name == "gripper":
            calib_modes.append(CalibrationMode.LINEAR.name)
        else:
            calib_modes.append(CalibrationMode.DEGREE.name)

    calib_dict = {
        "homing_offset": [calib[name]["homing_offset"] for name in arm.motor_names],
        "drive_mode": [calib[name]["drive_mode"] for name in arm.motor_names],
        "start_pos": [calib[name]["start_pos"] for name in arm.motor_names],
        "end_pos": [calib[name]["end_pos"] for name in arm.motor_names],
        "calib_mode": calib_modes,
        "motor_names": arm.motor_names,
    }

    # Restore the original acceleration
    arm.write("Lock", 0)
    arm.write("Acceleration", initial_acceleration)
    time.sleep(1)

    return calib_dict
def run_arm_manual_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
    """This function ensures that a neural network trained on data collected on a given robot
    can work on another robot. For instance, before calibration, setting the same goal position
    for each motor of two different robots will yield two very different positions. But after calibration,
    the two robots will move to the same position. To this end, this function computes the homing offset
    and the drive mode for each motor of a given robot.

    The homing offset is used to shift the motor position into a ]-2048, +2048[ nominal range (when the motor uses 2048 steps
    to complete half a turn). This range is set around an arbitrary "zero position" corresponding to all motor positions
    being 0. During the calibration process, you will need to manually move the robot to this "zero position".

    The drive mode is used to invert the rotation direction of a motor. This is useful when some motors have been assembled
    in the opposite orientation for some robots. During the calibration process, you will need to manually move the robot
    to the "rotated position".

    After calibration, the homing offsets and drive modes are stored in a cache.

    Example of usage:
    ```python
    run_arm_calibration(arm, "so100", "left", "follower")
    ```
    """
    if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
        raise ValueError("To run calibration, the torque must be disabled on all motors.")

    print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")

    print("\nMove arm to zero position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="zero"))
    input("Press Enter to continue...")

    # We arbitrarily chose our zero target position to be a straight horizontal position with the gripper upwards and closed.
    # It is easy to identify and all motors are in a "quarter turn" position. Once calibration is done, this position will
    # correspond to every motor angle being 0. If you set 0 as the goal position for every motor, the arm will move to this position.
    zero_target_pos = convert_degrees_to_steps(ZERO_POSITION_DEGREE, arm.motor_models)

    # Compute homing offset so that `present_position + homing_offset ~= target_position`.
    zero_pos = arm.read("Present_Position")
    homing_offset = zero_target_pos - zero_pos

    # The rotated target position corresponds to a rotation of a quarter turn from the zero position.
    # This allows identifying the rotation direction of each motor.
    # For instance, if the motor rotates 90 degrees, and its value is -90 after applying the homing offset, then we know its rotation direction
    # is inverted. However, for the calibration to be successful, we need every motor to follow the same target position.
    # Sometimes, there is only one possible rotation direction. For instance, if the gripper is closed, there is only one direction which
    # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarily rotate clockwise from the point of view
    # of the previous motor in the kinematic chain.
    print("\nMove arm to rotated target position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rotated"))
    input("Press Enter to continue...")

    rotated_target_pos = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, arm.motor_models)

    # Find drive mode by rotating each motor by a quarter of a turn.
    # Drive mode indicates if the motor rotation direction should be inverted (=1) or not (=0).
    rotated_pos = arm.read("Present_Position")
    drive_mode = (rotated_pos < zero_pos).astype(np.int32)

    # Re-compute homing offset to take into account drive mode
    rotated_drived_pos = apply_drive_mode(rotated_pos, drive_mode)
    homing_offset = rotated_target_pos - rotated_drived_pos

    print("\nMove arm to rest position")
    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rest"))
    input("Press Enter to continue...")
    print()

    # Joints with rotational motions are expressed in degrees in a nominal range of [-180, 180]
    calib_modes = []
    for name in arm.motor_names:
        if name == "gripper":
            calib_modes.append(CalibrationMode.LINEAR.name)
        else:
            calib_modes.append(CalibrationMode.DEGREE.name)

    calib_dict = {
        "homing_offset": homing_offset.tolist(),
        "drive_mode": drive_mode.tolist(),
        "start_pos": zero_pos.tolist(),
        "end_pos": rotated_pos.tolist(),
        "calib_mode": calib_modes,
        "motor_names": arm.motor_names,
    }
    return calib_dict
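The returned dictionary is what later gets serialized to a per-arm JSON file (see `activate_calibration` further below). A sketch of its shape for a hypothetical 6-motor arm; all numeric values here are made up:

```python
calib_dict = {
    "homing_offset": [2048, 3072, -1024, 2048, -2048, 2048],
    "drive_mode": [1, 1, 0, 1, 0, 1],
    "start_pos": [2035, 3024, 979, 1982, 2166, 2124],
    "end_pos": [-990, -2017, 2078, -1030, 3117, -1016],
    "calib_mode": ["DEGREE"] * 5 + ["LINEAR"],  # gripper is the only LINEAR joint
    "motor_names": ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"],
}
```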
lerobot/common/robot_devices/robots/koch.py (new file, 515 lines)
@@ -0,0 +1,515 @@
import pickle
import time
from dataclasses import dataclass, field, replace
from pathlib import Path

import numpy as np
import torch

from lerobot.common.robot_devices.cameras.utils import Camera
from lerobot.common.robot_devices.motors.dynamixel import (
    OperatingMode,
    TorqueMode,
    convert_degrees_to_steps,
)
from lerobot.common.robot_devices.motors.utils import MotorsBus
from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError

########################################################################
# Calibration logic
########################################################################

URL_TEMPLATE = (
    "https://raw.githubusercontent.com/huggingface/lerobot/main/media/{robot}/{arm}_{position}.webp"
)

# In the nominal degree range ]-180, +180[
ZERO_POSITION_DEGREE = 0
ROTATED_POSITION_DEGREE = 90
GRIPPER_OPEN_DEGREE = 35.156


def assert_drive_mode(drive_mode):
    # `drive_mode` is in [0, 1], where 0 means the original rotation direction for the motor and 1 means inverted.
    if not np.all(np.isin(drive_mode, [0, 1])):
        raise ValueError(f"`drive_mode` contains values other than 0 or 1: ({drive_mode})")


def apply_drive_mode(position, drive_mode):
    assert_drive_mode(drive_mode)
    # Convert `drive_mode` from [0, 1], where 0 indicates the original rotation direction and 1 inverted,
    # to [-1, 1], where 1 indicates the original rotation direction and -1 inverted.
    signed_drive_mode = -(drive_mode * 2 - 1)
    position *= signed_drive_mode
    return position


def reset_torque_mode(arm: MotorsBus):
    # To be configured, all servos must be in "torque disable" mode
    arm.write("Torque_Enable", TorqueMode.DISABLED.value)

    # Use 'extended position mode' for all motors except the gripper, because in joint mode the servos can't
    # rotate more than 360 degrees (from 0 to 4095). Also, some mistakes can happen while assembling the arm:
    # you could end up with a servo at position 0 or 4095 at a crucial point. See
    # https://emanual.robotis.com/docs/en/dxl/x/x_series/#operating-mode11
    all_motors_except_gripper = [name for name in arm.motor_names if name != "gripper"]
    if len(all_motors_except_gripper) > 0:
        arm.write("Operating_Mode", OperatingMode.EXTENDED_POSITION.value, all_motors_except_gripper)

    # Use 'current-based position control' for the gripper so that it is limited by its current limit.
    # For the follower gripper, it means it can grasp an object without forcing too much even though
    # its goal position is a complete grasp (both gripper fingers are commanded to join and touch).
    # For the leader gripper, it means we can use it as a physical trigger, since we can push it with a finger
    # to make it move, and it will move back to its original target position when we release the force.
    arm.write("Operating_Mode", OperatingMode.CURRENT_CONTROLLED_POSITION.value, "gripper")
def run_arm_calibration(arm: MotorsBus, name: str, arm_type: str):
    """This function ensures that a neural network trained on data collected on a given robot
    can work on another robot. For instance, before calibration, setting the same goal position
    for each motor of two different robots will yield two very different positions. But after calibration,
    the two robots will move to the same position. To this end, this function computes the homing offset
    and the drive mode for each motor of a given robot.

    The homing offset is used to shift the motor position into a ]-2048, +2048[ nominal range (when the motor uses 2048 steps
    to complete half a turn). This range is set around an arbitrary "zero position" corresponding to all motor positions
    being 0. During the calibration process, you will need to manually move the robot to this "zero position".

    The drive mode is used to invert the rotation direction of a motor. This is useful when some motors have been assembled
    in the opposite orientation for some robots. During the calibration process, you will need to manually move the robot
    to the "rotated position".

    After calibration, the homing offsets and drive modes are stored in a cache.

    Example of usage:
    ```python
    run_arm_calibration(arm, "left", "follower")
    ```
    """
    reset_torque_mode(arm)

    print(f"\nRunning calibration of {name} {arm_type}...")

    print("\nMove arm to zero position")
    print("See: " + URL_TEMPLATE.format(robot="koch", arm=arm_type, position="zero"))
    input("Press Enter to continue...")

    # We arbitrarily chose our zero target position to be a straight horizontal position with the gripper upwards and closed.
    # It is easy to identify and all motors are in a "quarter turn" position. Once calibration is done, this position will
    # correspond to every motor angle being 0. If you set 0 as the goal position for every motor, the arm will move to this position.
    zero_position = convert_degrees_to_steps(ZERO_POSITION_DEGREE, arm.motor_models)

    def _compute_nearest_rounded_position(position, models):
        # TODO(rcadene): Rework this function since some motors can't physically rotate a quarter turn
        # (e.g. the gripper of Aloha arms can only rotate ~50 degrees)
        quarter_turn_degree = 90
        quarter_turn = convert_degrees_to_steps(quarter_turn_degree, models)
        nearest_pos = np.round(position.astype(float) / quarter_turn) * quarter_turn
        return nearest_pos.astype(position.dtype)

    # Compute homing offset so that `present_position + homing_offset ~= target_position`.
    position = arm.read("Present_Position")
    position = _compute_nearest_rounded_position(position, arm.motor_models)
    homing_offset = zero_position - position

    print("\nMove arm to rotated target position")
    print("See: " + URL_TEMPLATE.format(robot="koch", arm=arm_type, position="rotated"))
    input("Press Enter to continue...")

    # The rotated target position corresponds to a rotation of a quarter turn from the zero position.
    # This allows identifying the rotation direction of each motor.
    # For instance, if the motor rotates 90 degrees, and its value is -90 after applying the homing offset, then we know its rotation direction
    # is inverted. However, for the calibration to be successful, we need every motor to follow the same target position.
    # Sometimes, there is only one possible rotation direction. For instance, if the gripper is closed, there is only one direction which
    # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarily rotate clockwise from the point of view
    # of the previous motor in the kinematic chain.
    rotated_position = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, arm.motor_models)

    # Find drive mode by rotating each motor by a quarter of a turn.
    # Drive mode indicates if the motor rotation direction should be inverted (=1) or not (=0).
    position = arm.read("Present_Position")
    position += homing_offset
    position = _compute_nearest_rounded_position(position, arm.motor_models)
    drive_mode = (position != rotated_position).astype(np.int32)

    # Re-compute homing offset to take into account drive mode
    position = arm.read("Present_Position")
    position = apply_drive_mode(position, drive_mode)
    position = _compute_nearest_rounded_position(position, arm.motor_models)
    homing_offset = rotated_position - position

    print("\nMove arm to rest position")
    print("See: " + URL_TEMPLATE.format(robot="koch", arm=arm_type, position="rest"))
    input("Press Enter to continue...")
    print()

    return homing_offset, drive_mode
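A quick numeric check of the quarter-turn rounding used above, assuming 4096-step motors (quarter turn = 1024 steps):

```python
import numpy as np

position = np.array([1010, 2100, -980])
quarter_turn = 1024
nearest = np.round(position.astype(float) / quarter_turn) * quarter_turn
# -> [ 1024.  2048. -1024.]: each reading snaps to the closest quarter-turn multiple,
# which makes the drive-mode comparison robust to the arm being held slightly off-target.
```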
########################################################################
# Alexander Koch robot arm
########################################################################


@dataclass
class KochRobotConfig:
    """
    Example of usage:
    ```python
    KochRobotConfig()
    ```
    """

    # Define all components of the robot
    leader_arms: dict[str, MotorsBus] = field(default_factory=lambda: {})
    follower_arms: dict[str, MotorsBus] = field(default_factory=lambda: {})
    cameras: dict[str, Camera] = field(default_factory=lambda: {})
class KochRobot:
    # TODO(rcadene): Implement force feedback
    """This class allows controlling any Koch robot with various numbers of motors.

    A few versions are available:
    - [Koch v1.0](https://github.com/AlexanderKoch-Koch/low_cost_robot), with and without the wrist-to-elbow expansion, which was developed
    by Alexander Koch from [Tau Robotics](https://tau-robotics.com)
    - [Koch v1.1](https://github.com/jess-moss/koch-v1-1), which was developed by Jess Moss.

    Example of highest frequency teleoperation without camera:
    ```python
    # Defines how to communicate with the motors of the leader and follower arms
    leader_arms = {
        "main": DynamixelMotorsBus(
            port="/dev/tty.usbmodem575E0031751",
            motors={
                # name: (index, model)
                "shoulder_pan": (1, "xl330-m077"),
                "shoulder_lift": (2, "xl330-m077"),
                "elbow_flex": (3, "xl330-m077"),
                "wrist_flex": (4, "xl330-m077"),
                "wrist_roll": (5, "xl330-m077"),
                "gripper": (6, "xl330-m077"),
            },
        ),
    }
    follower_arms = {
        "main": DynamixelMotorsBus(
            port="/dev/tty.usbmodem575E0032081",
            motors={
                # name: (index, model)
                "shoulder_pan": (1, "xl430-w250"),
                "shoulder_lift": (2, "xl430-w250"),
                "elbow_flex": (3, "xl330-m288"),
                "wrist_flex": (4, "xl330-m288"),
                "wrist_roll": (5, "xl330-m288"),
                "gripper": (6, "xl330-m288"),
            },
        ),
    }
    robot = KochRobot(leader_arms, follower_arms)

    # Connect motors buses and cameras if any (Required)
    robot.connect()

    while True:
        robot.teleop_step()
    ```

    Example of highest frequency data collection without camera:
    ```python
    # Assumes leader and follower arms have been instantiated already (see first example)
    robot = KochRobot(leader_arms, follower_arms)
    robot.connect()
    while True:
        observation, action = robot.teleop_step(record_data=True)
    ```

    Example of highest frequency data collection with cameras:
    ```python
    # Defines how to communicate with 2 cameras connected to the computer.
    # Here, the webcam of the laptop and the phone (connected via USB to the laptop)
    # can be reached respectively using the camera indices 0 and 1. These indices can be
    # arbitrary. See the documentation of `OpenCVCamera` to find your own camera indices.
    cameras = {
        "laptop": OpenCVCamera(camera_index=0, fps=30, width=640, height=480),
        "phone": OpenCVCamera(camera_index=1, fps=30, width=640, height=480),
    }

    # Assumes leader and follower arms have been instantiated already (see first example)
    robot = KochRobot(leader_arms, follower_arms, cameras)
    robot.connect()
    while True:
        observation, action = robot.teleop_step(record_data=True)
    ```

    Example of controlling the robot with a policy (without running multiple policies in parallel to ensure highest frequency):
    ```python
    # Assumes leader and follower arms + cameras have been instantiated already (see previous example)
    robot = KochRobot(leader_arms, follower_arms, cameras)
    robot.connect()
    while True:
        # Uses the follower arms and cameras to capture an observation
        observation = robot.capture_observation()

        # Assumes a policy has been instantiated
        with torch.inference_mode():
            action = policy.select_action(observation)

        # Orders the robot to move
        robot.send_action(action)
    ```

    Example of disconnecting, which is not mandatory since we disconnect when the object is deleted:
    ```python
    robot.disconnect()
    ```
    """
    def __init__(
        self,
        config: KochRobotConfig | None = None,
        calibration_path: Path = ".cache/calibration/koch.pkl",
        **kwargs,
    ):
        if config is None:
            config = KochRobotConfig()
        # Overwrite config arguments using kwargs
        self.config = replace(config, **kwargs)
        self.calibration_path = Path(calibration_path)

        self.leader_arms = self.config.leader_arms
        self.follower_arms = self.config.follower_arms
        self.cameras = self.config.cameras
        self.is_connected = False
        self.logs = {}
    def connect(self):
        if self.is_connected:
            raise RobotDeviceAlreadyConnectedError(
                "KochRobot is already connected. Do not run `robot.connect()` twice."
            )

        if not self.leader_arms and not self.follower_arms and not self.cameras:
            raise ValueError(
                "KochRobot doesn't have any device to connect. See example of usage in docstring of the class."
            )

        # Connect the arms
        for name in self.follower_arms:
            print(f"Connecting {name} follower arm.")
            self.follower_arms[name].connect()
            print(f"Connecting {name} leader arm.")
            self.leader_arms[name].connect()

        # Reset the arms and load or run calibration
        if self.calibration_path.exists():
            # Reset all arms before setting calibration
            for name in self.follower_arms:
                reset_torque_mode(self.follower_arms[name])
            for name in self.leader_arms:
                reset_torque_mode(self.leader_arms[name])

            with open(self.calibration_path, "rb") as f:
                calibration = pickle.load(f)
        else:
            print(f"Missing calibration file '{self.calibration_path}'. Starting calibration procedure.")
            # Run the calibration process, which begins by resetting all arms
            calibration = self.run_calibration()

            print(f"Calibration is done! Saving calibration file '{self.calibration_path}'")
            self.calibration_path.parent.mkdir(parents=True, exist_ok=True)
            with open(self.calibration_path, "wb") as f:
                pickle.dump(calibration, f)

        # Set calibration
        for name in self.follower_arms:
            self.follower_arms[name].set_calibration(calibration[f"follower_{name}"])
        for name in self.leader_arms:
            self.leader_arms[name].set_calibration(calibration[f"leader_{name}"])

        # Set better PID values to close the gap between recorded states and actions
        # TODO(rcadene): Implement an automatic procedure to set optimal PID values for each motor
        for name in self.follower_arms:
            self.follower_arms[name].write("Position_P_Gain", 1500, "elbow_flex")
            self.follower_arms[name].write("Position_I_Gain", 0, "elbow_flex")
            self.follower_arms[name].write("Position_D_Gain", 600, "elbow_flex")

        # Enable torque on all motors of the follower arms
        for name in self.follower_arms:
            print(f"Activating torque on {name} follower arm.")
            self.follower_arms[name].write("Torque_Enable", 1)

        # Enable torque on the gripper of the leader arms, and move it to an open angle (GRIPPER_OPEN_DEGREE),
        # so that we can use it as a trigger to close the gripper of the follower arms.
        for name in self.leader_arms:
            self.leader_arms[name].write("Torque_Enable", 1, "gripper")
            self.leader_arms[name].write("Goal_Position", GRIPPER_OPEN_DEGREE, "gripper")

        # Connect the cameras
        for name in self.cameras:
            self.cameras[name].connect()

        self.is_connected = True
    def run_calibration(self):
        calibration = {}

        for name in self.follower_arms:
            homing_offset, drive_mode = run_arm_calibration(self.follower_arms[name], name, "follower")

            calibration[f"follower_{name}"] = {}
            for idx, motor_name in enumerate(self.follower_arms[name].motor_names):
                calibration[f"follower_{name}"][motor_name] = (homing_offset[idx], drive_mode[idx])

        for name in self.leader_arms:
            homing_offset, drive_mode = run_arm_calibration(self.leader_arms[name], name, "leader")

            calibration[f"leader_{name}"] = {}
            for idx, motor_name in enumerate(self.leader_arms[name].motor_names):
                calibration[f"leader_{name}"][motor_name] = (homing_offset[idx], drive_mode[idx])

        return calibration
    def teleop_step(
        self, record_data=False
    ) -> None | tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]:
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                "KochRobot is not connected. You need to run `robot.connect()`."
            )

        # Prepare to assign the position of the leader to the follower
        leader_pos = {}
        for name in self.leader_arms:
            before_lread_t = time.perf_counter()
            leader_pos[name] = self.leader_arms[name].read("Present_Position")
            self.logs[f"read_leader_{name}_pos_dt_s"] = time.perf_counter() - before_lread_t

        follower_goal_pos = {}
        for name in self.leader_arms:
            follower_goal_pos[name] = leader_pos[name]

        # Send action
        for name in self.follower_arms:
            before_fwrite_t = time.perf_counter()
            self.follower_arms[name].write("Goal_Position", follower_goal_pos[name])
            self.logs[f"write_follower_{name}_goal_pos_dt_s"] = time.perf_counter() - before_fwrite_t

        # Early exit when recording data is not requested
        if not record_data:
            return

        # TODO(rcadene): Add velocity and other info
        # Read follower position
        follower_pos = {}
        for name in self.follower_arms:
            before_fread_t = time.perf_counter()
            follower_pos[name] = self.follower_arms[name].read("Present_Position")
            self.logs[f"read_follower_{name}_pos_dt_s"] = time.perf_counter() - before_fread_t

        # Create state by concatenating follower current position
        state = []
        for name in self.follower_arms:
            if name in follower_pos:
                state.append(follower_pos[name])
        state = np.concatenate(state)

        # Create action by concatenating follower goal position
        action = []
        for name in self.follower_arms:
            if name in follower_goal_pos:
                action.append(follower_goal_pos[name])
        action = np.concatenate(action)

        # Capture images from cameras
        images = {}
        for name in self.cameras:
            before_camread_t = time.perf_counter()
            images[name] = self.cameras[name].async_read()
            self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
            self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

        # Populate output dictionaries and format to pytorch
        obs_dict, action_dict = {}, {}
        obs_dict["observation.state"] = torch.from_numpy(state)
        action_dict["action"] = torch.from_numpy(action)
        for name in self.cameras:
            obs_dict[f"observation.images.{name}"] = torch.from_numpy(images[name])

        return obs_dict, action_dict
    def capture_observation(self):
        """The returned observations do not have a batch dimension."""
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                "KochRobot is not connected. You need to run `robot.connect()`."
            )

        # Read follower position
        follower_pos = {}
        for name in self.follower_arms:
            before_fread_t = time.perf_counter()
            follower_pos[name] = self.follower_arms[name].read("Present_Position")
            self.logs[f"read_follower_{name}_pos_dt_s"] = time.perf_counter() - before_fread_t

        # Create state by concatenating follower current position
        state = []
        for name in self.follower_arms:
            if name in follower_pos:
                state.append(follower_pos[name])
        state = np.concatenate(state)

        # Capture images from cameras
        images = {}
        for name in self.cameras:
            before_camread_t = time.perf_counter()
            images[name] = self.cameras[name].async_read()
            self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
            self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

        # Populate output dictionaries and format to pytorch
        obs_dict = {}
        obs_dict["observation.state"] = torch.from_numpy(state)
        for name in self.cameras:
            obs_dict[f"observation.images.{name}"] = torch.from_numpy(images[name])
        return obs_dict
    def send_action(self, action: torch.Tensor):
        """The provided action is expected to be a vector."""
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                "KochRobot is not connected. You need to run `robot.connect()`."
            )

        # Slice the flat action vector into one goal-position chunk per follower arm.
        from_idx = 0
        to_idx = 0
        follower_goal_pos = {}
        for name in self.follower_arms:
            to_idx += len(self.follower_arms[name].motor_names)
            follower_goal_pos[name] = action[from_idx:to_idx].numpy()
            from_idx = to_idx

        for name in self.follower_arms:
            self.follower_arms[name].write("Goal_Position", follower_goal_pos[name].astype(np.int32))
    def disconnect(self):
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                "KochRobot is not connected. You need to run `robot.connect()` before disconnecting."
            )

        for name in self.follower_arms:
            self.follower_arms[name].disconnect()

        for name in self.leader_arms:
            self.leader_arms[name].disconnect()

        for name in self.cameras:
            self.cameras[name].disconnect()

        self.is_connected = False

    def __del__(self):
        if getattr(self, "is_connected", False):
            self.disconnect()
@@ -1,657 +0,0 @@
"""Contains logic to instantiate a robot, read information from its motors and cameras,
and send orders to its motors.
"""
# TODO(rcadene, aliberts): reorganize the codebase into one file per robot, with the associated
# calibration procedure, to make it easy for people to add their own robot.

import json
import logging
import time
import warnings
from dataclasses import dataclass, field, replace
from pathlib import Path
from typing import Sequence

import numpy as np
import torch

from lerobot.common.robot_devices.cameras.utils import Camera
from lerobot.common.robot_devices.motors.utils import MotorsBus
from lerobot.common.robot_devices.robots.utils import get_arm_id
from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
def ensure_safe_goal_position(
    goal_pos: torch.Tensor, present_pos: torch.Tensor, max_relative_target: float | list[float]
):
    # Cap the relative action target magnitude for safety.
    diff = goal_pos - present_pos
    max_relative_target = torch.tensor(max_relative_target)
    safe_diff = torch.minimum(diff, max_relative_target)
    safe_diff = torch.maximum(safe_diff, -max_relative_target)
    safe_goal_pos = present_pos + safe_diff

    if not torch.allclose(goal_pos, safe_goal_pos):
        logging.warning(
            "Relative goal position magnitude had to be clamped to be safe.\n"
            f"  requested relative goal position target: {diff}\n"
            f"    clamped relative goal position target: {safe_diff}"
        )

    return safe_goal_pos
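A quick numeric check of the clamping above: with a present position of 0 and requested moves of +/-50 steps, a `max_relative_target` of 10 caps each move symmetrically to +/-10 steps:

```python
import torch

present_pos = torch.tensor([0.0, 0.0])
goal_pos = torch.tensor([50.0, -50.0])
safe = ensure_safe_goal_position(goal_pos, present_pos, max_relative_target=10.0)
# safe -> tensor([ 10., -10.]), plus a warning that the target was clamped
```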
@dataclass
class ManipulatorRobotConfig:
    """
    Example of usage:
    ```python
    ManipulatorRobotConfig()
    ```
    """

    # Define all components of the robot
    robot_type: str = "koch"
    leader_arms: dict[str, MotorsBus] = field(default_factory=lambda: {})
    follower_arms: dict[str, MotorsBus] = field(default_factory=lambda: {})
    cameras: dict[str, Camera] = field(default_factory=lambda: {})

    # Optionally limit the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length
    # as the number of motors in your follower arms (assumes all follower arms have the same number of
    # motors).
    max_relative_target: list[float] | float | None = None

    # Optionally set the leader arm in torque mode with the gripper motor set to this angle. This makes it
    # possible to squeeze the gripper and have it spring back to an open position on its own. If None, the
    # gripper is not put in torque mode.
    gripper_open_degree: float | None = None

    def __setattr__(self, prop: str, val):
        if prop == "max_relative_target" and val is not None and isinstance(val, Sequence):
            for name in self.follower_arms:
                if len(self.follower_arms[name].motors) != len(val):
                    raise ValueError(
                        f"len(max_relative_target)={len(val)} but the follower arm with name {name} has "
                        f"{len(self.follower_arms[name].motors)} motors. Please make sure that the "
                        f"`max_relative_target` list has as many parameters as there are motors per arm. "
                        "Note: This feature does not yet work with robots where different follower arms have "
                        "different numbers of motors."
                    )
        super().__setattr__(prop, val)

    def __post_init__(self):
        if self.robot_type not in ["koch", "koch_bimanual", "aloha", "so100", "moss"]:
            raise ValueError(f"Provided robot type ({self.robot_type}) is not supported.")
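The safety cap can be configured either as one scalar for all motors or as one value per motor; a hedged sketch, with `follower_arms` assumed to be defined as in the docstring examples below:

```python
# Same cap for every motor:
config = ManipulatorRobotConfig(robot_type="koch", max_relative_target=20.0)

# Or one cap per motor; the list length must match the number of motors per follower arm,
# otherwise __setattr__ raises a ValueError.
config = ManipulatorRobotConfig(
    robot_type="koch",
    follower_arms=follower_arms,  # assumption: defined as in the docstring examples
    max_relative_target=[20.0, 20.0, 20.0, 20.0, 20.0, 10.0],
)
```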
class ManipulatorRobot:
    # TODO(rcadene): Implement force feedback
    """This class allows controlling any manipulator robot with various numbers of motors.

    Non-exhaustive list of robots:
    - [Koch v1.0](https://github.com/AlexanderKoch-Koch/low_cost_robot), with and without the wrist-to-elbow expansion, developed
    by Alexander Koch from [Tau Robotics](https://tau-robotics.com)
    - [Koch v1.1](https://github.com/jess-moss/koch-v1-1) developed by Jess Moss
    - [Aloha](https://www.trossenrobotics.com/aloha-kits) developed by Trossen Robotics

    Example of highest frequency teleoperation without camera:
    ```python
    # Defines how to communicate with the motors of the leader and follower arms
    leader_arms = {
        "main": DynamixelMotorsBus(
            port="/dev/tty.usbmodem575E0031751",
            motors={
                # name: (index, model)
                "shoulder_pan": (1, "xl330-m077"),
                "shoulder_lift": (2, "xl330-m077"),
                "elbow_flex": (3, "xl330-m077"),
                "wrist_flex": (4, "xl330-m077"),
                "wrist_roll": (5, "xl330-m077"),
                "gripper": (6, "xl330-m077"),
            },
        ),
    }
    follower_arms = {
        "main": DynamixelMotorsBus(
            port="/dev/tty.usbmodem575E0032081",
            motors={
                # name: (index, model)
                "shoulder_pan": (1, "xl430-w250"),
                "shoulder_lift": (2, "xl430-w250"),
                "elbow_flex": (3, "xl330-m288"),
                "wrist_flex": (4, "xl330-m288"),
                "wrist_roll": (5, "xl330-m288"),
                "gripper": (6, "xl330-m288"),
            },
        ),
    }
    robot = ManipulatorRobot(
        robot_type="koch",
        calibration_dir=".cache/calibration/koch",
        leader_arms=leader_arms,
        follower_arms=follower_arms,
    )

    # Connect motors buses and cameras if any (Required)
    robot.connect()

    while True:
        robot.teleop_step()
    ```

    Example of highest frequency data collection without camera:
    ```python
    # Assumes leader and follower arms have been instantiated already (see first example)
    robot = ManipulatorRobot(
        robot_type="koch",
        calibration_dir=".cache/calibration/koch",
        leader_arms=leader_arms,
        follower_arms=follower_arms,
    )
    robot.connect()
    while True:
        observation, action = robot.teleop_step(record_data=True)
    ```

    Example of highest frequency data collection with cameras:
    ```python
    # Defines how to communicate with 2 cameras connected to the computer.
    # Here, the webcam of the laptop and the phone (connected via USB to the laptop)
    # can be reached respectively using the camera indices 0 and 1. These indices can be
    # arbitrary. See the documentation of `OpenCVCamera` to find your own camera indices.
    cameras = {
        "laptop": OpenCVCamera(camera_index=0, fps=30, width=640, height=480),
        "phone": OpenCVCamera(camera_index=1, fps=30, width=640, height=480),
    }

    # Assumes leader and follower arms have been instantiated already (see first example)
    robot = ManipulatorRobot(
        robot_type="koch",
        calibration_dir=".cache/calibration/koch",
        leader_arms=leader_arms,
        follower_arms=follower_arms,
        cameras=cameras,
    )
    robot.connect()
    while True:
        observation, action = robot.teleop_step(record_data=True)
    ```

    Example of controlling the robot with a policy (without running multiple policies in parallel to ensure highest frequency):
    ```python
    # Assumes leader and follower arms + cameras have been instantiated already (see previous example)
    robot = ManipulatorRobot(
        robot_type="koch",
        calibration_dir=".cache/calibration/koch",
        leader_arms=leader_arms,
        follower_arms=follower_arms,
        cameras=cameras,
    )
    robot.connect()
    while True:
        # Uses the follower arms and cameras to capture an observation
        observation = robot.capture_observation()

        # Assumes a policy has been instantiated
        with torch.inference_mode():
            action = policy.select_action(observation)

        # Orders the robot to move
        robot.send_action(action)
    ```

    Example of disconnecting, which is not mandatory since we disconnect when the object is deleted:
    ```python
    robot.disconnect()
    ```
    """
    def __init__(
        self,
        config: ManipulatorRobotConfig | None = None,
        calibration_dir: Path = ".cache/calibration/koch",
        **kwargs,
    ):
        if config is None:
            config = ManipulatorRobotConfig()
        # Overwrite config arguments using kwargs
        self.config = replace(config, **kwargs)
        self.calibration_dir = Path(calibration_dir)

        self.robot_type = self.config.robot_type
        self.leader_arms = self.config.leader_arms
        self.follower_arms = self.config.follower_arms
        self.cameras = self.config.cameras
        self.is_connected = False
        self.logs = {}
    @property
    def has_camera(self):
        return len(self.cameras) > 0

    @property
    def num_cameras(self):
        return len(self.cameras)

    @property
    def available_arms(self):
        available_arms = []
        for name in self.follower_arms:
            arm_id = get_arm_id(name, "follower")
            available_arms.append(arm_id)
        for name in self.leader_arms:
            arm_id = get_arm_id(name, "leader")
            available_arms.append(arm_id)
        return available_arms
    def connect(self):
        if self.is_connected:
            raise RobotDeviceAlreadyConnectedError(
                "ManipulatorRobot is already connected. Do not run `robot.connect()` twice."
            )

        if not self.leader_arms and not self.follower_arms and not self.cameras:
            raise ValueError(
                "ManipulatorRobot doesn't have any device to connect. See example of usage in docstring of the class."
            )

        # Connect the arms
        for name in self.follower_arms:
            print(f"Connecting {name} follower arm.")
            self.follower_arms[name].connect()
        for name in self.leader_arms:
            print(f"Connecting {name} leader arm.")
            self.leader_arms[name].connect()

        if self.robot_type in ["koch", "koch_bimanual", "aloha"]:
            from lerobot.common.robot_devices.motors.dynamixel import TorqueMode
        elif self.robot_type in ["so100", "moss"]:
            from lerobot.common.robot_devices.motors.feetech import TorqueMode

        # We assume that at connection time, arms are in a rest position, and torque can
        # be safely disabled to run calibration and/or set robot preset configurations.
        for name in self.follower_arms:
            self.follower_arms[name].write("Torque_Enable", TorqueMode.DISABLED.value)
        for name in self.leader_arms:
            self.leader_arms[name].write("Torque_Enable", TorqueMode.DISABLED.value)

        self.activate_calibration()

        # Set robot preset (e.g. torque in leader gripper for Koch v1.1)
        if self.robot_type in ["koch", "koch_bimanual"]:
            self.set_koch_robot_preset()
        elif self.robot_type == "aloha":
            self.set_aloha_robot_preset()
        elif self.robot_type in ["so100", "moss"]:
            self.set_so100_robot_preset()

        # Enable torque on all motors of the follower arms
        for name in self.follower_arms:
            print(f"Activating torque on {name} follower arm.")
            self.follower_arms[name].write("Torque_Enable", 1)

        if self.config.gripper_open_degree is not None:
            if self.robot_type not in ["koch", "koch_bimanual"]:
                raise NotImplementedError(
                    f"{self.robot_type} does not support position AND current control in the handle, which is required to set the gripper open."
                )
            # Set the leader arm in torque mode with the gripper motor set to an angle. This makes it possible
            # to squeeze the gripper and have it spring back to an open position on its own.
            for name in self.leader_arms:
                self.leader_arms[name].write("Torque_Enable", 1, "gripper")
                self.leader_arms[name].write("Goal_Position", self.config.gripper_open_degree, "gripper")

        # Check that both arms can be read
        for name in self.follower_arms:
            self.follower_arms[name].read("Present_Position")
        for name in self.leader_arms:
            self.leader_arms[name].read("Present_Position")

        # Connect the cameras
        for name in self.cameras:
            self.cameras[name].connect()

        self.is_connected = True
    def activate_calibration(self):
        """After calibration, all motors function in human-interpretable ranges.

        Rotations are expressed in degrees in a nominal range of [-180, 180],
        and linear motions (like the gripper of Aloha) in a nominal range of [0, 100].
        """

        def load_or_run_calibration_(name, arm, arm_type):
            arm_id = get_arm_id(name, arm_type)
            arm_calib_path = self.calibration_dir / f"{arm_id}.json"

            if arm_calib_path.exists():
                with open(arm_calib_path) as f:
                    calibration = json.load(f)
            else:
                # TODO(rcadene): display a warning in __init__ if calibration file not available
                print(f"Missing calibration file '{arm_calib_path}'")

                if self.robot_type in ["koch", "koch_bimanual", "aloha"]:
                    from lerobot.common.robot_devices.robots.dynamixel_calibration import run_arm_calibration

                    calibration = run_arm_calibration(arm, self.robot_type, name, arm_type)

                elif self.robot_type in ["so100", "moss"]:
                    from lerobot.common.robot_devices.robots.feetech_calibration import (
                        run_arm_auto_calibration,
                        run_arm_manual_calibration,
                    )

                    # TODO(rcadene): better way to handle mocking + test run_arm_auto_calibration
                    if arm_type == "leader" or arm.mock:
                        calibration = run_arm_manual_calibration(arm, self.robot_type, name, arm_type)
                    elif arm_type == "follower":
                        calibration = run_arm_auto_calibration(arm, self.robot_type, name, arm_type)
                    else:
                        raise ValueError(arm_type)

                print(f"Calibration is done! Saving calibration file '{arm_calib_path}'")
                arm_calib_path.parent.mkdir(parents=True, exist_ok=True)
                with open(arm_calib_path, "w") as f:
                    json.dump(calibration, f)

            return calibration

        for name, arm in self.follower_arms.items():
            calibration = load_or_run_calibration_(name, arm, "follower")
            arm.set_calibration(calibration)
        for name, arm in self.leader_arms.items():
            calibration = load_or_run_calibration_(name, arm, "leader")
            arm.set_calibration(calibration)

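    # For illustration, the file naming convention above: with
    # calibration_dir="~/.cache/calibration/koch" (an assumed value) and a follower
    # arm named "main", load_or_run_calibration_ reads or writes
    # ~/.cache/calibration/koch/main_follower.json, since:
    #
    #     get_arm_id("main", "follower")  # -> "main_follower"
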
    def set_koch_robot_preset(self):
        def set_operating_mode_(arm):
            from lerobot.common.robot_devices.motors.dynamixel import TorqueMode

            if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
                raise ValueError("To set the robot preset, torque must be disabled on all motors.")

            # Use 'extended position mode' for all motors except gripper, because in joint mode the servos
            # can't rotate more than 360 degrees (from 0 to 4095). Also, some mistakes can happen while
            # assembling the arm, and you could end up with a servo at position 0 or 4095 at a crucial point.
            # See https://emanual.robotis.com/docs/en/dxl/x/x_series/#operating-mode11
            all_motors_except_gripper = [name for name in arm.motor_names if name != "gripper"]
            if len(all_motors_except_gripper) > 0:
                # 4 corresponds to Extended Position on Koch motors
                arm.write("Operating_Mode", 4, all_motors_except_gripper)

            # Use 'position control current based' for the gripper so that it is limited by its current limit.
            # For the follower gripper, it means it can grasp an object without forcing too much, even though
            # its goal position is a complete grasp (both gripper fingers are ordered to join and touch).
            # For the leader gripper, it means we can use it as a physical trigger, since we can push it with
            # a finger to make it move, and it will move back to its original target position when we release
            # the force.
            # 5 corresponds to Current Controlled Position on Koch gripper motors "xl330-m077, xl330-m288"
            arm.write("Operating_Mode", 5, "gripper")

        for name in self.follower_arms:
            set_operating_mode_(self.follower_arms[name])

            # Set better PID values to close the gap between recorded states and actions
            # TODO(rcadene): Implement an automatic procedure to set optimal PID values for each motor
            self.follower_arms[name].write("Position_P_Gain", 1500, "elbow_flex")
            self.follower_arms[name].write("Position_I_Gain", 0, "elbow_flex")
            self.follower_arms[name].write("Position_D_Gain", 600, "elbow_flex")

        if self.config.gripper_open_degree is not None:
            for name in self.leader_arms:
                set_operating_mode_(self.leader_arms[name])

                # Enable torque on the gripper of the leader arms, and move it to the configured open angle
                # (e.g. 45 degrees), so that we can use it as a trigger to close the gripper of the follower arms.
                self.leader_arms[name].write("Torque_Enable", 1, "gripper")
                self.leader_arms[name].write("Goal_Position", self.config.gripper_open_degree, "gripper")

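    # A sanity-check sketch after applying this preset to an arm `arm` (assumes
    # `read` returns a numpy array, as its usage above suggests):
    #
    #     assert (arm.read("Operating_Mode", "gripper") == 5).all()
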
    def set_aloha_robot_preset(self):
        def set_shadow_(arm):
            # Set secondary/shadow ID for shoulder and elbow. These joints have two motors.
            # As a result, if only one of them is required to move to a certain position,
            # the other will follow. This is to avoid breaking the motors.
            if "shoulder_shadow" in arm.motor_names:
                shoulder_idx = arm.read("ID", "shoulder")
                arm.write("Secondary_ID", shoulder_idx, "shoulder_shadow")

            if "elbow_shadow" in arm.motor_names:
                elbow_idx = arm.read("ID", "elbow")
                arm.write("Secondary_ID", elbow_idx, "elbow_shadow")

        for name in self.follower_arms:
            set_shadow_(self.follower_arms[name])

        for name in self.leader_arms:
            set_shadow_(self.leader_arms[name])

        for name in self.follower_arms:
            # Set a velocity limit of 131 as advised by Trossen Robotics
            self.follower_arms[name].write("Velocity_Limit", 131)

            # Use 'extended position mode' for all motors except gripper, because in joint mode the servos
            # can't rotate more than 360 degrees (from 0 to 4095). Also, some mistakes can happen while
            # assembling the arm, and you could end up with a servo at position 0 or 4095 at a crucial point.
            # See https://emanual.robotis.com/docs/en/dxl/x/x_series/#operating-mode11
            all_motors_except_gripper = [
                motor_name for motor_name in self.follower_arms[name].motor_names if motor_name != "gripper"
            ]
            if len(all_motors_except_gripper) > 0:
                # 4 corresponds to Extended Position on Aloha motors
                self.follower_arms[name].write("Operating_Mode", 4, all_motors_except_gripper)

            # Use 'position control current based' for the follower gripper so that it is limited by its
            # current limit. It can grasp an object without forcing too much, even though its goal position
            # is a complete grasp (both gripper fingers are ordered to join and touch).
            # 5 corresponds to Current Controlled Position on the Aloha follower gripper "xm430-w350"
            self.follower_arms[name].write("Operating_Mode", 5, "gripper")

        # Note: We can't enable torque on the leader gripper since "xc430-w150" doesn't have
        # a Current Controlled Position mode.

        if self.config.gripper_open_degree is not None:
            warnings.warn(
                f"`gripper_open_degree` is set to {self.config.gripper_open_degree}, but None is expected for Aloha instead",
                stacklevel=1,
            )

    def set_so100_robot_preset(self):
        for name in self.follower_arms:
            # Mode=0 for Position Control
            self.follower_arms[name].write("Mode", 0)
            # Set P_Coefficient to a lower value to avoid shakiness (default is 32)
            self.follower_arms[name].write("P_Coefficient", 16)
            # Set I_Coefficient and D_Coefficient to their default values, 0 and 32
            self.follower_arms[name].write("I_Coefficient", 0)
            self.follower_arms[name].write("D_Coefficient", 32)
            # Close the write lock so that Maximum_Acceleration gets written to the EPROM address,
            # which is mandatory for Maximum_Acceleration to take effect after rebooting.
            self.follower_arms[name].write("Lock", 0)
            # Set Maximum_Acceleration to 254 to speed up acceleration and deceleration of
            # the motors. Note: this configuration is not in the official STS3215 Memory Table.
            self.follower_arms[name].write("Maximum_Acceleration", 254)
            self.follower_arms[name].write("Acceleration", 254)

    def teleop_step(
        self, record_data=False
    ) -> None | tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]:
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                "ManipulatorRobot is not connected. You need to run `robot.connect()`."
            )

        # Prepare to assign the position of the leader to the follower
        leader_pos = {}
        for name in self.leader_arms:
            before_lread_t = time.perf_counter()
            leader_pos[name] = self.leader_arms[name].read("Present_Position")
            leader_pos[name] = torch.from_numpy(leader_pos[name])
            self.logs[f"read_leader_{name}_pos_dt_s"] = time.perf_counter() - before_lread_t

        # Send goal position to the follower
        follower_goal_pos = {}
        for name in self.follower_arms:
            before_fwrite_t = time.perf_counter()
            goal_pos = leader_pos[name]

            # Cap goal position when too far away from present position.
            # A slower fps is expected due to the extra read from the follower.
            if self.config.max_relative_target is not None:
                present_pos = self.follower_arms[name].read("Present_Position")
                present_pos = torch.from_numpy(present_pos)
                goal_pos = ensure_safe_goal_position(goal_pos, present_pos, self.config.max_relative_target)

            # Used when record_data=True
            follower_goal_pos[name] = goal_pos

            goal_pos = goal_pos.numpy().astype(np.float32)
            self.follower_arms[name].write("Goal_Position", goal_pos)
            self.logs[f"write_follower_{name}_goal_pos_dt_s"] = time.perf_counter() - before_fwrite_t

        # Early exit when recording data is not requested
        if not record_data:
            return

        # TODO(rcadene): Add velocity and other info
        # Read follower position
        follower_pos = {}
        for name in self.follower_arms:
            before_fread_t = time.perf_counter()
            follower_pos[name] = self.follower_arms[name].read("Present_Position")
            follower_pos[name] = torch.from_numpy(follower_pos[name])
            self.logs[f"read_follower_{name}_pos_dt_s"] = time.perf_counter() - before_fread_t

        # Create state by concatenating follower current position
        state = []
        for name in self.follower_arms:
            if name in follower_pos:
                state.append(follower_pos[name])
        state = torch.cat(state)

        # Create action by concatenating follower goal position
        action = []
        for name in self.follower_arms:
            if name in follower_goal_pos:
                action.append(follower_goal_pos[name])
        action = torch.cat(action)

        # Capture images from cameras
        images = {}
        for name in self.cameras:
            before_camread_t = time.perf_counter()
            images[name] = self.cameras[name].async_read()
            images[name] = torch.from_numpy(images[name])
            self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
            self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

        # Populate output dictionaries
        obs_dict, action_dict = {}, {}
        obs_dict["observation.state"] = state
        action_dict["action"] = action
        for name in self.cameras:
            obs_dict[f"observation.images.{name}"] = images[name]

        return obs_dict, action_dict

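    # A fixed-rate teleoperation loop on top of teleop_step() might look like this
    # sketch (the busy_wait import path and fps value are assumptions):
    #
    #     from lerobot.common.robot_devices.utils import busy_wait
    #
    #     fps = 30
    #     while True:
    #         start_t = time.perf_counter()
    #         robot.teleop_step()
    #         busy_wait(1 / fps - (time.perf_counter() - start_t))
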
    def capture_observation(self):
        """The returned observations do not have a batch dimension."""
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                "ManipulatorRobot is not connected. You need to run `robot.connect()`."
            )

        # Read follower position
        follower_pos = {}
        for name in self.follower_arms:
            before_fread_t = time.perf_counter()
            follower_pos[name] = self.follower_arms[name].read("Present_Position")
            follower_pos[name] = torch.from_numpy(follower_pos[name])
            self.logs[f"read_follower_{name}_pos_dt_s"] = time.perf_counter() - before_fread_t

        # Create state by concatenating follower current position
        state = []
        for name in self.follower_arms:
            if name in follower_pos:
                state.append(follower_pos[name])
        state = torch.cat(state)

        # Capture images from cameras
        images = {}
        for name in self.cameras:
            before_camread_t = time.perf_counter()
            images[name] = self.cameras[name].async_read()
            images[name] = torch.from_numpy(images[name])
            self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
            self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

        # Populate output dictionaries and format to pytorch
        obs_dict = {}
        obs_dict["observation.state"] = state
        for name in self.cameras:
            obs_dict[f"observation.images.{name}"] = images[name]
        return obs_dict

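    # The returned dict is flat; a sketch of consuming it (the camera name "laptop"
    # is an assumed example, not a fixed key):
    #
    #     obs = robot.capture_observation()
    #     state = obs["observation.state"]           # torch.Tensor of joint positions
    #     frame = obs["observation.images.laptop"]   # torch.Tensor image from one camera
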
    def send_action(self, action: torch.Tensor) -> torch.Tensor:
        """Command the follower arms to move to a target joint configuration.

        The relative action magnitude may be clipped depending on the configuration parameter
        `max_relative_target`. In this case, the action sent differs from the original action.
        Thus, this function always returns the action actually sent.

        Args:
            action: tensor containing the concatenated goal positions for the follower arms.
        """
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                "ManipulatorRobot is not connected. You need to run `robot.connect()`."
            )

        from_idx = 0
        to_idx = 0
        action_sent = []
        for name in self.follower_arms:
            # Get goal position of each follower arm by splitting the action vector
            to_idx += len(self.follower_arms[name].motor_names)
            goal_pos = action[from_idx:to_idx]
            from_idx = to_idx

            # Cap goal position when too far away from present position.
            # A slower fps is expected due to the extra read from the follower.
            if self.config.max_relative_target is not None:
                present_pos = self.follower_arms[name].read("Present_Position")
                present_pos = torch.from_numpy(present_pos)
                goal_pos = ensure_safe_goal_position(goal_pos, present_pos, self.config.max_relative_target)

            # Save tensor to concat and return
            action_sent.append(goal_pos)

            # Send goal position to each follower
            goal_pos = goal_pos.numpy().astype(np.float32)
            self.follower_arms[name].write("Goal_Position", goal_pos)

        return torch.cat(action_sent)

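    # send_action() expects one concatenated goal-position vector covering every
    # follower arm in order; a sketch for a single 6-motor follower (the motor
    # count is an assumption):
    #
    #     action = torch.zeros(6)              # one goal position per motor
    #     applied = robot.send_action(action)  # may be clipped by max_relative_target
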
    def print_logs(self):
        # TODO(aliberts): move robot-specific logs logic here
        pass

    def disconnect(self):
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                "ManipulatorRobot is not connected. You need to run `robot.connect()` before disconnecting."
            )

        for name in self.follower_arms:
            self.follower_arms[name].disconnect()

        for name in self.leader_arms:
            self.leader_arms[name].disconnect()

        for name in self.cameras:
            self.cameras[name].disconnect()

        self.is_connected = False

    def __del__(self):
        if getattr(self, "is_connected", False):
            self.disconnect()
@@ -1,216 +0,0 @@
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
from dataclasses import dataclass, field, replace

import torch
from stretch_body.gamepad_teleop import GamePadTeleop
from stretch_body.robot import Robot as StretchAPI
from stretch_body.robot_params import RobotParams

from lerobot.common.robot_devices.cameras.utils import Camera


@dataclass
class StretchRobotConfig:
    robot_type: str | None = "stretch"
    cameras: dict[str, Camera] = field(default_factory=lambda: {})
    # TODO(aliberts): add feature with max_relative_target
    # TODO(aliberts): add comment on max_relative_target
    max_relative_target: list[float] | float | None = None

class StretchRobot(StretchAPI):
    """Wrapper of stretch_body.robot.Robot"""

    def __init__(self, config: StretchRobotConfig | None = None, **kwargs):
        super().__init__()
        if config is None:
            config = StretchRobotConfig()
        # Overwrite config arguments using kwargs
        self.config = replace(config, **kwargs)

        self.robot_type = self.config.robot_type
        self.cameras = self.config.cameras
        self.is_connected = False
        self.teleop = None
        self.logs = {}

        # TODO(aliberts): test this
        RobotParams.set_logging_level("WARNING")
        RobotParams.set_logging_formatter("brief_console_formatter")

        self.state_keys = None
        self.action_keys = None

    def connect(self) -> None:
        self.is_connected = self.startup()
        if not self.is_connected:
            print("Another process is already using Stretch. Try running 'stretch_free_robot_process.py'")
            raise ConnectionError()

        for name in self.cameras:
            self.cameras[name].connect()
            self.is_connected = self.is_connected and self.cameras[name].is_connected

        if not self.is_connected:
            print("Could not connect to the cameras, check that all cameras are plugged in.")
            raise ConnectionError()

        self.run_calibration()

    def run_calibration(self) -> None:
        if not self.is_homed():
            self.home()

    def teleop_step(
        self, record_data=False
    ) -> None | tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]:
        # TODO(aliberts): return ndarrays instead of torch.Tensors
        if not self.is_connected:
            raise ConnectionError()

        if self.teleop is None:
            self.teleop = GamePadTeleop(robot_instance=False)
            self.teleop.startup(robot=self)

        before_read_t = time.perf_counter()
        state = self.get_state()
        action = self.teleop.gamepad_controller.get_state()
        self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t

        before_write_t = time.perf_counter()
        self.teleop.do_motion(robot=self)
        self.push_command()
        self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t

        if self.state_keys is None:
            self.state_keys = list(state)

        if not record_data:
            return

        state = torch.as_tensor(list(state.values()))
        action = torch.as_tensor(list(action.values()))

        # Capture images from cameras
        images = {}
        for name in self.cameras:
            before_camread_t = time.perf_counter()
            images[name] = self.cameras[name].async_read()
            images[name] = torch.from_numpy(images[name])
            self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
            self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

        # Populate output dictionaries
        obs_dict, action_dict = {}, {}
        obs_dict["observation.state"] = state
        action_dict["action"] = action
        for name in self.cameras:
            obs_dict[f"observation.images.{name}"] = images[name]

        return obs_dict, action_dict

    def get_state(self) -> dict:
        status = self.get_status()
        return {
            "head_pan.pos": status["head"]["head_pan"]["pos"],
            "head_tilt.pos": status["head"]["head_tilt"]["pos"],
            "lift.pos": status["lift"]["pos"],
            "arm.pos": status["arm"]["pos"],
            "wrist_pitch.pos": status["end_of_arm"]["wrist_pitch"]["pos"],
            "wrist_roll.pos": status["end_of_arm"]["wrist_roll"]["pos"],
            "wrist_yaw.pos": status["end_of_arm"]["wrist_yaw"]["pos"],
            "gripper.pos": status["end_of_arm"]["stretch_gripper"]["pos"],
            "base_x.vel": status["base"]["x_vel"],
            "base_y.vel": status["base"]["y_vel"],
            "base_theta.vel": status["base"]["theta_vel"],
        }

    def capture_observation(self) -> dict:
        # TODO(aliberts): return ndarrays instead of torch.Tensors
        before_read_t = time.perf_counter()
        state = self.get_state()
        self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t

        if self.state_keys is None:
            self.state_keys = list(state)

        state = torch.as_tensor(list(state.values()))

        # Capture images from cameras
        images = {}
        for name in self.cameras:
            before_camread_t = time.perf_counter()
            images[name] = self.cameras[name].async_read()
            images[name] = torch.from_numpy(images[name])
            self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
            self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

        # Populate output dictionaries
        obs_dict = {}
        obs_dict["observation.state"] = state
        for name in self.cameras:
            obs_dict[f"observation.images.{name}"] = images[name]

        return obs_dict

    def send_action(self, action: torch.Tensor) -> torch.Tensor:
        # TODO(aliberts): return ndarrays instead of torch.Tensors
        if not self.is_connected:
            raise ConnectionError()

        if self.teleop is None:
            self.teleop = GamePadTeleop(robot_instance=False)
            self.teleop.startup(robot=self)

        if self.action_keys is None:
            dummy_action = self.teleop.gamepad_controller.get_state()
            self.action_keys = list(dummy_action.keys())

        action_dict = dict(zip(self.action_keys, action.tolist(), strict=True))

        before_write_t = time.perf_counter()
        self.teleop.do_motion(state=action_dict, robot=self)
        self.push_command()
        self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t

        # TODO(aliberts): return action_sent when motion is limited
        return action

    def print_logs(self) -> None:
        # TODO(aliberts): move robot-specific logs logic here
        pass

    def teleop_safety_stop(self) -> None:
        if self.teleop is not None:
            self.teleop._safety_stop(robot=self)

    def disconnect(self) -> None:
        self.stop()
        if self.teleop is not None:
            self.teleop.gamepad_controller.stop()
            self.teleop.stop()

        if len(self.cameras) > 0:
            for cam in self.cameras.values():
                cam.disconnect()

        self.is_connected = False

    def __del__(self):
        self.disconnect()
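A minimal driving sketch for this wrapper, assuming a paired gamepad and connected cameras (the loop length is illustrative):

    robot = StretchRobot()
    robot.connect()          # startup checks, camera connection, homing if needed
    for _ in range(100):
        robot.teleop_step()  # one gamepad teleoperation step per call
    robot.disconnect()
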
@@ -1,20 +1,9 @@
from typing import Protocol


def get_arm_id(name, arm_type):
    """Returns the string identifier of a robot arm. For instance, for a bimanual manipulator
    like Aloha, it could be left_follower, right_follower, left_leader, or right_leader.
    """
    return f"{name}_{arm_type}"


class Robot(Protocol):
    # TODO(rcadene, aliberts): Add unit test checking the protocol is implemented in the corresponding classes
    robot_type: str

    def connect(self): ...
    def init_teleop(self): ...
    def run_calibration(self): ...
    def teleop_step(self, record_data=False): ...
    def capture_observation(self): ...
    def send_action(self, action): ...
    def disconnect(self): ...

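Because Robot is a typing.Protocol, any class exposing these methods satisfies it structurally, with no inheritance required. A sketch (MockRobot is a hypothetical name for illustration):

    class MockRobot:
        robot_type: str = "mock"

        def connect(self): ...
        def init_teleop(self): ...
        def run_calibration(self): ...
        def teleop_step(self, record_data=False): ...
        def capture_observation(self): ...
        def send_action(self, action): ...
        def disconnect(self): ...

    robot: Robot = MockRobot()  # accepted by static type checkers
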
@@ -1,35 +1,3 @@
import platform
import time


def busy_wait(seconds):
    if platform.system() == "Darwin":
        # On Mac, `time.sleep` is not accurate and we need to use this while loop trick,
        # but it consumes CPU cycles.
        # TODO(rcadene): find an alternative: from Python 3.11, time.sleep is precise
        end_time = time.perf_counter() + seconds
        while time.perf_counter() < end_time:
            pass
    else:
        # On Linux, time.sleep is accurate
        if seconds > 0:
            time.sleep(seconds)


def safe_disconnect(func):
    # TODO(aliberts): Allow to pass custom exceptions
    # (e.g. ThreadServiceExit, KeyboardInterrupt, SystemExit, UnpluggedError, DynamixelCommError)
    def wrapper(robot, *args, **kwargs):
        try:
            return func(robot, *args, **kwargs)
        except Exception as e:
            if robot.is_connected:
                robot.disconnect()
            raise e

    return wrapper


class RobotDeviceNotConnectedError(Exception):
    """Exception raised when the robot device is not connected."""

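safe_disconnect is designed to wrap entry points whose first argument is the robot, guaranteeing a clean disconnect on any crash. A usage sketch (the record function is hypothetical):

    @safe_disconnect
    def record(robot, fps: int = 30):
        # control loop that may raise at any time
        ...
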
@@ -14,9 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import os.path as osp
import platform
import random
from contextlib import contextmanager
from datetime import datetime, timezone
@@ -29,18 +27,6 @@ import torch
from omegaconf import DictConfig


def none_or_int(value):
    if value == "None":
        return None
    return int(value)


def inside_slurm():
    """Check whether the python process was launched through slurm"""
    # TODO(rcadene): return False for interactive mode `--pty bash`
    return "SLURM_JOB_ID" in os.environ


def get_safe_torch_device(cfg_device: str, log: bool = False) -> torch.device:
    """Given a string, return a torch.device with checks on whether the device is available."""
    match cfg_device:
@@ -172,6 +158,7 @@ def init_hydra_config(config_path: str, overrides: list[str] | None = None) -> DictConfig:
        version_base="1.2",
    )
    cfg = hydra.compose(Path(config_path).stem, overrides)

    return cfg

@@ -190,30 +177,3 @@ def print_cuda_memory_usage():


def capture_timestamp_utc():
    return datetime.now(timezone.utc)


def say(text, blocking=False):
    # Dispatch on the OS: macOS, Linux, or Windows.
    if platform.system() == "Darwin":
        cmd = f'say "{text}"'
        if not blocking:
            cmd += " &"
    elif platform.system() == "Linux":
        cmd = f'spd-say "{text}"'
        if blocking:
            cmd += " --wait"
    elif platform.system() == "Windows":
        # TODO(rcadene): Make blocking option work for Windows
        cmd = (
            'PowerShell -Command "Add-Type -AssemblyName System.Speech; '
            f"(New-Object System.Speech.Synthesis.SpeechSynthesizer).Speak('{text}')\""
        )

    os.system(cmd)


def log_say(text, play_sounds, blocking=False):
    logging.info(text)

    if play_sounds:
        say(text, blocking)
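
A usage sketch combining the helpers above (the spoken text is illustrative):

    log_say("Recording episode 1", play_sounds=True)             # logs, then speaks asynchronously
    log_say("Stop recording", play_sounds=True, blocking=True)   # waits for speech to finish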