Compare commits
30 Commits
user/mshuk ... feat/autop

d4f9807ed0
caadc887ad
8f98672ecc
78df84f758
85099f45f4
d694ea1d38
a00936686f
2feb5edc65
b80e55ca44
e8ce388109
a4c1da25de
a003e7c081
a27411022d
3827974b58
b299cfea8a
bf6f89a5b5
8861546ad8
9c1a893ee3
e81c36cf74
ed83cbd4f2
2a33b9ad87
6e85aa13ec
af05a1725c
800c4a847f
bba8c4c0d4
68b369e321
8d60ac3ffc
659ec4434d
da265ca920
a1809ad3de
.github/workflows/pr_style_bot.yml (vendored, new file, 161 lines)
@@ -0,0 +1,161 @@
# Adapted from https://github.com/huggingface/diffusers/blob/main/.github/workflows/pr_style_bot.yml
name: PR Style Bot

on:
  issue_comment:
    types: [created]

permissions: {}

env:
  PYTHON_VERSION: "3.10"

jobs:
  check-permissions:
    if: >
      contains(github.event.comment.body, '@bot /style') &&
      github.event.issue.pull_request != null
    runs-on: ubuntu-latest
    outputs:
      is_authorized: ${{ steps.check_user_permission.outputs.has_permission }}
    steps:
      - name: Check user permission
        id: check_user_permission
        uses: actions/github-script@v6
        with:
          script: |
            const comment_user = context.payload.comment.user.login;
            const { data: permission } = await github.rest.repos.getCollaboratorPermissionLevel({
              owner: context.repo.owner,
              repo: context.repo.repo,
              username: comment_user
            });

            const authorized =
              permission.permission === 'admin' ||
              permission.permission === 'write';

            console.log(
              `User ${comment_user} has permission level: ${permission.permission}, ` +
              `authorized: ${authorized} (admins & maintainers allowed)`
            );

            core.setOutput('has_permission', authorized);

  run-style-bot:
    needs: check-permissions
    if: needs.check-permissions.outputs.is_authorized == 'true'
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    steps:
      - name: Extract PR details
        id: pr_info
        uses: actions/github-script@v6
        with:
          script: |
            const prNumber = context.payload.issue.number;
            const { data: pr } = await github.rest.pulls.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              pull_number: prNumber
            });

            // We capture both the branch ref and the "full_name" of the head repo
            // so that we can check out the correct repository & branch (including forks).
            core.setOutput("prNumber", prNumber);
            core.setOutput("headRef", pr.head.ref);
            core.setOutput("headRepoFullName", pr.head.repo.full_name);

      - name: Check out PR branch
        uses: actions/checkout@v4
        env:
          HEADREPOFULLNAME: ${{ steps.pr_info.outputs.headRepoFullName }}
          HEADREF: ${{ steps.pr_info.outputs.headRef }}
        with:
          persist-credentials: true
          # Instead of checking out the base repo, use the contributor's repo name
          repository: ${{ env.HEADREPOFULLNAME }}
          ref: ${{ env.HEADREF }}
          # You may need fetch-depth: 0 for being able to push
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Debug
        env:
          HEADREPOFULLNAME: ${{ steps.pr_info.outputs.headRepoFullName }}
          HEADREF: ${{ steps.pr_info.outputs.headRef }}
          PRNUMBER: ${{ steps.pr_info.outputs.prNumber }}
        run: |
          echo "PR number: ${PRNUMBER}"
          echo "Head Ref: ${HEADREF}"
          echo "Head Repo Full Name: ${HEADREPOFULLNAME}"

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Get Ruff Version from pre-commit-config.yaml
        id: get-ruff-version
        run: |
          RUFF_VERSION=$(awk '/repo: https:\/\/github.com\/astral-sh\/ruff-pre-commit/{flag=1;next}/rev:/{if(flag){print $2;exit}}' .pre-commit-config.yaml)
          echo "ruff_version=${RUFF_VERSION}" >> $GITHUB_OUTPUT

      - name: Install Ruff
        env:
          RUFF_VERSION: ${{ steps.get-ruff-version.outputs.ruff_version }}
        run: python -m pip install "ruff==${RUFF_VERSION}"

      - name: Ruff check
        run: ruff check --fix

      - name: Ruff format
        run: ruff format

      - name: Commit and push changes
        id: commit_and_push
        env:
          HEADREPOFULLNAME: ${{ steps.pr_info.outputs.headRepoFullName }}
          HEADREF: ${{ steps.pr_info.outputs.headRef }}
          PRNUMBER: ${{ steps.pr_info.outputs.prNumber }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          echo "HEADREPOFULLNAME: ${HEADREPOFULLNAME}, HEADREF: ${HEADREF}"
          # Configure git with the Actions bot user
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git config --local lfs.https://github.com/.locksverify false

          # Make sure your 'origin' remote is set to the contributor's fork
          git remote set-url origin "https://x-access-token:${GITHUB_TOKEN}@github.com/${HEADREPOFULLNAME}.git"

          # If there are changes after running style/quality, commit them
          if [ -n "$(git status --porcelain)" ]; then
            git add .
            git commit -m "Apply style fixes"
            # Push to the original contributor's forked branch
            git push origin HEAD:${HEADREF}
            echo "changes_pushed=true" >> $GITHUB_OUTPUT
          else
            echo "No changes to commit."
            echo "changes_pushed=false" >> $GITHUB_OUTPUT
          fi

      - name: Comment on PR with workflow run link
        if: steps.commit_and_push.outputs.changes_pushed == 'true'
        uses: actions/github-script@v6
        with:
          script: |
            const prNumber = parseInt(process.env.prNumber, 10);
            const runUrl = `${process.env.GITHUB_SERVER_URL}/${process.env.GITHUB_REPOSITORY}/actions/runs/${process.env.GITHUB_RUN_ID}`

            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber,
              body: `Style fixes have been applied. [View the workflow run here](${runUrl}).`
            });
        env:
          prNumber: ${{ steps.pr_info.outputs.prNumber }}

.github/workflows/quality.yml (vendored, 18 lines changed)
@@ -32,13 +32,27 @@ jobs:
        id: get-ruff-version
        run: |
          RUFF_VERSION=$(awk '/repo: https:\/\/github.com\/astral-sh\/ruff-pre-commit/{flag=1;next}/rev:/{if(flag){print $2;exit}}' .pre-commit-config.yaml)
-         echo "RUFF_VERSION=${RUFF_VERSION}" >> $GITHUB_ENV
+         echo "ruff_version=${RUFF_VERSION}" >> $GITHUB_OUTPUT

      - name: Install Ruff
-       run: python -m pip install "ruff==${{ env.RUFF_VERSION }}"
+       env:
+         RUFF_VERSION: ${{ steps.get-ruff-version.outputs.ruff_version }}
+       run: python -m pip install "ruff==${RUFF_VERSION}"

      - name: Ruff check
        run: ruff check --output-format=github

      - name: Ruff format
        run: ruff format --diff

+  typos:
+    name: Typos
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: typos-action
+        uses: crate-ci/typos@v1.29.10
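For readers less familiar with awk, here is a rough Python equivalent of the Ruff version extraction used by both workflows above. It is only a sketch, assuming the standard layout of `.pre-commit-config.yaml` (a `rev:` line following the ruff-pre-commit `repo:` line, as shown further below):

```python
import re

def ruff_version_from_precommit(path: str = ".pre-commit-config.yaml") -> str | None:
    """Return the `rev:` value pinned under the ruff-pre-commit repo entry."""
    flag = False
    with open(path) as f:
        for line in f:
            if "repo: https://github.com/astral-sh/ruff-pre-commit" in line:
                flag = True  # same role as awk's {flag=1;next}
            elif flag:
                m = re.search(r"rev:\s*(\S+)", line)
                if m:
                    return m.group(1)  # awk's {print $2; exit}
    return None
```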
.github/workflows/test-docker-build.yml (vendored, 2 lines changed)
@@ -43,7 +43,7 @@ jobs:
    needs: get_changed_files
    runs-on:
      group: aws-general-8-plus
-   if: ${{ needs.get_changed_files.outputs.matrix }} != ''
+   if: needs.get_changed_files.outputs.matrix != ''
    strategy:
      fail-fast: false
      matrix:
.pre-commit-config.yaml
@@ -2,6 +2,7 @@ exclude: ^(tests/data)
default_language_version:
  python: python3.10
repos:
+  ##### Style / Misc. #####
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
@@ -13,21 +14,34 @@ repos:
      - id: check-toml
      - id: end-of-file-fixer
      - id: trailing-whitespace
+  - repo: https://github.com/crate-ci/typos
+    rev: v1.30.0
+    hooks:
+      - id: typos
+        args: [--force-exclude]
  - repo: https://github.com/asottile/pyupgrade
    rev: v3.19.1
    hooks:
      - id: pyupgrade
  - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.9.6
+    rev: v0.9.9
    hooks:
      - id: ruff
        args: [--fix]
      - id: ruff-format
+
+  ##### Security #####
  - repo: https://github.com/gitleaks/gitleaks
-    rev: v8.23.3
+    rev: v8.24.0
    hooks:
      - id: gitleaks
  - repo: https://github.com/woodruffw/zizmor-pre-commit
-    rev: v1.3.1
+    rev: v1.4.1
    hooks:
      - id: zizmor
+  - repo: https://github.com/PyCQA/bandit
+    rev: 1.8.3
+    hooks:
+      - id: bandit
+        args: ["-c", "pyproject.toml"]
+        additional_dependencies: ["bandit[toml]"]
CONTRIBUTING.md
@@ -228,7 +228,7 @@ Follow these steps to start contributing:
   git commit
   ```

-Note, if you already commited some changes that have a wrong formatting, you can use:
+Note, if you already committed some changes that have a wrong formatting, you can use:
```bash
pre-commit run --all-files
```
README.md (34 lines changed)
@@ -23,15 +23,24 @@
</div>

<h2 align="center">
-    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">New robot in town: SO-100</a></p>
+    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">
+    Build Your Own SO-100 Robot!</a></p>
</h2>

<div align="center">
-    <img src="media/so100/leader_follower.webp?raw=true" alt="SO-100 leader and follower arms" title="SO-100 leader and follower arms" width="50%">
-    <p>We just added a new tutorial on how to build a more affordable robot, at the price of $110 per arm!</p>
-    <p>Teach it new skills by showing it a few moves with just a laptop.</p>
-    <p>Then watch your homemade robot act autonomously 🤯</p>
-    <p>Follow the link to the <a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">full tutorial for SO-100</a>.</p>
+    <img src="media/so100/leader_follower.webp?raw=true" alt="SO-100 leader and follower arms" title="SO-100 leader and follower arms" width="50%">
+
+    <p><strong>Meet the SO-100 – Just $110 per arm!</strong></p>
+    <p>Train it in minutes with a few simple moves on your laptop.</p>
+    <p>Then sit back and watch your creation act autonomously! 🤯</p>
+
+    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">
+    Get the full SO-100 tutorial here.</a></p>
+
+    <p>Want to take it to the next level? Make your SO-100 mobile by building LeKiwi!</p>
+    <p>Check out the <a href="https://github.com/huggingface/lerobot/blob/main/examples/11_use_lekiwi.md">LeKiwi tutorial</a> and bring your robot to life on wheels.</p>
+
+    <img src="media/lekiwi/kiwi.webp?raw=true" alt="LeKiwi mobile robot" title="LeKiwi mobile robot" width="50%">
</div>

<br/>

@@ -83,15 +92,20 @@ git clone https://github.com/huggingface/lerobot.git
cd lerobot
```

-Create a virtual environment with Python 3.10 and activate it, e.g. with [`miniconda`](https://docs.anaconda.com/free/miniconda/index.html):
+Create a virtual environment with Python 3.10 and activate it using [`uv`](https://github.com/astral-sh/uv):
```bash
-conda create -y -n lerobot python=3.10
-conda activate lerobot
+# Install uv if you haven't already
+curl -LsSf https://astral.sh/uv/install.sh | sh
+
+# Create and activate virtual environment with Python 3.10
+uv venv .venv --python=3.10
+source .venv/bin/activate  # On Unix/macOS
+# .venv\Scripts\activate  # On Windows
```

Install 🤗 LeRobot:
```bash
-pip install -e .
+uv pip install -e .
```

> **NOTE:** Depending on your platform, If you encounter any build errors during this step
@@ -114,7 +114,7 @@ We tried to measure the most impactful parameters for both encoding and decoding

Additional encoding parameters exist that are not included in this benchmark. In particular:
- `-preset` which allows for selecting encoding presets. This represents a collection of options that will provide a certain encoding speed to compression ratio. By leaving this parameter unspecified, it is considered to be `medium` for libx264 and libx265 and `8` for libsvtav1.
-- `-tune` which allows to optimize the encoding for certains aspects (e.g. film quality, fast decoding, etc.).
+- `-tune` which allows to optimize the encoding for certain aspects (e.g. film quality, fast decoding, etc.).

See the documentation mentioned above for more detailed info on these settings and for a more comprehensive list of other parameters.
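As an illustration only (not part of the benchmark), this is how such flags can be passed to ffmpeg from Python; the input/output names and chosen values here are placeholders:

```python
import subprocess

# Encode with libx264, making the implicit default preset explicit and
# tuning for decoding speed. `-preset` trades encoding speed for compression;
# `-tune` optimizes the encoding for a specific use case.
subprocess.run(
    [
        "ffmpeg", "-i", "input.mp4",
        "-c:v", "libx264", "-preset", "medium", "-tune", "fastdecode",
        "output.mp4",
    ],
    check=True,
)
```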
@@ -1,33 +1,29 @@
# Configure image
ARG PYTHON_VERSION=3.10

FROM python:${PYTHON_VERSION}-slim
-ARG PYTHON_VERSION
-ARG DEBIAN_FRONTEND=noninteractive
-
-# Install apt dependencies
+
+# Configure environment variables
+ARG PYTHON_VERSION
+ENV DEBIAN_FRONTEND=noninteractive
+ENV MUJOCO_GL="egl"
+ENV PATH="/opt/venv/bin:$PATH"
+
+# Install dependencies and set up Python in a single layer
RUN apt-get update && apt-get install -y --no-install-recommends \
-    build-essential cmake git git-lfs \
+    build-essential cmake git \
    libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
    speech-dispatcher libgeos-dev \
-    && apt-get clean && rm -rf /var/lib/apt/lists/*
+    && ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python \
+    && python -m venv /opt/venv \
+    && apt-get clean && rm -rf /var/lib/apt/lists/* \
+    && echo "source /opt/venv/bin/activate" >> /root/.bashrc

-# Create virtual environment
-RUN ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python
-RUN python -m venv /opt/venv
-ENV PATH="/opt/venv/bin:$PATH"
-RUN echo "source /opt/venv/bin/activate" >> /root/.bashrc
-
-# Install LeRobot
-RUN git lfs install
-RUN git clone https://github.com/huggingface/lerobot.git /lerobot
+# Clone repository and install LeRobot in a single layer
+COPY . /lerobot
WORKDIR /lerobot
-RUN pip install --upgrade --no-cache-dir pip
-RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]" \
-    --extra-index-url https://download.pytorch.org/whl/cpu
-
-# Set EGL as the rendering backend for MuJoCo
-ENV MUJOCO_GL="egl"
+RUN /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
+    && /opt/venv/bin/pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]" \
+    --extra-index-url https://download.pytorch.org/whl/cpu

# Execute in bash shell rather than python
CMD ["/bin/bash"]
@@ -8,7 +8,7 @@ ENV PATH="/opt/venv/bin:$PATH"

# Install dependencies and set up Python in a single layer
RUN apt-get update && apt-get install -y --no-install-recommends \
-    build-essential cmake git git-lfs \
+    build-essential cmake git \
    libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
    speech-dispatcher libgeos-dev \
    python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
@@ -18,8 +18,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    && echo "source /opt/venv/bin/activate" >> /root/.bashrc

# Clone repository and install LeRobot in a single layer
+COPY . /lerobot
WORKDIR /lerobot
-RUN git lfs install \
-    && git clone https://github.com/huggingface/lerobot.git . \
-    && /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
+RUN /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
    && /opt/venv/bin/pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]"
@@ -4,8 +4,8 @@

- [A. Source the parts](#a-source-the-parts)
- [B. Install LeRobot](#b-install-lerobot)
-- [C. Configure the motors](#c-configure-the-motors)
-- [D. Assemble the arms](#d-assemble-the-arms)
+- [C. Configure the Motors](#c-configure-the-motors)
+- [D. Step-by-Step Assembly Instructions](#d-step-by-step-assembly-instructions)
- [E. Calibrate](#e-calibrate)
- [F. Teleoperate](#f-teleoperate)
- [G. Record a dataset](#g-record-a-dataset)

@@ -70,6 +70,7 @@ conda install -y -c conda-forge "opencv>=4.10.0"
```
Great :hugs:! You are now done installing LeRobot and we can begin assembling the SO100 arms :robot:.
+Every time you now want to use LeRobot you can go to the `~/lerobot` folder where we installed LeRobot and run one of the commands.

## C. Configure the motors

> [!NOTE]
@@ -98,22 +99,22 @@ Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem5
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
-Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
+Remove the usb cable from your MotorsBus and press Enter when done.

[...Disconnect leader arm and press Enter...]

-The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
+The port of this MotorsBus is /dev/tty.usbmodem575E0031751
Reconnect the usb cable.
```
Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
-Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
+Remove the usb cable from your MotorsBus and press Enter when done.

[...Disconnect follower arm and press Enter...]

-The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
+The port of this MotorsBus is /dev/tty.usbmodem575E0032081
Reconnect the usb cable.
```
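The interactive procedure shown in these outputs boils down to diffing the set of serial ports before and after unplugging the arm. A minimal sketch of the idea, assuming `pyserial` is installed (this is not the actual LeRobot helper script):

```python
from serial.tools import list_ports  # pyserial

def find_port_interactively() -> str:
    before = {p.device for p in list_ports.comports()}
    input("Remove the usb cable from your MotorsBus and press Enter when done.")
    after = {p.device for p in list_ports.comports()}
    removed = before - after
    if len(removed) != 1:
        raise RuntimeError(f"Expected exactly one port to disappear, got: {removed}")
    print("Reconnect the usb cable.")
    return removed.pop()
```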
@@ -221,19 +222,13 @@ Redo the process for all your motors until ID 6. Do the same for the 6 motors of

Follow the video for removing gears. You need to remove the gear for the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm.

-#### c. Add motor horn to all 12 motors
+## D. Step-by-Step Assembly Instructions

-<details>
-<summary><strong>Video adding motor horn</strong></summary>
-
-<video src="https://github.com/user-attachments/assets/ef3391a4-ad05-4100-b2bd-1699bf86c969"></video>
-
-</details>
-
-Follow the video for adding the motor horn. For SO-100, you need to align the holes on the motor horn to the motor spline to be approximately 1:30, 4:30, 7:30 and 10:30.
-Try to avoid rotating the motor while doing so to keep position 2048 set during configuration. It is especially tricky for the leader motors as it is more sensible without the gears, but it's ok if it's a bit rotated.
-
-## D. Assemble the arms
+**Step 1: Clean Parts**
+- Remove all support material from the 3D-printed parts.
+
+---
+
+### Additional Guidance

<details>
<summary><strong>Video assembling arms</strong></summary>

@@ -242,7 +237,211 @@ Try to avoid rotating the motor while doing so to keep position 2048 set during

</details>

-Follow the video for assembling the arms. It is important to insert the cables into the motor that is being assembled before you assemble the motor into the arm! Inserting the cables beforehand is much easier than doing this afterward. The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can do it under 1 hour for the second arm.
+**Note:**
+This video provides visual guidance for assembling the arms, but it doesn't specify when or how to do the wiring. Inserting the cables beforehand is much easier than doing it afterward. The first arm may take a bit more than 1 hour to assemble, but once you get used to it, you can assemble the second arm in under 1 hour.

---

### First Motor

**Step 2: Insert Wires**
- Insert two wires into the first motor.

<img src="../media/tutorial/img1.jpg" style="height:300px;">

**Step 3: Install in Base**
- Place the first motor into the base.

<img src="../media/tutorial/img2.jpg" style="height:300px;">

**Step 4: Secure Motor**
- Fasten the motor with 4 screws. Two from the bottom and two from top.

**Step 5: Attach Motor Holder**
- Slide over the first motor holder and fasten it using two screws (one on each side).

<img src="../media/tutorial/img4.jpg" style="height:300px;">

**Step 6: Attach Motor Horns**
- Install both motor horns, securing the top horn with a screw. Try not to move the motor position when attaching the motor horn, especially for the leader arms, where we removed the gears.

<img src="../media/tutorial/img5.jpg" style="height:300px;">

<details>
<summary><strong>Video adding motor horn</strong></summary>
<video src="https://github.com/user-attachments/assets/ef3391a4-ad05-4100-b2bd-1699bf86c969"></video>
</details>

**Step 7: Attach Shoulder Part**
- Route one wire to the back of the robot and the other to the left or in photo towards you (see photo).
- Attach the shoulder part.

<img src="../media/tutorial/img6.jpg" style="height:300px;">

**Step 8: Secure Shoulder**
- Tighten the shoulder part with 4 screws on top and 4 on the bottom
  *(access bottom holes by turning the shoulder).*

---

### Second Motor Assembly

**Step 9: Install Motor 2**
- Slide the second motor in from the top and link the wire from motor 1 to motor 2.

<img src="../media/tutorial/img8.jpg" style="height:300px;">

**Step 10: Attach Shoulder Holder**
- Add the shoulder motor holder.
- Ensure the wire from motor 1 to motor 2 goes behind the holder while the other wire is routed upward (see photo).
- This part can be tight to assemble, you can use a workbench like the image or a similar setup to push the part around the motor.

<div style="display: flex;">
  <img src="../media/tutorial/img9.jpg" style="height:250px;">
  <img src="../media/tutorial/img10.jpg" style="height:250px;">
  <img src="../media/tutorial/img12.jpg" style="height:250px;">
</div>

**Step 11: Secure Motor 2**
- Fasten the second motor with 4 screws.

**Step 12: Attach Motor Horn**
- Attach both motor horns to motor 2, again use the horn screw.

**Step 13: Attach Base**
- Install the base attachment using 2 screws.

<img src="../media/tutorial/img11.jpg" style="height:300px;">

**Step 14: Attach Upper Arm**
- Attach the upper arm with 4 screws on each side.

<img src="../media/tutorial/img13.jpg" style="height:300px;">

---

### Third Motor Assembly

**Step 15: Install Motor 3**
- Route the motor cable from motor 2 through the cable holder to motor 3, then secure motor 3 with 4 screws.

**Step 16: Attach Motor Horn**
- Attach both motor horns to motor 3 and secure one again with a horn screw.

<img src="../media/tutorial/img14.jpg" style="height:300px;">

**Step 17: Attach Forearm**
- Connect the forearm to motor 3 using 4 screws on each side.

<img src="../media/tutorial/img15.jpg" style="height:300px;">

---

### Fourth Motor Assembly

**Step 18: Install Motor 4**
- Slide in motor 4, attach the cable from motor 3, and secure the cable in its holder with a screw.

<div style="display: flex;">
  <img src="../media/tutorial/img16.jpg" style="height:300px;">
  <img src="../media/tutorial/img19.jpg" style="height:300px;">
</div>

**Step 19: Attach Motor Holder 4**
- Install the fourth motor holder (a tight fit). Ensure one wire is routed upward and the wire from motor 3 is routed downward (see photo).

<img src="../media/tutorial/img17.jpg" style="height:300px;">

**Step 20: Secure Motor 4 & Attach Horn**
- Fasten motor 4 with 4 screws and attach its motor horns, use for one a horn screw.

<img src="../media/tutorial/img18.jpg" style="height:300px;">

---

### Wrist Assembly

**Step 21: Install Motor 5**
- Insert motor 5 into the wrist holder and secure it with 2 front screws.

<img src="../media/tutorial/img20.jpg" style="height:300px;">

**Step 22: Attach Wrist**
- Connect the wire from motor 4 to motor 5. And already insert the other wire for the gripper.
- Secure the wrist to motor 4 using 4 screws on both sides.

<img src="../media/tutorial/img22.jpg" style="height:300px;">

**Step 23: Attach Wrist Horn**
- Install only one motor horn on the wrist motor and secure it with a horn screw.

<img src="../media/tutorial/img23.jpg" style="height:300px;">

---

### Follower Configuration

**Step 24: Attach Gripper**
- Attach the gripper to motor 5.

<img src="../media/tutorial/img24.jpg" style="height:300px;">

**Step 25: Install Gripper Motor**
- Insert the gripper motor, connect the motor wire from motor 5 to motor 6, and secure it with 3 screws on each side.

<img src="../media/tutorial/img25.jpg" style="height:300px;">

**Step 26: Attach Gripper Horn & Claw**
- Attach the motor horns and again use a horn screw.
- Install the gripper claw and secure it with 4 screws on both sides.

<img src="../media/tutorial/img26.jpg" style="height:300px;">

**Step 27: Mount Controller**
- Attach the motor controller on the back.

<div style="display: flex;">
  <img src="../media/tutorial/img27.jpg" style="height:300px;">
  <img src="../media/tutorial/img28.jpg" style="height:300px;">
</div>

*Assembly complete – proceed to Leader arm assembly.*

---

### Leader Configuration

For the leader configuration, perform **Steps 1–23**. Make sure that you removed the motor gears from the motors.

**Step 24: Attach Leader Holder**
- Mount the leader holder onto the wrist and secure it with a screw.

<img src="../media/tutorial/img29.jpg" style="height:300px;">

**Step 25: Attach Handle**
- Attach the handle to motor 5 using 4 screws.

<img src="../media/tutorial/img30.jpg" style="height:300px;">

**Step 26: Install Gripper Motor**
- Insert the gripper motor, secure it with 3 screws on each side, attach a motor horn using a horn screw, and connect the motor wire.

<img src="../media/tutorial/img31.jpg" style="height:300px;">

**Step 27: Attach Trigger**
- Attach the follower trigger with 4 screws.

<img src="../media/tutorial/img32.jpg" style="height:300px;">

**Step 28: Mount Controller**
- Attach the motor controller on the back.

<div style="display: flex;">
  <img src="../media/tutorial/img27.jpg" style="height:300px;">
  <img src="../media/tutorial/img28.jpg" style="height:300px;">
</div>

*Assembly complete – proceed to calibration.*

## E. Calibrate
@@ -23,6 +23,9 @@ Follow this [README](https://github.com/SIGRobotics-UIUC/LeKiwi). It contains th

Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.

+### Wired version
+If you have the **wired** LeKiwi version you can skip the installation of the Raspberry Pi and setting up SSH. You can also run all commands directly on your PC for both the LeKiwi scripts and the leader arm scripts for teleoperating.
+
## B. Install software on Pi
Now we have to setup the remote PC that will run on the LeKiwi Robot. This is normally a Raspberry Pi, but can be any PC that can run on 5V and has enough usb ports (2 or more) for the cameras and motor control board.

@@ -185,7 +188,7 @@ sudo chmod 666 /dev/ttyACM1

#### d. Update config file

-IMPORTANTLY: Now that you have your ports of leader and follower arm and ip adress of the mobile-so100, update the **ip** in Network configuration, **port** in leader_arms and **port** in lekiwi. In the [`LeKiwiRobotConfig`](../lerobot/common/robot_devices/robots/configs.py) file. Where you will find something like:
+IMPORTANTLY: Now that you have your ports of leader and follower arm and ip address of the mobile-so100, update the **ip** in Network configuration, **port** in leader_arms and **port** in lekiwi. In the [`LeKiwiRobotConfig`](../lerobot/common/robot_devices/robots/configs.py) file. Where you will find something like:
```python
@RobotConfig.register_subclass("lekiwi")
@dataclass
@@ -246,6 +249,110 @@ class LeKiwiRobotConfig(RobotConfig):
        }
    )

+    teleop_keys: dict[str, str] = field(
+        default_factory=lambda: {
+            # Movement
+            "forward": "w",
+            "backward": "s",
+            "left": "a",
+            "right": "d",
+            "rotate_left": "z",
+            "rotate_right": "x",
+            # Speed control
+            "speed_up": "r",
+            "speed_down": "f",
+            # quit teleop
+            "quit": "q",
+        }
+    )
+
    mock: bool = False
```
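To see how a mapping like `teleop_keys` is typically consumed, here is a small illustrative loop (hypothetical code, not the LeRobot implementation) that inverts the mapping from action names to keys and dispatches on key presses:

```python
teleop_keys = {
    "forward": "w", "backward": "s", "left": "a", "right": "d",
    "rotate_left": "z", "rotate_right": "x",
    "speed_up": "r", "speed_down": "f", "quit": "q",
}
key_to_action = {key: action for action, key in teleop_keys.items()}

while True:
    key = input("key> ").strip().lower()
    action = key_to_action.get(key)
    if action == "quit":
        break
    if action is not None:
        print(f"-> {action}")  # a real robot would map this to a velocity command
```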
## Wired version

For the wired LeKiwi version your configured IP address should refer to your own laptop (127.0.0.1), because the leader arm and LeKiwi are in this case both connected to your own laptop. Below is an example configuration for this wired setup:
```python
@RobotConfig.register_subclass("lekiwi")
@dataclass
class LeKiwiRobotConfig(RobotConfig):
    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
    # the number of motors in your follower arms.
    max_relative_target: int | None = None

    # Network Configuration
    ip: str = "127.0.0.1"
    port: int = 5555
    video_port: int = 5556

    cameras: dict[str, CameraConfig] = field(
        default_factory=lambda: {
            "front": OpenCVCameraConfig(
                camera_index=0, fps=30, width=640, height=480, rotation=90
            ),
            "wrist": OpenCVCameraConfig(
                camera_index=1, fps=30, width=640, height=480, rotation=180
            ),
        }
    )

    calibration_dir: str = ".cache/calibration/lekiwi"

    leader_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
                port="/dev/tty.usbmodem585A0077581",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                },
            ),
        }
    )

    follower_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
                port="/dev/tty.usbmodem58760431061",
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                    "left_wheel": (7, "sts3215"),
                    "back_wheel": (8, "sts3215"),
                    "right_wheel": (9, "sts3215"),
                },
            ),
        }
    )

    teleop_keys: dict[str, str] = field(
        default_factory=lambda: {
            # Movement
            "forward": "w",
            "backward": "s",
            "left": "a",
            "right": "d",
            "rotate_left": "z",
            "rotate_right": "x",
            # Speed control
            "speed_up": "r",
            "speed_down": "f",
            # quit teleop
            "quit": "q",
        }
    )

    mock: bool = False
```
@@ -272,6 +379,9 @@ python lerobot/scripts/control_robot.py \
  --control.arms='["main_follower"]'
```

+### Wired version
+If you have the **wired** LeKiwi version please run all commands including this calibration command on your laptop.
+
### Calibrate leader arm
Then to calibrate the leader arm (which is attached to the laptop/pc). You will need to move the leader arm to these positions sequentially:

@@ -324,7 +434,10 @@ You should see on your laptop something like this: ```[INFO] Connected to remote
| F | Decrease speed |

> [!TIP]
-> If you use a different keyboard you can change the keys for each commmand in the [`LeKiwiRobotConfig`](../lerobot/common/robot_devices/robots/configs.py).
+> If you use a different keyboard you can change the keys for each command in the [`LeKiwiRobotConfig`](../lerobot/common/robot_devices/robots/configs.py).
+
+### Wired version
+If you have the **wired** LeKiwi version please run all commands including both these teleoperation commands on your laptop.

## Troubleshoot communication

@@ -364,6 +477,13 @@ Make sure the configuration file on both your laptop/pc and the Raspberry Pi is
# G. Record a dataset
Once you're familiar with teleoperation, you can record your first dataset with LeKiwi.

+To start the program on LeKiwi, SSH into your Raspberry Pi, and run `conda activate lerobot` and this script:
+```bash
+python lerobot/scripts/control_robot.py \
+  --robot.type=lekiwi \
+  --control.type=remote_robot
+```
+
If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
@@ -374,8 +494,7 @@ Store your Hugging Face repository name in a variable to run these commands:
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```

-Record 2 episodes and upload your dataset to the hub:
+On your laptop then run this command to record 2 episodes and upload your dataset to the hub:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=lekiwi \
@@ -393,6 +512,9 @@ python lerobot/scripts/control_robot.py \

Note: You can resume recording by adding `--control.resume=true`.

+### Wired version
+If you have the **wired** LeKiwi version please run all commands including both these record dataset commands on your laptop.
+
# H. Visualize a dataset

If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
@@ -2,7 +2,7 @@ This tutorial explains how to use [Moss v1](https://github.com/jess-moss/moss-ro

## Source the parts

-Follow this [README](https://github.com/jess-moss/moss-robot-arms). It contains the bill of materials, with link to source the parts, as well as the instructions to 3D print the parts, and advices if it's your first time printing or if you don't own a 3D printer already.
+Follow this [README](https://github.com/jess-moss/moss-robot-arms). It contains the bill of materials with link to source the parts, as well as the instructions to 3D print the parts and advice if it's your first time printing or if you don't own a 3D printer already.

**Important**: Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.
@@ -85,7 +85,7 @@ def main():
    done = False
    while not done:
        for batch in dataloader:
-            batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()}
+            batch = {k: (v.to(device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
            loss, _ = policy.forward(batch)
            loss.backward()
            optimizer.step()
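The reason for this change: once a batch contains non-tensor entries (e.g. a natural-language task string), calling `.to(device)` on every value raises `AttributeError`. A minimal self-contained illustration of the two patterns:

```python
import torch

batch = {"observation.state": torch.zeros(2, 6), "task": "pick up the cube"}
device = "cpu"

# Old pattern: fails, since str has no `.to` method
# batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()}

# New pattern: move only the tensors, pass everything else through unchanged
batch = {k: (v.to(device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
```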
@@ -398,7 +398,7 @@ And here are the corresponding positions for the leader arm:

You can watch a [video tutorial of the calibration procedure](https://youtu.be/8drnU9uRY24) for more details.

-During calibration, we count the number of full 360-degree rotations your motors have made since they were first used. That's why we ask yo to move to this arbitrary "zero" position. We don't actually "set" the zero position, so you don't need to be accurate. After calculating these "offsets" to shift the motor values around 0, we need to assess the rotation direction of each motor, which might differ. That's why we ask you to rotate all motors to roughly 90 degrees, to mesure if the values changed negatively or positively.
+During calibration, we count the number of full 360-degree rotations your motors have made since they were first used. That's why we ask you to move to this arbitrary "zero" position. We don't actually "set" the zero position, so you don't need to be accurate. After calculating these "offsets" to shift the motor values around 0, we need to assess the rotation direction of each motor, which might differ. That's why we ask you to rotate all motors to roughly 90 degrees, to measure if the values changed negatively or positively.

Finally, the rest position ensures that the follower and leader arms are roughly aligned after calibration, preventing sudden movements that could damage the motors when starting teleoperation.
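To make the offset and rotation-direction logic concrete, here is a small illustrative sketch (assuming raw encoder ticks with 2048 as the mid position set during motor configuration; this is not the actual LeRobot calibration code):

```python
import numpy as np

def sketch_calibration(zero_pos: np.ndarray, rotated_pos: np.ndarray):
    """`zero_pos`: raw ticks at the arbitrary zero pose;
    `rotated_pos`: raw ticks after rotating every joint to roughly +90 degrees."""
    offsets = zero_pos - 2048        # shift readings so the zero pose maps near 0
    deltas = rotated_pos - zero_pos
    drive_modes = np.sign(deltas)    # +1 or -1: the rotation direction of each motor
    return offsets, drive_modes

# Example with made-up readings for a 6-motor arm:
offsets, drive_modes = sketch_calibration(
    np.array([2050, 2000, 2100, 2048, 1990, 2060]),
    np.array([3070, 980, 3130, 3060, 970, 3085]),
)
```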
@@ -663,7 +663,7 @@ camera.disconnect()

**Instantiate your robot with cameras**

-Additionaly, you can set up your robot to work with your cameras.
+Additionally, you can set up your robot to work with your cameras.

Modify the following Python code with the appropriate camera names and configurations:
```python
@@ -825,8 +825,8 @@ It contains:
- `dtRlead: 5.06 (197.5hz)` which is the delta time of reading the present position of the leader arm.
- `dtWfoll: 0.25 (3963.7hz)` which is the delta time of writing the goal position on the follower arm ; writing is asynchronous so it takes less time than reading.
- `dtRfoll: 6.22 (160.7hz)` which is the delta time of reading the present position on the follower arm.
-- `dtRlaptop:32.57 (30.7hz) ` which is the delta time of capturing an image from the laptop camera in the thread running asynchrously.
-- `dtRphone:33.84 (29.5hz)` which is the delta time of capturing an image from the phone camera in the thread running asynchrously.
+- `dtRlaptop:32.57 (30.7hz) ` which is the delta time of capturing an image from the laptop camera in the thread running asynchronously.
+- `dtRphone:33.84 (29.5hz)` which is the delta time of capturing an image from the phone camera in the thread running asynchronously.
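Each entry above is just a measured delta time in milliseconds together with its reciprocal expressed as a frequency. A minimal sketch of how such a metric can be computed (illustrative, not the actual logging code):

```python
import time

start = time.perf_counter()
time.sleep(0.005)  # stand-in for e.g. reading the leader arm's present position
dt_ms = (time.perf_counter() - start) * 1000
print(f"dtRlead: {dt_ms:.2f} ({1000 / dt_ms:.1f}hz)")
```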
Troubleshooting:
- On Linux, if you encounter a hanging issue when using cameras, uninstall opencv and re-install it with conda:

@@ -846,7 +846,7 @@ At the end of data recording, your dataset will be uploaded on your Hugging Face
echo https://huggingface.co/datasets/${HF_USER}/koch_test
```

-### b. Advices for recording dataset
+### b. Advice for recording dataset

Once you're comfortable with data recording, it's time to create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings.

@@ -98,7 +98,7 @@ python lerobot/scripts/control_robot.py \
```
This is equivalent to running `stretch_robot_home.py`

-> **Note:** If you run any of the LeRobot scripts below and Stretch is not poperly homed, it will automatically home/calibrate first.
+> **Note:** If you run any of the LeRobot scripts below and Stretch is not properly homed, it will automatically home/calibrate first.

**Teleoperate**
Before trying teleoperation, you need activate the gamepad controller by pressing the middle button. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/hello_robot/#gamepad-teleoperation).

@@ -172,10 +172,10 @@ python lerobot/scripts/control_robot.py \
As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. `outputs/train/eval_act_aloha_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_aloha_test`).
2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_aloha_test`).
-3. We use `--control.num_image_writer_processes=1` instead of the default value (`0`). On our computer, using a dedicated process to write images from the 4 cameras on disk allows to reach constent 30 fps during inference. Feel free to explore different values for `--control.num_image_writer_processes`.
+3. We use `--control.num_image_writer_processes=1` instead of the default value (`0`). On our computer, using a dedicated process to write images from the 4 cameras on disk allows to reach constant 30 fps during inference. Feel free to explore different values for `--control.num_image_writer_processes`.

## More

-Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth explaination.
+Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth explanation.

If you have any question or need help, please reach out on Discord in the channel `#aloha-arm`.
@@ -92,7 +92,7 @@ def compute_episode_stats(episode_data: dict[str, list[str] | np.ndarray], featu
        axes_to_reduce = (0, 2, 3)  # keep channel dim
        keepdims = True
    else:
-        ep_ft_array = data  # data is alreay a np.ndarray
+        ep_ft_array = data  # data is already a np.ndarray
        axes_to_reduce = 0  # compute stats over the first axis
        keepdims = data.ndim == 1  # keep as np.array
@@ -13,6 +13,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import contextlib
import logging
import shutil
from pathlib import Path
@@ -27,6 +28,7 @@ import torch.utils
from datasets import concatenate_datasets, load_dataset
from huggingface_hub import HfApi, snapshot_download
from huggingface_hub.constants import REPOCARD_NAME
+from huggingface_hub.errors import RevisionNotFoundError

from lerobot.common.constants import HF_LEROBOT_HOME
from lerobot.common.datasets.compute_stats import aggregate_stats, compute_episode_stats
@@ -226,7 +228,7 @@ class LeRobotDatasetMetadata:

    def add_task(self, task: str):
        """
-        Given a task in natural language, add it to the dictionnary of tasks.
+        Given a task in natural language, add it to the dictionary of tasks.
        """
        if task in self.task_to_task_index:
            raise ValueError(f"The task '{task}' already exists and can't be added twice.")
@@ -389,7 +391,7 @@ class LeRobotDataset(torch.utils.data.Dataset):
        - info contains various information about the dataset like shapes, keys, fps etc.
        - stats stores the dataset statistics of the different modalities for normalization
        - tasks contains the prompts for each task of the dataset, which can be used for
-          task-conditionned training.
+          task-conditioned training.
        - hf_dataset (from datasets.Dataset), which will read any values from parquet files.
        - videos (optional) from which frames are loaded to be synchronous with data from parquet files.
@@ -517,6 +519,7 @@ class LeRobotDataset(torch.utils.data.Dataset):
        branch: str | None = None,
        tags: list | None = None,
        license: str | None = "apache-2.0",
+        tag_version: bool = True,
        push_videos: bool = True,
        private: bool = False,
        allow_patterns: list[str] | str | None = None,
@@ -562,6 +565,11 @@ class LeRobotDataset(torch.utils.data.Dataset):
        )
        card.push_to_hub(repo_id=self.repo_id, repo_type="dataset", revision=branch)

+        if tag_version:
+            with contextlib.suppress(RevisionNotFoundError):
+                hub_api.delete_tag(self.repo_id, tag=CODEBASE_VERSION, repo_type="dataset")
+            hub_api.create_tag(self.repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset")
+
    def pull_from_repo(
        self,
        allow_patterns: list[str] | str | None = None,
@@ -848,7 +856,7 @@ class LeRobotDataset(torch.utils.data.Dataset):
        episode_buffer["index"] = np.arange(self.meta.total_frames, self.meta.total_frames + episode_length)
        episode_buffer["episode_index"] = np.full((episode_length,), episode_index)

-        # Add new tasks to the tasks dictionnary
+        # Add new tasks to the tasks dictionary
        for task in episode_tasks:
            task_index = self.meta.get_task_index(task)
            if task_index is None:
@@ -152,7 +152,7 @@ def download_raw(raw_dir: Path, repo_id: str):
        stacklevel=1,
    )

-    # Send warning if raw_dir isn't well formated
+    # Send warning if raw_dir isn't well formatted
    if raw_dir.parts[-2] != user_id or raw_dir.parts[-1] != dataset_id:
        warnings.warn(
            f"""`raw_dir` ({raw_dir}) doesn't contain a community or user id `/` the name of the dataset that

@@ -68,9 +68,9 @@ def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episod
        modality_df,
        on="timestamp_utc",
        # "nearest" is the best option over "backward", since the latter can desynchronizes camera timestamps by
-        # matching timestamps that are too far appart, in order to fit the backward constraints. It's not the case for "nearest".
+        # matching timestamps that are too far apart, in order to fit the backward constraints. It's not the case for "nearest".
        # However, note that "nearest" might synchronize the reference camera with other cameras on slightly future timestamps.
-        # are too far appart.
+        # are too far apart.
        direction="nearest",
        tolerance=pd.Timedelta(f"{1 / fps} seconds"),
    )
@@ -126,7 +126,7 @@ def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episod
    videos_dir.parent.mkdir(parents=True, exist_ok=True)
    videos_dir.symlink_to((raw_dir / "videos").absolute())

-    # sanity check the video paths are well formated
+    # sanity check the video paths are well formatted
    for key in df:
        if "observation.images." not in key:
            continue
@@ -143,7 +143,7 @@ def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episod
        # it is the case for video_frame dictionary = [{"path": ..., "timestamp": ...}]
        data_dict[key] = [video_frame[0] for video_frame in df[key].values]

-        # sanity check the video path is well formated
+        # sanity check the video path is well formatted
        video_path = videos_dir.parent / data_dict[key][0]["path"]
        if not video_path.exists():
            raise ValueError(f"Video file not found in {video_path}")

@@ -17,7 +17,7 @@
For all datasets in the RLDS format.
For https://github.com/google-deepmind/open_x_embodiment (OPENX) datasets.

-NOTE: You need to install tensorflow and tensorflow_datsets before running this script.
+NOTE: You need to install tensorflow and tensorflow_datasets before running this script.

Example:
    python lerobot/scripts/push_dataset_to_hub.py \
@@ -31,6 +31,7 @@ import packaging.version
import torch
from datasets.table import embed_table_storage
from huggingface_hub import DatasetCard, DatasetCardData, HfApi
+from huggingface_hub.errors import RevisionNotFoundError
from PIL import Image as PILImage
from torchvision import transforms

@@ -222,7 +223,7 @@ def load_episodes(local_dir: Path) -> dict:


def write_episode_stats(episode_index: int, episode_stats: dict, local_dir: Path):
-    # We wrap episode_stats in a dictionnary since `episode_stats["episode_index"]`
+    # We wrap episode_stats in a dictionary since `episode_stats["episode_index"]`
    # is a dictionary of stats and not an integer.
    episode_stats = {"episode_index": episode_index, "stats": serialize_dict(episode_stats)}
    append_jsonlines(episode_stats, local_dir / EPISODES_STATS_PATH)
@@ -325,6 +326,19 @@ def get_safe_version(repo_id: str, version: str | packaging.version.Version) ->
    )
    hub_versions = get_repo_versions(repo_id)

+    if not hub_versions:
+        raise RevisionNotFoundError(
+            f"""Your dataset must be tagged with a codebase version.
+            Assuming _version_ is the codebase_version value in the info.json, you can run this:
+            ```python
+            from huggingface_hub import HfApi
+
+            hub_api = HfApi()
+            hub_api.create_tag("{repo_id}", tag="_version_", repo_type="dataset")
+            ```
+            """
+        )
+
    if target_version in hub_versions:
        return f"v{target_version}"

@@ -445,10 +459,10 @@ def get_episode_data_index(
    if episodes is not None:
        episode_lengths = {ep_idx: episode_lengths[ep_idx] for ep_idx in episodes}

-    cumulative_lenghts = list(accumulate(episode_lengths.values()))
+    cumulative_lengths = list(accumulate(episode_lengths.values()))
    return {
-        "from": torch.LongTensor([0] + cumulative_lenghts[:-1]),
-        "to": torch.LongTensor(cumulative_lenghts),
+        "from": torch.LongTensor([0] + cumulative_lengths[:-1]),
+        "to": torch.LongTensor(cumulative_lengths),
    }
@@ -31,6 +31,7 @@ from lerobot.common.robot_devices.robots.configs import AlohaRobotConfig

LOCAL_DIR = Path("data/")

+# spellchecker:off
ALOHA_MOBILE_INFO = {
    "robot_config": AlohaRobotConfig(),
    "license": "mit",
@@ -856,6 +857,7 @@ DATASETS = {
    }""").lstrip(),
    },
}
+# spellchecker:on


def batch_convert():
@@ -17,7 +17,7 @@
"""
This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 1.6 to
2.0. You will be required to provide the 'tasks', which is a short but accurate description in plain English
-for each of the task performed in the dataset. This will allow to easily train models with task-conditionning.
+for each of the task performed in the dataset. This will allow to easily train models with task-conditioning.

We support 3 different scenarios for these tasks (see instructions below):
1. Single task dataset: all episodes of your dataset have the same single task.

@@ -57,7 +57,7 @@ def convert_dataset(
    dataset.meta.info["codebase_version"] = CODEBASE_VERSION
    write_info(dataset.meta.info, dataset.root)

-    dataset.push_to_hub(branch=branch, allow_patterns="meta/")
+    dataset.push_to_hub(branch=branch, tag_version=False, allow_patterns="meta/")

    # delete old stats.json file
    if (dataset.root / STATS_PATH).is_file:

@@ -73,7 +73,7 @@ def decode_video_frames_torchvision(
    last_ts = max(timestamps)

    # access closest key frame of the first requested frame
-    # Note: closest key frame timestamp is usally smaller than `first_ts` (e.g. key frame can be the first frame of the video)
+    # Note: closest key frame timestamp is usually smaller than `first_ts` (e.g. key frame can be the first frame of the video)
    # for details on what `seek` is doing see: https://pyav.basswood-io.com/docs/stable/api/container.html?highlight=inputcontainer#av.container.InputContainer.seek
    reader.seek(first_ts, keyframes_only=keyframes_only)

@@ -37,12 +37,12 @@ def make_env(cfg: EnvConfig, n_envs: int = 1, use_async_envs: bool = False) -> g
    Args:
        cfg (EnvConfig): the config of the environment to instantiate.
        n_envs (int, optional): The number of parallelized env to return. Defaults to 1.
-        use_async_envs (bool, optional): Wether to return an AsyncVectorEnv or a SyncVectorEnv. Defaults to
+        use_async_envs (bool, optional): Whether to return an AsyncVectorEnv or a SyncVectorEnv. Defaults to
            False.

    Raises:
        ValueError: if n_envs < 1
-        ModuleNotFoundError: If the requested env package is not intalled
+        ModuleNotFoundError: If the requested env package is not installed

    Returns:
        gym.vector.VectorEnv: The parallelized gym.env instance.

@@ -64,7 +64,7 @@ class ACTConfig(PreTrainedConfig):
        output_normalization_modes: Similar dictionary as `normalize_input_modes`, but to unnormalize to the
            original scale. Note that this is also used for normalizing the training targets.
        vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
-        pretrained_backbone_weights: Pretrained weights from torchvision to initalize the backbone.
+        pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
            `None` means no pretrained weights.
        replace_final_stride_with_dilation: Whether to replace the ResNet's final 2x2 stride with a dilated
            convolution.
409
lerobot/common/policies/auto/configuration_auto.py
Normal file
@@ -0,0 +1,409 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib
import logging
import os
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, Optional, Type, Union

from lerobot.common.policies.pretrained import PreTrainedPolicy
from lerobot.configs.policies import PreTrainedConfig

logger = logging.getLogger(__name__)

# Constants
IMPORT_PATHS = ["lerobot.common.policies.{0}.configuration_{0}"]

POLICY_IMPORT_PATHS = ["lerobot.common.policies.{0}.modeling_{0}"]


def policy_type_to_module_name(policy_type: str) -> str:
    """
    Convert policy type to module name format.

    Args:
        policy_type: The policy type identifier (e.g. 'lerobot/vqbet-pusht')

    Returns:
        str: Normalized module name (e.g. 'vqbet')

    Examples:
        >>> policy_type_to_module_name("lerobot/vqbet-pusht")
        'vqbet'
    """
    # TODO(Steven): This is a temporary solution, we should have a more robust way to handle this
    return policy_type.replace("lerobot/", "").replace("-", "_").replace("_", "").replace("pusht", "")


class _LazyPolicyConfigMapping(OrderedDict):
    def __init__(self, mapping: Dict[str, str]):
        self._mapping = mapping
        self._extra_content: Dict[str, Any] = {}
        self._modules: Dict[str, Any] = {}

    def __getitem__(self, key: str) -> Any:
        if key in self._extra_content:
            return self._extra_content[key]
        if key not in self._mapping:
            raise KeyError(f"Policy type '{key}' not found in mapping")

        value = self._mapping[key]
        module_name = policy_type_to_module_name(key)

        for import_path in IMPORT_PATHS:
            try:
                if key not in self._modules:
                    self._modules[key] = importlib.import_module(import_path.format(module_name))
                    logger.debug(f"Config module: {module_name} imported")
                if hasattr(self._modules[key], value):
                    return getattr(self._modules[key], value)
            except ImportError:
                continue

        raise ImportError(f"Could not find configuration class {value} for policy type {key}")

    def keys(self):
        return list(self._mapping.keys()) + list(self._extra_content.keys())

    def values(self):
        return [self[k] for k in self._mapping] + list(self._extra_content.values())

    def items(self):
        return [(k, self[k]) for k in self._mapping] + list(self._extra_content.items())

    def __iter__(self):
        return iter(list(self._mapping.keys()) + list(self._extra_content.keys()))

    def __contains__(self, item):
        return item in self._mapping or item in self._extra_content

    def register(self, key, value, exist_ok=False):
        """
        Register a new configuration in this mapping.
        """
        if key in self._mapping and not exist_ok:
            raise ValueError(f"'{key}' is already used by a Policy Config, pick another name.")
        self._extra_content[key] = value


POLICY_CONFIG_NAMES_MAPPING = OrderedDict(
    [
        ("vqbet", "VQBeTConfig"),
        ("lerobot/vqbet_pusht", "VQBeTConfig"),
    ]
)

POLICY_CONFIG_MAPPING = _LazyPolicyConfigMapping(POLICY_CONFIG_NAMES_MAPPING)


class _LazyPolicyMapping(OrderedDict):
    """
    A dictionary that lazily loads its values when they are requested.
    """

    def __init__(self, mapping: Dict[str, str]):
        self._mapping = mapping
        self._extra_content: Dict[str, Type[PreTrainedPolicy]] = {}
        self._modules: Dict[str, Any] = {}
        self._config_mapping: Dict[Type[PreTrainedConfig], Type[PreTrainedPolicy]] = {}
        self._initialized_types: set[str] = set()

    def _lazy_init_for_type(self, policy_type: str) -> None:
        """Lazily initialize mappings for a policy type if not already done."""
        if policy_type not in self._initialized_types:
            try:
                config_class = POLICY_CONFIG_MAPPING[policy_type]
                self._config_mapping[config_class] = self[policy_type]
                self._initialized_types.add(policy_type)
            except (ImportError, AttributeError, KeyError) as e:
                logger.warning(f"Could not automatically map config for policy type {policy_type}: {str(e)}")

    def __getitem__(self, key: str) -> Type[PreTrainedPolicy]:
        """Get a policy class by key with lazy loading."""
        if key in self._extra_content:
            return self._extra_content[key]
        if key not in self._mapping:
            raise KeyError(f"Policy type '{key}' not found in mapping")

        value = self._mapping[key]
        module_name = policy_type_to_module_name(key)

        for import_path in POLICY_IMPORT_PATHS:
            try:
                if key not in self._modules:
                    self._modules[key] = importlib.import_module(import_path.format(module_name))
                    logger.debug(
                        f"Policy module: {module_name} imported from {import_path.format(module_name)}"
                    )
                if hasattr(self._modules[key], value):
                    return getattr(self._modules[key], value)
            except ImportError:
                continue

        raise ImportError(
            f"Could not find policy class {value} for policy type {key}. "
            f"Tried paths: {[p.format(module_name) for p in POLICY_IMPORT_PATHS]}"
        )

    def register(
        self,
        key: str,
        value: Type[PreTrainedPolicy],
        config_class: Type[PreTrainedConfig],
        exist_ok: bool = False,
    ) -> None:
        """Register a new policy class with its configuration class."""
        if not isinstance(key, str):
            raise TypeError(f"Key must be a string, got {type(key)}")
        if not issubclass(value, PreTrainedPolicy):
            raise TypeError(f"Value must be a PreTrainedPolicy subclass, got {type(value)}")
        if not issubclass(config_class, PreTrainedConfig):
            raise TypeError(f"Config class must be a PreTrainedConfig subclass, got {type(config_class)}")

        if key in self._mapping and not exist_ok:
            raise ValueError(f"'{key}' is already used by a Policy, pick another name.")
        self._extra_content[key] = value
        self._config_mapping[config_class] = value

    def get_policy_for_config(self, config_class: Type[PreTrainedConfig]) -> Type[PreTrainedPolicy]:
        """Get the policy class associated with a config class."""
        # First check direct config class mapping
        if config_class in self._config_mapping:
            return self._config_mapping[config_class]

        # Try to find by policy type
        try:
            policy_type = config_class.get_type_str()
            # Check extra content first
            if policy_type in self._extra_content:
                return self._extra_content[policy_type]

            # Then check standard mapping
            if policy_type in self._mapping:
                self._lazy_init_for_type(policy_type)
                if config_class in self._config_mapping:
                    return self._config_mapping[config_class]
                return self[policy_type]
        except AttributeError:
            pass

        raise ValueError(
            f"No policy class found for config class {config_class.__name__}. "
            f"Available types: {list(self._mapping.keys()) + list(self._extra_content.keys())}"
        )


POLICY_NAMES_MAPPING = OrderedDict(
    [
        ("vqbet", "VQBeTPolicy"),
        ("lerobot/vqbet_pusht", "VQBeTPolicy"),
    ]
)

POLICY_MAPPING = _LazyPolicyMapping(POLICY_NAMES_MAPPING)


class AutoPolicyConfig:
    """
    Factory class for automatically loading policy configurations.

    This class provides methods to:
    - Load pre-trained policy configurations from local files or the Hub
    - Register new policy types dynamically
    - Create policy configurations for specific policy types
    """

    def __init__(self):
        raise OSError("AutoPolicyConfig not meant to be instantiated directly")

    @classmethod
    def for_policy(cls, policy_type: str, *args, **kwargs) -> PreTrainedConfig:
        """Create a new configuration instance for the specified policy type."""
        if policy_type in POLICY_CONFIG_MAPPING:
            config_class = POLICY_CONFIG_MAPPING[policy_type]
            return config_class(*args, **kwargs)
        raise ValueError(
            f"Unrecognized policy identifier: {policy_type}. Should contain one of {', '.join(POLICY_CONFIG_MAPPING.keys())}"
        )

    @staticmethod
    def register(policy_type, config, exist_ok=False):
        """
        Register a new configuration for this class.

        Args:
            policy_type (`str`): The policy type like "act" or "pi0".
            config ([`PreTrainedConfig`]): The config to register.
        """
        if issubclass(config, PreTrainedConfig) and config.get_type_str() != policy_type:
            raise ValueError(
                "The config you are passing has a `policy_type` attribute that is not consistent with the policy type "
                f"you passed (config has {config.type} and you passed {policy_type}. Fix one of those so they "
                "match!"
            )
        POLICY_CONFIG_MAPPING.register(policy_type, config, exist_ok=exist_ok)

    @classmethod
    def from_pretrained(
        cls, pretrained_policy_config_name_or_path: Union[str, Path], **kwargs
    ) -> PreTrainedConfig:
        """
        Instantiate a PreTrainedConfig from a pre-trained policy configuration.

        Args:
            pretrained_policy_config_name_or_path (`str` or `Path`):
                Can be either:
                    - A string with the `policy_type` of a pre-trained policy configuration listed on
                      the Hub or locally (e.g., 'act')
                    - A path to a `directory` containing a configuration file saved
                      using [`~PreTrainedConfig.save_pretrained`].
                    - A path or url to a saved configuration JSON `file`.
            **kwargs: Additional kwargs passed to PreTrainedConfig.from_pretrained()

        Returns:
            [`PreTrainedConfig`]: The configuration object instantiated from that pre-trained policy config.
        """
        if os.path.isdir(pretrained_policy_config_name_or_path):
            # Load from local directory
            config_dict = PreTrainedConfig.from_pretrained(pretrained_policy_config_name_or_path, **kwargs)
            policy_type = config_dict.type
        elif os.path.isfile(pretrained_policy_config_name_or_path):
            # Load from local file
            config_dict = PreTrainedConfig.from_pretrained(pretrained_policy_config_name_or_path, **kwargs)
            policy_type = config_dict.type
        else:
            # Assume it's a policy_type identifier
            policy_type = pretrained_policy_config_name_or_path

        if policy_type not in POLICY_CONFIG_MAPPING:
            raise ValueError(
                f"Unrecognized policy type {policy_type}. "
                f"Should be one of {', '.join(POLICY_CONFIG_MAPPING.keys())}"
            )

        config_class = POLICY_CONFIG_MAPPING[policy_type]
        return config_class.from_pretrained(pretrained_policy_config_name_or_path, **kwargs)


class AutoPolicy:
    """
    Factory class that allows instantiating policy models from configurations.

    This class provides methods to:
    - Load pre-trained policies from configurations
    - Register new policy types dynamically
    - Create policy instances for specific configurations
    """

    def __init__(self):
        raise OSError("AutoPolicy not meant to be instantiated directly")

    @classmethod
    def from_config(cls, config: PreTrainedConfig, **kwargs) -> PreTrainedPolicy:
        """Instantiate a policy from a configuration."""
        policy_class = POLICY_MAPPING.get_policy_for_config(type(config))
        return policy_class(config, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        pretrained_policy_name_or_path: Union[str, Path],
        *,
        config: Optional[PreTrainedConfig] = None,
        **kwargs,
    ) -> PreTrainedPolicy:
        """
        Instantiate a pre-trained policy from a configuration.

        Args:
            pretrained_policy_name_or_path: Path to pretrained weights or model identifier
            config: Optional configuration for the policy
            **kwargs: Additional arguments to pass to from_pretrained()
        """
        if config is None:
            config = AutoPolicyConfig.from_pretrained(pretrained_policy_name_or_path)

        if isinstance(config, str):
            config = AutoPolicyConfig.from_pretrained(config)

        policy_class = POLICY_MAPPING.get_policy_for_config(config)
        return policy_class.from_pretrained(pretrained_policy_name_or_path, config=config, **kwargs)

    @staticmethod
    def register(
        config_class: Type[PreTrainedConfig], policy_class: Type[PreTrainedPolicy], exist_ok: bool = False
    ):
        """
        Register a new policy class for a configuration class.

        Args:
            config_class: The configuration class
            policy_class: The policy class to register
            exist_ok: Whether to allow overwriting existing registrations
        """
        POLICY_MAPPING.register(config_class.get_type_str(), policy_class, config_class, exist_ok=exist_ok)


def main():
    """Test the AutoPolicy and AutoPolicyConfig functionality."""

    def test_error_cases():
        """Test error handling"""
        try:
            AutoPolicyConfig()
        except OSError as e:
            assert "not meant to be instantiated directly" in str(e)
        try:
            AutoPolicy()
        except OSError as e:
            assert "not meant to be instantiated directly" in str(e)

        # try:
        #     AutoPolicy.from_config("invalid_config")
        # except ValueError as e:
        #     assert "Unrecognized policy identifier" in str(e)

    logging.basicConfig(level=logging.DEBUG)

    # Test built-in policy loading
    # config = AutoPolicyConfig.from_pretrained("lerobot/vqbet_pusht")
    config = AutoPolicyConfig.for_policy("vqbet")
    policy = AutoPolicy.from_config(config)

    from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig
    from lerobot.common.policies.vqbet.modeling_vqbet import VQBeTPolicy

    assert isinstance(config, VQBeTConfig)
    assert isinstance(policy, VQBeTPolicy)

    # Test policy registration
    from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig
    from lerobot.common.policies.tdmpc.modeling_tdmpc import TDMPCPolicy

    AutoPolicyConfig.register("tdmpc", TDMPCConfig)
    AutoPolicy.register(TDMPCConfig, TDMPCPolicy)

    my_new_config = AutoPolicyConfig.for_policy("tdmpc")
    my_new_policy = AutoPolicy.from_config(my_new_config)
    assert isinstance(my_new_config, TDMPCConfig)
    assert isinstance(my_new_policy, TDMPCPolicy)

    # Run error case tests
    test_error_cases()


if __name__ == "__main__":
    main()
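The new file gives LeRobot a transformers-style auto-class layer: string keys resolve to config classes through the lazy mappings, and config classes resolve to policy classes. A minimal sketch of the intended call flow, mirroring what `main()` above already exercises (assumes lerobot is installed so the vqbet modules can be lazily imported):

```python
# Sketch only: same flow as main() above.
from lerobot.common.policies.auto.configuration_auto import AutoPolicy, AutoPolicyConfig

config = AutoPolicyConfig.for_policy("vqbet")  # lazy-imports configuration_vqbet on first access
policy = AutoPolicy.from_config(config)        # resolves VQBeTPolicy via get_policy_for_config
```

Third-party policies follow the registration path shown in `main()`: register the config with `AutoPolicyConfig.register` and the config/policy pair with `AutoPolicy.register`, after which the type behaves like a built-in.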
@@ -68,7 +68,7 @@ class DiffusionConfig(PreTrainedConfig):
         within the image size. If None, no cropping is done.
     crop_is_random: Whether the crop should be random at training time (it's always a center crop in eval
         mode).
-    pretrained_backbone_weights: Pretrained weights from torchvision to initalize the backbone.
+    pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
         `None` means no pretrained weights.
     use_group_norm: Whether to replace batch normalization with group normalization in the backbone.
         The group sizes are set to be about 16 (to be precise, feature_dim // 16).
@@ -99,7 +99,7 @@ class DiffusionConfig(PreTrainedConfig):
     num_inference_steps: Number of reverse diffusion steps to use at inference time (steps are evenly
         spaced). If not provided, this defaults to be the same as `num_train_timesteps`.
     do_mask_loss_for_padding: Whether to mask the loss when there are copy-padded actions. See
-        `LeRobotDataset` and `load_previous_and_future_frames` for mor information. Note, this defaults
+        `LeRobotDataset` and `load_previous_and_future_frames` for more information. Note, this defaults
         to False as the original Diffusion Policy implementation does the same.
     """

@@ -2,7 +2,7 @@
 Convert pi0 parameters from Jax to Pytorch

 Follow [README of openpi](https://github.com/Physical-Intelligence/openpi) to create a new environment
-and install the required librairies.
+and install the required libraries.

 ```bash
 cd ~/code/openpi
@@ -313,7 +313,7 @@ class PI0Policy(PreTrainedPolicy):
         state = self.prepare_state(batch)
         lang_tokens, lang_masks = self.prepare_language(batch)
         actions = self.prepare_action(batch)
-        actions_is_pad = batch.get("actions_id_pad")
+        actions_is_pad = batch.get("actions_is_pad")

         loss_dict = {}
         losses = self.model.forward(images, img_masks, lang_tokens, lang_masks, state, actions, noise, time)
@@ -76,7 +76,7 @@ class TDMPCConfig(PreTrainedConfig):
     n_pi_samples: Number of samples to draw from the policy / world model rollout every CEM iteration. Can
         be zero.
     uncertainty_regularizer_coeff: Coefficient for the uncertainty regularization used when estimating
-        trajectory values (this is the λ coeffiecient in eqn 4 of FOWM).
+        trajectory values (this is the λ coefficient in eqn 4 of FOWM).
     n_elites: The number of elite samples to use for updating the gaussian parameters every CEM iteration.
     elite_weighting_temperature: The temperature to use for softmax weighting (by trajectory value) of the
         elites, when updating the gaussian parameters for CEM.
@@ -165,7 +165,7 @@ class TDMPCConfig(PreTrainedConfig):
         """Input validation (not exhaustive)."""
         if self.n_gaussian_samples <= 0:
             raise ValueError(
-                f"The number of guassian samples for CEM should be non-zero. Got `{self.n_gaussian_samples=}`"
+                f"The number of gaussian samples for CEM should be non-zero. Got `{self.n_gaussian_samples=}`"
             )
         if self.normalization_mapping["ACTION"] is not NormalizationMode.MIN_MAX:
             raise ValueError(
@@ -66,7 +66,7 @@ class VQBeTConfig(PreTrainedConfig):
         within the image size. If None, no cropping is done.
     crop_is_random: Whether the crop should be random at training time (it's always a center crop in eval
         mode).
-    pretrained_backbone_weights: Pretrained weights from torchvision to initalize the backbone.
+    pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
         `None` means no pretrained weights.
     use_group_norm: Whether to replace batch normalization with group normalization in the backbone.
         The group sizes are set to be about 16 (to be precise, feature_dim // 16).
@@ -485,7 +485,7 @@ class VQBeTHead(nn.Module):
     def forward(self, x, **kwargs) -> dict:
         # N is the batch size, and T is number of action query tokens, which are process through same GPT
         N, T, _ = x.shape
-        # we calculate N and T side parallely. Thus, the dimensions would be
+        # we calculate N and T side parallelly. Thus, the dimensions would be
         # (batch size * number of action query tokens, action chunk size, action dimension)
         x = einops.rearrange(x, "N T WA -> (N T) WA")
@@ -772,7 +772,7 @@ class VqVae(nn.Module):
     Encoder and decoder are MLPs consisting of an input, output layer, and hidden layer, respectively.
     The vq_layer uses residual VQs.

-    This class contains functions for training the encoder and decoder along with the residual VQ layer (for trainign phase 1),
+    This class contains functions for training the encoder and decoder along with the residual VQ layer (for training phase 1),
     as well as functions to help BeT training part in training phase 2.
     """

@@ -38,7 +38,7 @@ from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig
 This file is part of a VQ-BeT that utilizes code from the following repositories:

 - Vector Quantize PyTorch code is licensed under the MIT License:
-    Origianl source: https://github.com/lucidrains/vector-quantize-pytorch
+    Original source: https://github.com/lucidrains/vector-quantize-pytorch

 - nanoGPT part is an adaptation of Andrej Karpathy's nanoGPT implementation in PyTorch.
     Original source: https://github.com/karpathy/nanoGPT
@@ -289,7 +289,7 @@ class GPT(nn.Module):
 This file is a part for Residual Vector Quantization that utilizes code from the following repository:

 - Phil Wang's vector-quantize-pytorch implementation in PyTorch.
-    Origianl source: https://github.com/lucidrains/vector-quantize-pytorch
+    Original source: https://github.com/lucidrains/vector-quantize-pytorch

 - The vector-quantize-pytorch code is licensed under the MIT License:

@@ -1349,9 +1349,9 @@ class EuclideanCodebook(nn.Module):

         # calculate distributed variance

-        variance_numer = reduce((data - batch_mean) ** 2, "h n d -> h 1 d", "sum")
-        distributed.all_reduce(variance_numer)
-        batch_variance = variance_numer / num_vectors
+        variance_number = reduce((data - batch_mean) ** 2, "h n d -> h 1 d", "sum")
+        distributed.all_reduce(variance_number)
+        batch_variance = variance_number / num_vectors

         self.update_with_decay("batch_variance", batch_variance, self.affine_param_batch_decay)

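For readers outside the VQ code: the renamed tensor (`variance_numer`, i.e. the numerator of the variance estimate, now spelled `variance_number`) holds the sum of squared deviations, which is all-reduced across processes before dividing by the vector count. A standalone numpy sketch of the same math on a single process (shapes are illustrative, not the module's API):

```python
import numpy as np

data = np.random.randn(1, 1024, 64)            # (heads, num_vectors, dim)
num_vectors = data.shape[1]
batch_mean = data.mean(axis=1, keepdims=True)  # einops "h n d -> h 1 d" mean
# Numerator of the variance: summed squared deviations; in the distributed
# version this is what distributed.all_reduce sums over ranks.
variance_numer = ((data - batch_mean) ** 2).sum(axis=1, keepdims=True)
batch_variance = variance_numer / num_vectors
```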
@@ -31,7 +31,7 @@ def make_cameras_from_configs(camera_configs: dict[str, CameraConfig]) -> list[C

             cameras[key] = IntelRealSenseCamera(cfg)
         else:
-            raise ValueError(f"The motor type '{cfg.type}' is not valid.")
+            raise ValueError(f"The camera type '{cfg.type}' is not valid.")

     return cameras

@@ -66,7 +66,7 @@ class RecordControlConfig(ControlConfig):
     private: bool = False
     # Add tags to your dataset on the hub.
     tags: list[str] | None = None
-    # Number of subprocesses handling the saving of frames as PNGs. Set to 0 to use threads only;
+    # Number of subprocesses handling the saving of frames as PNG. Set to 0 to use threads only;
     # set to ≥1 to use subprocesses, each using threads to write images. The best number of processes
     # and threads depends on your system. We recommend 4 threads per camera with 0 processes.
     # If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses.

@@ -242,7 +242,7 @@ class DriveMode(enum.Enum):
 class CalibrationMode(enum.Enum):
     # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180]
     DEGREE = 0
-    # Joints with linear motions (like gripper of Aloha) are experessed in nominal range of [0, 100]
+    # Joints with linear motions (like gripper of Aloha) are expressed in nominal range of [0, 100]
     LINEAR = 1

@@ -610,7 +610,7 @@ class DynamixelMotorsBus:
                 # 0-centered resolution range (e.g. [-2048, 2048] for resolution=4096)
                 values[i] = values[i] / HALF_TURN_DEGREE * (resolution // 2)

-            # Substract the homing offsets to come back to actual motor range of values
+            # Subtract the homing offsets to come back to actual motor range of values
             # which can be arbitrary.
             values[i] -= homing_offset

@@ -221,7 +221,7 @@ class DriveMode(enum.Enum):
 class CalibrationMode(enum.Enum):
     # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180]
     DEGREE = 0
-    # Joints with linear motions (like gripper of Aloha) are experessed in nominal range of [0, 100]
+    # Joints with linear motions (like gripper of Aloha) are expressed in nominal range of [0, 100]
     LINEAR = 1

@@ -591,7 +591,7 @@ class FeetechMotorsBus:
                 # 0-centered resolution range (e.g. [-2048, 2048] for resolution=4096)
                 values[i] = values[i] / HALF_TURN_DEGREE * (resolution // 2)

-            # Substract the homing offsets to come back to actual motor range of values
+            # Subtract the homing offsets to come back to actual motor range of values
             # which can be arbitrary.
             values[i] -= homing_offset

@@ -632,7 +632,7 @@ class FeetechMotorsBus:
                 track["prev"][idx] = values[i]
                 continue

-            # Detect a full rotation occured
+            # Detect a full rotation occurred
             if abs(track["prev"][idx] - values[i]) > 2048:
                 # Position went below 0 and got reset to 4095
                 if track["prev"][idx] < values[i]:
@@ -87,7 +87,7 @@ def run_arm_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type
     # For instance, if the motor rotates 90 degree, and its value is -90 after applying the homing offset, then we know its rotation direction
     # is inverted. However, for the calibration being successful, we need everyone to follow the same target position.
     # Sometimes, there is only one possible rotation direction. For instance, if the gripper is closed, there is only one direction which
-    # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarely rotate clockwise from the point of view
+    # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarily rotate clockwise from the point of view
     # of the previous motor in the kinetic chain.
     print("\nMove arm to rotated target position")
     print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rotated"))
@@ -115,7 +115,7 @@ def run_arm_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type

     # TODO(rcadene): make type of joints (DEGREE or LINEAR) configurable from yaml?
     if robot_type in ["aloha"] and "gripper" in arm.motor_names:
-        # Joints with linear motions (like gripper of Aloha) are experessed in nominal range of [0, 100]
+        # Joints with linear motions (like gripper of Aloha) are expressed in nominal range of [0, 100]
         calib_idx = arm.motor_names.index("gripper")
         calib_mode[calib_idx] = CalibrationMode.LINEAR.name

@@ -443,7 +443,7 @@ def run_arm_manual_calibration(arm: MotorsBus, robot_type: str, arm_name: str, a
     # For instance, if the motor rotates 90 degree, and its value is -90 after applying the homing offset, then we know its rotation direction
     # is inverted. However, for the calibration being successful, we need everyone to follow the same target position.
     # Sometimes, there is only one possible rotation direction. For instance, if the gripper is closed, there is only one direction which
-    # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarely rotate clockwise from the point of view
+    # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarily rotate clockwise from the point of view
     # of the previous motor in the kinetic chain.
     print("\nMove arm to rotated target position")
     print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rotated"))

@@ -44,7 +44,7 @@ class ManipulatorRobot:
 # TODO(rcadene): Implement force feedback
 """This class allows to control any manipulator robot of various number of motors.

-Non exaustive list of robots:
+Non exhaustive list of robots:
 - [Koch v1.0](https://github.com/AlexanderKoch-Koch/low_cost_robot), with and without the wrist-to-elbow expansion, developed
   by Alexander Koch from [Tau Robotics](https://tau-robotics.com)
 - [Koch v1.1](https://github.com/jess-moss/koch-v1-1) developed by Jess Moss
@@ -55,7 +55,7 @@ class ManipulatorRobot:
 robot = ManipulatorRobot(KochRobotConfig())
 ```

-Example of overwritting motors during instantiation:
+Example of overwriting motors during instantiation:
 ```python
 # Defines how to communicate with the motors of the leader and follower arms
 leader_arms = {
@@ -90,7 +90,7 @@ class ManipulatorRobot:
 robot = ManipulatorRobot(robot_config)
 ```

-Example of overwritting cameras during instantiation:
+Example of overwriting cameras during instantiation:
 ```python
 # Defines how to communicate with 2 cameras connected to the computer.
 # Here, the webcam of the laptop and the phone (connected in USB to the laptop)
@@ -348,7 +348,7 @@ class ManipulatorRobot:
             set_operating_mode_(self.follower_arms[name])

             # Set better PID values to close the gap between recorded states and actions
-            # TODO(rcadene): Implement an automatic procedure to set optimial PID values for each motor
+            # TODO(rcadene): Implement an automatic procedure to set optimal PID values for each motor
             self.follower_arms[name].write("Position_P_Gain", 1500, "elbow_flex")
             self.follower_arms[name].write("Position_I_Gain", 0, "elbow_flex")
             self.follower_arms[name].write("Position_D_Gain", 600, "elbow_flex")
@@ -500,7 +500,7 @@ class ManipulatorRobot:
             self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
             self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

-        # Populate output dictionnaries
+        # Populate output dictionaries
         obs_dict, action_dict = {}, {}
         obs_dict["observation.state"] = state
         action_dict["action"] = action
@@ -540,7 +540,7 @@ class ManipulatorRobot:
             self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
             self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

-        # Populate output dictionnaries and format to pytorch
+        # Populate output dictionaries and format to pytorch
         obs_dict = {}
         obs_dict["observation.state"] = state
         for name in self.cameras:

@@ -392,21 +392,19 @@ class MobileManipulator:
         for name in self.leader_arms:
             pos = self.leader_arms[name].read("Present_Position")
             pos_tensor = torch.from_numpy(pos).float()
-            # Instead of pos_tensor.item(), use tolist() to convert the entire tensor to a list
             arm_positions.extend(pos_tensor.tolist())

-        # (The rest of your code for generating wheel commands remains unchanged)
-        x_cmd = 0.0  # m/s forward/backward
-        y_cmd = 0.0  # m/s lateral
+        y_cmd = 0.0  # m/s forward/backward
+        x_cmd = 0.0  # m/s lateral
         theta_cmd = 0.0  # deg/s rotation
         if self.pressed_keys["forward"]:
-            x_cmd += xy_speed
-        if self.pressed_keys["backward"]:
-            x_cmd -= xy_speed
-        if self.pressed_keys["left"]:
             y_cmd += xy_speed
-        if self.pressed_keys["right"]:
+        if self.pressed_keys["backward"]:
             y_cmd -= xy_speed
+        if self.pressed_keys["left"]:
+            x_cmd += xy_speed
+        if self.pressed_keys["right"]:
+            x_cmd -= xy_speed
         if self.pressed_keys["rotate_left"]:
             theta_cmd += theta_speed
         if self.pressed_keys["rotate_right"]:
@@ -584,8 +582,8 @@ class MobileManipulator:
         # Create the body velocity vector [x, y, theta_rad].
         velocity_vector = np.array([x_cmd, y_cmd, theta_rad])

-        # Define the wheel mounting angles with a -90° offset.
-        angles = np.radians(np.array([240, 120, 0]) - 90)
+        # Define the wheel mounting angles (defined from y axis cw)
+        angles = np.radians(np.array([300, 180, 60]))
         # Build the kinematic matrix: each row maps body velocities to a wheel’s linear speed.
         # The third column (base_radius) accounts for the effect of rotation.
         m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])
@@ -641,8 +639,8 @@ class MobileManipulator:
         # Compute each wheel’s linear speed (m/s) from its angular speed.
         wheel_linear_speeds = wheel_radps * wheel_radius

-        # Define the wheel mounting angles with a -90° offset.
-        angles = np.radians(np.array([240, 120, 0]) - 90)
+        # Define the wheel mounting angles (defined from y axis cw)
+        angles = np.radians(np.array([300, 180, 60]))
         m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])

         # Solve the inverse kinematics: body_velocity = M⁻¹ · wheel_linear_speeds.

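The two hunks above change the wheel-mounting-angle convention but keep the same kinematic matrix construction. A self-contained numpy sketch of the forward and inverse mappings these methods implement (the numeric radii are illustrative; the real values come from the robot config):

```python
import numpy as np

base_radius = 0.125  # m, center-to-wheel distance (illustrative)
wheel_radius = 0.05  # m (illustrative)

# Wheel mounting angles, defined from the y axis clockwise (the new convention).
angles = np.radians(np.array([300, 180, 60]))

# Each row maps body velocity [x (m/s), y (m/s), theta (rad/s)] to one wheel's
# linear rim speed; the third column (base_radius) contributes the rotation term.
m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])

body_velocity = np.array([0.2, 0.0, 0.5])         # drive at 0.2 m/s while turning
wheel_linear_speeds = m @ body_velocity           # forward kinematics (first hunk)
wheel_radps = wheel_linear_speeds / wheel_radius  # angular commands for the motors

# Inverse kinematics (second hunk): body_velocity = M^-1 @ wheel_linear_speeds.
recovered = np.linalg.solve(m, wheel_linear_speeds)
assert np.allclose(recovered, body_velocity)
```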
@@ -108,7 +108,7 @@ class StretchRobot(StretchAPI):
             self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
             self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

-        # Populate output dictionnaries
+        # Populate output dictionaries
         obs_dict, action_dict = {}, {}
         obs_dict["observation.state"] = state
         action_dict["action"] = action
@@ -153,7 +153,7 @@ class StretchRobot(StretchAPI):
             self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"]
             self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t

-        # Populate output dictionnaries
+        # Populate output dictionaries
         obs_dict = {}
         obs_dict["observation.state"] = state
         for name in self.cameras:

@@ -17,6 +17,7 @@ import logging
 import os
 import os.path as osp
 import platform
+import subprocess
 from copy import copy
 from datetime import datetime, timezone
 from pathlib import Path
@@ -165,23 +166,31 @@ def capture_timestamp_utc():


 def say(text, blocking=False):
-    # Check if mac, linux, or windows.
-    if platform.system() == "Darwin":
-        cmd = f'say "{text}"'
-        if not blocking:
-            cmd += " &"
-    elif platform.system() == "Linux":
-        cmd = f'spd-say "{text}"'
-        if blocking:
-            cmd += " --wait"
-    elif platform.system() == "Windows":
-        # TODO(rcadene): Make blocking option work for Windows
-        cmd = (
-            'PowerShell -Command "Add-Type -AssemblyName System.Speech; '
-            f"(New-Object System.Speech.Synthesis.SpeechSynthesizer).Speak('{text}')\""
-        )
+    system = platform.system()

-    os.system(cmd)
+    if system == "Darwin":
+        cmd = ["say", text]
+
+    elif system == "Linux":
+        cmd = ["spd-say", text]
+        if blocking:
+            cmd.append("--wait")
+
+    elif system == "Windows":
+        cmd = [
+            "PowerShell",
+            "-Command",
+            "Add-Type -AssemblyName System.Speech; "
+            f"(New-Object System.Speech.Synthesis.SpeechSynthesizer).Speak('{text}')",
+        ]
+
+    else:
+        raise RuntimeError("Unsupported operating system for text-to-speech.")
+
+    if blocking:
+        subprocess.run(cmd, check=True)
+    else:
+        subprocess.Popen(cmd, creationflags=subprocess.CREATE_NO_WINDOW if system == "Windows" else 0)


 def log_say(text, play_sounds, blocking=False):

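The refactor above replaces shell-string `os.system` calls with argv lists passed to `subprocess`. The practical difference: the spoken text travels as a single argument, so quotes or shell metacharacters in it can no longer break or inject into the command. A tiny standalone sketch of the distinction (`echo` stands in for `say`/`spd-say`; on Windows substitute a suitable executable):

```python
import subprocess

# Argv-list form: `text` is one argument, never parsed by a shell.
text = 'say "hello"; rm -rf ~'  # harmless here: printed literally
subprocess.run(["echo", text], check=True)
# The old f'say "{text}"' string form would have handed this to the shell.
```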
@@ -27,7 +27,7 @@ class DatasetConfig:
     # You may provide a list of datasets here. `train.py` creates them all and concatenates them. Note: only data
     # keys common between the datasets are kept. Each dataset gets and additional transform that inserts the
     # "dataset_index" into the returned item. The index mapping is made according to the order in which the
-    # datsets are provided.
+    # datasets are provided.
     repo_id: str
     # Root directory where the dataset will be stored (e.g. 'dataset/path').
     root: str | None = None

@@ -47,6 +47,15 @@ class PreTrainedConfig(draccus.ChoiceRegistry, HubMixin, abc.ABC):
     def type(self) -> str:
         return self.get_choice_name(self.__class__)

+    # TODO(Steven): Find a better way to do deal with this
+    @classmethod
+    def get_type_str(cls) -> str:
+        """Get the policy type identifier for this configuration class."""
+        class_name = cls.__name__.lower()
+        if class_name.endswith("config"):
+            return class_name[:-6]  # Remove 'config' suffix
+        return class_name
+
     @abc.abstractproperty
     def observation_delta_indices(self) -> list | None:
         raise NotImplementedError

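The new `get_type_str` derives the registry key purely from the class name, so it only works for classes that follow the `<Type>Config` naming convention. A standalone sketch of that convention (plain stand-in classes, no lerobot import needed):

```python
def get_type_str(cls) -> str:
    # Same logic as the added classmethod: lowercase name minus 'config' suffix.
    class_name = cls.__name__.lower()
    if class_name.endswith("config"):
        return class_name[:-6]
    return class_name

class VQBeTConfig: ...
class TDMPCConfig: ...

assert get_type_str(VQBeTConfig) == "vqbet"
assert get_type_str(TDMPCConfig) == "tdmpc"
```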
@@ -102,7 +102,7 @@ class TrainPipelineConfig(HubMixin):

         if not self.resume and isinstance(self.output_dir, Path) and self.output_dir.is_dir():
             raise FileExistsError(
-                f"Output directory {self.output_dir} alreay exists and resume is {self.resume}. "
+                f"Output directory {self.output_dir} already exists and resume is {self.resume}. "
                 f"Please change your output directory so that {self.output_dir} is not overwritten."
             )
         elif not self.output_dir:

@@ -59,8 +59,8 @@ python lerobot/scripts/control_sim_robot.py record \
 ```

 **NOTE**: You can use your keyboard to control data recording flow.
-- Tap right arrow key '->' to early exit while recording an episode and go to reseting the environment.
-- Tap right arrow key '->' to early exit while reseting the environment and got to recording the next episode.
+- Tap right arrow key '->' to early exit while recording an episode and go to resetting the environment.
+- Tap right arrow key '->' to early exit while resetting the environment and got to recording the next episode.
 - Tap left arrow key '<-' to early exit and re-record the current episode.
 - Tap escape key 'esc' to stop the data recording.
 This might require a sudo permission to allow your terminal to monitor keyboard events.
@@ -131,7 +131,7 @@ def none_or_int(value):

 def init_sim_calibration(robot, cfg):
     # Constants necessary for transforming the joint pos of the real robot to the sim
-    # depending on the robot discription used in that sim.
+    # depending on the robot description used in that sim.
     start_pos = np.array(robot.leader_arms.main.calibration["start_pos"])
     axis_directions = np.array(cfg.get("axis_directions", [1]))
     offsets = np.array(cfg.get("offsets", [0])) * np.pi
@@ -445,7 +445,7 @@ if __name__ == "__main__":
         type=int,
         default=0,
         help=(
-            "Number of subprocesses handling the saving of frames as PNGs. Set to 0 to use threads only; "
+            "Number of subprocesses handling the saving of frames as PNG. Set to 0 to use threads only; "
             "set to ≥1 to use subprocesses, each using threads to write images. The best number of processes "
             "and threads depends on your system. We recommend 4 threads per camera with 0 processes. "
             "If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses."

@@ -454,7 +454,7 @@ def _compile_episode_data(


 @parser.wrap()
-def eval(cfg: EvalPipelineConfig):
+def eval_main(cfg: EvalPipelineConfig):
     logging.info(pformat(asdict(cfg)))

     # Check device is available
@@ -499,4 +499,4 @@ def eval(cfg: EvalPipelineConfig):

 if __name__ == "__main__":
     init_logging()
-    eval()
+    eval_main()

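Beyond readability, the rename from `eval` to `eval_main` plausibly also stops the script from shadowing Python's built-in `eval` inside this module; the commit message is not shown here, so that motivation is inferred from the hunk alone.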
@@ -175,7 +175,7 @@ def push_dataset_to_hub(
     # Robustify when `local_dir` is str instead of Path
     local_dir = Path(local_dir)

-    # Send warning if local_dir isn't well formated
+    # Send warning if local_dir isn't well formatted
     if local_dir.parts[-2] != user_id or local_dir.parts[-1] != dataset_id:
         warnings.warn(
             f"`local_dir` ({local_dir}) doesn't contain a community or user id `/` the name of the dataset that match the `repo_id` (e.g. 'data/lerobot/pusht'). Following this naming convention is advised, but not mandatory.",

@@ -72,7 +72,7 @@ def update_policy(
     # TODO(rcadene): policy.unnormalize_outputs(out_dict)
     grad_scaler.scale(loss).backward()

-    # Unscale the graident of the optimzer's assigned params in-place **prior to gradient clipping**.
+    # Unscale the gradient of the optimizer's assigned params in-place **prior to gradient clipping**.
     grad_scaler.unscale_(optimizer)

     grad_norm = torch.nn.utils.clip_grad_norm_(

@@ -158,7 +158,7 @@ def run_server(
     if major_version < 2:
         return "Make sure to convert your LeRobotDataset to v2 & above."

-    episode_data_csv_str, columns = get_episode_data(dataset, episode_id)
+    episode_data_csv_str, columns, ignored_columns = get_episode_data(dataset, episode_id)
     dataset_info = {
         "repo_id": f"{dataset_namespace}/{dataset_name}",
         "num_samples": dataset.num_frames
@@ -194,7 +194,7 @@ def run_server(
     ]

     response = requests.get(
-        f"https://huggingface.co/datasets/{repo_id}/resolve/main/meta/episodes.jsonl"
+        f"https://huggingface.co/datasets/{repo_id}/resolve/main/meta/episodes.jsonl", timeout=5
     )
     response.raise_for_status()
     # Split into lines and parse each line as JSON
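These `timeout=5` additions (here and in `get_dataset_info` below) keep the visualizer from hanging indefinitely on a stalled connection; they also look like fixes for the requests-without-timeout pattern that Bandit flags as B113, which would fit the `[tool.bandit]` section added to `pyproject.toml` later in this diff, though the commits themselves do not say so.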
@@ -218,6 +218,7 @@ def run_server(
         videos_info=videos_info,
         episode_data_csv_str=episode_data_csv_str,
         columns=columns,
+        ignored_columns=ignored_columns,
     )

     app.run(host=host, port=port)
@@ -236,6 +237,14 @@ def get_episode_data(dataset: LeRobotDataset | IterableNamespace, episode_index)
     selected_columns = [col for col, ft in dataset.features.items() if ft["dtype"] == "float32"]
     selected_columns.remove("timestamp")

+    ignored_columns = []
+    for column_name in selected_columns:
+        shape = dataset.features[column_name]["shape"]
+        shape_dim = len(shape)
+        if shape_dim > 1:
+            selected_columns.remove(column_name)
+            ignored_columns.append(column_name)
+
     # init header of csv with state and action names
     header = ["timestamp"]

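One subtlety in the added loop: it calls `selected_columns.remove()` while iterating over that same list, which in Python can skip the element immediately after each removal. A mutation-free equivalent, sketched with a standalone stand-in for `dataset.features`:

```python
features = {
    "observation.state": {"shape": (6,)},
    "observation.image_feats": {"shape": (8, 8)},  # 2D: should be ignored
    "action": {"shape": (6,)},
}
selected_columns = list(features)

# Filter with comprehensions instead of removing during iteration.
ignored_columns = [col for col in selected_columns if len(features[col]["shape"]) > 1]
selected_columns = [col for col in selected_columns if len(features[col]["shape"]) == 1]

assert ignored_columns == ["observation.image_feats"]
assert selected_columns == ["observation.state", "action"]
```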
@@ -245,16 +254,17 @@ def get_episode_data(dataset: LeRobotDataset | IterableNamespace, episode_index)
             if isinstance(dataset, LeRobotDataset)
             else dataset.features[column_name].shape[0]
         )
-        header += [f"{column_name}_{i}" for i in range(dim_state)]

         if "names" in dataset.features[column_name] and dataset.features[column_name]["names"]:
             column_names = dataset.features[column_name]["names"]
             while not isinstance(column_names, list):
                 column_names = list(column_names.values())[0]
         else:
-            column_names = [f"motor_{i}" for i in range(dim_state)]
+            column_names = [f"{column_name}_{i}" for i in range(dim_state)]
         columns.append({"key": column_name, "value": column_names})

+        header += column_names
+
     selected_columns.insert(0, "timestamp")

     if isinstance(dataset, LeRobotDataset):
@@ -290,7 +300,7 @@ def get_episode_data(dataset: LeRobotDataset | IterableNamespace, episode_index)
     csv_writer.writerows(rows)
     csv_string = csv_buffer.getvalue()

-    return csv_string, columns
+    return csv_string, columns, ignored_columns


 def get_episode_video_paths(dataset: LeRobotDataset, ep_index: int) -> list[str]:
@@ -317,7 +327,9 @@ def get_episode_language_instruction(dataset: LeRobotDataset, ep_index: int) ->


 def get_dataset_info(repo_id: str) -> IterableNamespace:
-    response = requests.get(f"https://huggingface.co/datasets/{repo_id}/resolve/main/meta/info.json")
+    response = requests.get(
+        f"https://huggingface.co/datasets/{repo_id}/resolve/main/meta/info.json", timeout=5
+    )
     response.raise_for_status()  # Raises an HTTPError for bad responses
     dataset_info = response.json()
     dataset_info["repo_id"] = repo_id
@@ -364,7 +376,7 @@ def visualize_dataset_html(
             template_folder=template_dir,
         )
     else:
-        # Create a simlink from the dataset video folder containg mp4 files to the output directory
+        # Create a simlink from the dataset video folder containing mp4 files to the output directory
         # so that the http server can get access to the mp4 files.
         if isinstance(dataset, LeRobotDataset):
             ln_videos_dir = static_dir / "videos"

@@ -14,21 +14,7 @@
 <!-- Use [Alpin.js](https://alpinejs.dev), a lightweight and easy to learn JS framework -->
 <!-- Use [tailwindcss](https://tailwindcss.com/), CSS classes for styling html -->
 <!-- Use [dygraphs](https://dygraphs.com/), a lightweight JS charting library -->
-<body class="flex flex-col md:flex-row h-screen max-h-screen bg-slate-950 text-gray-200" x-data="createAlpineData()" @keydown.window="(e) => {
-    // Use the space bar to play and pause, instead of default action (e.g. scrolling)
-    const { keyCode, key } = e;
-    if (keyCode === 32 || key === ' ') {
-        e.preventDefault();
-        $refs.btnPause.classList.contains('hidden') ? $refs.btnPlay.click() : $refs.btnPause.click();
-    }else if (key === 'ArrowDown' || key === 'ArrowUp'){
-        const nextEpisodeId = key === 'ArrowDown' ? {{ episode_id }} + 1 : {{ episode_id }} - 1;
-        const lowestEpisodeId = {{ episodes }}.at(0);
-        const highestEpisodeId = {{ episodes }}.at(-1);
-        if(nextEpisodeId >= lowestEpisodeId && nextEpisodeId <= highestEpisodeId){
-            window.location.href = `./episode_${nextEpisodeId}`;
-        }
-    }
-}">
+<body class="flex flex-col md:flex-row h-screen max-h-screen bg-slate-950 text-gray-200" x-data="createAlpineData()">
     <!-- Sidebar -->
     <div x-ref="sidebar" class="bg-slate-900 p-5 break-words overflow-y-auto shrink-0 md:shrink md:w-60 md:max-h-screen">
         <a href="https://github.com/huggingface/lerobot" target="_blank" class="hidden md:block">
@@ -52,25 +38,55 @@

         <p>Episodes:</p>
         <!-- episodes menu for medium & large screens -->
-        <ul class="ml-2 hidden md:block">
-            {% for episode in episodes %}
-            <li class="font-mono text-sm mt-0.5">
-                <a href="episode_{{ episode }}" class="underline {% if episode_id == episode %}font-bold -ml-1{% endif %}">
-                    Episode {{ episode }}
-                </a>
-            </li>
-            {% endfor %}
-        </ul>
+        <div class="ml-2 hidden md:block" x-data="episodePagination">
+            <ul>
+                <template x-for="episode in paginatedEpisodes" :key="episode">
+                    <li class="font-mono text-sm mt-0.5">
+                        <a :href="'episode_' + episode"
+                           :class="{'underline': true, 'font-bold -ml-1': episode == {{ episode_id }}}"
+                           x-text="'Episode ' + episode"></a>
+                    </li>
+                </template>
+            </ul>
+
+            <div class="flex items-center mt-3 text-xs" x-show="totalPages > 1">
+                <button @click="prevPage()"
+                        class="px-2 py-1 bg-slate-800 rounded mr-2"
+                        :class="{'opacity-50 cursor-not-allowed': page === 1}"
+                        :disabled="page === 1">
+                    « Prev
+                </button>
+                <span class="font-mono mr-2" x-text="` ${page} / ${totalPages}`"></span>
+                <button @click="nextPage()"
+                        class="px-2 py-1 bg-slate-800 rounded"
+                        :class="{'opacity-50 cursor-not-allowed': page === totalPages}"
+                        :disabled="page === totalPages">
+                    Next »
+                </button>
+            </div>
+        </div>

         <!-- episodes menu for small screens -->
-        <div class="flex overflow-x-auto md:hidden">
-            {% for episode in episodes %}
-            <p class="font-mono text-sm mt-0.5 border-r last:border-r-0 px-2 {% if episode_id == episode %}font-bold{% endif %}">
-                <a href="episode_{{ episode }}" class="">
-                    {{ episode }}
-                </a>
-            </p>
-            {% endfor %}
+        <div class="flex overflow-x-auto md:hidden" x-data="episodePagination">
+            <button @click="prevPage()"
+                    class="px-2 bg-slate-800 rounded mr-2"
+                    :class="{'opacity-50 cursor-not-allowed': page === 1}"
+                    :disabled="page === 1">«</button>
+            <div class="flex">
+                <template x-for="(episode, index) in paginatedEpisodes" :key="episode">
+                    <p class="font-mono text-sm mt-0.5 px-2"
+                       :class="{
+                           'font-bold': episode == {{ episode_id }},
+                           'border-r': index !== paginatedEpisodes.length - 1
+                       }">
+                        <a :href="'episode_' + episode" x-text="episode"></a>
+                    </p>
+                </template>
+            </div>
+            <button @click="nextPage()"
+                    class="px-2 bg-slate-800 rounded ml-2"
+                    :class="{'opacity-50 cursor-not-allowed': page === totalPages}"
+                    :disabled="page === totalPages">» </button>
         </div>

     </div>
@@ -208,47 +224,58 @@
                 </p>
             </div>

-            <table class="text-sm border-collapse border border-slate-700" x-show="currentFrameData">
-                <thead>
-                    <tr>
-                        <th></th>
-                        <template x-for="(_, colIndex) in Array.from({length: columns.length}, (_, index) => index)">
-                            <th class="border border-slate-700">
-                                <div class="flex gap-x-2 justify-between px-2">
-                                    <input type="checkbox" :checked="isColumnChecked(colIndex)"
-                                        @change="toggleColumn(colIndex)">
-                                    <p x-text="`${columns[colIndex].key}`"></p>
-                                </div>
-                            </th>
-                        </template>
-                    </tr>
-                </thead>
-                <tbody>
-                    <template x-for="(row, rowIndex) in rows">
-                        <tr class="odd:bg-gray-800 even:bg-gray-900">
-                            <td class="border border-slate-700">
-                                <div class="flex gap-x-2 max-w-64 font-semibold px-1 break-all">
-                                    <input type="checkbox" :checked="isRowChecked(rowIndex)"
-                                        @change="toggleRow(rowIndex)">
-                                    <p x-text="`${rowLabels[rowIndex]}`"></p>
-                                </div>
-                            </td>
-                            <template x-for="(cell, colIndex) in row">
-                                <td x-show="cell" class="border border-slate-700">
-                                    <div class="flex gap-x-2 w-24 justify-between px-2" :class="{ 'hidden': cell.isNull }">
-                                        <input type="checkbox" x-model="cell.checked" @change="updateTableValues()">
-                                        <span x-text="`${!cell.isNull ? cell.value.toFixed(2) : null}`"
-                                            :style="`color: ${cell.color}`"></span>
-                                    </div>
-                                </td>
-                            </template>
-                        </tr>
-                    </template>
-                </tbody>
-            </table>
+            <div>
+                <table class="text-sm border-collapse border border-slate-700" x-show="currentFrameData">
+                    <thead>
+                        <tr>
+                            <th></th>
+                            <template x-for="(_, colIndex) in Array.from({length: columns.length}, (_, index) => index)">
+                                <th class="border border-slate-700">
+                                    <div class="flex gap-x-2 justify-between px-2">
+                                        <input type="checkbox" :checked="isColumnChecked(colIndex)"
+                                            @change="toggleColumn(colIndex)">
+                                        <p x-text="`${columns[colIndex].key}`"></p>
+                                    </div>
+                                </th>
+                            </template>
+                        </tr>
+                    </thead>
+                    <tbody>
+                        <template x-for="(row, rowIndex) in rows">
+                            <tr class="odd:bg-gray-800 even:bg-gray-900">
+                                <td class="border border-slate-700">
+                                    <div class="flex gap-x-2 max-w-64 font-semibold px-1 break-all">
+                                        <input type="checkbox" :checked="isRowChecked(rowIndex)"
+                                            @change="toggleRow(rowIndex)">
+                                    </div>
+                                </td>
+                                <template x-for="(cell, colIndex) in row">
+                                    <td x-show="cell" class="border border-slate-700">
+                                        <div class="flex gap-x-2 justify-between px-2" :class="{ 'hidden': cell.isNull }">
+                                            <div class="flex gap-x-2">
+                                                <input type="checkbox" x-model="cell.checked" @change="updateTableValues()">
+                                                <span x-text="`${!cell.isNull ? cell.label : null}`"></span>
+                                            </div>
+                                            <span class="w-14 text-right" x-text="`${!cell.isNull ? (typeof cell.value === 'number' ? cell.value.toFixed(2) : cell.value) : null}`"
+                                                :style="`color: ${cell.color}`"></span>
+                                        </div>
+                                    </td>
+                                </template>
+                            </tr>
+                        </template>
+                    </tbody>
+                </table>

-            <div id="labels" class="hidden">
+                <div id="labels" class="hidden">
+                </div>
+
+                {% if ignored_columns|length > 0 %}
+                <div class="m-2 text-orange-700 max-w-96">
+                    Columns {{ ignored_columns }} are NOT shown since the visualizer currently does not support 2D or 3D data.
+                </div>
+                {% endif %}
             </div>

         </div>
     </div>
@@ -279,7 +306,6 @@
             videosKeys: {{ videos_info | map(attribute='filename') | list | tojson }},
             videosKeysSelected: [],
             columns: {{ columns | tojson }},
-            rowLabels: {{ columns | tojson }}.reduce((colA, colB) => colA.value.length > colB.value.length ? colA : colB).value,

             // alpine initialization
             init() {
@@ -452,6 +478,68 @@
             }
         };
     }
+
+    document.addEventListener('alpine:init', () => {
+        // Episode pagination component
+        Alpine.data('episodePagination', () => ({
+            episodes: {{ episodes }},
+            pageSize: 100,
+            page: 1,
+
+            init() {
+                // Find which page contains the current episode_id
+                const currentEpisodeId = {{ episode_id }};
+                const episodeIndex = this.episodes.indexOf(currentEpisodeId);
+                if (episodeIndex !== -1) {
+                    this.page = Math.floor(episodeIndex / this.pageSize) + 1;
+                }
+            },
+
+            get totalPages() {
+                return Math.ceil(this.episodes.length / this.pageSize);
+            },
+
+            get paginatedEpisodes() {
+                const start = (this.page - 1) * this.pageSize;
+                const end = start + this.pageSize;
+                return this.episodes.slice(start, end);
+            },
+
+            nextPage() {
+                if (this.page < this.totalPages) {
+                    this.page++;
+                }
+            },
+
+            prevPage() {
+                if (this.page > 1) {
+                    this.page--;
+                }
+            }
+        }));
+    });
 </script>
+
+<script>
+    window.addEventListener('keydown', (e) => {
+        // Use the space bar to play and pause, instead of default action (e.g. scrolling)
+        const { keyCode, key } = e;
+
+        if (keyCode === 32 || key === ' ') {
+            e.preventDefault();
+            const btnPause = document.querySelector('[x-ref="btnPause"]');
+            const btnPlay = document.querySelector('[x-ref="btnPlay"]');
+            btnPause.classList.contains('hidden') ? btnPlay.click() : btnPause.click();
+        } else if (key === 'ArrowDown' || key === 'ArrowUp') {
+            const episodes = {{ episodes }}; // Access episodes directly from the Jinja template
+            const nextEpisodeId = key === 'ArrowDown' ? {{ episode_id }} + 1 : {{ episode_id }} - 1;
+            const lowestEpisodeId = episodes.at(0);
+            const highestEpisodeId = episodes.at(-1);
+            if (nextEpisodeId >= lowestEpisodeId && nextEpisodeId <= highestEpisodeId) {
+                window.location.href = `./episode_${nextEpisodeId}`;
+            }
+        }
+    });
+</script>
 </body>
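The `episodePagination` component is plain slice-based paging. The same arithmetic in Python, for reference (values are illustrative):

```python
import math

episodes = list(range(250))
page_size = 100

total_pages = math.ceil(len(episodes) / page_size)    # 3 pages of up to 100 episodes
page = (episodes.index(120) // page_size) + 1         # page containing episode 120 -> 2
start, end = (page - 1) * page_size, page * page_size
paginated = episodes[start:end]                       # episodes 100..199

assert total_pages == 3 and page == 2 and paginated[0] == 100
```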
BIN  media/lekiwi/kiwi.webp  (new file, 219 KiB)
BIN  media/tutorial/img1.jpg  (new file, 66 KiB)
BIN  media/tutorial/img10.jpg  (new file, 127 KiB)
BIN  media/tutorial/img11.jpg  (new file, 109 KiB)
BIN  media/tutorial/img12.jpg  (new file, 80 KiB)
BIN  media/tutorial/img13.jpg  (new file, 88 KiB)
BIN  media/tutorial/img14.jpg  (new file, 86 KiB)
BIN  media/tutorial/img15.jpg  (new file, 96 KiB)
BIN  media/tutorial/img16.jpg  (new file, 84 KiB)
BIN  media/tutorial/img17.jpg  (new file, 72 KiB)
BIN  media/tutorial/img18.jpg  (new file, 78 KiB)
BIN  media/tutorial/img19.jpg  (new file, 97 KiB)
BIN  media/tutorial/img2.jpg  (new file, 88 KiB)
BIN  media/tutorial/img20.jpg  (new file, 42 KiB)
BIN  media/tutorial/img21.jpg  (new file, 85 KiB)
BIN  media/tutorial/img22.jpg  (new file, 62 KiB)
BIN  media/tutorial/img23.jpg  (new file, 54 KiB)
BIN  media/tutorial/img24.jpg  (new file, 61 KiB)
BIN  media/tutorial/img25.jpg  (new file, 76 KiB)
BIN  media/tutorial/img26.jpg  (new file, 80 KiB)
BIN  media/tutorial/img27.jpg  (new file, 48 KiB)
BIN  media/tutorial/img28.jpg  (new file, 91 KiB)
BIN  media/tutorial/img29.jpg  (new file, 54 KiB)
BIN  media/tutorial/img3.jpg  (new file, 86 KiB)
BIN  media/tutorial/img30.jpg  (new file, 57 KiB)
BIN  media/tutorial/img31.jpg  (new file, 84 KiB)
BIN  media/tutorial/img32.jpg  (new file, 88 KiB)
BIN  media/tutorial/img4.jpg  (new file, 70 KiB)
BIN  media/tutorial/img5.jpg  (new file, 66 KiB)
BIN  media/tutorial/img6.jpg  (new file, 64 KiB)
BIN  media/tutorial/img7.jpg  (new file, 89 KiB)
BIN  media/tutorial/img8.jpg  (new file, 74 KiB)
BIN  media/tutorial/img9.jpg  (new file, 82 KiB)
@@ -48,7 +48,7 @@ dependencies = [
     "omegaconf>=2.3.0",
     "opencv-python>=4.9.0",
     "packaging>=24.2",
-    "pyav>=12.0.5",
+    "av>=12.0.5",
     "pymunk>=6.6.0",
     "pynput>=1.7.7",
     "pyzmq>=26.2.1",
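Editor's note: the -/+ markers above are reconstructed from the scrape's line order (old line first, then new), so the hunk is read as swapping the `pyav` distribution for `av`; both provide the same PyAV bindings, so the import name does not change. A hedged decoding sketch, with an invented file name:

    import av  # the module PyAV installs, whichever distribution provides it

    with av.open("episode_000000.mp4") as container:  # invented file name
        for frame in container.decode(video=0):
            image = frame.to_ndarray(format="rgb24")  # H x W x 3 uint8 array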
@@ -111,10 +111,32 @@ exclude = [
     "venv",
 ]
 
 
 [tool.ruff.lint]
 select = ["E4", "E7", "E9", "F", "I", "N", "B", "C4", "SIM"]
 
+[tool.bandit]
+exclude_dirs = [
+    "tests",
+    "benchmarks",
+    "lerobot/common/datasets/push_dataset_to_hub",
+    "lerobot/common/datasets/v2/convert_dataset_v1_to_v2",
+    "lerobot/common/policies/pi0/conversion_scripts",
+    "lerobot/scripts/push_dataset_to_hub.py",
+]
+skips = ["B101", "B311", "B404", "B603"]
+
+[tool.typos]
+default.extend-ignore-re = [
+    "(?Rm)^.*(#|//)\\s*spellchecker:disable-line$",  # spellchecker:disable-line
+    "(?s)(#|//)\\s*spellchecker:off.*?\\n\\s*(#|//)\\s*spellchecker:on"  # spellchecker:<on|off>
+]
+default.extend-ignore-identifiers-re = [
+    # Add individual words here to ignore them
+    "2nd",
+    "pn",
+    "ser",
+    "ein",
+]
+
 [build-system]
 requires = ["poetry-core"]
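Editor's note: the two `extend-ignore-re` patterns above match inline spellchecker directives inside source comments, one for a single line and one for an off/on block. A hedged Python illustration of the comment forms they recognize; the misspelling "teh" is invented, not code from this PR:

    # Hedged illustration of the [tool.typos] escape hatches configured above.
    teh_count = 0  # spellchecker:disable-line

    # spellchecker:off
    # Everything between the off/on markers is exempt from the typos check,
    # e.g. mnemonics like "ser" or "pn" that look like misspellings.
    # spellchecker:on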
@@ -36,51 +36,27 @@ def pytest_collection_finish():
     print(f"\nTesting with {DEVICE=}")
 
 
-@pytest.fixture
-def is_robot_available(robot_type):
-    if robot_type not in available_robots:
+def _check_component_availability(component_type, available_components, make_component):
+    """Generic helper to check if a hardware component is available"""
+    if component_type not in available_components:
         raise ValueError(
-            f"The robot type '{robot_type}' is not valid. Expected one of these '{available_robots}"
+            f"The {component_type} type is not valid. Expected one of these '{available_components}'"
         )
 
     try:
-        robot = make_robot(robot_type)
-        robot.connect()
-        del robot
+        component = make_component(component_type)
+        component.connect()
+        del component
         return True
 
     except Exception as e:
-        print(f"\nA {robot_type} robot is not available.")
+        print(f"\nA {component_type} is not available.")
 
         if isinstance(e, ModuleNotFoundError):
             print(f"\nInstall module '{e.name}'")
         elif isinstance(e, SerialException):
             print("\nNo physical motors bus detected.")
-        else:
-            traceback.print_exc()
-
-        return False
-
-
-@pytest.fixture
-def is_camera_available(camera_type):
-    if camera_type not in available_cameras:
-        raise ValueError(
-            f"The camera type '{camera_type}' is not valid. Expected one of these '{available_cameras}"
-        )
-
-    try:
-        camera = make_camera(camera_type)
-        camera.connect()
-        del camera
-        return True
-
-    except Exception as e:
-        print(f"\nA {camera_type} camera is not available.")
-
-        if isinstance(e, ModuleNotFoundError):
-            print(f"\nInstall module '{e.name}'")
-        elif isinstance(e, ValueError) and "camera_index" in e.args[0]:
-            print("\nNo physical device detected.")
+        elif isinstance(e, ValueError) and "camera_index" in str(e):
+            print("\nNo physical camera detected.")
         else:
             traceback.print_exc()
@@ -88,30 +64,19 @@ def is_camera_available(camera_type):
         return False
 
 
+@pytest.fixture
+def is_robot_available(robot_type):
+    return _check_component_availability(robot_type, available_robots, make_robot)
+
+
+@pytest.fixture
+def is_camera_available(camera_type):
+    return _check_component_availability(camera_type, available_cameras, make_camera)
+
+
 @pytest.fixture
 def is_motor_available(motor_type):
-    if motor_type not in available_motors:
-        raise ValueError(
-            f"The motor type '{motor_type}' is not valid. Expected one of these '{available_motors}"
-        )
-
-    try:
-        motors_bus = make_motors_bus(motor_type)
-        motors_bus.connect()
-        del motors_bus
-        return True
-
-    except Exception as e:
-        print(f"\nA {motor_type} motor is not available.")
-
-        if isinstance(e, ModuleNotFoundError):
-            print(f"\nInstall module '{e.name}'")
-        elif isinstance(e, SerialException):
-            print("\nNo physical motors bus detected.")
-        else:
-            traceback.print_exc()
-
-        return False
+    return _check_component_availability(motor_type, available_motors, make_motors_bus)
 
 
 @pytest.fixture
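Editor's note: after this refactor the three hardware fixtures share one connect-and-probe helper. A hedged usage sketch showing how such a fixture lets tests skip cleanly on machines without the device; the test name and the "koch" parameter are invented for illustration:

    import pytest

    # Direct parametrization of "robot_type" also feeds the fixture chain,
    # so is_robot_available receives the same value.
    @pytest.mark.parametrize("robot_type", ["koch"])
    def test_robot_connects(robot_type, is_robot_available):
        if not is_robot_available:
            pytest.skip(f"No '{robot_type}' robot attached to this machine")
        # ... exercise the connected robot here ...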
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:eb7b74f919adf8d4478585f65c54997e6f3bccab67eadb4048300108586a4163
-size 5104
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:dfbc3b1ad5e3b94311edda0f04db002b26117b0719b73dfdb56dd483dc9c409d
-size 31672
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e39afdf1f3db8a72a1095a5a0ffdb7e67f478a28bd73e59cda197687da8d236c
-size 68
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5dd39a554c9c3db537e98c9ceade024d172c46c4fa7ce9e27601b94116445417
-size 33400
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a5ec46abc5a3c85675a5ee4a1bb362eecb3ff4c546082ff309c89fc7821f38bd
-size 515400
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:50303d05caea725c4a240f1389424d6c2361961f2cee729a0010e909ebffed81
-size 31672
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9bb9b195d32e05550af0edd5df88fcc761c829ab8c4b129ba970a723f39b46ee
-size 68
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:683a2038185f3d070e7d7c0c31e4aa75067c11bf798daa41c9fab336f4183fda
-size 33400
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc67af1d60f95d84c98d6c9ebd648990e0f0705368bd6b72d2b39533950b0179
+size 5104
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64518cf652105d15f5fd2cfc13d0681f66a4ec4797dc5d5dc2f7b0d91fe5dfd6
+size 31672
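Editor's note: each hunk above deletes or recreates a whole Git LFS pointer file, the three-line `key value` text that stands in for the real binary in the repository. A hedged Python sketch of parsing that format; the helper name is invented:

    # Hypothetical helper: parse a Git LFS pointer like the 3-line files above.
    def parse_lfs_pointer(text: str) -> dict:
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        return {
            "version": fields["version"],                  # spec URL
            "oid": fields["oid"].removeprefix("sha256:"),  # content hash
            "size": int(fields["size"]),                   # true byte size
        }

    pointer = """version https://git-lfs.github.com/spec/v1
    oid sha256:cc67af1d60f95d84c98d6c9ebd648990e0f0705368bd6b72d2b39533950b0179
    size 5104"""
    assert parse_lfs_pointer(pointer)["size"] == 5104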