Compare commits

user/mruss ... fix_path (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | d374873849 |  |
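For reference, a comparison like this can be reproduced locally with plain git. The branch names and the short SHA below are taken from the page header above and assume both refs exist on the remote; treat this as a sketch, not the exact commands behind this page.

```bash
# Fetch both refs named in the compare header (assumed to exist on the remote).
git fetch origin user/mruss fix_path
# Show the same three-dot comparison as this page.
git diff --stat origin/user/mruss...origin/fix_path
# Inspect the single commit listed above.
git show --stat d374873849
```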
@@ -1,68 +0,0 @@
{
    "homing_offset": [2048, 3072, 3072, -1024, -1024, 2048, -2048, 2048, -2048],
    "drive_mode": [1, 1, 1, 0, 0, 1, 0, 1, 0],
    "start_pos": [2015, 3058, 3061, 1071, 1071, 2035, 2152, 2029, 2499],
    "end_pos": [-1008, -1963, -1966, 2141, 2143, -971, 3043, -1077, 3144],
    "calib_mode": ["DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "LINEAR"],
    "motor_names": ["waist", "shoulder", "shoulder_shadow", "elbow", "elbow_shadow", "forearm_roll", "wrist_angle", "wrist_rotate", "gripper"]
}

@@ -1,68 +0,0 @@
{
    "homing_offset": [2048, 3072, 3072, -1024, -1024, 2048, -2048, 2048, -1024],
    "drive_mode": [1, 1, 1, 0, 0, 1, 0, 1, 0],
    "start_pos": [2035, 3024, 3019, 979, 981, 1982, 2166, 2124, 1968],
    "end_pos": [-990, -2017, -2015, 2078, 2076, -1030, 3117, -1016, 2556],
    "calib_mode": ["DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "LINEAR"],
    "motor_names": ["waist", "shoulder", "shoulder_shadow", "elbow", "elbow_shadow", "forearm_roll", "wrist_angle", "wrist_rotate", "gripper"]
}

@@ -1,68 +0,0 @@
{
    "homing_offset": [2048, 3072, 3072, -1024, -1024, 2048, -2048, 2048, -2048],
    "drive_mode": [1, 1, 1, 0, 0, 1, 0, 1, 0],
    "start_pos": [2056, 2895, 2896, 1191, 1190, 2018, 2051, 2056, 2509],
    "end_pos": [-1040, -2004, -2006, 2126, 2127, -1010, 3050, -1117, 3143],
    "calib_mode": ["DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "LINEAR"],
    "motor_names": ["waist", "shoulder", "shoulder_shadow", "elbow", "elbow_shadow", "forearm_roll", "wrist_angle", "wrist_rotate", "gripper"]
}

@@ -1,68 +0,0 @@
{
    "homing_offset": [2048, 3072, 3072, -1024, -1024, 2048, -2048, 2048, -2048],
    "drive_mode": [1, 1, 1, 0, 0, 1, 0, 1, 0],
    "start_pos": [2068, 3034, 3030, 1038, 1041, 1991, 1948, 2090, 1985],
    "end_pos": [-1025, -2014, -2015, 2058, 2060, -955, 3091, -940, 2576],
    "calib_mode": ["DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "LINEAR"],
    "motor_names": ["waist", "shoulder", "shoulder_shadow", "elbow", "elbow_shadow", "forearm_roll", "wrist_angle", "wrist_rotate", "gripper"]
}
.dockerignore (deleted, 146 lines)
@@ -1,146 +0,0 @@
# Misc
.git
tmp
wandb
data
outputs
.vscode
rl
media

# Logging
logs

# HPC
nautilus/*.yaml
*.key

# Slurm
sbatch*.sh

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
!tests/data
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Ignore .cache except calibration
.cache/*
!.cache/calibration/
!.cache/calibration/**

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
.gitattributes (vendored, 5 changes)
@@ -1,6 +1 @@
*.memmap filter=lfs diff=lfs merge=lfs -text
*.stl filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.json !text !filter !merge !diff
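The last rule above unsets the text, filter, merge and diff attributes for JSON files. To check how these attributes resolve for a particular path, one option is git's built-in attribute query; the path below is only a hypothetical example.

```bash
# Hypothetical path; prints the attributes (filter, diff, merge, text) that apply to it.
git check-attr --all .cache/calibration/example.json
```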
.github/ISSUE_TEMPLATE/bug-report.yml (vendored, deleted, 54 lines)
@@ -1,54 +0,0 @@
name: "\U0001F41B Bug Report"
description: Submit a bug report to help us improve LeRobot
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to submit a bug report! 🐛
        If this is not a bug related to the LeRobot library directly, but instead a general question about your code or the library specifically please use our [discord](https://discord.gg/s3KuuzsPFb).

  - type: textarea
    id: system-info
    attributes:
      label: System Info
      description: If needed, you can share your lerobot configuration with us by running `python -m lerobot.scripts.display_sys_info` and copy-pasting its outputs below
      render: Shell
      placeholder: lerobot version, OS, python version, numpy version, torch version, and lerobot's configuration
    validations:
      required: true

  - type: checkboxes
    id: information-scripts-examples
    attributes:
      label: Information
      description: 'The problem arises when using:'
      options:
        - label: "One of the scripts in the examples/ folder of LeRobot"
        - label: "My own task or dataset (give details below)"

  - type: textarea
    id: reproduction
    validations:
      required: true
    attributes:
      label: Reproduction
      description: |
        If needed, provide a simple code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet.
        Sharing error messages or stack traces could be useful as well!
        Important! Use code tags to correctly format your code. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting
        Try to avoid screenshots, as they are hard to read and don't allow copy-and-pasting.

      placeholder: |
        Steps to reproduce the behavior:

          1.
          2.
          3.

  - type: textarea
    id: expected-behavior
    validations:
      required: true
    attributes:
      label: Expected behavior
      description: "A clear and concise description of what you would expect to happen."
.github/PULL_REQUEST_TEMPLATE.md (vendored, deleted, 34 lines)
@@ -1,34 +0,0 @@
## What this does
Explain what this PR does. Feel free to tag your PR with the appropriate label(s).

Examples:
| Title | Label |
|----------------------|-----------------|
| Fixes #[issue] | (🐛 Bug) |
| Adds new dataset | (🗃️ Dataset) |
| Optimizes something | (⚡️ Performance) |

## How it was tested
Explain/show how you tested your changes.

Examples:
- Added `test_something` in `tests/test_stuff.py`.
- Added `new_feature` and checked that training converges with policy X on dataset/environment Y.
- Optimized `some_function`, it now runs X times faster than previously.

## How to checkout & try? (for the reviewer)
Provide a simple way for the reviewer to try out your changes.

Examples:
```bash
DATA_DIR=tests/data pytest -sx tests/test_stuff.py::test_something
```
```bash
python lerobot/scripts/train.py --some.option=true
```

## SECTION TO REMOVE BEFORE SUBMITTING YOUR PR
**Note**: Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
members/contributors who may be interested in your PR. Try to avoid tagging more than 3 people.

**Note**: Before submitting this PR, please read the [contributor guideline](https://github.com/huggingface/lerobot/blob/main/CONTRIBUTING.md#submitting-a-pull-request-pr).
.github/poetry/cpu/poetry.lock (generated, vendored, new file, 3126 lines; contents collapsed)
.github/poetry/cpu/pyproject.toml (vendored, new file, 107 lines)
@@ -0,0 +1,107 @@
[tool.poetry]
name = "lerobot"
version = "0.1.0"
description = "Le robot is learning"
authors = [
    "Rémi Cadène <re.cadene@gmail.com>",
    "Simon Alibert <alibert.sim@gmail.com>",
]
repository = "https://github.com/Cadene/lerobot"
readme = "README.md"
license = "MIT"
classifiers=[
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Developers",
    "Topic :: Software Development :: Build Tools",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3.10",
]
packages = [{include = "lerobot"}]


[tool.poetry.dependencies]
python = "^3.10"
cython = "^3.0.8"
termcolor = "^2.4.0"
omegaconf = "^2.3.0"
dm-env = "^1.6"
pandas = "^2.2.1"
wandb = "^0.16.3"
moviepy = "^1.0.3"
imageio = {extras = ["pyav"], version = "^2.34.0"}
gdown = "^5.1.0"
hydra-core = "^1.3.2"
einops = "^0.7.0"
pygame = "^2.5.2"
pymunk = "^6.6.0"
zarr = "^2.17.0"
shapely = "^2.0.3"
scikit-image = "^0.22.0"
numba = "^0.59.0"
mpmath = "^1.3.0"
torch = {version = "^2.2.1", source = "torch-cpu"}
tensordict = {git = "https://github.com/pytorch/tensordict"}
torchrl = {git = "https://github.com/pytorch/rl", rev = "13bef426dcfa5887c6e5034a6e9697993fa92c37"}
mujoco = "^3.1.2"
mujoco-py = "^2.1.2.14"
gym = "^0.26.2"
opencv-python = "^4.9.0.80"
diffusers = "^0.26.3"
torchvision = {version = "^0.17.1", source = "torch-cpu"}
h5py = "^3.10.0"
dm = "^1.3"
dm-control = "^1.0.16"
huggingface-hub = "^0.21.4"


[tool.poetry.group.dev.dependencies]
pre-commit = "^3.6.2"
debugpy = "^1.8.1"
pytest = "^8.1.0"


[[tool.poetry.source]]
name = "torch-cpu"
url = "https://download.pytorch.org/whl/cpu"
priority = "supplemental"


[tool.ruff]
line-length = 110
target-version = "py310"
exclude = [
    ".bzr", ".direnv", ".eggs", ".git", ".git-rewrite", ".hg", ".mypy_cache", ".nox",
    ".pants.d", ".pytype", ".ruff_cache", ".svn", ".tox", ".venv", "__pypackages__",
    "_build", "buck-out", "build", "dist", "node_modules", "venv",
]


[tool.ruff.lint]
select = ["E4", "E7", "E9", "F", "I", "N", "B", "C4", "SIM"]


[tool.poetry-dynamic-versioning]
enable = true


[build-system]
requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning>=1.0.0,<2.0.0"]
build-backend = "poetry_dynamic_versioning.backend"
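This CPU-only pyproject pins torch and torchvision to the supplemental torch-cpu wheel index. As context, the test workflow later in this compare copies these files over the repository's own pyproject.toml and poetry.lock before installing; roughly:

```bash
# Mirrors the "Select cpu dependencies" step shown in .github/workflows/test.yml below.
cp -t . .github/poetry/cpu/pyproject.toml .github/poetry/cpu/poetry.lock
poetry install --no-interaction --no-root
```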
.github/workflows/build-docker-images.yml (vendored, deleted, 109 lines)
@@ -1,109 +0,0 @@
# Inspired by
# https://github.com/huggingface/peft/blob/main/.github/workflows/build_docker_images.yml
name: Builds

on:
  workflow_dispatch:
  workflow_call:
  schedule:
    - cron: "0 1 * * *"

env:
  PYTHON_VERSION: "3.10"

jobs:
  latest-cpu:
    name: CPU
    runs-on:
      group: aws-general-8-plus
    steps:
      - name: Install Git LFS
        run: |
          sudo apt-get update
          sudo apt-get install git-lfs
          git lfs install

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Check out code
        uses: actions/checkout@v4
        with:
          lfs: true

      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}

      - name: Build and Push CPU
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./docker/lerobot-cpu/Dockerfile
          push: true
          tags: huggingface/lerobot-cpu
          build-args: PYTHON_VERSION=${{ env.PYTHON_VERSION }}

  latest-cuda:
    name: GPU
    runs-on:
      group: aws-general-8-plus
    steps:
      - name: Install Git LFS
        run: |
          sudo apt-get update
          sudo apt-get install git-lfs
          git lfs install

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Check out code
        uses: actions/checkout@v4
        with:
          lfs: true

      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}

      - name: Build and Push GPU
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./docker/lerobot-gpu/Dockerfile
          push: true
          tags: huggingface/lerobot-gpu
          build-args: PYTHON_VERSION=${{ env.PYTHON_VERSION }}

  latest-cuda-dev:
    name: GPU Dev
    runs-on:
      group: aws-general-8-plus
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Check out code
        uses: actions/checkout@v4

      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}

      - name: Build and Push GPU dev
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./docker/lerobot-gpu-dev/Dockerfile
          push: true
          tags: huggingface/lerobot-gpu:dev
          build-args: PYTHON_VERSION=${{ env.PYTHON_VERSION }}
.github/workflows/nightly-tests.yml (vendored, deleted, 83 lines)
@@ -1,83 +0,0 @@
# Inspired by
# https://github.com/huggingface/peft/blob/main/.github/workflows/nightly.yml
name: Nightly

on:
  workflow_dispatch:
  schedule:
    - cron: "0 2 * * *"

env:
  DATA_DIR: tests/data
  # SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}

jobs:
  run_all_tests_cpu:
    name: CPU
    strategy:
      fail-fast: false
    runs-on:
      group: aws-general-8-plus
    container:
      image: huggingface/lerobot-cpu:latest
      options: --shm-size "16gb"
      credentials:
        username: ${{ secrets.DOCKERHUB_USERNAME }}
        password: ${{ secrets.DOCKERHUB_PASSWORD }}
    defaults:
      run:
        shell: bash
        working-directory: /lerobot
    steps:
      - name: Tests
        env:
          DATA_DIR: tests/data
        run: pytest -v --cov=./lerobot --disable-warnings tests

      - name: Tests end-to-end
        env:
          DATA_DIR: tests/data
        run: make test-end-to-end

  run_all_tests_single_gpu:
    name: GPU
    strategy:
      fail-fast: false
    runs-on:
      group: aws-g6-4xlarge-plus
    env:
      CUDA_VISIBLE_DEVICES: "0"
      TEST_TYPE: "single_gpu"
    container:
      image: huggingface/lerobot-gpu:latest
      options: --gpus all --shm-size "16gb"
      credentials:
        username: ${{ secrets.DOCKERHUB_USERNAME }}
        password: ${{ secrets.DOCKERHUB_PASSWORD }}
    defaults:
      run:
        shell: bash
        working-directory: /lerobot
    steps:
      - name: Nvidia-smi
        run: nvidia-smi

      - name: Test
        run: pytest -v --cov=./lerobot --cov-report=xml --disable-warnings tests
        # TODO(aliberts): Link with HF Codecov account
        # - name: Upload coverage reports to Codecov with GitHub Action
        #   uses: codecov/codecov-action@v4
        #   with:
        #     files: ./coverage.xml
        #     verbose: true
      - name: Tests end-to-end
        env:
          DEVICE: cuda
        run: make test-end-to-end

      # - name: Generate Report
      #   if: always()
      #   run: |
      #     pip install slack_sdk tabulate
      #     python scripts/log_reports.py >> $GITHUB_STEP_SUMMARY
.github/workflows/quality.yml (vendored, deleted, 84 lines)
@@ -1,84 +0,0 @@
name: Quality

on:
  workflow_dispatch:
  workflow_call:
  pull_request:
    branches:
      - main
  push:
    branches:
      - main

env:
  PYTHON_VERSION: "3.10"

jobs:
  style:
    name: Style
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Get Ruff Version from pre-commit-config.yaml
        id: get-ruff-version
        run: |
          RUFF_VERSION=$(awk '/repo: https:\/\/github.com\/astral-sh\/ruff-pre-commit/{flag=1;next}/rev:/{if(flag){print $2;exit}}' .pre-commit-config.yaml)
          echo "RUFF_VERSION=${RUFF_VERSION}" >> $GITHUB_ENV

      - name: Install Ruff
        run: python -m pip install "ruff==${{ env.RUFF_VERSION }}"

      - name: Ruff check
        run: ruff check

      - name: Ruff format
        run: ruff format --diff

  poetry_check:
    name: Poetry check
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v3

      - name: Install poetry
        run: pipx install poetry

      - name: Poetry check
        run: poetry check

  poetry_relax:
    name: Poetry relax
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v3

      - name: Install poetry
        run: pipx install poetry

      - name: Install poetry-relax
        run: poetry self add poetry-relax

      - name: Poetry relax
        id: poetry_relax
        run: |
          output=$(poetry relax --check 2>&1)
          if echo "$output" | grep -q "Proposing updates"; then
            echo "$output"
            echo ""
            echo "Some dependencies have caret '^' version requirement added by poetry by default."
            echo "Please replace them with '>='. You can do this by hand or use poetry-relax to do this."
            exit 1
          else
            echo "$output"
          fi
.github/workflows/test-docker-build.yml (vendored, deleted, 65 lines)
@@ -1,65 +0,0 @@
# Inspired by
# https://github.com/huggingface/peft/blob/main/.github/workflows/test-docker-build.yml
name: Test Dockerfiles

on:
  pull_request:
    branches:
      - main
    paths:
      # Run only when DockerFile files are modified
      - "docker/**"

env:
  PYTHON_VERSION: "3.10"

jobs:
  get_changed_files:
    name: Detect modified Dockerfiles
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v44
        with:
          files: docker/**
          json: "true"

      - name: Run step if only the files listed above change
        if: steps.changed-files.outputs.any_changed == 'true'
        id: set-matrix
        env:
          ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
        run: |
          echo "matrix=${{ steps.changed-files.outputs.all_changed_files}}" >> $GITHUB_OUTPUT

  build_modified_dockerfiles:
    name: Build modified Docker images
    needs: get_changed_files
    runs-on:
      group: aws-general-8-plus
    if: ${{ needs.get_changed_files.outputs.matrix }} != ''
    strategy:
      fail-fast: false
      matrix:
        docker-file: ${{ fromJson(needs.get_changed_files.outputs.matrix) }}
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Check out code
        uses: actions/checkout@v4

      - name: Build Docker image
        uses: docker/build-push-action@v5
        with:
          file: ${{ matrix.docker-file }}
          context: .
          push: False
          build-args: PYTHON_VERSION=${{ env.PYTHON_VERSION }}
.github/workflows/test.yml (vendored, 220 changes)
@@ -1,142 +1,144 @@
name: Tests
name: Test

on:
pull_request:
branches:
- main
paths:
- "lerobot/**"
- "tests/**"
- "examples/**"
- ".github/**"
- "poetry.lock"
- "Makefile"
- ".cache/**"
types: [opened, synchronize, reopened, labeled]
push:
branches:
- main
paths:
- "lerobot/**"
- "tests/**"
- "examples/**"
- ".github/**"
- "poetry.lock"
- "Makefile"
- ".cache/**"

jobs:
pytest:
name: Pytest
test:
if: |
${{ github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'CI') }} ||
${{ github.event_name == 'push' }}
runs-on: ubuntu-latest
env:
POETRY_VERSION: 1.8.1
DATA_DIR: tests/data
TMPDIR: ~/tmp
TEMP: ~/tmp
TMP: ~/tmp
PYOPENGL_PLATFORM: egl
MUJOCO_GL: egl
LEROBOT_TESTS_DEVICE: cpu
steps:
- uses: actions/checkout@v4
#----------------------------------------------
# check-out repo and set-up python
#----------------------------------------------
- name: Check out repository
uses: actions/checkout@v4
with:
lfs: true # Ensure LFS files are pulled
lfs: true

- name: Install apt dependencies
# portaudio19-dev is needed to install pyaudio
run: |
sudo apt-get update && \
sudo apt-get install -y libegl1-mesa-dev ffmpeg portaudio19-dev

- name: Install poetry
run: |
pipx install poetry && poetry config virtualenvs.in-project true
echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH

# TODO(rcadene, aliberts): python 3.12 seems to be used in the tests, not python 3.10
- name: Set up Python 3.10
- name: Set up python
id: setup-python
uses: actions/setup-python@v5
with:
python-version: "3.10"
cache: "poetry"
python-version: '3.10'

- name: Install poetry dependencies
run: |
poetry install --all-extras

- name: Test with pytest
run: |
pytest tests -v --cov=./lerobot --durations=0 \
-W ignore::DeprecationWarning:imageio_ffmpeg._utils:7 \
-W ignore::UserWarning:torch.utils.data.dataloader:558 \
-W ignore::UserWarning:gymnasium.utils.env_checker:247 \
&& rm -rf tests/outputs outputs

pytest-minimal:
name: Pytest (minimal install)
runs-on: ubuntu-latest
env:
DATA_DIR: tests/data
MUJOCO_GL: egl
steps:
- uses: actions/checkout@v4
#----------------------------------------------
# install & configure poetry
#----------------------------------------------
- name: Load cached Poetry installation
id: restore-poetry-cache
uses: actions/cache/restore@v3
with:
lfs: true # Ensure LFS files are pulled
path: ~/.local
key: poetry-${{ env.POETRY_VERSION }}

- name: Install apt dependencies
run: sudo apt-get update && sudo apt-get install -y ffmpeg

- name: Install poetry
run: |
pipx install poetry && poetry config virtualenvs.in-project true
echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH

# TODO(rcadene, aliberts): python 3.12 seems to be used in the tests, not python 3.10
- name: Set up Python 3.10
uses: actions/setup-python@v5
- name: Install Poetry
if: steps.restore-poetry-cache.outputs.cache-hit != 'true'
uses: snok/install-poetry@v1
with:
python-version: "3.10"
version: ${{ env.POETRY_VERSION }}
virtualenvs-create: true
installer-parallel: true

- name: Install poetry dependencies
run: |
poetry install --extras "test"

- name: Test with pytest
run: |
pytest tests -v --cov=./lerobot --durations=0 \
-W ignore::DeprecationWarning:imageio_ffmpeg._utils:7 \
-W ignore::UserWarning:torch.utils.data.dataloader:558 \
-W ignore::UserWarning:gymnasium.utils.env_checker:247 \
&& rm -rf tests/outputs outputs

end-to-end:
name: End-to-end
runs-on: ubuntu-latest
env:
DATA_DIR: tests/data
MUJOCO_GL: egl
steps:
- uses: actions/checkout@v4
- name: Save cached Poetry installation
if: |
steps.restore-poetry-cache.outputs.cache-hit != 'true' &&
github.ref_name == 'main'
id: save-poetry-cache
uses: actions/cache/save@v3
with:
lfs: true # Ensure LFS files are pulled
path: ~/.local
key: poetry-${{ env.POETRY_VERSION }}

- name: Install apt dependencies
# portaudio19-dev is needed to install pyaudio
run: |
sudo apt-get update && \
sudo apt-get install -y libegl1-mesa-dev portaudio19-dev
- name: Configure Poetry
run: poetry config virtualenvs.in-project true

- name: Install poetry
run: |
pipx install poetry && poetry config virtualenvs.in-project true
echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH
#----------------------------------------------
# install dependencies
#----------------------------------------------
# TODO(aliberts): move to gpu runners
- name: Select cpu dependencies # HACK
run: cp -t . .github/poetry/cpu/pyproject.toml .github/poetry/cpu/poetry.lock

- name: Set up Python 3.10
uses: actions/setup-python@v5
- name: Load cached venv
id: restore-dependencies-cache
uses: actions/cache/restore@v3
with:
python-version: "3.10"
cache: "poetry"
path: .venv
key: venv-${{ steps.setup-python.outputs.python-version }}-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }}

- name: Install poetry dependencies
- name: Install dependencies
if: steps.restore-dependencies-cache.outputs.cache-hit != 'true'
run: |
poetry install --all-extras
mkdir ~/tmp
poetry install --no-interaction --no-root

- name: Test end-to-end
- name: Save cached venv
if: |
steps.restore-dependencies-cache.outputs.cache-hit != 'true' &&
github.ref_name == 'main'
id: save-dependencies-cache
uses: actions/cache/save@v3
with:
path: .venv
key: venv-${{ steps.setup-python.outputs.python-version }}-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }}

- name: Install libegl1-mesa-dev (to use MUJOCO_GL=egl)
run: sudo apt-get update && sudo apt-get install -y libegl1-mesa-dev

#----------------------------------------------
# install project
#----------------------------------------------
- name: Install project
run: poetry install --no-interaction

#----------------------------------------------
# run tests
#----------------------------------------------
- name: Run tests
run: |
make test-end-to-end \
&& rm -rf outputs
source .venv/bin/activate
pytest tests

- name: Test train pusht end-to-end
run: |
source .venv/bin/activate
python lerobot/scripts/train.py \
hydra.job.name=pusht \
env=pusht \
wandb.enable=False \
offline_steps=2 \
online_steps=0 \
device=cpu \
save_model=true \
save_freq=1 \
hydra.run.dir=tests/outputs/

- name: Test eval pusht end-to-end
run: |
source .venv/bin/activate
python lerobot/scripts/eval.py \
hydra.job.name=pusht \
env=pusht \
wandb.enable=False \
eval_episodes=1 \
env.episode_length=8 \
device=cpu \
policy.pretrained_model_path=tests/outputs/models/1.pt
.github/workflows/trufflehog.yml (vendored, deleted, 20 lines)
@@ -1,20 +0,0 @@
on:
  push:

name: Secret Leaks

permissions:
  contents: read

jobs:
  trufflehog:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Secret Scanning
        uses: trufflesecurity/trufflehog@main
        with:
          extra_args: --only-verified
.gitignore (vendored, 38 changes)
@@ -2,24 +2,15 @@
logs
tmp
wandb

# Data
data
outputs

# Apple
.DS_Store

# VS Code
.vscode
rl

# HPC
nautilus/*.yaml
*.key

# Slurm
sbatch*.sh

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

@@ -66,6 +57,7 @@ htmlcov/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover

@@ -73,11 +65,6 @@ coverage.xml
.hypothesis/
.pytest_cache/

# Ignore .cache except calibration
.cache/*
!.cache/calibration/
!.cache/calibration/**

# Translations
*.mo
*.pot

@@ -99,7 +86,6 @@ instance/
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook

@@ -112,6 +98,13 @@ ipython_config.py
# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

@@ -122,13 +115,6 @@ celerybeat.pid
# SageMath parsed files
*.sage.py

# Environments
.env
.venv
venv/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

@@ -146,9 +132,3 @@ dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/
@@ -1,9 +1,9 @@
exclude: ^(tests/data)
exclude: ^(data/|tests/)
default_language_version:
python: python3.10
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
rev: v4.5.0
hooks:
- id: check-added-large-files
- id: debug-statements

@@ -14,11 +14,11 @@ repos:
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/asottile/pyupgrade
rev: v3.16.0
rev: v3.15.1
hooks:
- id: pyupgrade
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.5.2
rev: v0.2.2
hooks:
- id: ruff
args: [--fix]

@@ -31,7 +31,3 @@ repos:
args:
- "--check"
- "--no-update"
- repo: https://github.com/gitleaks/gitleaks
rev: v8.18.4
hooks:
- id: gitleaks
@@ -1,133 +0,0 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official email address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at [feedback@huggingface.co](mailto:feedback@huggingface.co). All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of actions.

**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].

Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC].

For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations].

[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
CONTRIBUTING.md (deleted, 275 lines)
@@ -1,275 +0,0 @@
# How to contribute to 🤗 LeRobot?

Everyone is welcome to contribute, and we value everybody's contribution. Code is thus not the only way to help the community. Answering questions, helping others, reaching out and improving the documentations are immensely valuable to the community.

It also helps us if you spread the word: reference the library from blog posts on the awesome projects it made possible, shout out on Twitter when it has helped you, or simply ⭐️ the repo to say "thank you".

Whichever way you choose to contribute, please be mindful to respect our [code of conduct](https://github.com/huggingface/lerobot/blob/main/CODE_OF_CONDUCT.md).

## You can contribute in so many ways!

Some of the ways you can contribute to 🤗 LeRobot:
* Fixing outstanding issues with the existing code.
* Implementing new models, datasets or simulation environments.
* Contributing to the examples or to the documentation.
* Submitting issues related to bugs or desired new features.

Following the guides below, feel free to open issues and PRs and to coordinate your efforts with the community on our [Discord Channel](https://discord.gg/VjFz58wn3R). For specific inquiries, reach out to [Remi Cadene](mailto:remi.cadene@huggingface.co).

If you are not sure how to contribute or want to know the next features we working on, look on this project page: [LeRobot TODO](https://github.com/orgs/huggingface/projects/46)

## Submitting a new issue or feature request

Do your best to follow these guidelines when submitting an issue or a feature request. It will make it easier for us to come back to you quickly and with good feedback.

### Did you find a bug?

The 🤗 LeRobot library is robust and reliable thanks to the users who notify us of the problems they encounter. So thank you for reporting an issue.

First, we would really appreciate it if you could **make sure the bug was not already reported** (use the search bar on Github under Issues).

Did not find it? :( So we can act quickly on it, please follow these steps:

* Include your **OS type and version**, the versions of **Python** and **PyTorch**.
* A short, self-contained, code snippet that allows us to reproduce the bug in less than 30s.
* The full traceback if an exception is raised.
* Attach any other additional information, like screenshots, you think may help.

### Do you want a new feature?

A good feature request addresses the following points:

1. Motivation first:
   * Is it related to a problem/frustration with the library? If so, please explain why. Providing a code snippet that demonstrates the problem is best.
   * Is it related to something you would need for a project? We'd love to hear about it!
   * Is it something you worked on and think could benefit the community? Awesome! Tell us what problem it solved for you.
2. Write a *paragraph* describing the feature.
3. Provide a **code snippet** that demonstrates its future use.
4. In case this is related to a paper, please attach a link.
5. Attach any additional information (drawings, screenshots, etc.) you think may help.

If your issue is well written we're already 80% of the way there by the time you post it.

## Adding new policies, datasets or environments

Look at our implementations for [datasets](./lerobot/common/datasets/), [policies](./lerobot/common/policies/), environments ([aloha](https://github.com/huggingface/gym-aloha), [xarm](https://github.com/huggingface/gym-xarm), [pusht](https://github.com/huggingface/gym-pusht)) and follow the same api design.

When implementing a new dataset loadable with LeRobotDataset follow these steps:
- Update `available_datasets_per_env` in `lerobot/__init__.py`

When implementing a new environment (e.g. `gym_aloha`), follow these steps:
- Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py`

When implementing a new policy class (e.g. `DiffusionPolicy`) follow these steps:
- Update `available_policies` and `available_policies_per_env`, in `lerobot/__init__.py`
- Set the required `name` class attribute.
- Update variables in `tests/test_available.py` by importing your new Policy class

## Submitting a pull request (PR)

Before writing code, we strongly advise you to search through the existing PRs or issues to make sure that nobody is already working on the same thing. If you are unsure, it is always a good idea to open an issue to get some feedback.

You will need basic `git` proficiency to be able to contribute to 🤗 LeRobot. `git` is not the easiest tool to use but it has the greatest manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro Git](https://git-scm.com/book/en/v2) is a very good reference.

Follow these steps to start contributing:

1. Fork the [repository](https://github.com/huggingface/lerobot) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account.

2. Clone your fork to your local disk, and add the base repository as a remote. The following command assumes you have your public SSH key uploaded to GitHub. See the following guide for more [information](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository).

   ```bash
   git clone git@github.com:<your Github handle>/lerobot.git
   cd lerobot
   git remote add upstream https://github.com/huggingface/lerobot.git
   ```

3. Create a new branch to hold your development changes, and do this for every new PR you work on.

   Start by synchronizing your `main` branch with the `upstream/main` branch (more details in the [GitHub Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork)):

   ```bash
   git checkout main
   git fetch upstream
   git rebase upstream/main
   ```

   Once your `main` branch is synchronized, create a new branch from it:

   ```bash
   git checkout -b a-descriptive-name-for-my-changes
   ```

   🚨 **Do not** work on the `main` branch.

4. for development, we use `poetry` instead of just `pip` to easily track our dependencies. If you don't have it already, follow the [instructions](https://python-poetry.org/docs/#installation) to install it.

   Set up a development environment with conda or miniconda:
   ```bash
   conda create -y -n lerobot-dev python=3.10 && conda activate lerobot-dev
   ```

   To develop on 🤗 LeRobot, you will at least need to install the `dev` and `test` extras dependencies along with the core library:
   ```bash
   poetry install --sync --extras "dev test"
   ```

   You can also install the project with all its dependencies (including environments):
   ```bash
   poetry install --sync --all-extras
   ```

   > **Note:** If you don't install simulation environments with `--all-extras`, the tests that require them will be skipped when running the pytest suite locally. However, they *will* be tested in the CI. In general, we advise you to install everything and test locally before pushing.

   Whichever command you chose to install the project (e.g. `poetry install --sync --all-extras`), you should run it again when pulling code with an updated version of `pyproject.toml` and `poetry.lock` in order to synchronize your virtual environment with the new dependencies.

   The equivalent of `pip install some-package`, would just be:
   ```bash
   poetry add some-package
   ```

   When making changes to the poetry sections of the `pyproject.toml`, you should run the following command to lock dependencies.
   ```bash
   poetry lock --no-update
   ```

5. Develop the features on your branch.

   As you work on the features, you should make sure that the test suite passes. You should run the tests impacted by your changes like this (see below an explanation regarding the environment variable):

   ```bash
   pytest tests/<TEST_TO_RUN>.py
   ```

6. Follow our style.

   `lerobot` relies on `ruff` to format its source code consistently. Set up [`pre-commit`](https://pre-commit.com/) to run these checks automatically as Git commit hooks.

   Install `pre-commit` hooks:
   ```bash
   pre-commit install
   ```

   You can run these hooks whenever you need on staged files with:
   ```bash
   pre-commit
   ```

   Once you're happy with your changes, add changed files using `git add` and make a commit with `git commit` to record your changes locally:

   ```bash
   git add modified_file.py
   git commit
   ```

   Note, if you already commited some changes that have a wrong formatting, you can use:
   ```bash
   pre-commit run --all-files
   ```

   Please write [good commit messages](https://chris.beams.io/posts/git-commit/).

   It is a good idea to sync your copy of the code with the original repository regularly. This way you can quickly account for changes:

   ```bash
   git fetch upstream
   git rebase upstream/main
   ```

   Push the changes to your account using:

   ```bash
   git push -u origin a-descriptive-name-for-my-changes
   ```

6. Once you are satisfied (**and the checklist below is happy too**), go to the webpage of your fork on GitHub. Click on 'Pull request' to send your changes to the project maintainers for review.

7. It's ok if maintainers ask you for changes. It happens to core contributors too! So everyone can see the changes in the Pull request, work in your local branch and push the changes to your fork. They will automatically appear in the pull request.


### Checklist

1. The title of your pull request should be a summary of its contribution;
2. If your pull request addresses an issue, please mention the issue number in the pull request description to make sure they are linked (and people consulting the issue know you are working on it);
3. To indicate a work in progress please prefix the title with `[WIP]`, or preferably mark the PR as a draft PR. These are useful to avoid duplicated work, and to differentiate it from PRs ready to be merged;
4. Make sure existing tests pass;
<!-- 5. Add high-coverage tests. No quality testing = no merge.

See an example of a good PR here: https://github.com/huggingface/lerobot/pull/ -->

### Tests

An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/lerobot/tree/main/tests).

Install [git lfs](https://git-lfs.com/) to retrieve test artifacts (if you don't have it already).

On Mac:
```bash
brew install git-lfs
git lfs install
```

On Ubuntu:
```bash
sudo apt-get install git-lfs
git lfs install
```

Pull artifacts if they're not in [tests/data](tests/data)
```bash
git lfs pull
```

We use `pytest` in order to run the tests. From the root of the repository, here's how to run tests with `pytest` for the library:

```bash
DATA_DIR="tests/data" python -m pytest -sv ./tests
```

You can specify a smaller set of tests in order to test only the feature you're working on.
LICENSE (229 changes)
@@ -253,31 +253,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


## Some of lerobot's code is derived from simxarm, which is subject to the following copyright notice:

MIT License

Copyright (c) 2023 Nicklas Hansen & Yanjie Ze

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


## Some of lerobot's code is derived from ALOHA, which is subject to the following copyright notice:

MIT License

@@ -301,207 +276,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

## Some of lerobot's code is derived from DETR, which is subject to the following copyright notice:

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2020 - present, Facebook, Inc
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
190
Makefile
@@ -1,190 +0,0 @@
|
||||
.PHONY: tests
|
||||
|
||||
PYTHON_PATH := $(shell which python)
|
||||
|
||||
# If Poetry is installed, redefine PYTHON_PATH to use the Poetry-managed Python
|
||||
POETRY_CHECK := $(shell command -v poetry)
|
||||
ifneq ($(POETRY_CHECK),)
|
||||
PYTHON_PATH := $(shell poetry run which python)
|
||||
endif
|
||||
|
||||
export PATH := $(dir $(PYTHON_PATH)):$(PATH)
|
||||
|
||||
DEVICE ?= cpu
|
||||
|
||||
build-cpu:
|
||||
docker build -t lerobot:latest -f docker/lerobot-cpu/Dockerfile .
|
||||
|
||||
build-gpu:
|
||||
docker build -t lerobot:latest -f docker/lerobot-gpu/Dockerfile .
|
||||
|
||||
test-end-to-end:
|
||||
${MAKE} DEVICE=$(DEVICE) test-act-ete-train
|
||||
${MAKE} DEVICE=$(DEVICE) test-act-ete-eval
|
||||
${MAKE} DEVICE=$(DEVICE) test-act-ete-train-amp
|
||||
${MAKE} DEVICE=$(DEVICE) test-act-ete-eval-amp
|
||||
${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-train
|
||||
${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-eval
|
||||
${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train
|
||||
${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train-with-online
|
||||
${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-eval
|
||||
${MAKE} DEVICE=$(DEVICE) test-default-ete-eval
|
||||
${MAKE} DEVICE=$(DEVICE) test-act-pusht-tutorial
|
||||
|
||||
test-act-ete-train:
|
||||
python lerobot/scripts/train.py \
|
||||
policy=act \
|
||||
policy.dim_model=64 \
|
||||
env=aloha \
|
||||
wandb.enable=False \
|
||||
training.offline_steps=2 \
|
||||
training.online_steps=0 \
|
||||
eval.n_episodes=1 \
|
||||
eval.batch_size=1 \
|
||||
device=$(DEVICE) \
|
||||
training.save_checkpoint=true \
|
||||
training.save_freq=2 \
|
||||
policy.n_action_steps=20 \
|
||||
policy.chunk_size=20 \
|
||||
training.batch_size=2 \
|
||||
training.image_transforms.enable=true \
|
||||
hydra.run.dir=tests/outputs/act/
|
||||
|
||||
test-act-ete-eval:
|
||||
python lerobot/scripts/eval.py \
|
||||
-p tests/outputs/act/checkpoints/000002/pretrained_model \
|
||||
eval.n_episodes=1 \
|
||||
eval.batch_size=1 \
|
||||
env.episode_length=8 \
|
||||
device=$(DEVICE) \
|
||||
|
||||
test-act-ete-train-amp:
|
||||
python lerobot/scripts/train.py \
|
||||
policy=act \
|
||||
policy.dim_model=64 \
|
||||
env=aloha \
|
||||
wandb.enable=False \
|
||||
training.offline_steps=2 \
|
||||
training.online_steps=0 \
|
||||
eval.n_episodes=1 \
|
||||
eval.batch_size=1 \
|
||||
device=$(DEVICE) \
|
||||
training.save_checkpoint=true \
|
||||
training.save_freq=2 \
|
||||
policy.n_action_steps=20 \
|
||||
policy.chunk_size=20 \
|
||||
training.batch_size=2 \
|
||||
hydra.run.dir=tests/outputs/act_amp/ \
|
||||
training.image_transforms.enable=true \
|
||||
use_amp=true
|
||||
|
||||
test-act-ete-eval-amp:
|
||||
python lerobot/scripts/eval.py \
|
||||
-p tests/outputs/act_amp/checkpoints/000002/pretrained_model \
|
||||
eval.n_episodes=1 \
|
||||
eval.batch_size=1 \
|
||||
env.episode_length=8 \
|
||||
device=$(DEVICE) \
|
||||
use_amp=true
|
||||
|
||||
test-diffusion-ete-train:
|
||||
python lerobot/scripts/train.py \
|
||||
policy=diffusion \
|
||||
policy.down_dims=\[64,128,256\] \
|
||||
policy.diffusion_step_embed_dim=32 \
|
||||
policy.num_inference_steps=10 \
|
||||
env=pusht \
|
||||
wandb.enable=False \
|
||||
training.offline_steps=2 \
|
||||
training.online_steps=0 \
|
||||
eval.n_episodes=1 \
|
||||
eval.batch_size=1 \
|
||||
device=$(DEVICE) \
|
||||
training.save_checkpoint=true \
|
||||
training.save_freq=2 \
|
||||
training.batch_size=2 \
|
||||
training.image_transforms.enable=true \
|
||||
hydra.run.dir=tests/outputs/diffusion/
|
||||
|
||||
test-diffusion-ete-eval:
|
||||
python lerobot/scripts/eval.py \
|
||||
-p tests/outputs/diffusion/checkpoints/000002/pretrained_model \
|
||||
eval.n_episodes=1 \
|
||||
eval.batch_size=1 \
|
||||
env.episode_length=8 \
|
||||
device=$(DEVICE) \
|
||||
|
||||
test-tdmpc-ete-train:
|
||||
python lerobot/scripts/train.py \
|
||||
policy=tdmpc \
|
||||
env=xarm \
|
||||
env.task=XarmLift-v0 \
|
||||
dataset_repo_id=lerobot/xarm_lift_medium \
|
||||
wandb.enable=False \
|
||||
training.offline_steps=2 \
|
||||
training.online_steps=0 \
|
||||
eval.n_episodes=1 \
|
||||
eval.batch_size=1 \
|
||||
env.episode_length=2 \
|
||||
device=$(DEVICE) \
|
||||
training.save_checkpoint=true \
|
||||
training.save_freq=2 \
|
||||
training.batch_size=2 \
|
||||
training.image_transforms.enable=true \
|
||||
hydra.run.dir=tests/outputs/tdmpc/
|
||||
|
||||
test-tdmpc-ete-train-with-online:
|
||||
python lerobot/scripts/train.py \
|
||||
env=pusht \
|
||||
env.gym.obs_type=environment_state_agent_pos \
|
||||
policy=tdmpc_pusht_keypoints \
|
||||
eval.n_episodes=1 \
|
||||
eval.batch_size=1 \
|
||||
env.episode_length=10 \
|
||||
device=$(DEVICE) \
|
||||
training.offline_steps=2 \
|
||||
training.online_steps=20 \
|
||||
training.save_checkpoint=false \
|
||||
training.save_freq=10 \
|
||||
training.batch_size=2 \
|
||||
training.online_rollout_n_episodes=2 \
|
||||
training.online_rollout_batch_size=2 \
|
||||
training.online_steps_between_rollouts=10 \
|
||||
training.online_buffer_capacity=15 \
|
||||
eval.use_async_envs=true \
|
||||
hydra.run.dir=tests/outputs/tdmpc_online/
|
||||
|
||||
|
||||
test-tdmpc-ete-eval:
|
||||
python lerobot/scripts/eval.py \
|
||||
-p tests/outputs/tdmpc/checkpoints/000002/pretrained_model \
|
||||
eval.n_episodes=1 \
|
||||
eval.batch_size=1 \
|
||||
env.episode_length=8 \
|
||||
device=$(DEVICE) \
|
||||
|
||||
test-default-ete-eval:
|
||||
python lerobot/scripts/eval.py \
|
||||
--config lerobot/configs/default.yaml \
|
||||
eval.n_episodes=1 \
|
||||
eval.batch_size=1 \
|
||||
env.episode_length=8 \
|
||||
device=$(DEVICE) \
|
||||
|
||||
test-act-pusht-tutorial:
|
||||
cp examples/advanced/1_train_act_pusht/act_pusht.yaml lerobot/configs/policy/created_by_Makefile.yaml
|
||||
python lerobot/scripts/train.py \
|
||||
policy=created_by_Makefile.yaml \
|
||||
env=pusht \
|
||||
wandb.enable=False \
|
||||
training.offline_steps=2 \
|
||||
eval.n_episodes=1 \
|
||||
eval.batch_size=1 \
|
||||
env.episode_length=2 \
|
||||
device=$(DEVICE) \
|
||||
training.save_model=true \
|
||||
training.save_freq=2 \
|
||||
training.batch_size=2 \
|
||||
training.image_transforms.enable=true \
|
||||
hydra.run.dir=tests/outputs/act_pusht/
|
||||
rm lerobot/configs/policy/created_by_Makefile.yaml
|
||||
466
README.md
@@ -1,348 +1,72 @@
|
||||
<p align="center">
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: dark)" srcset="media/lerobot-logo-thumbnail.png">
|
||||
<source media="(prefers-color-scheme: light)" srcset="media/lerobot-logo-thumbnail.png">
|
||||
<img alt="LeRobot, Hugging Face Robotics Library" src="media/lerobot-logo-thumbnail.png" style="max-width: 100%;">
|
||||
</picture>
|
||||
<br/>
|
||||
<br/>
|
||||
</p>
|
||||
|
||||
<div align="center">
|
||||
|
||||
[](https://github.com/huggingface/lerobot/actions/workflows/nightly-tests.yml?query=branch%3Amain)
|
||||
[](https://codecov.io/gh/huggingface/lerobot)
|
||||
[](https://www.python.org/downloads/)
|
||||
[](https://github.com/huggingface/lerobot/blob/main/LICENSE)
|
||||
[](https://pypi.org/project/lerobot/)
|
||||
[](https://pypi.org/project/lerobot/)
|
||||
[](https://github.com/huggingface/lerobot/tree/main/examples)
|
||||
[](https://github.com/huggingface/lerobot/blob/main/CODE_OF_CONDUCT.md)
|
||||
[](https://discord.gg/s3KuuzsPFb)
|
||||
|
||||
</div>
|
||||
|
||||
<h2 align="center">
|
||||
<p><a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">New robot in town: SO-100</a></p>
|
||||
</h2>
|
||||
|
||||
<div align="center">
|
||||
<img src="media/so100/leader_follower.webp?raw=true" alt="SO-100 leader and follower arms" title="SO-100 leader and follower arms" width="50%">
|
||||
<p>We just added a new tutorial on how to build a more affordable robot, at the price of $110 per arm!</p>
|
||||
<p>Teach it new skills by showing it a few moves with just a laptop.</p>
|
||||
<p>Then watch your homemade robot act autonomously 🤯</p>
|
||||
<p>Follow the link to the <a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">full tutorial for SO-100</a>.</p>
|
||||
</div>
|
||||
|
||||
<br/>
|
||||
|
||||
<h3 align="center">
|
||||
<p>LeRobot: State-of-the-art AI for real-world robotics</p>
|
||||
</h3>
|
||||
|
||||
---
|
||||
|
||||
|
||||
🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier to entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models.
|
||||
|
||||
🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning.
|
||||
|
||||
🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulation environments to get started without assembling a robot. In the coming weeks, the plan is to add more and more support for real-world robotics on the most affordable and capable robots out there.
|
||||
|
||||
🤗 LeRobot hosts pretrained models and datasets on this Hugging Face community page: [huggingface.co/lerobot](https://huggingface.co/lerobot)
|
||||
|
||||
#### Examples of pretrained models on simulation environments
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td><img src="http://remicadene.com/assets/gif/aloha_act.gif" width="100%" alt="ACT policy on ALOHA env"/></td>
|
||||
<td><img src="http://remicadene.com/assets/gif/simxarm_tdmpc.gif" width="100%" alt="TDMPC policy on SimXArm env"/></td>
|
||||
<td><img src="http://remicadene.com/assets/gif/pusht_diffusion.gif" width="100%" alt="Diffusion policy on PushT env"/></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">ACT policy on ALOHA env</td>
|
||||
<td align="center">TDMPC policy on SimXArm env</td>
|
||||
<td align="center">Diffusion policy on PushT env</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
### Acknowledgment
|
||||
|
||||
- Thanks to Tony Zhao, Zipeng Fu and colleagues for open sourcing ACT policy, ALOHA environments and datasets. Ours are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha) and [Mobile ALOHA](https://mobile-aloha.github.io).
|
||||
- Thanks to Cheng Chi, Zhenjia Xu and colleagues for open sourcing Diffusion policy, Pusht environment and datasets, as well as UMI datasets. Ours are adapted from [Diffusion Policy](https://diffusion-policy.cs.columbia.edu) and [UMI Gripper](https://umi-gripper.github.io).
|
||||
- Thanks to Nicklas Hansen, Yunhai Feng and colleagues for open sourcing TDMPC policy, Simxarm environments and datasets. Ours are adapted from [TDMPC](https://github.com/nicklashansen/tdmpc) and [FOWM](https://www.yunhaifeng.com/FOWM).
|
||||
- Thanks to Antonio Loquercio and Ashish Kumar for their early support.
|
||||
- Thanks to [Seungjae (Jay) Lee](https://sjlee.cc/), [Mahi Shafiullah](https://mahis.life/) and colleagues for open sourcing [VQ-BeT](https://sjlee.cc/vq-bet/) policy and helping us adapt the codebase to our repository. The policy is adapted from [VQ-BeT repo](https://github.com/jayLEE0301/vq_bet_official).
|
||||
|
||||
# LeRobot
|
||||
|
||||
## Installation
|
||||
|
||||
Download our source code:
|
||||
```bash
git clone https://github.com/huggingface/lerobot.git
cd lerobot
```
|
||||
|
||||
Create a virtual environment with Python 3.10 and activate it, e.g. with [`miniconda`](https://docs.anaconda.com/free/miniconda/index.html):
|
||||
```bash
|
||||
conda create -y -n lerobot python=3.10
|
||||
conda activate lerobot
|
||||
```
|
||||
|
||||
Install 🤗 LeRobot:
|
||||
```bash
pip install -e .
```

Alternatively, [install `poetry`](https://python-poetry.org/docs/#installation) (if you don't have it already):
```bash
curl -sSL https://install.python-poetry.org | python -
```
|
||||
|
||||
> **NOTE:** Depending on your platform, if you encounter any build errors during this step,
you may need to install `cmake` and `build-essential` to build some of our dependencies.
On Linux: `sudo apt-get install cmake build-essential`
|
||||
|
||||
For simulations, 🤗 LeRobot comes with gymnasium environments that can be installed as extras:
|
||||
- [aloha](https://github.com/huggingface/gym-aloha)
|
||||
- [xarm](https://github.com/huggingface/gym-xarm)
|
||||
- [pusht](https://github.com/huggingface/gym-pusht)
|
||||
|
||||
For instance, to install 🤗 LeRobot with aloha and pusht, use:
|
||||
```bash
pip install -e ".[aloha, pusht]"
```

If you are using `poetry`, install the dependencies with:
```bash
poetry install
```
|
||||
|
||||
If you encounter a disk space error, try changing your tmp dir to a location where you have enough disk space, e.g.
```bash
mkdir ~/tmp
export TMPDIR='~/tmp'
```

To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with
```bash
wandb login
```
|
||||
|
||||
(note: you will also need to enable WandB in the configuration. See below.)
|
||||
## Usage
|
||||
|
||||
## Walkthrough
|
||||
|
||||
### Train
|
||||
|
||||
```
|
||||
.
|
||||
├── examples # contains demonstration examples, start here to learn about LeRobot
|
||||
| └── advanced # contains even more examples for those who have mastered the basics
|
||||
├── lerobot
|
||||
| ├── configs # contains hydra yaml files with all options that you can override in the command line
|
||||
| | ├── default.yaml # selected by default, it loads pusht environment and diffusion policy
|
||||
| | ├── env # various sim environments and their datasets: aloha.yaml, pusht.yaml, xarm.yaml
|
||||
| | └── policy # various policies: act.yaml, diffusion.yaml, tdmpc.yaml
|
||||
| ├── common # contains classes and utilities
|
||||
| | ├── datasets # various datasets of human demonstrations: aloha, pusht, xarm
|
||||
| | ├── envs # various sim environments: aloha, pusht, xarm
|
||||
| | ├── policies # various policies: act, diffusion, tdmpc
|
||||
| | ├── robot_devices # various real devices: dynamixel motors, opencv cameras, koch robots
|
||||
| | └── utils # various utilities
|
||||
| └── scripts # contains functions to execute via command line
|
||||
| ├── eval.py # load policy and evaluate it on an environment
|
||||
| ├── train.py # train a policy via imitation learning and/or reinforcement learning
|
||||
| ├── control_robot.py # teleoperate a real robot, record data, run a policy
|
||||
| ├── push_dataset_to_hub.py # convert your dataset into LeRobot dataset format and upload it to the Hugging Face hub
|
||||
| └── visualize_dataset.py # load a dataset and render its demonstrations
|
||||
├── outputs # contains results of scripts execution: logs, videos, model checkpoints
|
||||
└── tests # contains pytest utilities for continuous integration
|
||||
```
|
||||
|
||||
### Visualize datasets
|
||||
|
||||
Check out [example 1](./examples/1_load_lerobot_dataset.py) that illustrates how to use our dataset class which automatically downloads data from the Hugging Face hub.
|
||||
|
||||
You can also locally visualize episodes from a dataset on the hub by executing our script from the command line:
|
||||
```bash
|
||||
python lerobot/scripts/visualize_dataset.py \
|
||||
--repo-id lerobot/pusht \
|
||||
--episode-index 0
|
||||
```
|
||||
|
||||
or from a dataset in a local folder with the root `DATA_DIR` environment variable (in the following case the dataset will be searched for in `./my_local_data_dir/lerobot/pusht`)
|
||||
```bash
|
||||
DATA_DIR='./my_local_data_dir' python lerobot/scripts/visualize_dataset.py \
|
||||
--repo-id lerobot/pusht \
|
||||
--episode-index 0
|
||||
```
|
||||
|
||||
|
||||
It will open `rerun.io` and display the camera streams, robot states and actions, like this:
|
||||
|
||||
https://github-production-user-asset-6210df.s3.amazonaws.com/4681518/328035972-fd46b787-b532-47e2-bb6f-fd536a55a7ed.mov?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVCODYLSA53PQK4ZA%2F20240505%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240505T172924Z&X-Amz-Expires=300&X-Amz-Signature=d680b26c532eeaf80740f08af3320d22ad0b8a4e4da1bcc4f33142c15b509eda&X-Amz-SignedHeaders=host&actor_id=24889239&key_id=0&repo_id=748713144
|
||||
|
||||
|
||||
Our script can also visualize datasets stored on a remote server. See `python lerobot/scripts/visualize_dataset.py --help` for more instructions.
|
||||
|
||||
### The `LeRobotDataset` format
|
||||
|
||||
A dataset in `LeRobotDataset` format is very simple to use. It can be loaded from a repository on the Hugging Face hub or a local folder simply with e.g. `dataset = LeRobotDataset("lerobot/aloha_static_coffee")` and can be indexed into like any Hugging Face and PyTorch dataset. For instance `dataset[0]` will retrieve a single temporal frame from the dataset containing observation(s) and an action as PyTorch tensors ready to be fed to a model.
|
||||
|
||||
A specificity of `LeRobotDataset` is that, rather than retrieving a single frame by its index, we can retrieve several frames based on their temporal relationship with the indexed frame, by setting `delta_timestamps` to a list of relative times with respect to the indexed frame. For example, with `delta_timestamps = {"observation.image": [-1, -0.5, -0.2, 0]}` one can retrieve, for a given index, 4 frames: 3 "previous" frames 1 second, 0.5 seconds, and 0.2 seconds before the indexed frame, and the indexed frame itself (corresponding to the 0 entry). See example [1_load_lerobot_dataset.py](examples/1_load_lerobot_dataset.py) for more details on `delta_timestamps`.
|
||||
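For illustration, here is a minimal sketch of that pattern on the `lerobot/pusht` dataset (treat the exact feature keys and import path as dataset- and version-dependent):

```python
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

# Request 3 "previous" frames plus the indexed frame for the camera stream.
delta_timestamps = {"observation.image": [-1, -0.5, -0.2, 0]}
dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps)

item = dataset[0]
# One entry per requested timestamp is stacked along a new leading dimension.
print(item["observation.image"].shape)  # e.g. (4, c, h, w)
```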
|
||||
Under the hood, the `LeRobotDataset` format makes use of several ways to serialize data which can be useful to understand if you plan to work more closely with this format. We tried to make a flexible yet simple dataset format that would cover most types of features and specificities present in reinforcement learning and robotics, in simulation and in the real world, with a focus on cameras and robot states but easily extended to other types of sensory inputs as long as they can be represented by a tensor.
|
||||
|
||||
Here are the important details and internal structure organization of a typical `LeRobotDataset` instantiated with `dataset = LeRobotDataset("lerobot/aloha_static_coffee")`. The exact features will change from dataset to dataset but not the main aspects:
|
||||
|
||||
```
|
||||
dataset attributes:
|
||||
├ hf_dataset: a Hugging Face dataset (backed by Arrow/parquet). Typical features example:
|
||||
│ ├ observation.images.cam_high (VideoFrame):
|
||||
│ │ VideoFrame = {'path': path to a mp4 video, 'timestamp' (float32): timestamp in the video}
|
||||
│ ├ observation.state (list of float32): position of the arm joints (for instance)
|
||||
│ ... (more observations)
|
||||
│ ├ action (list of float32): goal position of the arm joints (for instance)
|
||||
│ ├ episode_index (int64): index of the episode for this sample
|
||||
│ ├ frame_index (int64): index of the frame for this sample in the episode ; starts at 0 for each episode
|
||||
│ ├ timestamp (float32): timestamp in the episode
|
||||
│ ├ next.done (bool): indicates the end of an episode ; True for the last frame in each episode
|
||||
│ └ index (int64): general index in the whole dataset
|
||||
├ episode_data_index: contains 2 tensors with the start and end indices of each episode
|
||||
│ ├ from (1D int64 tensor): first frame index for each episode — shape (num episodes,) starts with 0
|
||||
│ └ to: (1D int64 tensor): last frame index for each episode — shape (num episodes,)
|
||||
├ stats: a dictionary of statistics (max, mean, min, std) for each feature in the dataset, for instance
|
||||
│ ├ observation.images.cam_high: {'max': tensor with same number of dimensions (e.g. `(c, 1, 1)` for images, `(c,)` for states), etc.}
|
||||
│ ...
|
||||
├ info: a dictionary of metadata on the dataset
|
||||
│ ├ codebase_version (str): this is to keep track of the codebase version the dataset was created with
|
||||
│ ├ fps (float): frame per second the dataset is recorded/synchronized to
|
||||
│ ├ video (bool): indicates if frames are encoded in mp4 video files to save space or stored as png files
|
||||
│ └ encoding (dict): if video, this documents the main options that were used with ffmpeg to encode the videos
|
||||
├ videos_dir (Path): where the mp4 videos or png images are stored/accessed
|
||||
└ camera_keys (list of string): the keys to access camera features in the item returned by the dataset (e.g. `["observation.images.cam_high", ...]`)
|
||||
```
|
||||
|
||||
A `LeRobotDataset` is serialised using several widespread file formats for each of its parts, namely:
|
||||
- hf_dataset stored using Hugging Face datasets library serialization to parquet
|
||||
- videos are stored in mp4 format to save space, or as png files
|
||||
- episode_data_index saved using `safetensor` tensor serialization format
|
||||
- stats saved using `safetensor` tensor serialization format
|
||||
- info is saved using JSON
|
||||
|
||||
Datasets can be uploaded to and downloaded from the Hugging Face hub seamlessly. To work on a local dataset, you can set the `DATA_DIR` environment variable to your root dataset folder as illustrated in the above section on dataset visualization.
|
||||
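For instance, the same mechanism should work for training, assuming the local layout mirrors `{DATA_DIR}/{repo_id}`:

```bash
# Train on a dataset stored under ./my_local_data_dir/lerobot/pusht
DATA_DIR='./my_local_data_dir' python lerobot/scripts/train.py \
    policy=diffusion \
    env=pusht \
    dataset_repo_id=lerobot/pusht
```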
|
||||
### Evaluate a pretrained policy
|
||||
|
||||
Check out [example 2](./examples/2_evaluate_pretrained_policy.py) that illustrates how to download a pretrained policy from Hugging Face hub, and run an evaluation on its corresponding environment.
|
||||
|
||||
We also provide a more capable script to parallelize the evaluation over multiple environments during the same rollout. Here is an example with a pretrained model hosted on [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht):
|
||||
```bash
|
||||
python lerobot/scripts/eval.py \
|
||||
-p lerobot/diffusion_pusht \
|
||||
eval.n_episodes=10 \
|
||||
eval.batch_size=10
|
||||
```
|
||||
|
||||
Note: After training your own policy, you can re-evaluate the checkpoints with:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/eval.py -p {OUTPUT_DIR}/checkpoints/last/pretrained_model
|
||||
```
|
||||
|
||||
See `python lerobot/scripts/eval.py --help` for more instructions.
|
||||
|
||||
### Train your own policy
|
||||
|
||||
Check out [example 3](./examples/3_train_policy.py) that illustrates how to train a model using our core library in python, and [example 4](./examples/4_train_policy_with_script.md) that shows how to use our training script from command line.
|
||||
|
||||
In general, you can use our training script to easily train any policy. Here is an example of training the ACT policy on trajectories collected by humans on the Aloha simulation environment for the insertion task:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
policy=act \
|
||||
env=aloha \
|
||||
env.task=AlohaInsertion-v0 \
|
||||
dataset_repo_id=lerobot/aloha_sim_insertion_human
|
||||
```
|
||||
|
||||
The experiment directory is automatically generated and will show up in yellow in your terminal. It looks like `outputs/train/2024-05-05/20-21-12_aloha_act_default`. You can manually specify an experiment directory by adding this argument to the `train.py` python command:
|
||||
```bash
hydra.run.dir=your/new/experiment/dir
```

### Visualize offline buffer

```bash
python lerobot/scripts/visualize_dataset.py \
    hydra.run.dir=tmp/$(date +"%Y_%m_%d") \
    env=pusht
```
|
||||
|
||||
In the experiment directory there will be a folder called `checkpoints` which will have the following structure:

```bash
checkpoints
├── 000250  # checkpoint_dir for training step 250
│   ├── pretrained_model  # Hugging Face pretrained model dir
│   │   ├── config.json  # Hugging Face pretrained model config
│   │   ├── config.yaml  # consolidated Hydra config
│   │   ├── model.safetensors  # model weights
│   │   └── README.md  # Hugging Face model card
│   └── training_state.pth  # optimizer/scheduler/rng state and training step
```

### Visualize online buffer / Eval

```bash
python lerobot/scripts/eval.py \
    hydra.run.dir=tmp/$(date +"%Y_%m_%d") \
    env=pusht
```
|
||||
|
||||
To resume training from a checkpoint, you can add these to the `train.py` python command:
|
||||
```bash
|
||||
hydra.run.dir=your/original/experiment/dir resume=true
|
||||
```
|
||||
|
||||
It will load the pretrained model, optimizer and scheduler states for training. For more information please see our tutorial on training resumption [here](https://github.com/huggingface/lerobot/blob/main/examples/5_resume_training.md).
|
||||
If you are not sure how to contribute or want to know which features we are working on next, have a look at this project page: [LeRobot TODO](https://github.com/users/Cadene/projects/1).

To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding:

```bash
wandb.enable=true
```
|
||||
|
||||
A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](https://github.com/huggingface/lerobot/blob/main/examples/4_train_policy_with_script.md#typical-logs-and-metrics) for an explanation of some commonly used metrics in logs.
|
||||
|
||||

|
||||
|
||||
Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `python lerobot/scripts/eval.py --help` for more instructions.
|
||||
|
||||
#### Reproduce state-of-the-art (SOTA)
|
||||
|
||||
We have organized our configuration files (found under [`lerobot/configs`](./lerobot/configs)) such that they reproduce SOTA results from a given model variant in their respective original works. Simply running:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py policy=diffusion env=pusht
|
||||
```
|
||||
|
||||
reproduces SOTA results for Diffusion Policy on the PushT task.
|
||||
|
||||
Pretrained policies, along with reproduction details, can be found under the "Models" section of https://huggingface.co/lerobot.
|
||||
|
||||
## Contribute
|
||||
|
||||
If you would like to contribute to 🤗 LeRobot, please check out our [contribution guide](https://github.com/huggingface/lerobot/blob/main/CONTRIBUTING.md).
|
||||
|
||||
### Add a new dataset
|
||||
|
||||
To add a dataset to the hub, you need to login using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
|
||||
```bash
|
||||
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
|
||||
```
|
||||
|
||||
Then point to your raw dataset folder (e.g. `data/aloha_static_pingpong_test_raw`), and push your dataset to the hub with:
|
||||
```bash
|
||||
python lerobot/scripts/push_dataset_to_hub.py \
|
||||
--raw-dir data/aloha_static_pingpong_test_raw \
|
||||
--out-dir data \
|
||||
--repo-id lerobot/aloha_static_pingpong_test \
|
||||
--raw-format aloha_hdf5
|
||||
```
|
||||
|
||||
See `python lerobot/scripts/push_dataset_to_hub.py --help` for more instructions.
|
||||
|
||||
If your dataset format is not supported, implement your own in `lerobot/common/datasets/push_dataset_to_hub/${raw_format}_format.py` by copying examples like [pusht_zarr](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py), [umi_zarr](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py), [aloha_hdf5](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py), or [xarm_pkl](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py).
|
||||
Ask [Remi Cadene](mailto:re.cadene@gmail.com) for access if needed.
|
||||
|
||||
|
||||
### Add a pretrained policy
|
||||
|
||||
|
||||
Once you have trained a policy you may upload it to the Hugging Face hub using a hub id that looks like `${hf_user}/${repo_name}` (e.g. [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht)).
|
||||
|
||||
You first need to find the checkpoint folder located inside your experiment directory (e.g. `outputs/train/2024-05-05/20-21-12_aloha_act_default/checkpoints/002500`). Within that there is a `pretrained_model` directory which should contain:
|
||||
- `config.json`: A serialized version of the policy configuration (following the policy's dataclass config).
|
||||
- `model.safetensors`: A set of `torch.nn.Module` parameters, saved in [Hugging Face Safetensors](https://huggingface.co/docs/safetensors/index) format.
|
||||
- `config.yaml`: A consolidated Hydra training configuration containing the policy, environment, and dataset configs. The policy configuration should match `config.json` exactly. The environment config is useful for anyone who wants to evaluate your policy. The dataset config just serves as a paper trail for reproducibility.
|
||||
|
||||
To upload these to the hub, run the following:
|
||||
```bash
|
||||
huggingface-cli upload ${hf_user}/${repo_name} path/to/pretrained_model
|
||||
```
|
||||
|
||||
See [eval.py](https://github.com/huggingface/lerobot/blob/main/lerobot/scripts/eval.py) for an example of how other people may use your policy.
|
||||
|
||||
|
||||
### Improve your code with profiling
|
||||
|
||||
An example of a code snippet to profile the evaluation of a policy (the profiler schedule and trace handler below are just one reasonable choice, not prescribed settings):

**Example**
```python
from torch.profiler import ProfilerActivity, profile, record_function, schedule

num_episodes = 10  # number of evaluation episodes to profile

with profile(
    activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
    schedule=schedule(wait=2, warmup=2, active=3),
    on_trace_ready=lambda prof: prof.export_chrome_trace(f"trace_{prof.step_num}.json"),
) as prof:
    with record_function("eval_policy"):
        for i in range(num_episodes):
            prof.step()
            # insert code to profile, potentially whole body of eval_policy function
```
|
||||
|
||||
## Citation
|
||||
|
||||
If you want, you can cite this work with:
|
||||
```bibtex
|
||||
@misc{cadene2024lerobot,
|
||||
author = {Cadene, Remi and Alibert, Simon and Soare, Alexander and Gallouedec, Quentin and Zouitine, Adil and Wolf, Thomas},
|
||||
title = {LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch},
|
||||
howpublished = "\url{https://github.com/huggingface/lerobot}",
|
||||
year = {2024}
|
||||
}
```

```bash
python lerobot/scripts/eval.py \
    pretrained_model_path=/home/rcadene/code/fowm/logs/xarm_lift/all/default/2/models/final.pt \
    eval_episodes=7
```
|
||||
|
||||
Additionally, if you are using any of the particular policy architecture, pretrained models, or datasets, it is recommended to cite the original authors of the work as they appear below:
|
||||
## Contribute
|
||||
|
||||
- [Diffusion Policy](https://diffusion-policy.cs.columbia.edu)
|
||||
```bibtex
|
||||
@article{chi2024diffusionpolicy,
|
||||
author = {Cheng Chi and Zhenjia Xu and Siyuan Feng and Eric Cousineau and Yilun Du and Benjamin Burchfiel and Russ Tedrake and Shuran Song},
|
||||
title ={Diffusion Policy: Visuomotor Policy Learning via Action Diffusion},
|
||||
journal = {The International Journal of Robotics Research},
|
||||
year = {2024},
|
||||
}
```
|
||||
- [ACT or ALOHA](https://tonyzhaozh.github.io/aloha)
|
||||
```bibtex
|
||||
@article{zhao2023learning,
|
||||
title={Learning fine-grained bimanual manipulation with low-cost hardware},
|
||||
author={Zhao, Tony Z and Kumar, Vikash and Levine, Sergey and Finn, Chelsea},
|
||||
journal={arXiv preprint arXiv:2304.13705},
|
||||
year={2023}
|
||||
}
```

**Style**

```bash
# install if needed
pre-commit install
# apply style and linter checks before git commit
pre-commit run -a
```
|
||||
|
||||
- [TDMPC](https://www.nicklashansen.com/td-mpc/)
|
||||
```bibtex
@inproceedings{Hansen2022tdmpc,
  title={Temporal Difference Learning for Model Predictive Control},
  author={Nicklas Hansen and Xiaolong Wang and Hao Su},
  booktitle={ICML},
  year={2022}
}
```

**Adding dependencies (temporary)**

Right now, for the CI to work, whenever a new dependency is added it also needs to be added to the cpu env, e.g.:

```bash
# Run in this directory, adds the package to the main env with cuda
poetry add some-package

# Adds the same package to the cpu env
cd .github/poetry/cpu && poetry add some-package
```
|
||||
|
||||
- [VQ-BeT](https://sjlee.cc/vq-bet/)
|
||||
```bibtex
|
||||
@article{lee2024behavior,
|
||||
title={Behavior generation with latent actions},
|
||||
author={Lee, Seungjae and Wang, Yibin and Etukuru, Haritheja and Kim, H Jin and Shafiullah, Nur Muhammad Mahi and Pinto, Lerrel},
|
||||
journal={arXiv preprint arXiv:2403.03181},
|
||||
year={2024}
|
||||
}
```

**Tests**

Install [git lfs](https://git-lfs.com/) to retrieve test artifacts (if you don't have it already).

On Mac:
```bash
brew install git-lfs
git lfs install
```
|
||||
|
||||
On Ubuntu:
|
||||
```
|
||||
sudo apt-get install git-lfs
|
||||
git lfs install
|
||||
```
|
||||
|
||||
Pull artifacts if they're not in [tests/data](tests/data)
|
||||
```
|
||||
git lfs pull
|
||||
```
|
||||
|
||||
When adding a new dataset, mock it with
|
||||
```
|
||||
python tests/scripts/mock_dataset.py --in-data-dir data/<dataset_id> --out-data-dir tests/data/<dataset_id>
|
||||
```
|
||||
|
||||
Run tests
|
||||
```
|
||||
DATA_DIR="tests/data" pytest -sx tests
|
||||
```
|
||||
|
||||
**Datasets**
|
||||
|
||||
To add a pytorch rl dataset to the hub, first login and use a token generated from [huggingface settings](https://huggingface.co/settings/tokens) with write access:
|
||||
```
|
||||
huggingface-cli login --token $HUGGINGFACE_TOKEN --add-to-git-credential
|
||||
```
|
||||
|
||||
Then you can upload it to the hub with:
|
||||
```
|
||||
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli upload --repo-type dataset $HF_USER/$DATASET data/$DATASET
|
||||
```
|
||||
|
||||
For instance, for [cadene/pusht](https://huggingface.co/datasets/cadene/pusht), we used:
|
||||
```
|
||||
HF_USER=cadene
|
||||
DATASET=pusht
|
||||
```
|
||||
|
||||
|
||||
## Acknowledgment
|
||||
- Our Diffusion policy and Pusht environment are adapted from [Diffusion Policy](https://diffusion-policy.cs.columbia.edu/)
|
||||
- Our TDMPC policy and Simxarm environment are adapted from [FOWM](https://www.yunhaifeng.com/FOWM/)
|
||||
- Our ACT policy and ALOHA environment are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha/)
|
||||
|
||||
@@ -1,271 +0,0 @@
|
||||
# Video benchmark
|
||||
|
||||
|
||||
## Questions
|
||||
What is the optimal trade-off between:
|
||||
- minimizing loading time with random access,
|
||||
- minimizing memory space on disk,
|
||||
- maximizing success rate of policies,
|
||||
- compatibility across devices/platforms for decoding videos (e.g. video players, web browsers).
|
||||
|
||||
How to encode videos?
|
||||
- Which video codec (`-vcodec`) to use? h264, h265, AV1?
|
||||
- What pixel format to use (`-pix_fmt`)? `yuv444p` or `yuv420p`?
|
||||
- How much compression (`-crf`)? No compression with `0`, intermediate compression with `25` or extreme with `50+`?
|
||||
- Which frequency to choose for key frames (`-g`)? A key frame every `10` frames?
|
||||
|
||||
How to decode videos?
|
||||
- Which `decoder`? `torchvision`, `torchaudio`, `ffmpegio`, `decord`, or `nvc`?
|
||||
- Which scenarios to use for requesting timestamps during the benchmark (`timestamps_mode`)?
|
||||
|
||||
|
||||
## Variables
|
||||
**Image content & size**
|
||||
We don't expect the same optimal settings for a dataset of images from a simulation, or from the real world in an apartment, in a factory, outdoors, or with lots of moving objects in the scene, etc. Similarly, loading times might not vary linearly with the image size (resolution).
|
||||
For these reasons, we run this benchmark on four representative datasets:
|
||||
- `lerobot/pusht_image`: (96 x 96 pixels) simulation with simple geometric shapes, fixed camera.
|
||||
- `aliberts/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera.
|
||||
- `aliberts/paris_street`: (720 x 1280 pixels) real-world outdoor, moving camera.
|
||||
- `aliberts/kitchen`: (1080 x 1920 pixels) real-world indoor, fixed camera.
|
||||
|
||||
Note: The datasets used for this benchmark need to be image datasets, not video datasets.
|
||||
|
||||
**Data augmentations**
|
||||
We might revisit this benchmark and find better settings if we train our policies with various data augmentations to make them more robust (e.g. robust to color changes, compression, etc.).
|
||||
|
||||
### Encoding parameters
|
||||
| parameter | values |
|
||||
|-------------|--------------------------------------------------------------|
|
||||
| **vcodec** | `libx264`, `libx265`, `libsvtav1` |
|
||||
| **pix_fmt** | `yuv444p`, `yuv420p` |
|
||||
| **g** | `1`, `2`, `3`, `4`, `5`, `6`, `10`, `15`, `20`, `40`, `None` |
|
||||
| **crf** | `0`, `5`, `10`, `15`, `20`, `25`, `30`, `40`, `50`, `None` |
|
||||
|
||||
Note that `crf` value might be interpreted differently by various video codecs. In other words, the same value used with one codec doesn't necessarily translate into the same compression level with another codec. In fact, the default value (`None`) isn't the same amongst the different video codecs. Importantly, it is also the case for many other ffmpeg arguments like `g` which specifies the frequency of the key frames.
|
||||
|
||||
For a comprehensive list and documentation of these parameters, see the ffmpeg documentation depending on the video codec used:
|
||||
- h264: https://trac.ffmpeg.org/wiki/Encode/H.264
|
||||
- h265: https://trac.ffmpeg.org/wiki/Encode/H.265
|
||||
- AV1: https://trac.ffmpeg.org/wiki/Encode/AV1
|
||||
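For a concrete sense of what one point in this grid looks like, a single encoding configuration translates into an ffmpeg call along these lines (frame pattern, frame rate and output name are placeholders):

```bash
# Encode a folder of frames with libx264, yuv420p, a keyframe every 2 frames and crf 30
ffmpeg -framerate 10 -i frame_%06d.png \
    -vcodec libx264 -pix_fmt yuv420p -g 2 -crf 30 \
    episode_0.mp4
```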
|
||||
### Decoding parameters
|
||||
**Decoder**
|
||||
We tested two video decoding backends from torchvision:
|
||||
- `pyav` (default)
|
||||
- `video_reader` (requires to build torchvision from source)
|
||||
|
||||
**Requested timestamps**
|
||||
Given the way video decoding works, once a keyframe has been loaded, the decoding of subsequent frames is fast.
|
||||
This of course is affected by the `-g` parameter during encoding, which specifies the frequency of the keyframes. Given our typical use cases in robotics policies which might request a few timestamps in different random places, we want to replicate these use cases with the following scenarios:
|
||||
- `1_frame`: 1 frame,
|
||||
- `2_frames`: 2 consecutive frames (e.g. `[t, t + 1 / fps]`),
|
||||
- `6_frames`: 6 consecutive frames (e.g. `[t + i / fps for i in range(6)]`)
|
||||
|
||||
Note that this differs significantly from a typical use case like watching a movie, in which every frame is loaded sequentially from the beginning to the end and it's acceptable to have big values for `-g`.
|
||||
|
||||
Additionally, because some policies might request single timestamps that are a few frames apart, we also have the following scenario:
|
||||
- `2_frames_4_space`: 2 frames with 4 consecutive frames of spacing in between (e.g `[t, t + 5 / fps]`),
|
||||
|
||||
However, due to how video decoding is implemented with `pyav`, we don't have access to an accurate seek so in practice this scenario is essentially the same as `6_frames` since all 6 frames between `t` and `t + 5 / fps` will be decoded.
|
||||
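In code, these scenarios simply correspond to the lists of timestamps requested around a random time `t` (a sketch, with `fps` the frame rate of the dataset):

```python
fps = 10   # example frame rate
t = 3.2    # example randomly drawn timestamp in seconds

timestamps = {
    "1_frame": [t],
    "2_frames": [t, t + 1 / fps],
    "6_frames": [t + i / fps for i in range(6)],
    "2_frames_4_space": [t, t + 5 / fps],
}
```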
|
||||
|
||||
## Metrics
|
||||
**Data compression ratio (lower is better)**
|
||||
`video_images_size_ratio` is the ratio of the memory space on disk taken by the encoded video over the memory space taken by the original images. For instance, `video_images_size_ratio=25%` means that the video takes 4 times less memory space on disk compared to the original images.
|
||||
|
||||
**Loading time ratio (lower is better)**
|
||||
`video_images_load_time_ratio` is the ratio of the time it takes to decode frames from the video at given timestamps over the time it takes to load the exact same original images. Lower is better. For instance, `video_images_load_time_ratio=200%` means that decoding from video is 2 times slower than loading the original images.
|
||||
|
||||
**Average Mean Square Error (lower is better)**
|
||||
`avg_mse` is the average mean square error between each decoded frame and its corresponding original image over all requested timestamps, and also divided by the number of pixels in the image to be comparable when switching to different image sizes.
|
||||
|
||||
**Average Peak Signal to Noise Ratio (higher is better)**
|
||||
`avg_psnr` measures the ratio between the maximum possible power of a signal and the power of corrupting noise that affects the fidelity of its representation. Higher PSNR indicates better quality.
|
||||
|
||||
**Average Structural Similarity Index Measure (higher is better)**
|
||||
`avg_ssim` evaluates the perceived quality of images by comparing luminance, contrast, and structure. SSIM values range from -1 to 1, where 1 indicates perfect similarity.
|
||||
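For reference, these per-frame metrics can be computed with standard tooling; a minimal sketch using `scikit-image` (>= 0.19 for `channel_axis`, assuming `uint8` RGB frames of identical shape) could look like this:

```python
import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def frame_metrics(original: np.ndarray, decoded: np.ndarray) -> dict:
    """Compare one decoded frame against its original image (both uint8, HxWxC)."""
    mse = np.mean((original.astype(np.float64) - decoded.astype(np.float64)) ** 2)
    return {
        "mse": mse,  # the benchmark additionally normalizes by the number of pixels
        "psnr": peak_signal_noise_ratio(original, decoded, data_range=255),
        "ssim": structural_similarity(original, decoded, channel_axis=-1, data_range=255),
    }
```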
|
||||
One aspect that can't be measured with these metrics is the compatibility of the encoding across platforms, in particular in web browsers, for visualization purposes.
|
||||
h264, h265 and AV1 are all commonly used codecs and should not pose an issue. However, the chroma subsampling (`pix_fmt`) format might affect compatibility:
|
||||
- `yuv420p` is more widely supported across various platforms, including web browsers.
|
||||
- `yuv444p` offers higher color fidelity but might not be supported as broadly.
|
||||
|
||||
|
||||
<!-- **Loss of a pretrained policy (higher is better)** (not available)
|
||||
`loss_pretrained` is the result of evaluating with the selected encoding/decoding settings a policy pretrained on original images. It is easier to understand than `avg_l2_error`.
|
||||
|
||||
**Success rate after retraining (higher is better)** (not available)
|
||||
`success_rate` is the result of training and evaluating a policy with the selected encoding/decoding settings. It is the most difficult metric to get but also the very best. -->
|
||||
|
||||
|
||||
## How the benchmark works
|
||||
The benchmark evaluates both encoding and decoding of video frames on the first episode of each dataset.
|
||||
|
||||
**Encoding:** for each `vcodec` and `pix_fmt` pair, we use a default value for `g` and `crf` upon which we change a single value (either `g` or `crf`) to one of the specified values (we don't test every combination of those as this would be computationally too heavy).
|
||||
This gives a unique set of encoding parameters which is used to encode the episode.
|
||||
|
||||
**Decoding:** Then, for each of those unique encodings, we iterate through every combination of the decoding parameters `backend` and `timestamps_mode`. For each of them, we record the metrics of a number of samples (given by `--num-samples`). This is parallelized for efficiency and the number of processes can be controlled with `--num-workers`. Ideally, it's best to have a `--num-samples` that is divisible by `--num-workers`.
|
||||
|
||||
Intermediate results are saved for each `vcodec` and `pix_fmt` combination in CSV tables.
These are then all concatenated into a single table ready for analysis.
|
||||
|
||||
## Caveats
|
||||
We tried to measure the most impactful parameters for both encoding and decoding. However, for computational reasons we can't test out every combination.
|
||||
|
||||
Additional encoding parameters exist that are not included in this benchmark. In particular:
|
||||
- `-preset` which allows for selecting encoding presets. This represents a collection of options that provides a certain trade-off between encoding speed and compression ratio. By leaving this parameter unspecified, it is considered to be `medium` for libx264 and libx265 and `8` for libsvtav1.
|
||||
- `-tune` which allows optimizing the encoding for certain aspects (e.g. film quality, fast decoding, etc.).

See the documentation mentioned above for more detailed info on these settings and for a more comprehensive list of other parameters.
|
||||
|
||||
Similarly on the decoding side, other decoders exist but are not implemented in our current benchmark. To name a few:
|
||||
- `torchaudio`
|
||||
- `ffmpegio`
|
||||
- `decord`
|
||||
- `nvc`
|
||||
|
||||
Note as well that since we are mostly interested in the performance at decoding time (also because encoding is done only once before uploading a dataset), we did not measure encoding times nor have any metrics regarding encoding.
|
||||
However, besides the necessity to build ffmpeg from source, encoding did not pose any issue and it didn't take a significant amount of time during this benchmark.
|
||||
|
||||
|
||||
## Install
|
||||
Building ffmpeg from source is required to include libx265 and libaom/libsvtav1 (av1) video codecs ([compilation guide](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu)).
|
||||
|
||||
**Note:** While you still need to build torchvision with a conda-installed `ffmpeg<4.3` to use the `video_reader` decoder (as described in [#220](https://github.com/huggingface/lerobot/pull/220)), you also need another ffmpeg version, custom-built with all the video codecs, for encoding. For the script to use that version, you can prepend the command above with `PATH="$HOME/bin:$PATH"`, which is where ffmpeg should be built.
|
||||
|
||||
|
||||
## Adding a video decoder
|
||||
Right now, we're only benchmarking the two video decoders available with torchvision: `pyav` and `video_reader`.
|
||||
You can easily add a new decoder to the benchmark by adding it to this function in the script:
|
||||
```diff
|
||||
def decode_video_frames(
|
||||
video_path: str,
|
||||
timestamps: list[float],
|
||||
tolerance_s: float,
|
||||
backend: str,
|
||||
) -> torch.Tensor:
|
||||
if backend in ["pyav", "video_reader"]:
|
||||
return decode_video_frames_torchvision(
|
||||
video_path, timestamps, tolerance_s, backend
|
||||
)
|
||||
+    elif backend == "your_decoder":
|
||||
+ return your_decoder_function(
|
||||
+ video_path, timestamps, tolerance_s, backend
|
||||
+ )
|
||||
else:
|
||||
raise NotImplementedError(backend)
|
||||
```
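For instance, a hypothetical `your_decoder_function` based on `decord` (one of the decoders listed above) might look like the sketch below. The decord calls are written from memory and should be checked against decord's documentation; this is not part of the benchmark.

```python
import torch

def your_decoder_function(
    video_path: str,
    timestamps: list[float],
    tolerance_s: float,  # unused in this sketch
    backend: str,
) -> torch.Tensor:
    import decord  # pip install decord

    vr = decord.VideoReader(str(video_path))
    fps = vr.get_avg_fps()
    # Map each requested timestamp to the nearest frame index.
    indices = [min(round(ts * fps), len(vr) - 1) for ts in timestamps]
    frames = vr.get_batch(indices).asnumpy()  # (num_frames, h, w, c), uint8
    # Match the format returned by decode_video_frames_torchvision: float32, (num_frames, c, h, w), in [0, 1].
    return torch.from_numpy(frames).permute(0, 3, 1, 2).float() / 255
```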
|
||||
|
||||
|
||||
## Example
|
||||
For a quick run, you can try these parameters:
|
||||
```bash
|
||||
python benchmark/video/run_video_benchmark.py \
|
||||
--output-dir outputs/video_benchmark \
|
||||
--repo-ids \
|
||||
lerobot/pusht_image \
|
||||
aliberts/aloha_mobile_shrimp_image \
|
||||
--vcodec libx264 libx265 \
|
||||
--pix-fmt yuv444p yuv420p \
|
||||
--g 2 20 None \
|
||||
--crf 10 40 None \
|
||||
--timestamps-modes 1_frame 2_frames \
|
||||
--backends pyav video_reader \
|
||||
--num-samples 5 \
|
||||
--num-workers 5 \
|
||||
--save-frames 0
|
||||
```
|
||||
|
||||
|
||||
## Results
|
||||
|
||||
### Reproduce
|
||||
We ran the benchmark with the following parameters:
|
||||
```bash
|
||||
# h264 and h265 encodings
|
||||
python benchmark/video/run_video_benchmark.py \
|
||||
--output-dir outputs/video_benchmark \
|
||||
--repo-ids \
|
||||
lerobot/pusht_image \
|
||||
aliberts/aloha_mobile_shrimp_image \
|
||||
aliberts/paris_street \
|
||||
aliberts/kitchen \
|
||||
--vcodec libx264 libx265 \
|
||||
--pix-fmt yuv444p yuv420p \
|
||||
--g 1 2 3 4 5 6 10 15 20 40 None \
|
||||
--crf 0 5 10 15 20 25 30 40 50 None \
|
||||
--timestamps-modes 1_frame 2_frames 6_frames \
|
||||
--backends pyav video_reader \
|
||||
--num-samples 50 \
|
||||
--num-workers 5 \
|
||||
--save-frames 1
|
||||
|
||||
# av1 encoding (only compatible with yuv420p and pyav decoder)
|
||||
python benchmark/video/run_video_benchmark.py \
|
||||
--output-dir outputs/video_benchmark \
|
||||
--repo-ids \
|
||||
lerobot/pusht_image \
|
||||
aliberts/aloha_mobile_shrimp_image \
|
||||
aliberts/paris_street \
|
||||
aliberts/kitchen \
|
||||
--vcodec libsvtav1 \
|
||||
--pix-fmt yuv420p \
|
||||
--g 1 2 3 4 5 6 10 15 20 40 None \
|
||||
--crf 0 5 10 15 20 25 30 40 50 None \
|
||||
--timestamps-modes 1_frame 2_frames 6_frames \
|
||||
--backends pyav \
|
||||
--num-samples 50 \
|
||||
--num-workers 5 \
|
||||
--save-frames 1
|
||||
```
|
||||
|
||||
The full results are available [here](https://docs.google.com/spreadsheets/d/1OYJB43Qu8fC26k_OyoMFgGBBKfQRCi4BIuYitQnq3sw/edit?usp=sharing).
|
||||
|
||||
|
||||
### Parameters selected for LeRobotDataset
|
||||
Considering these results, we chose what we think is the best set of encoding parameters:
|
||||
- vcodec: `libsvtav1`
|
||||
- pix-fmt: `yuv420p`
|
||||
- g: `2`
|
||||
- crf: `30`
|
||||
|
||||
Since we're using AV1 encoding, we chose the `pyav` decoder, as `video_reader` does not support it (and `pyav` doesn't require a custom build of `torchvision`).
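For reference, encoding a directory of frames with these parameters through the repository's `encode_video_frames` helper would look roughly like this (paths and `fps` are illustrative; the keyword arguments follow the call made in `run_video_benchmark.py` below):

```python
from pathlib import Path

from lerobot.common.datasets.video_utils import encode_video_frames

encode_video_frames(
    imgs_dir=Path("outputs/video_benchmark/images/lerobot_pusht_image"),  # folder of frame_XXXXXX.png
    video_path=Path("outputs/video_benchmark/videos/episode_0.mp4"),
    fps=10,
    vcodec="libsvtav1",
    pix_fmt="yuv420p",
    g=2,
    crf=30,
    overwrite=True,
)
```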
|
||||
|
||||
### Summary
|
||||
|
||||
These tables show the results for `g=2` and `crf=30`, using `timestamps-mode=6_frames` and `backend=pyav`.
|
||||
|
||||
| video_images_size_ratio | vcodec | pix_fmt | | | |
|
||||
|------------------------------------|------------|---------|-----------|-----------|-----------|
|
||||
| | libx264 | | libx265 | | libsvtav1 |
|
||||
| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
|
||||
| lerobot/pusht_image | **16.97%** | 17.58% | 18.57% | 18.86% | 22.06% |
|
||||
| aliberts/aloha_mobile_shrimp_image | 2.14% | 2.11% | 1.38% | **1.37%** | 5.59% |
|
||||
| aliberts/paris_street | 2.12% | 2.13% | **1.54%** | **1.54%** | 4.43% |
|
||||
| aliberts/kitchen | 1.40% | 1.39% | **1.00%** | **1.00%** | 2.52% |
|
||||
|
||||
| video_images_load_time_ratio | vcodec | pix_fmt | | | |
|
||||
|------------------------------------|---------|---------|----------|---------|-----------|
|
||||
| | libx264 | | libx265 | | libsvtav1 |
|
||||
| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
|
||||
| lerobot/pusht_image | 6.45 | 5.19 | **1.90** | 2.12 | 2.47 |
|
||||
| aliberts/aloha_mobile_shrimp_image | 11.80 | 7.92 | 0.71 | 0.85 | **0.48** |
|
||||
| aliberts/paris_street | 2.21 | 2.05 | 0.36 | 0.49 | **0.30** |
|
||||
| aliberts/kitchen | 1.46 | 1.46 | 0.28 | 0.51 | **0.26** |
|
||||
|
||||
| | | vcodec | pix_fmt | | | |
|
||||
|------------------------------------|----------|----------|--------------|----------|-----------|--------------|
|
||||
| | | libx264 | | libx265 | | libsvtav1 |
|
||||
| repo_id | metric | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
|
||||
| lerobot/pusht_image | avg_mse | 2.90E-04 | **2.03E-04** | 3.13E-04 | 2.29E-04 | 2.19E-04 |
|
||||
| | avg_psnr | 35.44 | 37.07 | 35.49 | **37.30** | 37.20 |
|
||||
| | avg_ssim | 98.28% | **98.85%** | 98.31% | 98.84% | 98.72% |
|
||||
| aliberts/aloha_mobile_shrimp_image | avg_mse | 2.76E-04 | 2.59E-04 | 3.17E-04 | 3.06E-04 | **1.30E-04** |
|
||||
| | avg_psnr | 35.91 | 36.21 | 35.88 | 36.09 | **40.17** |
|
||||
| | avg_ssim | 95.19% | 95.18% | 95.00% | 95.05% | **97.73%** |
|
||||
| aliberts/paris_street | avg_mse | 6.89E-04 | 6.70E-04 | 4.03E-03 | 4.02E-03 | **3.09E-04** |
|
||||
| | avg_psnr | 33.48 | 33.68 | 32.05 | 32.15 | **35.40** |
|
||||
| | avg_ssim | 93.76% | 93.75% | 89.46% | 89.46% | **95.46%** |
|
||||
| aliberts/kitchen | avg_mse | 2.50E-04 | 2.24E-04 | 4.28E-04 | 4.18E-04 | **1.53E-04** |
|
||||
| | avg_psnr | 36.73 | 37.33 | 36.56 | 36.75 | **39.12** |
|
||||
| | avg_ssim | 95.47% | 95.58% | 95.52% | 95.53% | **96.82%** |
|
||||
@@ -1,90 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""Capture video feed from a camera as raw images."""
|
||||
|
||||
import argparse
|
||||
import datetime as dt
|
||||
from pathlib import Path
|
||||
|
||||
import cv2
|
||||
|
||||
|
||||
def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height: int):
|
||||
now = dt.datetime.now()
|
||||
capture_dir = output_dir / f"{now:%Y-%m-%d}" / f"{now:%H-%M-%S}"
|
||||
if not capture_dir.exists():
|
||||
capture_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Opens the default webcam
|
||||
cap = cv2.VideoCapture(0)
|
||||
if not cap.isOpened():
|
||||
print("Error: Could not open video stream.")
|
||||
return
|
||||
|
||||
cap.set(cv2.CAP_PROP_FPS, fps)
|
||||
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
|
||||
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
|
||||
|
||||
frame_index = 0
|
||||
while True:
|
||||
ret, frame = cap.read()
|
||||
|
||||
if not ret:
|
||||
print("Error: Could not read frame.")
|
||||
break
|
||||
|
||||
cv2.imshow("Video Stream", frame)
|
||||
cv2.imwrite(str(capture_dir / f"frame_{frame_index:06d}.png"), frame)
|
||||
frame_index += 1
|
||||
|
||||
# Break the loop on 'q' key press
|
||||
if cv2.waitKey(1) & 0xFF == ord("q"):
|
||||
break
|
||||
|
||||
# Release the capture and destroy all windows
|
||||
cap.release()
|
||||
cv2.destroyAllWindows()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
parser.add_argument(
|
||||
"--output-dir",
|
||||
type=Path,
|
||||
default=Path("outputs/cam_capture/"),
|
||||
help="Directory where the capture images are written. A subfolder named with the current date & time will be created inside it for each capture.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--fps",
|
||||
type=int,
|
||||
default=30,
|
||||
help="Frames Per Second of the capture.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--width",
|
||||
type=int,
|
||||
default=1280,
|
||||
help="Width of the captured images.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--height",
|
||||
type=int,
|
||||
default=720,
|
||||
help="Height of the captured images.",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
display_and_save_video_stream(**vars(args))
|
||||
@@ -1,490 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""Assess the performance of video decoding in various configurations.
|
||||
|
||||
This script will benchmark different video encoding and decoding parameters.
|
||||
See the provided README.md or run `python benchmark/video/run_video_benchmark.py --help` for usage info.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import datetime as dt
|
||||
import random
|
||||
import shutil
|
||||
from collections import OrderedDict
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from pathlib import Path
|
||||
|
||||
import einops
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import PIL
|
||||
import torch
|
||||
from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity
|
||||
from tqdm import tqdm
|
||||
|
||||
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
|
||||
from lerobot.common.datasets.video_utils import (
|
||||
decode_video_frames_torchvision,
|
||||
encode_video_frames,
|
||||
)
|
||||
from lerobot.common.utils.benchmark import TimeBenchmark
|
||||
|
||||
BASE_ENCODING = OrderedDict(
|
||||
[
|
||||
("vcodec", "libx264"),
|
||||
("pix_fmt", "yuv444p"),
|
||||
("g", 2),
|
||||
("crf", None),
|
||||
# TODO(aliberts): Add fastdecode
|
||||
# ("fastdecode", 0),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
# TODO(rcadene, aliberts): move to `utils.py` folder when we want to refactor
|
||||
def parse_int_or_none(value) -> int | None:
|
||||
if value.lower() == "none":
|
||||
return None
|
||||
try:
|
||||
return int(value)
|
||||
except ValueError as e:
|
||||
raise argparse.ArgumentTypeError(f"Invalid int or None: {value}") from e
|
||||
|
||||
|
||||
def check_datasets_formats(repo_ids: list) -> None:
|
||||
for repo_id in repo_ids:
|
||||
dataset = LeRobotDataset(repo_id)
|
||||
if dataset.video:
|
||||
raise ValueError(
|
||||
f"Use only image dataset for running this benchmark. Video dataset provided: {repo_id}"
|
||||
)
|
||||
|
||||
|
||||
def get_directory_size(directory: Path) -> int:
|
||||
total_size = 0
|
||||
for item in directory.rglob("*"):
|
||||
if item.is_file():
|
||||
total_size += item.stat().st_size
|
||||
return total_size
|
||||
|
||||
|
||||
def load_original_frames(imgs_dir: Path, timestamps: list[float], fps: int) -> torch.Tensor:
|
||||
frames = []
|
||||
for ts in timestamps:
|
||||
idx = int(ts * fps)
|
||||
frame = PIL.Image.open(imgs_dir / f"frame_{idx:06d}.png")
|
||||
frame = torch.from_numpy(np.array(frame))
|
||||
frame = frame.type(torch.float32) / 255
|
||||
frame = einops.rearrange(frame, "h w c -> c h w")
|
||||
frames.append(frame)
|
||||
return torch.stack(frames)
|
||||
|
||||
|
||||
def save_decoded_frames(
|
||||
imgs_dir: Path, save_dir: Path, frames: torch.Tensor, timestamps: list[float], fps: int
|
||||
) -> None:
|
||||
if save_dir.exists() and len(list(save_dir.glob("frame_*.png"))) == len(timestamps):
|
||||
return
|
||||
|
||||
save_dir.mkdir(parents=True, exist_ok=True)
|
||||
for i, ts in enumerate(timestamps):
|
||||
idx = int(ts * fps)
|
||||
frame_hwc = (frames[i].permute((1, 2, 0)) * 255).type(torch.uint8).cpu().numpy()
|
||||
PIL.Image.fromarray(frame_hwc).save(save_dir / f"frame_{idx:06d}_decoded.png")
|
||||
shutil.copyfile(imgs_dir / f"frame_{idx:06d}.png", save_dir / f"frame_{idx:06d}_original.png")
|
||||
|
||||
|
||||
def save_first_episode(imgs_dir: Path, dataset: LeRobotDataset) -> None:
|
||||
ep_num_images = dataset.episode_data_index["to"][0].item()
|
||||
if imgs_dir.exists() and len(list(imgs_dir.glob("frame_*.png"))) == ep_num_images:
|
||||
return
|
||||
|
||||
imgs_dir.mkdir(parents=True, exist_ok=True)
|
||||
hf_dataset = dataset.hf_dataset.with_format(None)
|
||||
|
||||
# We only save images from the first camera
|
||||
img_keys = [key for key in hf_dataset.features if key.startswith("observation.image")]
|
||||
imgs_dataset = hf_dataset.select_columns(img_keys[0])
|
||||
|
||||
for i, item in enumerate(
|
||||
tqdm(imgs_dataset, desc=f"saving {dataset.repo_id} first episode images", leave=False)
|
||||
):
|
||||
img = item[img_keys[0]]
|
||||
img.save(str(imgs_dir / f"frame_{i:06d}.png"), quality=100)
|
||||
|
||||
if i >= ep_num_images - 1:
|
||||
break
|
||||
|
||||
|
||||
def sample_timestamps(timestamps_mode: str, ep_num_images: int, fps: int) -> list[float]:
|
||||
# Start at 5 to allow for 2_frames_4_space and 6_frames
|
||||
idx = random.randint(5, ep_num_images - 1)
|
||||
match timestamps_mode:
|
||||
case "1_frame":
|
||||
frame_indexes = [idx]
|
||||
case "2_frames":
|
||||
frame_indexes = [idx - 1, idx]
|
||||
case "2_frames_4_space":
|
||||
frame_indexes = [idx - 5, idx]
|
||||
case "6_frames":
|
||||
frame_indexes = [idx - i for i in range(6)][::-1]
|
||||
case _:
|
||||
raise ValueError(timestamps_mode)
|
||||
|
||||
return [idx / fps for idx in frame_indexes]
|
||||
|
||||
|
||||
def decode_video_frames(
|
||||
video_path: str,
|
||||
timestamps: list[float],
|
||||
tolerance_s: float,
|
||||
backend: str,
|
||||
) -> torch.Tensor:
|
||||
if backend in ["pyav", "video_reader"]:
|
||||
return decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend)
|
||||
else:
|
||||
raise NotImplementedError(backend)
|
||||
|
||||
|
||||
def benchmark_decoding(
|
||||
imgs_dir: Path,
|
||||
video_path: Path,
|
||||
timestamps_mode: str,
|
||||
backend: str,
|
||||
ep_num_images: int,
|
||||
fps: int,
|
||||
num_samples: int = 50,
|
||||
num_workers: int = 4,
|
||||
save_frames: bool = False,
|
||||
) -> dict:
|
||||
def process_sample(sample: int):
|
||||
time_benchmark = TimeBenchmark()
|
||||
timestamps = sample_timestamps(timestamps_mode, ep_num_images, fps)
|
||||
num_frames = len(timestamps)
|
||||
result = {
|
||||
"psnr_values": [],
|
||||
"ssim_values": [],
|
||||
"mse_values": [],
|
||||
}
|
||||
|
||||
with time_benchmark:
|
||||
frames = decode_video_frames(video_path, timestamps=timestamps, tolerance_s=5e-1, backend=backend)
|
||||
result["load_time_video_ms"] = time_benchmark.result_ms / num_frames
|
||||
|
||||
with time_benchmark:
|
||||
original_frames = load_original_frames(imgs_dir, timestamps, fps)
|
||||
result["load_time_images_ms"] = time_benchmark.result_ms / num_frames
|
||||
|
||||
frames_np, original_frames_np = frames.numpy(), original_frames.numpy()
|
||||
for i in range(num_frames):
|
||||
result["mse_values"].append(mean_squared_error(original_frames_np[i], frames_np[i]))
|
||||
result["psnr_values"].append(
|
||||
peak_signal_noise_ratio(original_frames_np[i], frames_np[i], data_range=1.0)
|
||||
)
|
||||
result["ssim_values"].append(
|
||||
structural_similarity(original_frames_np[i], frames_np[i], data_range=1.0, channel_axis=0)
|
||||
)
|
||||
|
||||
if save_frames and sample == 0:
|
||||
save_dir = video_path.with_suffix("") / f"{timestamps_mode}_{backend}"
|
||||
save_decoded_frames(imgs_dir, save_dir, frames, timestamps, fps)
|
||||
|
||||
return result
|
||||
|
||||
load_times_video_ms = []
|
||||
load_times_images_ms = []
|
||||
mse_values = []
|
||||
psnr_values = []
|
||||
ssim_values = []
|
||||
|
||||
# A sample is a single set of decoded frames specified by timestamps_mode (e.g. a single frame, 2 frames, etc.).
|
||||
# For each sample, we record metrics (loading time and quality metrics) which are then averaged over all samples.
|
||||
# As these samples are independent, we run them in parallel threads to speed up the benchmark.
|
||||
with ThreadPoolExecutor(max_workers=num_workers) as executor:
|
||||
futures = [executor.submit(process_sample, i) for i in range(num_samples)]
|
||||
for future in tqdm(as_completed(futures), total=num_samples, desc="samples", leave=False):
|
||||
result = future.result()
|
||||
load_times_video_ms.append(result["load_time_video_ms"])
|
||||
load_times_images_ms.append(result["load_time_images_ms"])
|
||||
psnr_values.extend(result["psnr_values"])
|
||||
ssim_values.extend(result["ssim_values"])
|
||||
mse_values.extend(result["mse_values"])
|
||||
|
||||
avg_load_time_video_ms = float(np.array(load_times_video_ms).mean())
|
||||
avg_load_time_images_ms = float(np.array(load_times_images_ms).mean())
|
||||
video_images_load_time_ratio = avg_load_time_video_ms / avg_load_time_images_ms
|
||||
|
||||
return {
|
||||
"avg_load_time_video_ms": avg_load_time_video_ms,
|
||||
"avg_load_time_images_ms": avg_load_time_images_ms,
|
||||
"video_images_load_time_ratio": video_images_load_time_ratio,
|
||||
"avg_mse": float(np.mean(mse_values)),
|
||||
"avg_psnr": float(np.mean(psnr_values)),
|
||||
"avg_ssim": float(np.mean(ssim_values)),
|
||||
}
|
||||
|
||||
|
||||
def benchmark_encoding_decoding(
|
||||
dataset: LeRobotDataset,
|
||||
video_path: Path,
|
||||
imgs_dir: Path,
|
||||
encoding_cfg: dict,
|
||||
decoding_cfg: dict,
|
||||
num_samples: int,
|
||||
num_workers: int,
|
||||
save_frames: bool,
|
||||
overwrite: bool = False,
|
||||
seed: int = 1337,
|
||||
) -> list[dict]:
|
||||
fps = dataset.fps
|
||||
|
||||
if overwrite or not video_path.is_file():
|
||||
tqdm.write(f"encoding {video_path}")
|
||||
encode_video_frames(
|
||||
imgs_dir=imgs_dir,
|
||||
video_path=video_path,
|
||||
fps=fps,
|
||||
vcodec=encoding_cfg["vcodec"],
|
||||
pix_fmt=encoding_cfg["pix_fmt"],
|
||||
g=encoding_cfg.get("g"),
|
||||
crf=encoding_cfg.get("crf"),
|
||||
# fast_decode=encoding_cfg.get("fastdecode"),
|
||||
overwrite=True,
|
||||
)
|
||||
|
||||
ep_num_images = dataset.episode_data_index["to"][0].item()
|
||||
    height, width = tuple(dataset[0][dataset.camera_keys[0]].shape[-2:])
|
||||
num_pixels = width * height
|
||||
video_size_bytes = video_path.stat().st_size
|
||||
images_size_bytes = get_directory_size(imgs_dir)
|
||||
video_images_size_ratio = video_size_bytes / images_size_bytes
|
||||
|
||||
random.seed(seed)
|
||||
benchmark_table = []
|
||||
for timestamps_mode in tqdm(
|
||||
decoding_cfg["timestamps_modes"], desc="decodings (timestamps_modes)", leave=False
|
||||
):
|
||||
for backend in tqdm(decoding_cfg["backends"], desc="decodings (backends)", leave=False):
|
||||
benchmark_row = benchmark_decoding(
|
||||
imgs_dir,
|
||||
video_path,
|
||||
timestamps_mode,
|
||||
backend,
|
||||
ep_num_images,
|
||||
fps,
|
||||
num_samples,
|
||||
num_workers,
|
||||
save_frames,
|
||||
)
|
||||
benchmark_row.update(
|
||||
**{
|
||||
"repo_id": dataset.repo_id,
|
||||
"resolution": f"{width} x {height}",
|
||||
"num_pixels": num_pixels,
|
||||
"video_size_bytes": video_size_bytes,
|
||||
"images_size_bytes": images_size_bytes,
|
||||
"video_images_size_ratio": video_images_size_ratio,
|
||||
"timestamps_mode": timestamps_mode,
|
||||
"backend": backend,
|
||||
},
|
||||
**encoding_cfg,
|
||||
)
|
||||
benchmark_table.append(benchmark_row)
|
||||
|
||||
return benchmark_table
|
||||
|
||||
|
||||
def main(
|
||||
output_dir: Path,
|
||||
repo_ids: list[str],
|
||||
vcodec: list[str],
|
||||
pix_fmt: list[str],
|
||||
g: list[int],
|
||||
crf: list[int],
|
||||
# fastdecode: list[int],
|
||||
timestamps_modes: list[str],
|
||||
backends: list[str],
|
||||
num_samples: int,
|
||||
num_workers: int,
|
||||
save_frames: bool,
|
||||
):
|
||||
check_datasets_formats(repo_ids)
|
||||
encoding_benchmarks = {
|
||||
"g": g,
|
||||
"crf": crf,
|
||||
# "fastdecode": fastdecode,
|
||||
}
|
||||
decoding_benchmarks = {
|
||||
"timestamps_modes": timestamps_modes,
|
||||
"backends": backends,
|
||||
}
|
||||
headers = ["repo_id", "resolution", "num_pixels"]
|
||||
headers += list(BASE_ENCODING.keys())
|
||||
headers += [
|
||||
"timestamps_mode",
|
||||
"backend",
|
||||
"video_size_bytes",
|
||||
"images_size_bytes",
|
||||
"video_images_size_ratio",
|
||||
"avg_load_time_video_ms",
|
||||
"avg_load_time_images_ms",
|
||||
"video_images_load_time_ratio",
|
||||
"avg_mse",
|
||||
"avg_psnr",
|
||||
"avg_ssim",
|
||||
]
|
||||
file_paths = []
|
||||
for video_codec in tqdm(vcodec, desc="encodings (vcodec)"):
|
||||
for pixel_format in tqdm(pix_fmt, desc="encodings (pix_fmt)", leave=False):
|
||||
benchmark_table = []
|
||||
for repo_id in tqdm(repo_ids, desc="encodings (datasets)", leave=False):
|
||||
dataset = LeRobotDataset(repo_id)
|
||||
imgs_dir = output_dir / "images" / dataset.repo_id.replace("/", "_")
|
||||
# We only use the first episode
|
||||
save_first_episode(imgs_dir, dataset)
|
||||
for key, values in tqdm(encoding_benchmarks.items(), desc="encodings (g, crf)", leave=False):
|
||||
for value in tqdm(values, desc=f"encodings ({key})", leave=False):
|
||||
encoding_cfg = BASE_ENCODING.copy()
|
||||
encoding_cfg["vcodec"] = video_codec
|
||||
encoding_cfg["pix_fmt"] = pixel_format
|
||||
encoding_cfg[key] = value
|
||||
args_path = Path("_".join(str(value) for value in encoding_cfg.values()))
|
||||
video_path = output_dir / "videos" / args_path / f"{repo_id.replace('/', '_')}.mp4"
|
||||
benchmark_table += benchmark_encoding_decoding(
|
||||
dataset,
|
||||
video_path,
|
||||
imgs_dir,
|
||||
encoding_cfg,
|
||||
decoding_benchmarks,
|
||||
num_samples,
|
||||
num_workers,
|
||||
save_frames,
|
||||
)
|
||||
|
||||
# Save intermediate results
|
||||
benchmark_df = pd.DataFrame(benchmark_table, columns=headers)
|
||||
now = dt.datetime.now()
|
||||
csv_path = (
|
||||
output_dir
|
||||
/ f"{now:%Y-%m-%d}_{now:%H-%M-%S}_{video_codec}_{pixel_format}_{num_samples}-samples.csv"
|
||||
)
|
||||
benchmark_df.to_csv(csv_path, header=True, index=False)
|
||||
file_paths.append(csv_path)
|
||||
del benchmark_df
|
||||
|
||||
# Concatenate all results
|
||||
df_list = [pd.read_csv(csv_path) for csv_path in file_paths]
|
||||
concatenated_df = pd.concat(df_list, ignore_index=True)
|
||||
concatenated_path = output_dir / f"{now:%Y-%m-%d}_{now:%H-%M-%S}_all_{num_samples}-samples.csv"
|
||||
concatenated_df.to_csv(concatenated_path, header=True, index=False)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--output-dir",
|
||||
type=Path,
|
||||
default=Path("outputs/video_benchmark"),
|
||||
help="Directory where the video benchmark outputs are written.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--repo-ids",
|
||||
type=str,
|
||||
nargs="*",
|
||||
default=[
|
||||
"lerobot/pusht_image",
|
||||
"aliberts/aloha_mobile_shrimp_image",
|
||||
"aliberts/paris_street",
|
||||
"aliberts/kitchen",
|
||||
],
|
||||
help="Datasets repo-ids to test against. First episodes only are used. Must be images.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--vcodec",
|
||||
type=str,
|
||||
nargs="*",
|
||||
default=["libx264", "libx265", "libsvtav1"],
|
||||
help="Video codecs to be tested",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--pix-fmt",
|
||||
type=str,
|
||||
nargs="*",
|
||||
default=["yuv444p", "yuv420p"],
|
||||
help="Pixel formats (chroma subsampling) to be tested",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--g",
|
||||
type=parse_int_or_none,
|
||||
nargs="*",
|
||||
default=[1, 2, 3, 4, 5, 6, 10, 15, 20, 40, 100, None],
|
||||
help="Group of pictures sizes to be tested.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--crf",
|
||||
type=parse_int_or_none,
|
||||
nargs="*",
|
||||
default=[0, 5, 10, 15, 20, 25, 30, 40, 50, None],
|
||||
help="Constant rate factors to be tested.",
|
||||
)
|
||||
# parser.add_argument(
|
||||
# "--fastdecode",
|
||||
# type=int,
|
||||
# nargs="*",
|
||||
# default=[0, 1],
|
||||
# help="Use the fastdecode tuning option. 0 disables it. "
|
||||
# "For libx264 and libx265, only 1 is possible. "
|
||||
# "For libsvtav1, 1, 2 or 3 are possible values with a higher number meaning a faster decoding optimization",
|
||||
# )
|
||||
parser.add_argument(
|
||||
"--timestamps-modes",
|
||||
type=str,
|
||||
nargs="*",
|
||||
default=[
|
||||
"1_frame",
|
||||
"2_frames",
|
||||
"2_frames_4_space",
|
||||
"6_frames",
|
||||
],
|
||||
help="Timestamps scenarios to be tested.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--backends",
|
||||
type=str,
|
||||
nargs="*",
|
||||
default=["pyav", "video_reader"],
|
||||
help="Torchvision decoding backend to be tested.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--num-samples",
|
||||
type=int,
|
||||
default=50,
|
||||
help="Number of samples for each encoding x decoding config.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--num-workers",
|
||||
type=int,
|
||||
default=10,
|
||||
help="Number of processes for parallelized sample processing.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--save-frames",
|
||||
type=int,
|
||||
default=0,
|
||||
help="Whether to save decoded frames or not. Enter a non-zero number for true.",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
main(**vars(args))
|
||||
@@ -1,32 +0,0 @@
|
||||
# Configure image
|
||||
ARG PYTHON_VERSION=3.10
|
||||
|
||||
FROM python:${PYTHON_VERSION}-slim
|
||||
ARG PYTHON_VERSION
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# Install apt dependencies
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
build-essential cmake \
|
||||
libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
|
||||
speech-dispatcher \
|
||||
&& apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Create virtual environment
|
||||
RUN ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python
|
||||
RUN python -m venv /opt/venv
|
||||
ENV PATH="/opt/venv/bin:$PATH"
|
||||
RUN echo "source /opt/venv/bin/activate" >> /root/.bashrc
|
||||
|
||||
# Install LeRobot
|
||||
COPY . /lerobot
|
||||
WORKDIR /lerobot
|
||||
RUN pip install --upgrade --no-cache-dir pip
|
||||
RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]" \
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||
|
||||
# Set EGL as the rendering backend for MuJoCo
|
||||
ENV MUJOCO_GL="egl"
|
||||
|
||||
# Execute in bash shell rather than python
|
||||
CMD ["/bin/bash"]
|
||||
@@ -1,68 +0,0 @@
|
||||
FROM nvidia/cuda:12.2.2-devel-ubuntu22.04
|
||||
|
||||
# Configure image
|
||||
ARG PYTHON_VERSION=3.10
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# Install apt dependencies
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
build-essential cmake \
|
||||
git git-lfs openssh-client \
|
||||
nano vim less util-linux tree \
|
||||
htop atop nvtop \
|
||||
sed gawk grep curl wget zip unzip \
|
||||
tcpdump sysstat screen tmux \
|
||||
libglib2.0-0 libgl1-mesa-glx libegl1-mesa \
|
||||
speech-dispatcher \
|
||||
python${PYTHON_VERSION} python${PYTHON_VERSION}-venv \
|
||||
&& apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install ffmpeg build dependencies. See:
|
||||
# https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu
|
||||
# TODO(aliberts): create image to build dependencies from source instead
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
autoconf automake yasm \
|
||||
libass-dev \
|
||||
libfreetype6-dev \
|
||||
libgnutls28-dev \
|
||||
libunistring-dev \
|
||||
libmp3lame-dev \
|
||||
libtool \
|
||||
libvorbis-dev \
|
||||
meson \
|
||||
ninja-build \
|
||||
pkg-config \
|
||||
texinfo \
|
||||
yasm \
|
||||
zlib1g-dev \
|
||||
nasm \
|
||||
libx264-dev \
|
||||
libx265-dev libnuma-dev \
|
||||
libvpx-dev \
|
||||
libfdk-aac-dev \
|
||||
libopus-dev \
|
||||
libsvtav1-dev libsvtav1enc-dev libsvtav1dec-dev \
|
||||
libdav1d-dev
|
||||
|
||||
# Install gh cli tool
|
||||
RUN (type -p wget >/dev/null || (apt update && apt-get install wget -y)) \
|
||||
&& mkdir -p -m 755 /etc/apt/keyrings \
|
||||
&& wget -qO- https://cli.github.com/packages/githubcli-archive-keyring.gpg | tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null \
|
||||
&& chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \
|
||||
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
|
||||
&& apt update \
|
||||
&& apt install gh -y \
|
||||
&& apt clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Setup `python`
|
||||
RUN ln -s /usr/bin/python3 /usr/bin/python
|
||||
|
||||
# Install poetry
|
||||
RUN curl -sSL https://install.python-poetry.org | python -
|
||||
ENV PATH="/root/.local/bin:$PATH"
|
||||
RUN echo 'if [ "$HOME" != "/root" ]; then ln -sf /root/.local/bin/poetry $HOME/.local/bin/poetry; fi' >> /root/.bashrc
|
||||
RUN poetry config virtualenvs.create false
|
||||
RUN poetry config virtualenvs.in-project true
|
||||
|
||||
# Set EGL as the rendering backend for MuJoCo
|
||||
ENV MUJOCO_GL="egl"
|
||||
@@ -1,30 +0,0 @@
|
||||
FROM nvidia/cuda:12.4.1-base-ubuntu22.04
|
||||
|
||||
# Configure image
|
||||
ARG PYTHON_VERSION=3.10
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
|
||||
# Install apt dependencies
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
build-essential cmake \
|
||||
libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
|
||||
speech-dispatcher \
|
||||
python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
|
||||
&& apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
|
||||
# Create virtual environment
|
||||
RUN ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python
|
||||
RUN python -m venv /opt/venv
|
||||
ENV PATH="/opt/venv/bin:$PATH"
|
||||
RUN echo "source /opt/venv/bin/activate" >> /root/.bashrc
|
||||
|
||||
# Install LeRobot
|
||||
COPY . /lerobot
|
||||
WORKDIR /lerobot
|
||||
RUN pip install --upgrade --no-cache-dir pip
|
||||
RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]"
|
||||
|
||||
# Set EGL as the rendering backend for MuJoCo
|
||||
ENV MUJOCO_GL="egl"
|
||||
BIN
examples.zip
@@ -1,280 +0,0 @@
|
||||
This tutorial explains how to use [SO-100](https://github.com/TheRobotStudio/SO-ARM100) with LeRobot.
|
||||
|
||||
## Source the parts
|
||||
|
||||
Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials with links to source the parts, the instructions to 3D print the parts, and advice if it's your first time printing or if you don't already own a 3D printer.
|
||||
|
||||
**Important**: Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.
|
||||
|
||||
## Install LeRobot
|
||||
|
||||
On your computer:
|
||||
|
||||
1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
|
||||
```bash
|
||||
mkdir -p ~/miniconda3
|
||||
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
|
||||
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
|
||||
rm ~/miniconda3/miniconda.sh
|
||||
~/miniconda3/bin/conda init bash
|
||||
```
|
||||
|
||||
2. Restart shell or `source ~/.bashrc`
|
||||
|
||||
3. Create and activate a fresh conda environment for lerobot
|
||||
```bash
|
||||
conda create -y -n lerobot python=3.10 && conda activate lerobot
|
||||
```
|
||||
|
||||
4. Clone LeRobot:
|
||||
```bash
|
||||
git clone https://github.com/huggingface/lerobot.git ~/lerobot
|
||||
```
|
||||
|
||||
5. Install LeRobot with dependencies for the feetech motors:
|
||||
```bash
|
||||
cd ~/lerobot && pip install -e ".[feetech]"
|
||||
```
|
||||
|
||||
For Linux only (not Mac), install extra dependencies for recording datasets:
|
||||
```bash
|
||||
conda install -y -c conda-forge ffmpeg
|
||||
pip uninstall -y opencv-python
|
||||
conda install -y -c conda-forge "opencv>=4.10.0"
|
||||
```
|
||||
|
||||
## Configure the motors
|
||||
|
||||
Follow step 1 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I), which illustrates the use of our scripts below.
|
||||
|
||||
**Find USB ports associated to your arms**
|
||||
To find the correct ports for each arm, run the utility script twice:
|
||||
```bash
|
||||
python lerobot/scripts/find_motors_bus_port.py
|
||||
```
|
||||
|
||||
Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
|
||||
```
|
||||
Finding all available ports for the MotorBus.
|
||||
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
|
||||
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
|
||||
|
||||
[...Disconnect leader arm and press Enter...]
|
||||
|
||||
The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
|
||||
Reconnect the usb cable.
|
||||
```
|
||||
|
||||
Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
|
||||
```
|
||||
Finding all available ports for the MotorBus.
|
||||
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
|
||||
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
|
||||
|
||||
[...Disconnect follower arm and press Enter...]
|
||||
|
||||
The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
|
||||
Reconnect the usb cable.
|
||||
```
|
||||
|
||||
Troubleshooting: On Linux, you might need to give access to the USB ports by running:
|
||||
```bash
|
||||
sudo chmod 666 /dev/ttyACM0
|
||||
sudo chmod 666 /dev/ttyACM1
|
||||
```
|
||||
|
||||
**Configure your motors**
|
||||
Plug your first motor and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate:
|
||||
```bash
|
||||
python lerobot/scripts/configure_motor.py \
|
||||
--port /dev/tty.usbmodem58760432961 \
|
||||
--brand feetech \
|
||||
--model sts3215 \
|
||||
--baudrate 1000000 \
|
||||
--ID 1
|
||||
```
|
||||
|
||||
Note: These motors are currently limited. They can only take values between 0 and 4096, which corresponds to a full turn; they can't turn more than that. 2048 is the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) to reach one end of the range, or +2048 steps (180 degrees clockwise) to reach the other. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift of up to ± 2048 steps (± 180 degrees).
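For intuition, converting between steps and degrees over this 0-4096 range is just a proportion (a quick sketch):

```python
STEPS_PER_TURN = 4096  # one full motor turn

def steps_to_degrees(steps: int) -> float:
    return steps * 360 / STEPS_PER_TURN

print(steps_to_degrees(2048))   # 180.0  (half a turn)
print(steps_to_degrees(-1024))  # -90.0
```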
|
||||
|
||||
Then unplug your motor and plug the second motor and set its ID to 2.
|
||||
```bash
|
||||
python lerobot/scripts/configure_motor.py \
|
||||
--port /dev/tty.usbmodem58760432961 \
|
||||
--brand feetech \
|
||||
--model sts3215 \
|
||||
--baudrate 1000000 \
|
||||
--ID 2
|
||||
```
|
||||
|
||||
Repeat the process for all your motors up to ID 6. Do the same for the 6 motors of the leader arm.
|
||||
|
||||
**Remove the gears of the 6 leader motors**
|
||||
Follow step 2 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). You need to remove the gear for the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm.
|
||||
|
||||
**Add motor horn to the motors**
|
||||
Follow step 3 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). For SO-100, you need to align the holes on the motor horn to the motor spline to be approximately 1:30, 4:30, 7:30 and 10:30.
|
||||
Try to avoid rotating the motor while doing so, to keep position 2048 set during configuration. This is especially tricky for the leader motors, as they are more sensitive without the gears, but it's okay if they rotate a bit.
|
||||
|
||||
## Assemble the arms
|
||||
|
||||
Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can do the second arm in under 1 hour.
|
||||
|
||||
## Calibrate
|
||||
|
||||
Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one SO-100 robot to work on another.
|
||||
|
||||
**Auto-calibration of follower arm**
|
||||
Follow step 5 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I) which illustrates the auto-calibration of the follower arm. You first need to manually move your follower arm to this initial position:
|
||||
|
||||
<div style="text-align:center;">
|
||||
<img src="../media/so100/follower_initial.webp?raw=true" alt="SO-100 follower arm initial position" title="SO-100 follower arm initial position" width="50%">
|
||||
</div>
|
||||
|
||||
Then run this script to launch auto-calibration:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py calibrate \
|
||||
--robot-path lerobot/configs/robot/so100.yaml \
|
||||
--robot-overrides '~cameras' --arms main_follower
|
||||
```
|
||||
|
||||
Note: You can't run auto-calibration for the leader arm, since we removed the gears. Thus, you will need to manually calibrate the leader arm. It's less precise than auto-calibration, but precision is not as critical for the leader arm.
|
||||
|
||||
**Manual calibration of leader arm**
|
||||
Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
|
||||
|
||||
| 1. Zero position | 2. Rotated position | 3. Rest position |
|
||||
|---|---|---|
|
||||
| <img src="../media/so100/leader_zero.webp?raw=true" alt="SO-100 leader arm zero position" title="SO-100 leader arm zero position" style="width:100%;"> | <img src="../media/so100/leader_rotated.webp?raw=true" alt="SO-100 leader arm rotated position" title="SO-100 leader arm rotated position" style="width:100%;"> | <img src="../media/so100/leader_rest.webp?raw=true" alt="SO-100 leader arm rest position" title="SO-100 leader arm rest position" style="width:100%;"> |
|
||||
|
||||
Run this script to launch manual calibration:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py calibrate \
|
||||
--robot-path lerobot/configs/robot/so100.yaml \
|
||||
--robot-overrides '~cameras' --arms main_leader
|
||||
```
|
||||
|
||||
## Teleoperate
|
||||
|
||||
**Simple teleop**
|
||||
Then you are ready to teleoperate your robot! Run this simple script (it won't connect to or display the cameras):
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py teleoperate \
|
||||
--robot-path lerobot/configs/robot/so100.yaml \
|
||||
--robot-overrides '~cameras' \
|
||||
--display-cameras 0
|
||||
```
|
||||
|
||||
|
||||
**Teleop with displaying cameras**
|
||||
Follow [this guide to setup your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset.
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py teleoperate \
|
||||
--robot-path lerobot/configs/robot/so100.yaml
|
||||
```
|
||||
|
||||
## Record a dataset
|
||||
|
||||
Once you're familiar with teleoperation, you can record your first dataset with SO-100.
|
||||
|
||||
If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
|
||||
```bash
|
||||
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
|
||||
```
|
||||
|
||||
Store your Hugging Face repository name in a variable to run these commands:
|
||||
```bash
|
||||
HF_USER=$(huggingface-cli whoami | head -n 1)
|
||||
echo $HF_USER
|
||||
```
|
||||
|
||||
Record 2 episodes and upload your dataset to the hub:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py record \
|
||||
--robot-path lerobot/configs/robot/so100.yaml \
|
||||
--fps 30 \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/so100_test \
|
||||
--tags so100 tutorial \
|
||||
--warmup-time-s 5 \
|
||||
--episode-time-s 40 \
|
||||
--reset-time-s 10 \
|
||||
--num-episodes 2 \
|
||||
--push-to-hub 1
|
||||
```
|
||||
|
||||
## Visualize a dataset
|
||||
|
||||
If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
|
||||
```bash
|
||||
echo ${HF_USER}/so100_test
|
||||
```
|
||||
|
||||
If you didn't upload your dataset (i.e. used `--push-to-hub 0`), you can also visualize it locally with:
|
||||
```bash
|
||||
python lerobot/scripts/visualize_dataset_html.py \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/so100_test
|
||||
```
|
||||
|
||||
## Replay an episode
|
||||
|
||||
Now try to replay the first episode on your robot:
|
||||
```bash
|
||||
DATA_DIR=data python lerobot/scripts/control_robot.py replay \
|
||||
--robot-path lerobot/configs/robot/so100.yaml \
|
||||
--fps 30 \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/so100_test \
|
||||
--episode 0
|
||||
```
|
||||
|
||||
## Train a policy
|
||||
|
||||
To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
|
||||
```bash
|
||||
DATA_DIR=data python lerobot/scripts/train.py \
|
||||
dataset_repo_id=${HF_USER}/so100_test \
|
||||
policy=act_so100_real \
|
||||
env=so100_real \
|
||||
hydra.run.dir=outputs/train/act_so100_test \
|
||||
hydra.job.name=act_so100_test \
|
||||
device=cuda \
|
||||
wandb.enable=true
|
||||
```
|
||||
|
||||
Let's explain it:
|
||||
1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/so100_test`.
|
||||
2. We provided the policy with `policy=act_so100_real`. This loads configurations from [`lerobot/configs/policy/act_so100_real.yaml`](../lerobot/configs/policy/act_so100_real.yaml). Importantly, this policy uses 2 cameras as input `laptop`, `phone`.
|
||||
3. We provided an environment as argument with `env=so100_real`. This loads configurations from [`lerobot/configs/env/so100_real.yaml`](../lerobot/configs/env/so100_real.yaml).
|
||||
4. We provided `device=cuda` since we are training on an Nvidia GPU, but you can also use `device=mps` if you are using a Mac with Apple silicon, or `device=cpu` otherwise.
|
||||
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
|
||||
6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you don't provide `DATA_DIR`, your dataset will be downloaded from the Hugging Face hub to your cache folder `$HOME/.cache/huggingface`. In future versions of `lerobot`, both directories will be in sync.
|
||||
|
||||
Training should take several hours. You will find checkpoints in `outputs/train/act_so100_test/checkpoints`.
|
||||
|
||||
## Evaluate your policy
|
||||
|
||||
You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py record \
|
||||
--robot-path lerobot/configs/robot/so100.yaml \
|
||||
--fps 30 \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/eval_act_so100_test \
|
||||
--tags so100 tutorial eval \
|
||||
--warmup-time-s 5 \
|
||||
--episode-time-s 40 \
|
||||
--reset-time-s 10 \
|
||||
--num-episodes 10 \
|
||||
-p outputs/train/act_so100_test/checkpoints/last/pretrained_model
|
||||
```
|
||||
|
||||
As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
|
||||
1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/eval_so100_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_so100_test`).
|
||||
2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_so100_test`).
|
||||
|
||||
## More
|
||||
|
||||
Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth tutorial on controlling real robots with LeRobot.
|
||||
|
||||
If you have any question or need help, please reach out on Discord in the channel [`#so100-arm`](https://discord.com/channels/1216765309076115607/1237741463832363039).
|
||||
@@ -1,280 +0,0 @@
|
||||
This tutorial explains how to use [Moss v1](https://github.com/jess-moss/moss-robot-arms) with LeRobot.
|
||||
|
||||
## Source the parts
|
||||
|
||||
Follow this [README](https://github.com/jess-moss/moss-robot-arms). It contains the bill of materials with links to source the parts, the instructions to 3D print the parts, and advice if it's your first time printing or if you don't already own a 3D printer.
|
||||
|
||||
**Important**: Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.
|
||||
|
||||
## Install LeRobot
|
||||
|
||||
On your computer:
|
||||
|
||||
1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
|
||||
```bash
|
||||
mkdir -p ~/miniconda3
|
||||
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
|
||||
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
|
||||
rm ~/miniconda3/miniconda.sh
|
||||
~/miniconda3/bin/conda init bash
|
||||
```
|
||||
|
||||
2. Restart shell or `source ~/.bashrc`
|
||||
|
||||
3. Create and activate a fresh conda environment for lerobot
|
||||
```bash
|
||||
conda create -y -n lerobot python=3.10 && conda activate lerobot
|
||||
```
|
||||
|
||||
4. Clone LeRobot:
|
||||
```bash
|
||||
git clone https://github.com/huggingface/lerobot.git ~/lerobot
|
||||
```
|
||||
|
||||
5. Install LeRobot with dependencies for the feetech motors:
|
||||
```bash
|
||||
cd ~/lerobot && pip install -e ".[feetech]"
|
||||
```
|
||||
|
||||
For Linux only (not Mac), install extra dependencies for recording datasets:
|
||||
```bash
|
||||
conda install -y -c conda-forge ffmpeg
|
||||
pip uninstall -y opencv-python
|
||||
conda install -y -c conda-forge "opencv>=4.10.0"
|
||||
```
|
||||
|
||||
## Configure the motors
|
||||
|
||||
Follow step 1 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic), which illustrates the use of our scripts below.
|
||||
|
||||
**Find USB ports associated to your arms**
|
||||
To find the correct ports for each arm, run the utility script twice:
|
||||
```bash
|
||||
python lerobot/scripts/find_motors_bus_port.py
|
||||
```
|
||||
|
||||
Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
|
||||
```
|
||||
Finding all available ports for the MotorBus.
|
||||
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
|
||||
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
|
||||
|
||||
[...Disconnect leader arm and press Enter...]
|
||||
|
||||
The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
|
||||
Reconnect the usb cable.
|
||||
```
|
||||
|
||||
Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
|
||||
```
|
||||
Finding all available ports for the MotorBus.
|
||||
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
|
||||
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
|
||||
|
||||
[...Disconnect follower arm and press Enter...]
|
||||
|
||||
The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
|
||||
Reconnect the usb cable.
|
||||
```
|
||||
|
||||
Troubleshooting: On Linux, you might need to give access to the USB ports by running:
|
||||
```bash
|
||||
sudo chmod 666 /dev/ttyACM0
|
||||
sudo chmod 666 /dev/ttyACM1
|
||||
```
|
||||
|
||||
**Configure your motors**
|
||||
Plug your first motor and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate:
|
||||
```bash
|
||||
python lerobot/scripts/configure_motor.py \
|
||||
--port /dev/tty.usbmodem58760432961 \
|
||||
--brand feetech \
|
||||
--model sts3215 \
|
||||
--baudrate 1000000 \
|
||||
--ID 1
|
||||
```
|
||||
|
||||
Note: These motors are currently limited. They can only take values between 0 and 4096, which corresponds to a full turn; they can't turn more than that. 2048 is the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) to reach one end of the range, or +2048 steps (180 degrees clockwise) to reach the other. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift of up to ± 2048 steps (± 180 degrees).
|
||||
|
||||
Then unplug your motor and plug the second motor and set its ID to 2.
|
||||
```bash
|
||||
python lerobot/scripts/configure_motor.py \
|
||||
--port /dev/tty.usbmodem58760432961 \
|
||||
--brand feetech \
|
||||
--model sts3215 \
|
||||
--baudrate 1000000 \
|
||||
--ID 2
|
||||
```
|
||||
|
||||
Repeat the process for all your motors up to ID 6. Do the same for the 6 motors of the leader arm.
|
||||
|
||||
**Remove the gears of the 6 leader motors**
|
||||
Follow step 2 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). You need to remove the gear for the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm.
|
||||
|
||||
**Add motor horn to the motors**
|
||||
Follow step 3 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). For Moss v1, you need to align the holes on the motor horn to the motor spline to be approximately 3, 6, 9 and 12 o'clock.
|
||||
Try to avoid rotating the motor while doing so, to keep position 2048 set during configuration. This is especially tricky for the leader motors, as they are more sensitive without the gears, but it's okay if they rotate a bit.
|
||||
|
||||
## Assemble the arms
|
||||
|
||||
Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can do the second arm in under 1 hour.
|
||||
|
||||
## Calibrate
|
||||
|
||||
Next, you'll need to calibrate your Moss v1 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one Moss v1 robot to work on another.
|
||||
|
||||
**Auto-calibration of follower arm**
|
||||
Follow step 5 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the auto-calibration of the follower arm. You first need to manually move your follower arm to this initial position:
|
||||
|
||||
<div style="text-align:center;">
|
||||
<img src="../media/moss/follower_initial.webp?raw=true" alt="Moss v1 follower arm initial position" title="Moss v1 follower arm initial position" width="50%">
|
||||
</div>
|
||||
|
||||
Then run this script to launch auto-calibration:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py calibrate \
|
||||
--robot-path lerobot/configs/robot/moss.yaml \
|
||||
--robot-overrides '~cameras' --arms main_follower
|
||||
```
|
||||
|
||||
Note: You can't run auto-calibration for the leader arm, since we removed the gears. Thus, you will need to manually calibrate the leader arm. It's less precise than auto-calibration, but precision is not as critical for the leader arm.
|
||||
|
||||
**Manual calibration of leader arm**
|
||||
Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
|
||||
|
||||
| 1. Zero position | 2. Rotated position | 3. Rest position |
|
||||
|---|---|---|
|
||||
| <img src="../media/moss/leader_zero.webp?raw=true" alt="Moss v1 leader arm zero position" title="Moss v1 leader arm zero position" style="width:100%;"> | <img src="../media/moss/leader_rotated.webp?raw=true" alt="Moss v1 leader arm rotated position" title="Moss v1 leader arm rotated position" style="width:100%;"> | <img src="../media/moss/leader_rest.webp?raw=true" alt="Moss v1 leader arm rest position" title="Moss v1 leader arm rest position" style="width:100%;"> |
|
||||
|
||||
Run this script to launch manual calibration:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py calibrate \
|
||||
--robot-path lerobot/configs/robot/moss.yaml \
|
||||
--robot-overrides '~cameras' --arms main_leader
|
||||
```
|
||||
|
||||
## Teleoperate
|
||||
|
||||
**Simple teleop**
|
||||
Then you are ready to teleoperate your robot! Run this simple script (it won't connect to or display the cameras):
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py teleoperate \
|
||||
--robot-path lerobot/configs/robot/moss.yaml \
|
||||
--robot-overrides '~cameras' \
|
||||
--display-cameras 0
|
||||
```
|
||||
|
||||
|
||||
**Teleop with displaying cameras**
|
||||
Follow [this guide to setup your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset.
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py teleoperate \
|
||||
--robot-path lerobot/configs/robot/moss.yaml
|
||||
```
|
||||
|
||||
## Record a dataset
|
||||
|
||||
Once you're familiar with teleoperation, you can record your first dataset with Moss v1.
|
||||
|
||||
If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
|
||||
```bash
|
||||
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
|
||||
```
|
||||
|
||||
Store your Hugging Face repository name in a variable to run these commands:
|
||||
```bash
|
||||
HF_USER=$(huggingface-cli whoami | head -n 1)
|
||||
echo $HF_USER
|
||||
```
|
||||
|
||||
Record 2 episodes and upload your dataset to the hub:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py record \
|
||||
--robot-path lerobot/configs/robot/moss.yaml \
|
||||
--fps 30 \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/moss_test \
|
||||
--tags moss tutorial \
|
||||
--warmup-time-s 5 \
|
||||
--episode-time-s 40 \
|
||||
--reset-time-s 10 \
|
||||
--num-episodes 2 \
|
||||
--push-to-hub 1
|
||||
```
|
||||
|
||||
## Visualize a dataset
|
||||
|
||||
If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id given by:
|
||||
```bash
|
||||
echo ${HF_USER}/moss_test
|
||||
```
|
||||
|
||||
If you didn't upload your dataset (i.e. you recorded with `--push-to-hub 0`), you can still visualize it locally with:
|
||||
```bash
|
||||
python lerobot/scripts/visualize_dataset_html.py \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/moss_test
|
||||
```
|
||||
|
||||
## Replay an episode
|
||||
|
||||
Now try to replay the first episode on your robot:
|
||||
```bash
|
||||
DATA_DIR=data python lerobot/scripts/control_robot.py replay \
|
||||
--robot-path lerobot/configs/robot/moss.yaml \
|
||||
--fps 30 \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/moss_test \
|
||||
--episode 0
|
||||
```
|
||||
|
||||
## Train a policy
|
||||
|
||||
To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
|
||||
```bash
|
||||
DATA_DIR=data python lerobot/scripts/train.py \
|
||||
dataset_repo_id=${HF_USER}/moss_test \
|
||||
policy=act_moss_real \
|
||||
env=moss_real \
|
||||
hydra.run.dir=outputs/train/act_moss_test \
|
||||
hydra.job.name=act_moss_test \
|
||||
device=cuda \
|
||||
wandb.enable=true
|
||||
```
|
||||
|
||||
Let's explain it:
|
||||
1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/moss_test`.
|
||||
2. We provided the policy with `policy=act_moss_real`. This loads configurations from [`lerobot/configs/policy/act_moss_real.yaml`](../lerobot/configs/policy/act_moss_real.yaml). Importantly, this policy uses 2 cameras as input: `laptop` and `phone`.
|
||||
3. We provided an environment as argument with `env=moss_real`. This loads configurations from [`lerobot/configs/env/moss_real.yaml`](../lerobot/configs/env/moss_real.yaml).
|
||||
4. We provided `device=cuda` since we are training on an Nvidia GPU, but you can also use `device=mps` if you are using a Mac with Apple silicon, or `device=cpu` otherwise.
|
||||
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
|
||||
6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you don't provide `DATA_DIR`, your dataset will be downloaded from the Hugging Face hub to your cache folder `$HOME/.cache/huggingface`. In future versions of `lerobot`, both directories will be in sync.
|
||||
|
||||
Training should take several hours. You will find checkpoints in `outputs/train/act_moss_test/checkpoints`.
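Once training has finished, you can load a checkpoint back in Python, for example to inspect it or to reuse it outside of the `record` script. Below is a minimal sketch: it assumes the checkpoint path above and that the ACT policy class is importable as `ACTPolicy` (mirroring how `DiffusionPolicy.from_pretrained` is used in the LeRobot examples), so adapt the import if your version differs.

```python
import torch

from lerobot.common.policies.act.modeling_act import ACTPolicy

# Load the last checkpoint written by the training run above.
pretrained_policy_path = "outputs/train/act_moss_test/checkpoints/last/pretrained_model"
policy = ACTPolicy.from_pretrained(pretrained_policy_path)
policy.eval()

# Move the policy to the device you trained on (falls back to CPU otherwise).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
policy.to(device)

print(policy)
```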
|
||||
|
||||
## Evaluate your policy
|
||||
|
||||
You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py record \
|
||||
--robot-path lerobot/configs/robot/moss.yaml \
|
||||
--fps 30 \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/eval_act_moss_test \
|
||||
--tags moss tutorial eval \
|
||||
--warmup-time-s 5 \
|
||||
--episode-time-s 40 \
|
||||
--reset-time-s 10 \
|
||||
--num-episodes 10 \
|
||||
-p outputs/train/act_moss_test/checkpoints/last/pretrained_model
|
||||
```
|
||||
|
||||
As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
|
||||
1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/act_moss_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_moss_test`); a short upload sketch follows this list.
|
||||
2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_moss_test`).
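As mentioned in the first point above, you can upload a checkpoint to the hub and reference it by repo id. Here is a hedged sketch of how such an upload could be done with `huggingface_hub` (the repo name `act_moss_test` is only an example, and `YOUR_HF_USER` stands for the username stored in `$HF_USER`):

```python
from huggingface_hub import HfApi

api = HfApi()
repo_id = "YOUR_HF_USER/act_moss_test"  # replace with ${HF_USER}/act_moss_test

# Create the model repo if it doesn't exist yet, then upload the checkpoint folder.
api.create_repo(repo_id=repo_id, repo_type="model", exist_ok=True)
api.upload_folder(
    folder_path="outputs/train/act_moss_test/checkpoints/last/pretrained_model",
    repo_id=repo_id,
    repo_type="model",
)
```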
|
||||
|
||||
## More
|
||||
|
||||
Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth walkthrough of controlling real robots with LeRobot.
|
||||
|
||||
If you have any question or need help, please reach out on Discord in the channel [`#moss-arm`](https://discord.com/channels/1216765309076115607/1275374638985252925).
|
||||
@@ -1,91 +0,0 @@
|
||||
"""
|
||||
This script demonstrates the use of the `LeRobotDataset` class for handling and processing robotic datasets from Hugging Face.
|
||||
It illustrates how to load datasets, manipulate them, and apply transformations suitable for machine learning tasks in PyTorch.
|
||||
|
||||
Features included in this script:
|
||||
- Loading a dataset and accessing its properties.
|
||||
- Filtering data by episode number.
|
||||
- Converting tensor data for visualization.
|
||||
- Saving video files from dataset frames.
|
||||
- Using advanced dataset features like timestamp-based frame selection.
|
||||
- Demonstrating compatibility with PyTorch DataLoader for batch processing.
|
||||
|
||||
The script ends with examples of how to batch process data using PyTorch's DataLoader.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from pprint import pprint
|
||||
|
||||
import imageio
|
||||
import torch
|
||||
|
||||
import lerobot
|
||||
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
|
||||
|
||||
print("List of available datasets:")
|
||||
pprint(lerobot.available_datasets)
|
||||
|
||||
# Let's take one for this example
|
||||
repo_id = "lerobot/pusht"
|
||||
|
||||
# You can easily load a dataset from a Hugging Face repository
|
||||
dataset = LeRobotDataset(repo_id)
|
||||
|
||||
# LeRobotDataset is actually a thin wrapper around an underlying Hugging Face dataset
|
||||
# (see https://huggingface.co/docs/datasets/index for more information).
|
||||
print(dataset)
|
||||
print(dataset.hf_dataset)
|
||||
|
||||
# And provides additional utilities for robotics and compatibility with Pytorch
|
||||
print(f"\naverage number of frames per episode: {dataset.num_samples / dataset.num_episodes:.3f}")
|
||||
print(f"frames per second used during data collection: {dataset.fps=}")
|
||||
print(f"keys to access images from cameras: {dataset.camera_keys=}\n")
|
||||
|
||||
# Access the frame indices associated with the first episode
|
||||
episode_index = 0
|
||||
from_idx = dataset.episode_data_index["from"][episode_index].item()
|
||||
to_idx = dataset.episode_data_index["to"][episode_index].item()
|
||||
|
||||
# LeRobot datasets actually subclass PyTorch datasets so you can do everything you know and love from working
|
||||
# with the latter, like iterating through the dataset. Here we grab all the image frames.
|
||||
frames = [dataset[idx]["observation.image"] for idx in range(from_idx, to_idx)]
|
||||
|
||||
# Video frames are now float32 in range [0,1] channel first (c,h,w) to follow pytorch convention. To visualize
|
||||
# them, we convert to uint8 in range [0,255]
|
||||
frames = [(frame * 255).type(torch.uint8) for frame in frames]
|
||||
# and to channel last (h,w,c).
|
||||
frames = [frame.permute((1, 2, 0)).numpy() for frame in frames]
|
||||
|
||||
# Finally, we save the frames to a mp4 video for visualization.
|
||||
Path("outputs/examples/1_load_lerobot_dataset").mkdir(parents=True, exist_ok=True)
|
||||
imageio.mimsave("outputs/examples/1_load_lerobot_dataset/episode_0.mp4", frames, fps=dataset.fps)
|
||||
|
||||
# For many machine learning applications we need to load the history of past observations or trajectories of
|
||||
# future actions. Our datasets can load previous and future frames for each key/modality, using timestamp
|
||||
# differences with the current loaded frame. For instance:
|
||||
delta_timestamps = {
|
||||
# loads 4 images: 1 second before current frame, 500 ms before, 200 ms before, and current frame
|
||||
"observation.image": [-1, -0.5, -0.20, 0],
|
||||
# loads 8 state vectors: 1.5 seconds before, 1 second before, ... 20 ms, 10 ms, and current frame
|
||||
"observation.state": [-1.5, -1, -0.5, -0.20, -0.10, -0.02, -0.01, 0],
|
||||
# loads 64 action vectors: current frame, 1 frame in the future, 2 frames, ... 63 frames in the future
|
||||
"action": [t / dataset.fps for t in range(64)],
|
||||
}
|
||||
dataset = LeRobotDataset(repo_id, delta_timestamps=delta_timestamps)
|
||||
print(f"\n{dataset[0]['observation.image'].shape=}") # (4,c,h,w)
|
||||
print(f"{dataset[0]['observation.state'].shape=}") # (8,c)
|
||||
print(f"{dataset[0]['action'].shape=}\n") # (64,c)
|
||||
|
||||
# Finally, our datasets are fully compatible with PyTorch dataloaders and samplers because they are just
|
||||
# PyTorch datasets.
|
||||
dataloader = torch.utils.data.DataLoader(
|
||||
dataset,
|
||||
num_workers=0,
|
||||
batch_size=32,
|
||||
shuffle=True,
|
||||
)
|
||||
for batch in dataloader:
|
||||
print(f"{batch['observation.image'].shape=}") # (32,4,c,h,w)
|
||||
print(f"{batch['observation.state'].shape=}") # (32,8,c)
|
||||
print(f"{batch['action'].shape=}") # (32,64,c)
|
||||
break
|
||||
@@ -1,121 +0,0 @@
|
||||
"""
|
||||
This script demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local
|
||||
training outputs directory. In the latter case, you might want to run examples/3_train_policy.py first.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
import gym_pusht # noqa: F401
|
||||
import gymnasium as gym
|
||||
import imageio
|
||||
import numpy
|
||||
import torch
|
||||
from huggingface_hub import snapshot_download
|
||||
|
||||
from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
|
||||
|
||||
# Create a directory to store the video of the evaluation
|
||||
output_directory = Path("outputs/eval/example_pusht_diffusion")
|
||||
output_directory.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Download the diffusion policy for pusht environment
|
||||
pretrained_policy_path = Path(snapshot_download("lerobot/diffusion_pusht"))
|
||||
# OR uncomment the following to evaluate a policy from the local outputs/train folder.
|
||||
# pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")
|
||||
|
||||
policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
|
||||
policy.eval()
|
||||
|
||||
# Check if GPU is available
|
||||
if torch.cuda.is_available():
|
||||
device = torch.device("cuda")
|
||||
print("GPU is available. Device set to:", device)
|
||||
else:
|
||||
device = torch.device("cpu")
|
||||
print(f"GPU is not available. Device set to: {device}. Inference will be slower than on GPU.")
|
||||
# Decrease the number of reverse-diffusion steps (trades off a bit of quality for 10x speed)
|
||||
policy.diffusion.num_inference_steps = 10
|
||||
|
||||
policy.to(device)
|
||||
|
||||
# Initialize evaluation environment to render two observation types:
|
||||
# an image of the scene and state/position of the agent. The environment
|
||||
# also automatically stops running after 300 interactions/steps.
|
||||
env = gym.make(
|
||||
"gym_pusht/PushT-v0",
|
||||
obs_type="pixels_agent_pos",
|
||||
max_episode_steps=300,
|
||||
)
|
||||
|
||||
# Reset the policy and environments to prepare for rollout
|
||||
policy.reset()
|
||||
numpy_observation, info = env.reset(seed=42)
|
||||
|
||||
# Prepare to collect every reward and all the frames of the episode,
|
||||
# from initial state to final state.
|
||||
rewards = []
|
||||
frames = []
|
||||
|
||||
# Render frame of the initial state
|
||||
frames.append(env.render())
|
||||
|
||||
step = 0
|
||||
done = False
|
||||
while not done:
|
||||
# Prepare observation for the policy running in Pytorch
|
||||
state = torch.from_numpy(numpy_observation["agent_pos"])
|
||||
image = torch.from_numpy(numpy_observation["pixels"])
|
||||
|
||||
# Convert to float32 with image from channel last (h,w,c) in [0,255]
|
||||
# to channel first (c,h,w) in [0,1]
|
||||
state = state.to(torch.float32)
|
||||
image = image.to(torch.float32) / 255
|
||||
image = image.permute(2, 0, 1)
|
||||
|
||||
# Send data tensors from CPU to GPU
|
||||
state = state.to(device, non_blocking=True)
|
||||
image = image.to(device, non_blocking=True)
|
||||
|
||||
# Add extra (empty) batch dimension, required to forward the policy
|
||||
state = state.unsqueeze(0)
|
||||
image = image.unsqueeze(0)
|
||||
|
||||
# Create the policy input dictionary
|
||||
observation = {
|
||||
"observation.state": state,
|
||||
"observation.image": image,
|
||||
}
|
||||
|
||||
# Predict the next action with respect to the current observation
|
||||
with torch.inference_mode():
|
||||
action = policy.select_action(observation)
|
||||
|
||||
# Prepare the action for the environment
|
||||
numpy_action = action.squeeze(0).to("cpu").numpy()
|
||||
|
||||
# Step through the environment and receive a new observation
|
||||
numpy_observation, reward, terminated, truncated, info = env.step(numpy_action)
|
||||
print(f"{step=} {reward=} {terminated=}")
|
||||
|
||||
# Keep track of all the rewards and frames
|
||||
rewards.append(reward)
|
||||
frames.append(env.render())
|
||||
|
||||
# The rollout is considered done when the success state is reached (i.e. terminated is True),
|
||||
# or the maximum number of iterations is reached (i.e. truncated is True)
|
||||
done = terminated | truncated | done
|
||||
step += 1
|
||||
|
||||
if terminated:
|
||||
print("Success!")
|
||||
else:
|
||||
print("Failure!")
|
||||
|
||||
# Get the speed of environment (i.e. its number of frames per second).
|
||||
fps = env.metadata["render_fps"]
|
||||
|
||||
# Encode all frames into a mp4 video.
|
||||
video_path = output_directory / "rollout.mp4"
|
||||
imageio.mimsave(str(video_path), numpy.stack(frames), fps=fps)
|
||||
|
||||
print(f"Video of the evaluation is available in '{video_path}'.")
|
||||
@@ -1,79 +0,0 @@
|
||||
"""This scripts demonstrates how to train Diffusion Policy on the PushT environment.
|
||||
|
||||
Once you have trained a model with this script, you can try to evaluate it on
|
||||
examples/2_evaluate_pretrained_policy.py
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
|
||||
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
|
||||
from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig
|
||||
from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
|
||||
|
||||
# Create a directory to store the training checkpoint.
|
||||
output_directory = Path("outputs/train/example_pusht_diffusion")
|
||||
output_directory.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Number of offline training steps (we'll only do offline training for this example.)
|
||||
# Adjust as you prefer. 5000 steps are needed to get something worth evaluating.
|
||||
training_steps = 5000
|
||||
device = torch.device("cuda")
|
||||
log_freq = 250
|
||||
|
||||
# Set up the dataset.
|
||||
delta_timestamps = {
|
||||
# Load the previous image and state at -0.1 seconds before current frame,
|
||||
# then load current image and state corresponding to 0.0 second.
|
||||
"observation.image": [-0.1, 0.0],
|
||||
"observation.state": [-0.1, 0.0],
|
||||
# Load the previous action (-0.1), the next action to be executed (0.0),
|
||||
# and 14 future actions with a 0.1 seconds spacing. All these actions will be
|
||||
# used to supervise the policy.
|
||||
"action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
|
||||
}
|
||||
dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps)
|
||||
|
||||
# Set up the policy.
|
||||
# Policies are initialized with a configuration class, in this case `DiffusionConfig`.
|
||||
# For this example, no arguments need to be passed because the defaults are set up for PushT.
|
||||
# If you're doing something different, you will likely need to change at least some of the defaults.
|
||||
cfg = DiffusionConfig()
|
||||
policy = DiffusionPolicy(cfg, dataset_stats=dataset.stats)
|
||||
policy.train()
|
||||
policy.to(device)
|
||||
|
||||
optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
|
||||
|
||||
# Create dataloader for offline training.
|
||||
dataloader = torch.utils.data.DataLoader(
|
||||
dataset,
|
||||
num_workers=4,
|
||||
batch_size=64,
|
||||
shuffle=True,
|
||||
pin_memory=device != torch.device("cpu"),
|
||||
drop_last=True,
|
||||
)
|
||||
|
||||
# Run training loop.
|
||||
step = 0
|
||||
done = False
|
||||
while not done:
|
||||
for batch in dataloader:
|
||||
batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()}
|
||||
output_dict = policy.forward(batch)
|
||||
loss = output_dict["loss"]
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
if step % log_freq == 0:
|
||||
print(f"step: {step} loss: {loss.item():.3f}")
|
||||
step += 1
|
||||
if step >= training_steps:
|
||||
done = True
|
||||
break
|
||||
|
||||
# Save a policy checkpoint.
|
||||
policy.save_pretrained(output_directory)
|
||||
@@ -1,213 +0,0 @@
|
||||
This tutorial will explain the training script, how to use it, and particularly the use of Hydra to configure everything needed for the training run.
|
||||
|
||||
## The training script
|
||||
|
||||
LeRobot offers a training script at [`lerobot/scripts/train.py`](../../lerobot/scripts/train.py). At a high level it does the following:
|
||||
|
||||
- Loads a Hydra configuration file for the following steps (more on Hydra in a moment).
|
||||
- Makes a simulation environment.
|
||||
- Makes a dataset corresponding to that simulation environment.
|
||||
- Makes a policy.
|
||||
- Runs a standard training loop with forward pass, backward pass, optimization step, and occasional logging, evaluation (of the policy on the environment), and checkpointing.
|
||||
|
||||
## Basics of how we use Hydra
|
||||
|
||||
Explaining the ins and outs of [Hydra](https://hydra.cc/docs/intro/) is beyond the scope of this document, but here we'll share the main points you need to know.
|
||||
|
||||
First, `lerobot/configs` has a directory structure like this:
|
||||
|
||||
```
|
||||
.
|
||||
├── default.yaml
|
||||
├── env
|
||||
│ ├── aloha.yaml
|
||||
│ ├── pusht.yaml
|
||||
│ └── xarm.yaml
|
||||
└── policy
|
||||
├── act.yaml
|
||||
├── diffusion.yaml
|
||||
└── tdmpc.yaml
|
||||
```
|
||||
|
||||
**_For brevity, in the rest of this document we'll drop the leading `lerobot/configs` path. So `default.yaml` really refers to `lerobot/configs/default.yaml`._**
|
||||
|
||||
When you run the training script with
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py
|
||||
```
|
||||
|
||||
Hydra is set up to read `default.yaml` (via the `@hydra.main` decorator). If you take a look at the `@hydra.main`'s arguments you will see `config_path="../configs", config_name="default"`. At the top of `default.yaml` is a `defaults` section which looks like this:
|
||||
|
||||
```yaml
|
||||
defaults:
|
||||
- _self_
|
||||
- env: pusht
|
||||
- policy: diffusion
|
||||
```
|
||||
|
||||
This logic tells Hydra to incorporate configuration parameters from `env/pusht.yaml` and `policy/diffusion.yaml`. _Note: Be aware of the order, as any configuration parameters with the same name will be overridden. Thus, `default.yaml` is overridden by `env/pusht.yaml`, which is overridden by `policy/diffusion.yaml`_.
|
||||
|
||||
Then, `default.yaml` also contains common configuration parameters such as `device: cuda` or `use_amp: false` (for enabling fp16 training). Some other parameters are set to `???` which indicates that they are expected to be set in additional yaml files. For instance, `training.offline_steps: ???` in `default.yaml` is set to `200000` in `diffusion.yaml`.
|
||||
|
||||
Thanks to this `defaults` section in `default.yaml`, if you want to train Diffusion Policy with PushT, you really only need to run:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py
|
||||
```
|
||||
|
||||
However, you can be more explicit and launch the exact same Diffusion Policy training on PushT with:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py policy=diffusion env=pusht
|
||||
```
|
||||
|
||||
This way of overriding defaults via the CLI is especially useful when you want to change the policy and/or environment. For instance, you can train ACT on the default Aloha environment with:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py policy=act env=aloha
|
||||
```
|
||||
|
||||
There are two things to note here:
|
||||
- Config overrides are passed as `param_name=param_value`.
|
||||
- Here we have overridden the defaults section. `policy=act` tells Hydra to use `policy/act.yaml`, and `env=aloha` tells Hydra to use `env/aloha.yaml`.
|
||||
|
||||
_As an aside: we've set up all of our configurations so that they reproduce state-of-the-art results from papers in the literature._
|
||||
|
||||
## Overriding configuration parameters in the CLI
|
||||
|
||||
Now let's say that we want to train on a different task in the Aloha environment. If you look in `env/aloha.yaml` you will see something like:
|
||||
|
||||
```yaml
|
||||
# lerobot/configs/env/aloha.yaml
|
||||
env:
|
||||
task: AlohaInsertion-v0
|
||||
```
|
||||
|
||||
And if you look in `policy/act.yaml` you will see something like:
|
||||
|
||||
```yaml
|
||||
# lerobot/configs/policy/act.yaml
|
||||
dataset_repo_id: lerobot/aloha_sim_insertion_human
|
||||
```
|
||||
|
||||
But our Aloha environment actually supports a cube transfer task as well. To train for this task, you could manually modify the two yaml configuration files respectively.
|
||||
|
||||
First, we'd need to switch to using the cube transfer task for the ALOHA environment.
|
||||
|
||||
```diff
|
||||
# lerobot/configs/env/aloha.yaml
|
||||
env:
|
||||
- task: AlohaInsertion-v0
|
||||
+ task: AlohaTransferCube-v0
|
||||
```
|
||||
|
||||
Then, we'd also need to switch to using the cube transfer dataset.
|
||||
|
||||
```diff
|
||||
# lerobot/configs/policy/act.yaml
|
||||
-dataset_repo_id: lerobot/aloha_sim_insertion_human
|
||||
+dataset_repo_id: lerobot/aloha_sim_transfer_cube_human
|
||||
```
|
||||
|
||||
Then, you'd be able to run:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py policy=act env=aloha
|
||||
```
|
||||
|
||||
and you'd be training and evaluating on the cube transfer task.
|
||||
|
||||
An alternative approach to editing the yaml configuration files would be to override the defaults via the command line:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
policy=act \
|
||||
dataset_repo_id=lerobot/aloha_sim_transfer_cube_human \
|
||||
env=aloha \
|
||||
env.task=AlohaTransferCube-v0
|
||||
```
|
||||
|
||||
There's something new here. Notice the `.` delimiter used to traverse the configuration hierarchy. _But be aware that the `defaults` section is an exception. As you saw above, we didn't need to write `defaults.policy=act` in the CLI. `policy=act` was enough._
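If you want to see the fully merged configuration that such a command produces without launching a training run, you can compose it yourself with Hydra's compose API. This is a minimal sketch using standard Hydra (not a LeRobot utility); it assumes you run it from the root of the `lerobot` repository and that your Hydra version accepts `version_base="1.2"`:

```python
from pathlib import Path

from hydra import compose, initialize_config_dir
from omegaconf import OmegaConf

# Point Hydra at the same config directory the training script uses.
config_dir = str(Path("lerobot/configs").resolve())

with initialize_config_dir(config_dir=config_dir, version_base="1.2"):
    cfg = compose(
        config_name="default",
        overrides=[
            "policy=act",
            "env=aloha",
            "env.task=AlohaTransferCube-v0",
            "dataset_repo_id=lerobot/aloha_sim_transfer_cube_human",
        ],
    )

# Print the merged configuration, i.e. what train.py would receive.
print(OmegaConf.to_yaml(cfg))
```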
|
||||
|
||||
Putting all that knowledge together, here's the command that was used to train https://huggingface.co/lerobot/act_aloha_sim_transfer_cube_human.
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
hydra.run.dir=outputs/train/act_aloha_sim_transfer_cube_human \
|
||||
device=cuda \
|
||||
env=aloha \
|
||||
env.task=AlohaTransferCube-v0 \
|
||||
dataset_repo_id=lerobot/aloha_sim_transfer_cube_human \
|
||||
policy=act \
|
||||
training.eval_freq=10000 \
|
||||
training.log_freq=250 \
|
||||
training.offline_steps=100000 \
|
||||
training.save_model=true \
|
||||
training.save_freq=25000 \
|
||||
eval.n_episodes=50 \
|
||||
eval.batch_size=50 \
|
||||
wandb.enable=false
|
||||
```
|
||||
|
||||
There's one new thing here: `hydra.run.dir=outputs/train/act_aloha_sim_transfer_cube_human`, which specifies where to save the training output.
|
||||
|
||||
## Using a configuration file not in `lerobot/configs`
|
||||
|
||||
Above we discussed how our training script is set up such that Hydra looks for `default.yaml` in `lerobot/configs`. But if you have a configuration file elsewhere in your filesystem, you may use:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py --config-dir PARENT/PATH --config-name FILE_NAME_WITHOUT_EXTENSION
|
||||
```
|
||||
|
||||
Note: here we use regular syntax for providing CLI arguments to a Python script, not Hydra's `param_name=param_value` syntax.
|
||||
|
||||
As a concrete example, this becomes particularly handy when you have a folder with training outputs, and would like to re-run the training. For example, say you previously ran the training script with one of the earlier commands and have `outputs/train/my_experiment/checkpoints/pretrained_model/config.yaml`. This `config.yaml` file will have the full set of configuration parameters within it. To run the training with the same configuration again, do:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py --config-dir outputs/train/my_experiment/checkpoints/last/pretrained_model --config-name config
|
||||
```
|
||||
|
||||
Note that you may still use the regular syntax for config parameter overrides (eg: by adding `training.offline_steps=200000`).
|
||||
|
||||
## Typical logs and metrics
|
||||
|
||||
When you start the training process, you will first see your full configuration being printed in the terminal. You can check it to make sure that you configured it correctly and that your config is not overridden by other files. The final configuration will also be saved with the checkpoint.
|
||||
|
||||
After that, you will see training logs like this one:
|
||||
|
||||
```
|
||||
INFO 2024-08-14 13:35:12 ts/train.py:192 step:0 smpl:64 ep:1 epch:0.00 loss:1.112 grdn:15.387 lr:2.0e-07 updt_s:1.738 data_s:4.774
|
||||
```
|
||||
|
||||
or evaluation logs like:
|
||||
|
||||
```
|
||||
INFO 2024-08-14 13:38:45 ts/train.py:226 step:100 smpl:6K ep:52 epch:0.25 ∑rwrd:20.693 success:0.0% eval_s:120.266
|
||||
```
|
||||
|
||||
These logs will also be saved in wandb if `wandb.enable` is set to `true`. Here is the meaning of some abbreviations:
|
||||
|
||||
- `smpl`: number of samples seen during training.
|
||||
- `ep`: number of episodes seen during training. An episode contains multiple samples in a complete manipulation task.
|
||||
- `epch`: number of times all unique samples have been seen (epochs).
|
||||
- `grdn`: gradient norm.
|
||||
- `∑rwrd`: sum of rewards within each evaluation episode, averaged over all evaluation episodes.
|
||||
- `success`: average success rate of eval episodes. Reward and success are usually different, except in the sparse-reward setting, where reward=1 only when the task is completed successfully.
|
||||
- `eval_s`: time to evaluate the policy in the environment, in seconds.
|
||||
- `updt_s`: time to update the network parameters, in seconds.
|
||||
- `data_s`: time to load a batch of data, in seconds.
|
||||
|
||||
Some metrics are useful for initial performance profiling. For example, if you find that GPU utilization is low when checking with the `nvidia-smi` command and that `data_s` is sometimes too high, you may need to adjust the batch size or the number of dataloading workers to accelerate dataloading. We also recommend the [pytorch profiler](https://github.com/huggingface/lerobot?tab=readme-ov-file#improve-your-code-with-profiling) for detailed performance probing.
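If the aggregate timings above are not enough, a profiler gives a per-operation breakdown. The snippet below is a small, self-contained sketch of PyTorch's built-in profiler on a dummy model; it only illustrates the idea and is not LeRobot's own profiling setup:

```python
import torch
from torch.profiler import ProfilerActivity, profile

# A dummy model and batch standing in for a policy and a dataloader batch.
model = torch.nn.Sequential(torch.nn.Linear(512, 512), torch.nn.ReLU(), torch.nn.Linear(512, 512))
batch = torch.randn(64, 512)

with profile(activities=[ProfilerActivity.CPU], record_shapes=True) as prof:
    for _ in range(10):
        loss = model(batch).sum()
        loss.backward()

# Show the most expensive operations, similar in spirit to interpreting `updt_s` and `data_s`.
print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
```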
|
||||
|
||||
---
|
||||
|
||||
So far we've seen how to train Diffusion Policy for PushT and ACT for ALOHA. Now, what if we want to train ACT for PushT? Well, there are aspects of the ACT configuration that are specific to the ALOHA environments, and these happen to be incompatible with PushT. Therefore, trying to run the following will almost certainly raise an exception of sorts (eg: feature dimension mismatch):
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py policy=act env=pusht dataset_repo_id=lerobot/pusht
|
||||
```
|
||||
|
||||
Please, head on over to our [advanced tutorial on adapting policy configuration to various environments](./advanced/train_act_pusht/train_act_pusht.md) to learn more.
|
||||
|
||||
Or in the meantime, happy coding! 🤗
|
||||
@@ -1,37 +0,0 @@
|
||||
This tutorial explains how to resume a training run that you've started with the training script. If you don't know how our training script and configuration system works, please read [4_train_policy_with_script.md](./4_train_policy_with_script.md) first.
|
||||
|
||||
## Basic training resumption
|
||||
|
||||
Let's consider the example of training ACT for one of the ALOHA tasks. Here's a command that can achieve that:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
hydra.run.dir=outputs/train/run_resumption \
|
||||
policy=act \
|
||||
dataset_repo_id=lerobot/aloha_sim_transfer_cube_human \
|
||||
env=aloha \
|
||||
env.task=AlohaTransferCube-v0 \
|
||||
training.log_freq=25 \
|
||||
training.save_checkpoint=true \
|
||||
training.save_freq=100
|
||||
```
|
||||
|
||||
Here we're using the default dataset and environment for ACT, and we've taken care to set up the log frequency and checkpointing frequency to low numbers so we can test resumption. You should be able to see some logging and have a first checkpoint within 1 minute. Please interrupt the training after the first checkpoint.
|
||||
|
||||
To resume, all that we have to do is run the training script, providing the run directory, and the resume option:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
hydra.run.dir=outputs/train/run_resumption \
|
||||
resume=true
|
||||
```
|
||||
|
||||
You should see from the logging that your training picks up from where it left off.
|
||||
|
||||
Note that with `resume=true`, the configuration file from the last checkpoint in the training output directory is loaded. So it doesn't matter that we haven't provided all the other configuration parameters from our previous command (although there may be warnings to notify you that your command has a different configuration than the checkpoint).
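If you want to check what exactly will be resumed, you can open the configuration saved with the last checkpoint before relaunching. A minimal sketch, assuming the checkpoint layout used above (the config is stored next to the pretrained model):

```python
from omegaconf import OmegaConf

# Configuration saved with the last checkpoint of the interrupted run.
cfg = OmegaConf.load("outputs/train/run_resumption/checkpoints/last/pretrained_model/config.yaml")

print(cfg.policy.name)
print(cfg.training.offline_steps)
```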
|
||||
|
||||
---
|
||||
|
||||
Now you should know how to resume your training run in case it gets interrupted or you want to extend a finished training run.
|
||||
|
||||
Happy coding! 🤗
|
||||
@@ -1,52 +0,0 @@
|
||||
"""
|
||||
This script demonstrates how to use torchvision's image transforms with LeRobotDataset for data
|
||||
augmentation purposes. The transformations are passed to the dataset as an argument upon creation, and
|
||||
transforms are applied to the observation images before they are returned in the dataset's __getitem__.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from torchvision.transforms import ToPILImage, v2
|
||||
|
||||
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
|
||||
|
||||
dataset_repo_id = "lerobot/aloha_static_tape"
|
||||
|
||||
# Create a LeRobotDataset with no transformations
|
||||
dataset = LeRobotDataset(dataset_repo_id)
|
||||
# This is equivalent to `dataset = LeRobotDataset(dataset_repo_id, image_transforms=None)`
|
||||
|
||||
# Get the index of the first observation in the first episode
|
||||
first_idx = dataset.episode_data_index["from"][0].item()
|
||||
|
||||
# Get the frame corresponding to the first camera
|
||||
frame = dataset[first_idx][dataset.camera_keys[0]]
|
||||
|
||||
|
||||
# Define the transformations
|
||||
transforms = v2.Compose(
|
||||
[
|
||||
v2.ColorJitter(brightness=(0.5, 1.5)),
|
||||
v2.ColorJitter(contrast=(0.5, 1.5)),
|
||||
v2.RandomAdjustSharpness(sharpness_factor=2, p=1),
|
||||
]
|
||||
)
|
||||
|
||||
# Create another LeRobotDataset with the defined transformations
|
||||
transformed_dataset = LeRobotDataset(dataset_repo_id, image_transforms=transforms)
|
||||
|
||||
# Get a frame from the transformed dataset
|
||||
transformed_frame = transformed_dataset[first_idx][transformed_dataset.camera_keys[0]]
|
||||
|
||||
# Create a directory to store output images
|
||||
output_dir = Path("outputs/image_transforms")
|
||||
output_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Save the original frame
|
||||
to_pil = ToPILImage()
|
||||
to_pil(frame).save(output_dir / "original_frame.png", quality=100)
|
||||
print(f"Original frame saved to {output_dir / 'original_frame.png'}.")
|
||||
|
||||
# Save the transformed frame
|
||||
to_pil(transformed_frame).save(output_dir / "transformed_frame.png", quality=100)
|
||||
print(f"Transformed frame saved to {output_dir / 'transformed_frame.png'}.")
|
||||
@@ -1,158 +0,0 @@
|
||||
This tutorial explains how to use [Stretch 3](https://hello-robot.com/stretch-3-product) with LeRobot.
|
||||
|
||||
## Setup
|
||||
|
||||
Familiarize yourself with Stretch by following its [tutorials](https://docs.hello-robot.com/0.3/getting_started/hello_robot/) (recommended).
|
||||
|
||||
To use LeRobot on Stretch, 3 options are available:
|
||||
- [tethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup)
|
||||
- [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup)
|
||||
- ssh directly into Stretch (you will first need to install and configure openssh-server on Stretch using one of the two above setups)
|
||||
|
||||
|
||||
## Install LeRobot
|
||||
|
||||
On Stretch's CLI, follow these steps:
|
||||
|
||||
1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
|
||||
```bash
|
||||
mkdir -p ~/miniconda3
|
||||
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
|
||||
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
|
||||
rm ~/miniconda3/miniconda.sh
|
||||
~/miniconda3/bin/conda init bash
|
||||
```
|
||||
|
||||
2. Comment out these lines in `~/.profile` (they can mess up the paths used by conda, and `~/.local/bin` should already be in your `PATH`)
|
||||
```
|
||||
# set PATH so it includes user's private bin if it exists
|
||||
if [ -d "$HOME/.local/bin" ] ; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
fi
|
||||
```
|
||||
|
||||
3. Restart shell or `source ~/.bashrc`
|
||||
|
||||
4. Create and activate a fresh conda environment for lerobot
|
||||
```bash
|
||||
conda create -y -n lerobot python=3.10 && conda activate lerobot
|
||||
```
|
||||
|
||||
5. Clone LeRobot:
|
||||
```bash
|
||||
git clone https://github.com/huggingface/lerobot.git ~/lerobot
|
||||
```
|
||||
|
||||
6. Install LeRobot with stretch dependencies:
|
||||
```bash
|
||||
cd ~/lerobot && pip install -e ".[stretch]"
|
||||
```
|
||||
|
||||
> **Note:** If you get this message, you can ignore it: `ERROR: pip's dependency resolver does not currently take into account all the packages that are installed.`
|
||||
|
||||
For Linux only (not Mac), install extra dependencies for recording datasets:
|
||||
```bash
|
||||
conda install -y -c conda-forge ffmpeg
|
||||
pip uninstall -y opencv-python
|
||||
conda install -y -c conda-forge "opencv>=4.10.0"
|
||||
```
|
||||
|
||||
7. Run a [system check](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#system-check) to make sure your robot is ready:
|
||||
```bash
|
||||
stretch_system_check.py
|
||||
```
|
||||
|
||||
> **Note:** You may need to free the "robot process" after booting Stretch by running `stretch_free_robot_process.py`. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#turning-off-gamepad-teleoperation).
|
||||
|
||||
You should get something like this:
|
||||
```bash
|
||||
For use with S T R E T C H (R) from Hello Robot Inc.
|
||||
---------------------------------------------------------------------
|
||||
|
||||
Model = Stretch 3
|
||||
Tool = DexWrist 3 w/ Gripper
|
||||
Serial Number = stretch-se3-3054
|
||||
|
||||
---- Checking Hardware ----
|
||||
[Pass] Comms are ready
|
||||
[Pass] Actuators are ready
|
||||
[Warn] Sensors not ready (IMU AZ = -10.19 out of range -10.1 to -9.5)
|
||||
[Pass] Battery voltage is 13.6 V
|
||||
|
||||
---- Checking Software ----
|
||||
[Pass] Ubuntu 22.04 is ready
|
||||
[Pass] All APT pkgs are setup correctly
|
||||
[Pass] Firmware is up-to-date
|
||||
[Pass] Python pkgs are up-to-date
|
||||
[Pass] ROS2 Humble is ready
|
||||
```
|
||||
|
||||
## Teleoperate, record a dataset and run a policy
|
||||
|
||||
**Calibrate (Optional)**
|
||||
Before operating Stretch, you need to [home](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#homing) it first. Be mindful about giving Stretch some space as this procedure will move the robot's arm and gripper. Now run this command:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py calibrate \
|
||||
--robot-path lerobot/configs/robot/stretch.yaml
|
||||
```
|
||||
This is equivalent to running `stretch_robot_home.py`
|
||||
|
||||
> **Note:** If you run any of the LeRobot scripts below and Stretch is not properly homed, it will automatically home/calibrate first.
|
||||
|
||||
**Teleoperate**
|
||||
Before trying teleoperation, you need to activate the gamepad controller by pressing the middle button. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/hello_robot/#gamepad-teleoperation).
|
||||
|
||||
Now try out teleoperation (see above documentation to learn about the gamepad controls):
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py teleoperate \
|
||||
--robot-path lerobot/configs/robot/stretch.yaml
|
||||
```
|
||||
This is essentially the same as running `stretch_gamepad_teleop.py`
|
||||
|
||||
**Record a dataset**
|
||||
Once you're familiar with the gamepad controls and after a bit of practice, you can try to record your first dataset with Stretch.
|
||||
|
||||
If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
|
||||
```bash
|
||||
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
|
||||
```
|
||||
|
||||
Store your Hugging Face repository name in a variable to run these commands:
|
||||
```bash
|
||||
HF_USER=$(huggingface-cli whoami | head -n 1)
|
||||
echo $HF_USER
|
||||
```
|
||||
|
||||
Record one episode:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py record \
|
||||
--robot-path lerobot/configs/robot/stretch.yaml \
|
||||
--fps 20 \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/stretch_test \
|
||||
--tags stretch tutorial \
|
||||
--warmup-time-s 3 \
|
||||
--episode-time-s 40 \
|
||||
--reset-time-s 10 \
|
||||
--num-episodes 1 \
|
||||
--push-to-hub 0
|
||||
```
|
||||
|
||||
> **Note:** If you're using ssh to connect to Stretch and run this script, you won't be able to visualize its cameras feed (though they will still be recording). To see the cameras stream, use [tethered](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup) or [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup).
|
||||
|
||||
**Replay an episode**
|
||||
Now try to replay this episode (make sure the robot's initial position is the same):
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py replay \
|
||||
--robot-path lerobot/configs/robot/stretch.yaml \
|
||||
--fps 20 \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/stretch_test \
|
||||
--episode 0
|
||||
```
|
||||
|
||||
Follow the [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) to train a policy on your data and run inference on your robot. You will need to adapt the code for Stretch.
|
||||
|
||||
> TODO(rcadene, aliberts): Add already setup environment and policy yaml configuration files
|
||||
|
||||
If you need help, please reach out on Discord in the channel `#stretch3-mobile-arm`.
|
||||
@@ -1,179 +0,0 @@
|
||||
This tutorial explains how to use [Aloha and Aloha 2 stationary](https://www.trossenrobotics.com/aloha-stationary) with LeRobot.
|
||||
|
||||
## Setup
|
||||
|
||||
Follow the [documentation from Trossen Robotics](https://docs.trossenrobotics.com/aloha_docs/getting_started/stationary/hardware_setup.html) for setting up the hardware and plugging the 4 arms and 4 cameras into your computer.
|
||||
|
||||
|
||||
## Install LeRobot
|
||||
|
||||
On your computer:
|
||||
|
||||
1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
|
||||
```bash
|
||||
mkdir -p ~/miniconda3
|
||||
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
|
||||
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
|
||||
rm ~/miniconda3/miniconda.sh
|
||||
~/miniconda3/bin/conda init bash
|
||||
```
|
||||
|
||||
2. Restart shell or `source ~/.bashrc`
|
||||
|
||||
3. Create and activate a fresh conda environment for lerobot
|
||||
```bash
|
||||
conda create -y -n lerobot python=3.10 && conda activate lerobot
|
||||
```
|
||||
|
||||
4. Clone LeRobot:
|
||||
```bash
|
||||
git clone https://github.com/huggingface/lerobot.git ~/lerobot
|
||||
```
|
||||
|
||||
5. Install LeRobot with dependencies for the Aloha motors (dynamixel) and cameras (intelrealsense):
|
||||
```bash
|
||||
cd ~/lerobot && pip install -e ".[dynamixel, intelrealsense]"
|
||||
```
|
||||
|
||||
For Linux only (not Mac), install extra dependencies for recording datasets:
|
||||
```bash
|
||||
conda install -y -c conda-forge ffmpeg
|
||||
pip uninstall -y opencv-python
|
||||
conda install -y -c conda-forge "opencv>=4.10.0"
|
||||
```
|
||||
|
||||
## Teleoperate
|
||||
|
||||
**/!\ FOR SAFETY, READ THIS /!\**
|
||||
Teleoperation consists of manually operating the leader arms to move the follower arms. Importantly:
|
||||
1. Make sure your leader arms are in the same position as the follower arms, so that the follower arms don't move too fast to match the leader arms,
|
||||
2. Our code assumes that your robot has been assembled following Trossen Robotics instructions. This allows us to skip calibration, as we use the pre-defined calibration files in `.cache/calibration/aloha_default`. If you replace a motor, make sure you follow the exact instructions from Trossen Robotics.
|
||||
|
||||
By running the following code, you can start your first **SAFE** teleoperation:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py teleoperate \
|
||||
--robot-path lerobot/configs/robot/aloha.yaml \
|
||||
--robot-overrides max_relative_target=5
|
||||
```
|
||||
|
||||
By adding `--robot-overrides max_relative_target=5`, we override the default value for `max_relative_target` defined in `lerobot/configs/robot/aloha.yaml`. It is expected to be `5` to limit the magnitude of the movement for more safety, but the teleoperation won't be smooth. When you feel confident, you can disable this limit by adding `--robot-overrides max_relative_target=null` to the command line:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py teleoperate \
|
||||
--robot-path lerobot/configs/robot/aloha.yaml \
|
||||
--robot-overrides max_relative_target=null
|
||||
```
|
||||
|
||||
## Record a dataset
|
||||
|
||||
Once you're familiar with teleoperation, you can record your first dataset with Aloha.
|
||||
|
||||
If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
|
||||
```bash
|
||||
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
|
||||
```
|
||||
|
||||
Store your Hugging Face repository name in a variable to run these commands:
|
||||
```bash
|
||||
HF_USER=$(huggingface-cli whoami | head -n 1)
|
||||
echo $HF_USER
|
||||
```
|
||||
|
||||
Record 2 episodes and upload your dataset to the hub:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py record \
|
||||
--robot-path lerobot/configs/robot/aloha.yaml \
|
||||
--robot-overrides max_relative_target=null \
|
||||
--fps 30 \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/aloha_test \
|
||||
--tags aloha tutorial \
|
||||
--warmup-time-s 5 \
|
||||
--episode-time-s 40 \
|
||||
--reset-time-s 10 \
|
||||
--num-episodes 2 \
|
||||
--push-to-hub 1
|
||||
```
|
||||
|
||||
## Visualize a dataset
|
||||
|
||||
If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id given by:
|
||||
```bash
|
||||
echo ${HF_USER}/aloha_test
|
||||
```
|
||||
|
||||
If you didn't upload your dataset (i.e. you recorded with `--push-to-hub 0`), you can still visualize it locally with:
|
||||
```bash
|
||||
python lerobot/scripts/visualize_dataset_html.py \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/aloha_test
|
||||
```
|
||||
|
||||
## Replay an episode
|
||||
|
||||
**/!\ FOR SAFETY, READ THIS /!\**
|
||||
Replay consists of automatically replaying the sequence of actions (i.e. goal positions for your motors) recorded in a given dataset episode. Make sure the current initial position of your robot is similar to the one in your episode, so that your follower arms don't move too fast to go to the first goal positions. For safety, you might want to add `--robot-overrides max_relative_target=5` to your command line as explained above.
|
||||
|
||||
Now try to replay the first episode on your robot:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py replay \
|
||||
--robot-path lerobot/configs/robot/aloha.yaml \
|
||||
--robot-overrides max_relative_target=null \
|
||||
--fps 30 \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/aloha_test \
|
||||
--episode 0
|
||||
```
|
||||
|
||||
## Train a policy
|
||||
|
||||
To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
|
||||
```bash
|
||||
DATA_DIR=data python lerobot/scripts/train.py \
|
||||
dataset_repo_id=${HF_USER}/aloha_test \
|
||||
policy=act_aloha_real \
|
||||
env=aloha_real \
|
||||
hydra.run.dir=outputs/train/act_aloha_test \
|
||||
hydra.job.name=act_aloha_test \
|
||||
device=cuda \
|
||||
wandb.enable=true
|
||||
```
|
||||
|
||||
Let's explain it:
|
||||
1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/aloha_test`.
|
||||
2. We provided the policy with `policy=act_aloha_real`. This loads configurations from [`lerobot/configs/policy/act_aloha_real.yaml`](../lerobot/configs/policy/act_aloha_real.yaml). Importantly, this policy uses 4 cameras as input: `cam_right_wrist`, `cam_left_wrist`, `cam_high`, and `cam_low`.
|
||||
3. We provided an environment as argument with `env=aloha_real`. This loads configurations from [`lerobot/configs/env/aloha_real.yaml`](../lerobot/configs/env/aloha_real.yaml). Note: this yaml defines 18 dimensions for the `state_dim` and `action_dim`, corresponding to 18 motors, not 14 motors as used in previous Aloha work. This is because we include the `shoulder_shadow` and `elbow_shadow` motors for simplicity.
|
||||
4. We provided `device=cuda` since we are training on a Nvidia GPU.
|
||||
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
|
||||
6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you don't provide `DATA_DIR`, your dataset will be downloaded from the Hugging Face hub to your cache folder `$HOME/.cache/huggingface`. In future versions of `lerobot`, both directories will be in sync.
|
||||
|
||||
Training should take several hours. You will find checkpoints in `outputs/train/act_aloha_test/checkpoints`.
|
||||
|
||||
## Evaluate your policy
|
||||
|
||||
You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py record \
|
||||
--robot-path lerobot/configs/robot/aloha.yaml \
|
||||
--robot-overrides max_relative_target=null \
|
||||
--fps 30 \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/eval_act_aloha_test \
|
||||
--tags aloha tutorial eval \
|
||||
--warmup-time-s 5 \
|
||||
--episode-time-s 40 \
|
||||
--reset-time-s 10 \
|
||||
--num-episodes 10 \
|
||||
--num-image-writer-processes 1 \
|
||||
-p outputs/train/act_aloha_test/checkpoints/last/pretrained_model
|
||||
```
|
||||
|
||||
As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
|
||||
1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/act_aloha_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_aloha_test`).
|
||||
2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_aloha_test`). A short snippet for inspecting this evaluation dataset follows the list.
|
||||
3. We use `--num-image-writer-processes 1` instead of the default value (`0`). On our computer, using a dedicated process to write images from the 4 cameras to disk allows us to reach a constant 30 fps during inference. Feel free to explore different values for `--num-image-writer-processes`.
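As a quick sanity check on that evaluation dataset, you can inspect it with the same `LeRobotDataset` API used throughout the examples. This is a minimal sketch; it assumes the dataset was recorded locally under `data` (hence setting `DATA_DIR` before importing, as explained in the training section) and that `YOUR_HF_USER` is replaced by your `$HF_USER`:

```python
import os

os.environ["DATA_DIR"] = "data"  # load the dataset from the local `data` directory

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

repo_id = "YOUR_HF_USER/eval_act_aloha_test"  # replace with ${HF_USER}/eval_act_aloha_test
dataset = LeRobotDataset(repo_id)

print(f"number of evaluation episodes: {dataset.num_episodes}")
print(f"average number of frames per episode: {dataset.num_samples / dataset.num_episodes:.1f}")
print(f"camera keys: {dataset.camera_keys}")
```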
|
||||
|
||||
## More
|
||||
|
||||
Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth explanation.
|
||||
|
||||
If you have any question or need help, please reach out on Discord in the channel `#aloha-arm`.
|
||||
@@ -1,87 +0,0 @@
|
||||
# @package _global_
|
||||
|
||||
# Change the seed to match what PushT eval uses
|
||||
# (to avoid evaluating on seeds used for generating the training data).
|
||||
seed: 100000
|
||||
# Change the dataset repository to the PushT one.
|
||||
dataset_repo_id: lerobot/pusht
|
||||
|
||||
override_dataset_stats:
|
||||
observation.image:
|
||||
# stats from imagenet, since we use a pretrained vision model
|
||||
mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
|
||||
std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
|
||||
|
||||
training:
|
||||
offline_steps: 80000
|
||||
online_steps: 0
|
||||
eval_freq: 10000
|
||||
save_freq: 100000
|
||||
log_freq: 250
|
||||
save_model: true
|
||||
|
||||
batch_size: 8
|
||||
lr: 1e-5
|
||||
lr_backbone: 1e-5
|
||||
weight_decay: 1e-4
|
||||
grad_clip_norm: 10
|
||||
online_steps_between_rollouts: 1
|
||||
|
||||
delta_timestamps:
|
||||
action: "[i / ${fps} for i in range(${policy.chunk_size})]"
|
||||
|
||||
eval:
|
||||
n_episodes: 50
|
||||
batch_size: 50
|
||||
|
||||
# See `configuration_act.py` for more details.
|
||||
policy:
|
||||
name: act
|
||||
|
||||
# Input / output structure.
|
||||
n_obs_steps: 1
|
||||
chunk_size: 100 # chunk_size
|
||||
n_action_steps: 100
|
||||
|
||||
input_shapes:
|
||||
observation.image: [3, 96, 96]
|
||||
observation.state: ["${env.state_dim}"]
|
||||
output_shapes:
|
||||
action: ["${env.action_dim}"]
|
||||
|
||||
# Normalization / Unnormalization
|
||||
input_normalization_modes:
|
||||
observation.image: mean_std
|
||||
# Use min_max normalization just because it's more standard.
|
||||
observation.state: min_max
|
||||
output_normalization_modes:
|
||||
# Use min_max normalization just because it's more standard.
|
||||
action: min_max
|
||||
|
||||
# Architecture.
|
||||
# Vision backbone.
|
||||
vision_backbone: resnet18
|
||||
pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
|
||||
replace_final_stride_with_dilation: false
|
||||
# Transformer layers.
|
||||
pre_norm: false
|
||||
dim_model: 512
|
||||
n_heads: 8
|
||||
dim_feedforward: 3200
|
||||
feedforward_activation: relu
|
||||
n_encoder_layers: 4
|
||||
# Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
|
||||
# that means only the first layer is used. Here we match the original implementation by setting this to 1.
|
||||
# See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
|
||||
n_decoder_layers: 1
|
||||
# VAE.
|
||||
use_vae: true
|
||||
latent_dim: 32
|
||||
n_vae_encoder_layers: 4
|
||||
|
||||
# Inference.
|
||||
temporal_ensemble_coeff: null
|
||||
|
||||
# Training and loss computation.
|
||||
dropout: 0.1
|
||||
kl_weight: 10.0
|
||||
@@ -1,70 +0,0 @@
|
||||
In this tutorial we will learn how to adapt a policy configuration to be compatible with a new environment and dataset. As a concrete example, we will adapt the default configuration for ACT to be compatible with the PushT environment and dataset.
|
||||
|
||||
If you haven't already read our tutorial on the [training script and configuration tooling](../4_train_policy_with_script.md) please do so prior to tackling this tutorial.
|
||||
|
||||
Let's get started!
|
||||
|
||||
Suppose we want to train ACT for PushT. Well, there are aspects of the ACT configuration that are specific to the ALOHA environments, and these happen to be incompatible with PushT. Therefore, trying to run the following will almost certainly raise an exception of sorts (eg: feature dimension mismatch):
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py policy=act env=pusht dataset_repo_id=lerobot/pusht
|
||||
```
|
||||
|
||||
We need to adapt the parameters of the ACT policy configuration to the PushT environment. The most important ones are the image keys.
|
||||
|
||||
ALOHA's datasets and environments typically use a variable number of cameras. In `lerobot/configs/policy/act.yaml` you may notice two relevant sections. Here we show you the minimal diff needed to adjust to PushT:
|
||||
|
||||
```diff
|
||||
override_dataset_stats:
|
||||
- observation.images.top:
|
||||
+ observation.image:
|
||||
# stats from imagenet, since we use a pretrained vision model
|
||||
mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
|
||||
std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
|
||||
|
||||
policy:
|
||||
input_shapes:
|
||||
- observation.images.top: [3, 480, 640]
|
||||
+ observation.image: [3, 96, 96]
|
||||
observation.state: ["${env.state_dim}"]
|
||||
output_shapes:
|
||||
action: ["${env.action_dim}"]
|
||||
|
||||
input_normalization_modes:
|
||||
- observation.images.top: mean_std
|
||||
+ observation.image: mean_std
|
||||
observation.state: min_max
|
||||
output_normalization_modes:
|
||||
action: min_max
|
||||
```
|
||||
|
||||
Here we've accounted for the following:
|
||||
- PushT uses "observation.image" for its image key.
|
||||
- PushT provides smaller images.
|
||||
|
||||
_Side note: technically we could override these via the CLI, but with many changes it gets a bit messy, and we also have a bit of a challenge in that we're using `.` in our observation keys which is treated by Hydra as a hierarchical separator_.
|
||||
|
||||
For your convenience, we provide [`act_pusht.yaml`](./act_pusht.yaml) in this directory. It contains the diff above, plus some other (optional) ones that are explained within. Please copy it into `lerobot/configs/policy` with:
|
||||
|
||||
```bash
|
||||
cp examples/advanced/1_train_act_pusht/act_pusht.yaml lerobot/configs/policy/act_pusht.yaml
|
||||
```
|
||||
|
||||
(remember from a [previous tutorial](../4_train_policy_with_script.md) that Hydra will look in the `lerobot/configs` directory). Now try running the following.
|
||||
|
||||
<!-- Note to contributor: are you changing this command? Note that it's tested in `Makefile`, so change it there too! -->
|
||||
```bash
|
||||
python lerobot/scripts/train.py policy=act_pusht env=pusht
|
||||
```
|
||||
|
||||
Notice that this is much the same as the command that failed at the start of the tutorial, only:
|
||||
- Now we are using `policy=act_pusht` to point to our new configuration file.
|
||||
- We can drop `dataset_repo_id=lerobot/pusht` as the change is incorporated in our new configuration file.
|
||||
|
||||
Hurrah! You're now training ACT for the PushT environment.
|
||||
|
||||
---
|
||||
|
||||
The bottom line of this tutorial is that when training policies for different environments and datasets you will need to understand what parts of the policy configuration are specific to those and make changes accordingly.
|
||||
|
||||
Happy coding! 🤗
|
||||
@@ -1,90 +0,0 @@
|
||||
"""This script demonstrates how to slice a dataset and calculate the loss on a subset of the data.
|
||||
|
||||
This technique can be useful for debugging and testing purposes, as well as identifying whether a policy
|
||||
is learning effectively.
|
||||
|
||||
Furthermore, relying on validation loss to evaluate performance is generally not considered a good practice,
|
||||
especially in the context of imitation learning. The most reliable approach is to evaluate the policy directly
|
||||
on the target environment, whether that be in simulation or the real world.
|
||||
"""
|
||||
|
||||
import math
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
from huggingface_hub import snapshot_download
|
||||
|
||||
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
|
||||
from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
|
||||
|
||||
device = torch.device("cuda")
|
||||
|
||||
# Download the diffusion policy for pusht environment
|
||||
pretrained_policy_path = Path(snapshot_download("lerobot/diffusion_pusht"))
|
||||
# OR uncomment the following to evaluate a policy from the local outputs/train folder.
|
||||
# pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")
|
||||
|
||||
policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
|
||||
policy.eval()
|
||||
policy.to(device)
|
||||
|
||||
# Set up the dataset.
|
||||
delta_timestamps = {
|
||||
# Load the previous image and state at -0.1 seconds before current frame,
|
||||
# then load current image and state corresponding to 0.0 second.
|
||||
"observation.image": [-0.1, 0.0],
|
||||
"observation.state": [-0.1, 0.0],
|
||||
# Load the previous action (-0.1), the next action to be executed (0.0),
|
||||
# and 14 future actions with a 0.1 seconds spacing. All these actions will be
|
||||
# used to calculate the loss.
|
||||
"action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
|
||||
}
|
||||
|
||||
# Load the last 10% of episodes of the dataset as a validation set.
|
||||
# - Load full dataset
|
||||
full_dataset = LeRobotDataset("lerobot/pusht", split="train")
|
||||
# - Calculate train and val subsets
|
||||
num_train_episodes = math.floor(full_dataset.num_episodes * 90 / 100)
|
||||
num_val_episodes = full_dataset.num_episodes - num_train_episodes
|
||||
print(f"Number of episodes in full dataset: {full_dataset.num_episodes}")
|
||||
print(f"Number of episodes in training dataset (90% subset): {num_train_episodes}")
|
||||
print(f"Number of episodes in validation dataset (10% subset): {num_val_episodes}")
|
||||
# - Get first frame index of the validation set
|
||||
first_val_frame_index = full_dataset.episode_data_index["from"][num_train_episodes].item()
|
||||
# - Load frames subset belonging to validation set using the `split` argument.
|
||||
# It utilizes the `datasets` library's syntax for slicing datasets.
|
||||
# For more information on the Slice API, please see:
|
||||
# https://huggingface.co/docs/datasets/v2.19.0/loading#slice-splits
|
||||
train_dataset = LeRobotDataset(
|
||||
"lerobot/pusht", split=f"train[:{first_val_frame_index}]", delta_timestamps=delta_timestamps
|
||||
)
|
||||
val_dataset = LeRobotDataset(
|
||||
"lerobot/pusht", split=f"train[{first_val_frame_index}:]", delta_timestamps=delta_timestamps
|
||||
)
|
||||
print(f"Number of frames in training dataset (90% subset): {len(train_dataset)}")
|
||||
print(f"Number of frames in validation dataset (10% subset): {len(val_dataset)}")
|
||||
|
||||
# Create dataloader for evaluation.
|
||||
val_dataloader = torch.utils.data.DataLoader(
|
||||
val_dataset,
|
||||
num_workers=4,
|
||||
batch_size=64,
|
||||
shuffle=False,
|
||||
pin_memory=device != torch.device("cpu"),
|
||||
drop_last=False,
|
||||
)
|
||||
|
||||
# Run validation loop.
|
||||
loss_cumsum = 0
|
||||
n_examples_evaluated = 0
|
||||
for batch in val_dataloader:
|
||||
batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()}
|
||||
output_dict = policy.forward(batch)
|
||||
|
||||
loss_cumsum += output_dict["loss"].item()
|
||||
n_examples_evaluated += batch["index"].shape[0]
|
||||
|
||||
# Calculate the average loss over the validation set.
|
||||
average_loss = loss_cumsum / n_examples_evaluated
|
||||
|
||||
print(f"Average loss on validation set: {average_loss:.4f}")
|
||||
@@ -1 +0,0 @@
|
||||
Bearings
- https://amzn.eu/d/8Xz7m4C
- https://amzn.eu/d/1xOo8re
- https://amzn.eu/d/9LXO205 (17x)
- https://amzn.eu/d/eKGj9gf (2x)

Bike Components
- https://amzn.eu/d/cNiQi0O (1x)

Accessories
- https://amzn.eu/d/ipjCq1R (1x)
- https://amzn.eu/d/0ZMzC3G (1x)

Screws
- https://amzn.eu/d/dzNhSkJ
- https://amzn.eu/d/41AhVIU
- https://amzn.eu/d/8G91txy
- https://amzn.eu/d/9xu0pLa
- https://amzn.eu/d/c5xaClV
- https://amzn.eu/d/7kudpAo
- https://amzn.eu/d/2BEgJFc
- https://amzn.eu/d/4q9RNby
- https://amzn.eu/d/4RE2lPV
- https://amzn.eu/d/63YU0l1

Inserts
- https://amzn.eu/d/7fjOtOC
|
||||
@@ -1,624 +0,0 @@
|
||||
# Using the [SO-100](https://github.com/TheRobotStudio/SO-ARM100) with LeRobot
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [A. Source the parts](#a-source-the-parts)
|
||||
- [B. Install LeRobot](#b-install-lerobot)
|
||||
- [C. Configure the Motors](#c-configure-the-motors)
|
||||
- [D. Step-by-Step Assembly Instructions](#d-step-by-step-assembly-instructions)
|
||||
- [E. Calibrate](#e-calibrate)
|
||||
- [F. Teleoperate](#f-teleoperate)
|
||||
- [G. Record a dataset](#g-record-a-dataset)
|
||||
- [H. Visualize a dataset](#h-visualize-a-dataset)
|
||||
- [I. Replay an episode](#i-replay-an-episode)
|
||||
- [J. Train a policy](#j-train-a-policy)
|
||||
- [K. Evaluate your policy](#k-evaluate-your-policy)
|
||||
- [L. More Information](#l-more-information)
|
||||
|
||||
## A. Source the parts
|
||||
|
||||
Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with a link to source the parts, as well as the instructions to 3D print the parts,
|
||||
and advice if it's your first time printing or if you don't own a 3D printer.
|
||||
|
||||
Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.
|
||||
|
||||
## B. Install LeRobot
|
||||
|
||||
> [!TIP]
|
||||
> We use the Command Prompt (cmd) quite a lot. If you are not comfortable using cmd, or want to brush up on using the command line, you can have a look here: [Command line crash course](https://developer.mozilla.org/en-US/docs/Learn_web_development/Getting_started/Environment_setup/Command_line)
|
||||
|
||||
On your computer:
|
||||
|
||||
#### 1. [Install Miniconda](https://docs.anaconda.com/miniconda/install/#quick-command-line-install):
|
||||
|
||||
#### 2. Restart shell
|
||||
Copy and paste this into your shell: `source ~/.bashrc`, or for Mac: `source ~/.bash_profile`, or `source ~/.zshrc` if you're using zsh
|
||||
|
||||
#### 3. Create and activate a fresh conda environment for lerobot
|
||||
|
||||
<details>
|
||||
<summary><strong>Video install instructions</strong></summary>
|
||||
|
||||
<video src="https://github.com/user-attachments/assets/17172d3b-3b64-4b80-9cf1-b2b7c5cbd236"></video>
|
||||
|
||||
</details>
|
||||
|
||||
```bash
|
||||
conda create -y -n lerobot python=3.10
|
||||
```
|
||||
|
||||
Then activate your conda environment (do this each time you open a shell to use lerobot!):
|
||||
```bash
|
||||
conda activate lerobot
|
||||
```
|
||||
|
||||
#### 4. Clone LeRobot:
|
||||
```bash
|
||||
git clone https://github.com/huggingface/lerobot.git ~/lerobot
|
||||
```
|
||||
|
||||
#### 5. Install ffmpeg in your environment:
|
||||
When using `miniconda`, install `ffmpeg` in your environment:
|
||||
```bash
|
||||
conda install ffmpeg -c conda-forge
|
||||
```
|
||||
|
||||
#### 6. Install LeRobot with dependencies for the feetech motors:
|
||||
```bash
|
||||
cd ~/lerobot && pip install -e ".[feetech]"
|
||||
```
|
||||
|
||||
Great :hugs:! You are now done installing LeRobot and we can begin assembling the SO100 arms :robot:.
|
||||
Whenever you want to use LeRobot, go to the `~/lerobot` folder where we installed it and run the commands from there.
|
||||
|
||||
## C. Configure the motors
|
||||
|
||||
> [!NOTE]
|
||||
> Throughout this tutorial you will find videos on how to do the steps, the full video tutorial can be found here: [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I).
|
||||
|
||||
### 1. Find the USB ports associated to each arm
|
||||
|
||||
Designate one bus servo adapter and 6 motors for your leader arm, and similarly the other bus servo adapter and 6 motors for the follower arm. It's convenient to label them: write on each motor whether it belongs to the follower (`F`) or the leader (`L`) and its ID from 1 to 6 (F1...F6 and L1...L6).
|
||||
|
||||
#### a. Run the script to find port
|
||||
|
||||
<details>
|
||||
<summary><strong>Video finding port</strong></summary>
|
||||
<video src="https://github.com/user-attachments/assets/4a21a14d-2046-4805-93c4-ee97a30ba33f"></video>
|
||||
<video src="https://github.com/user-attachments/assets/1cc3aecf-c16d-4ff9-aec7-8c175afbbce2"></video>
|
||||
</details>
|
||||
|
||||
To find the port for each bus servo adapter, run the utility script:
|
||||
```bash
|
||||
python lerobot/scripts/find_motors_bus_port.py
|
||||
```
|
||||
|
||||
#### b. Example outputs
|
||||
|
||||
Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
|
||||
```
|
||||
Finding all available ports for the MotorBus.
|
||||
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
|
||||
Remove the usb cable from your MotorsBus and press Enter when done.
|
||||
|
||||
[...Disconnect leader arm and press Enter...]
|
||||
|
||||
The port of this MotorsBus is /dev/tty.usbmodem575E0031751
|
||||
Reconnect the usb cable.
|
||||
```
|
||||
Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
|
||||
```
|
||||
Finding all available ports for the MotorBus.
|
||||
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
|
||||
Remove the usb cable from your MotorsBus and press Enter when done.
|
||||
|
||||
[...Disconnect follower arm and press Enter...]
|
||||
|
||||
The port of this MotorsBus is /dev/tty.usbmodem575E0032081
|
||||
Reconnect the usb cable.
|
||||
```
|
||||
|
||||
#### c. Troubleshooting
|
||||
On Linux, you might need to give access to the USB ports by running:
|
||||
```bash
|
||||
sudo chmod 666 /dev/ttyACM0
|
||||
sudo chmod 666 /dev/ttyACM1
|
||||
```
|
||||
|
||||
#### d. Update config file
|
||||
|
||||
IMPORTANTLY: Now that you have your ports, update the **port** default values of [`SO100RobotConfig`](../lerobot/common/robot_devices/robots/configs.py). You will find something like:
|
||||
```python
|
||||
@RobotConfig.register_subclass("so100")
|
||||
@dataclass
|
||||
class So100RobotConfig(ManipulatorRobotConfig):
|
||||
calibration_dir: str = ".cache/calibration/so100"
|
||||
# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
|
||||
# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
|
||||
# the number of motors in your follower arms.
|
||||
max_relative_target: int | None = None
|
||||
|
||||
leader_arms: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"main": FeetechMotorsBusConfig(
|
||||
port="/dev/tty.usbmodem58760431091", <-- UPDATE HERE
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"shoulder_pan": [1, "sts3215"],
|
||||
"shoulder_lift": [2, "sts3215"],
|
||||
"elbow_flex": [3, "sts3215"],
|
||||
"wrist_flex": [4, "sts3215"],
|
||||
"wrist_roll": [5, "sts3215"],
|
||||
"gripper": [6, "sts3215"],
|
||||
},
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
follower_arms: dict[str, MotorsBusConfig] = field(
|
||||
default_factory=lambda: {
|
||||
"main": FeetechMotorsBusConfig(
|
||||
port="/dev/tty.usbmodem585A0076891", <-- UPDATE HERE
|
||||
motors={
|
||||
# name: (index, model)
|
||||
"shoulder_pan": [1, "sts3215"],
|
||||
"shoulder_lift": [2, "sts3215"],
|
||||
"elbow_flex": [3, "sts3215"],
|
||||
"wrist_flex": [4, "sts3215"],
|
||||
"wrist_roll": [5, "sts3215"],
|
||||
"gripper": [6, "sts3215"],
|
||||
},
|
||||
),
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
### 2. Assembling the Base
|
||||
Let's begin by assembling the follower arm base.
|
||||
|
||||
#### a. Set IDs for all 12 motors
|
||||
|
||||
<details>
|
||||
<summary><strong>Video configuring motor</strong></summary>
|
||||
<video src="https://github.com/user-attachments/assets/ef9b3317-2e11-4858-b9d3-f0a02fb48ecf"></video>
|
||||
<video src="https://github.com/user-attachments/assets/f36b5ed5-c803-4ebe-8947-b39278776a0d"></video>
|
||||
</details>
|
||||
|
||||
Plug in your first motor F1 and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate. Replace the text after `--port` with the corresponding follower control board port and run this command in cmd:
|
||||
```bash
|
||||
python lerobot/scripts/configure_motor.py \
|
||||
--port /dev/tty.usbmodem58760432961 \
|
||||
--brand feetech \
|
||||
--model sts3215 \
|
||||
--baudrate 1000000 \
|
||||
--ID 1
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> These motors are currently limited. They can only take values between 0 and 4096, which corresponds to one full turn; they can't turn further than that. 2048 is the middle of this range, so we can move -2048 steps (180 degrees anticlockwise) or +2048 steps (180 degrees clockwise) before reaching the end of the range. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift of up to ± 2048 steps (± 180 degrees).
|
||||
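For intuition, here is a minimal sketch (not LeRobot code) of the step/degree relationship described in the note above, assuming 4096 steps per full turn:

```python
STEPS_PER_TURN = 4096  # one full motor turn, per the note above

def steps_to_degrees(steps: int) -> float:
    return steps * 360.0 / STEPS_PER_TURN

def degrees_to_steps(degrees: float) -> int:
    return round(degrees * STEPS_PER_TURN / 360.0)

print(steps_to_degrees(2048))   # 180.0 -> half a turn away from the middle position
print(degrees_to_steps(-180))   # -2048 -> the other end of the range
```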
|
||||
Then unplug your motor and plug the second motor and set its ID to 2.
|
||||
```bash
|
||||
python lerobot/scripts/configure_motor.py \
|
||||
--port /dev/tty.usbmodem58760432961 \
|
||||
--brand feetech \
|
||||
--model sts3215 \
|
||||
--baudrate 1000000 \
|
||||
--ID 2
|
||||
```
|
||||
|
||||
Redo the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm.
|
||||
|
||||
|
||||
#### b. Remove the gears of the 6 leader motors
|
||||
|
||||
<details>
|
||||
<summary><strong>Video removing gears</strong></summary>
|
||||
|
||||
<video src="https://github.com/user-attachments/assets/0c95b88c-5b85-413d-ba19-aee2f864f2a7"></video>
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
Follow the video to remove the gears. You only need to remove the gears from the leader arm's motors. As a result, only the motors' position encoders are used, and the reduced friction makes the leader arm easier to operate.
|
||||
|
||||
## D. Step-by-Step Assembly Instructions
|
||||
|
||||
**Step 1: Clean Parts**
|
||||
- Remove all support material from the 3D-printed parts.
|
||||
---
|
||||
|
||||
### Additional Guidance
|
||||
|
||||
<details>
|
||||
<summary><strong>Video assembling arms</strong></summary>
|
||||
|
||||
<video src="https://github.com/user-attachments/assets/488a39de-0189-4461-9de3-05b015f90cca"></video>
|
||||
|
||||
</details>
|
||||
|
||||
**Note:**
|
||||
This video provides visual guidance for assembling the arms, but it doesn't specify when or how to do the wiring. Inserting the cables beforehand is much easier than doing it afterward. The first arm may take a bit more than 1 hour to assemble, but once you get used to it, you can assemble the second arm in under 1 hour.
|
||||
|
||||
---
|
||||
|
||||
### First Motor
|
||||
|
||||
**Step 2: Insert Wires**
|
||||
- Insert two wires into the first motor.
|
||||
|
||||
<img src="../media/tutorial/img1.jpg" style="height:300px;">
|
||||
|
||||
**Step 3: Install in Base**
|
||||
- Place the first motor into the base.
|
||||
|
||||
<img src="../media/tutorial/img2.jpg" style="height:300px;">
|
||||
|
||||
**Step 4: Secure Motor**
|
||||
- Fasten the motor with 4 screws: two from the bottom and two from the top.
|
||||
|
||||
**Step 5: Attach Motor Holder**
|
||||
- Slide over the first motor holder and fasten it using two screws (one on each side).
|
||||
|
||||
<img src="../media/tutorial/img4.jpg" style="height:300px;">
|
||||
|
||||
**Step 6: Attach Motor Horns**
|
||||
- Install both motor horns, securing the top horn with a screw. Try not to move the motor position when attaching the motor horn, especially for the leader arms, where we removed the gears.
|
||||
|
||||
<img src="../media/tutorial/img5.jpg" style="height:300px;">
|
||||
<details>
|
||||
<summary><strong>Video adding motor horn</strong></summary>
|
||||
<video src="https://github.com/user-attachments/assets/ef3391a4-ad05-4100-b2bd-1699bf86c969"></video>
|
||||
</details>
|
||||
|
||||
**Step 7: Attach Shoulder Part**
|
||||
- Route one wire to the back of the robot and the other to the left, i.e. towards you in the photo (see photo).
|
||||
- Attach the shoulder part.
|
||||
|
||||
<img src="../media/tutorial/img6.jpg" style="height:300px;">
|
||||
|
||||
**Step 8: Secure Shoulder**
|
||||
- Tighten the shoulder part with 4 screws on top and 4 on the bottom
|
||||
*(access bottom holes by turning the shoulder).*
|
||||
|
||||
---
|
||||
|
||||
### Second Motor Assembly
|
||||
|
||||
**Step 9: Install Motor 2**
|
||||
- Slide the second motor in from the top and link the wire from motor 1 to motor 2.
|
||||
|
||||
<img src="../media/tutorial/img8.jpg" style="height:300px;">
|
||||
|
||||
**Step 10: Attach Shoulder Holder**
|
||||
- Add the shoulder motor holder.
|
||||
- Ensure the wire from motor 1 to motor 2 goes behind the holder while the other wire is routed upward (see photo).
|
||||
- This part can be tight to assemble; you can use a workbench as in the image, or a similar setup, to push the part over the motor.
|
||||
|
||||
<div style="display: flex;">
|
||||
<img src="../media/tutorial/img9.jpg" style="height:250px;">
|
||||
<img src="../media/tutorial/img10.jpg" style="height:250px;">
|
||||
<img src="../media/tutorial/img12.jpg" style="height:250px;">
|
||||
</div>
|
||||
|
||||
**Step 11: Secure Motor 2**
|
||||
- Fasten the second motor with 4 screws.
|
||||
|
||||
**Step 12: Attach Motor Horn**
|
||||
- Attach both motor horns to motor 2, again securing one with the horn screw.
|
||||
|
||||
**Step 13: Attach Base**
|
||||
- Install the base attachment using 2 screws.
|
||||
|
||||
<img src="../media/tutorial/img11.jpg" style="height:300px;">
|
||||
|
||||
**Step 14: Attach Upper Arm**
|
||||
- Attach the upper arm with 4 screws on each side.
|
||||
|
||||
<img src="../media/tutorial/img13.jpg" style="height:300px;">
|
||||
|
||||
---
|
||||
|
||||
### Third Motor Assembly
|
||||
|
||||
**Step 15: Install Motor 3**
|
||||
- Route the motor cable from motor 2 through the cable holder to motor 3, then secure motor 3 with 4 screws.
|
||||
|
||||
**Step 16: Attach Motor Horn**
|
||||
- Attach both motor horns to motor 3 and secure one again with a horn screw.
|
||||
|
||||
<img src="../media/tutorial/img14.jpg" style="height:300px;">
|
||||
|
||||
**Step 17: Attach Forearm**
|
||||
- Connect the forearm to motor 3 using 4 screws on each side.
|
||||
|
||||
<img src="../media/tutorial/img15.jpg" style="height:300px;">
|
||||
|
||||
---
|
||||
|
||||
### Fourth Motor Assembly
|
||||
|
||||
**Step 18: Install Motor 4**
|
||||
- Slide in motor 4, attach the cable from motor 3, and secure the cable in its holder with a screw.
|
||||
|
||||
<div style="display: flex;">
|
||||
<img src="../media/tutorial/img16.jpg" style="height:300px;">
|
||||
<img src="../media/tutorial/img19.jpg" style="height:300px;">
|
||||
</div>
|
||||
|
||||
**Step 19: Attach Motor Holder 4**
|
||||
- Install the fourth motor holder (a tight fit). Ensure one wire is routed upward and the wire from motor 3 is routed downward (see photo).
|
||||
|
||||
<img src="../media/tutorial/img17.jpg" style="height:300px;">
|
||||
|
||||
**Step 20: Secure Motor 4 & Attach Horn**
|
||||
- Fasten motor 4 with 4 screws and attach its motor horns, securing one with a horn screw.
|
||||
|
||||
<img src="../media/tutorial/img18.jpg" style="height:300px;">
|
||||
|
||||
---
|
||||
|
||||
### Wrist Assembly
|
||||
|
||||
**Step 21: Install Motor 5**
|
||||
- Insert motor 5 into the wrist holder and secure it with 2 front screws.
|
||||
|
||||
<img src="../media/tutorial/img20.jpg" style="height:300px;">
|
||||
|
||||
**Step 22: Attach Wrist**
|
||||
- Connect the wire from motor 4 to motor 5, and already insert the other wire for the gripper.
|
||||
- Secure the wrist to motor 4 using 4 screws on both sides.
|
||||
|
||||
<img src="../media/tutorial/img22.jpg" style="height:300px;">
|
||||
|
||||
**Step 23: Attach Wrist Horn**
|
||||
- Install only one motor horn on the wrist motor and secure it with a horn screw.
|
||||
|
||||
<img src="../media/tutorial/img23.jpg" style="height:300px;">
|
||||
|
||||
---
|
||||
|
||||
### Follower Configuration
|
||||
|
||||
**Step 24: Attach Gripper**
|
||||
- Attach the gripper to motor 5.
|
||||
|
||||
<img src="../media/tutorial/img24.jpg" style="height:300px;">
|
||||
|
||||
**Step 25: Install Gripper Motor**
|
||||
- Insert the gripper motor, connect the motor wire from motor 5 to motor 6, and secure it with 3 screws on each side.
|
||||
|
||||
<img src="../media/tutorial/img25.jpg" style="height:300px;">
|
||||
|
||||
**Step 26: Attach Gripper Horn & Claw**
|
||||
- Attach the motor horns and again use a horn screw.
|
||||
- Install the gripper claw and secure it with 4 screws on both sides.
|
||||
|
||||
<img src="../media/tutorial/img26.jpg" style="height:300px;">
|
||||
|
||||
**Step 27: Mount Controller**
|
||||
- Attach the motor controller on the back.
|
||||
|
||||
<div style="display: flex;">
|
||||
<img src="../media/tutorial/img27.jpg" style="height:300px;">
|
||||
<img src="../media/tutorial/img28.jpg" style="height:300px;">
|
||||
</div>
|
||||
|
||||
*Assembly complete – proceed to Leader arm assembly.*
|
||||
|
||||
---
|
||||
|
||||
### Leader Configuration
|
||||
|
||||
For the leader configuration, perform **Steps 1–23**. Make sure you have removed the gears from the leader arm's motors.
|
||||
|
||||
**Step 24: Attach Leader Holder**
|
||||
- Mount the leader holder onto the wrist and secure it with a screw.
|
||||
|
||||
<img src="../media/tutorial/img29.jpg" style="height:300px;">
|
||||
|
||||
**Step 25: Attach Handle**
|
||||
- Attach the handle to motor 5 using 4 screws.
|
||||
|
||||
<img src="../media/tutorial/img30.jpg" style="height:300px;">
|
||||
|
||||
**Step 26: Install Gripper Motor**
|
||||
- Insert the gripper motor, secure it with 3 screws on each side, attach a motor horn using a horn screw, and connect the motor wire.
|
||||
|
||||
<img src="../media/tutorial/img31.jpg" style="height:300px;">
|
||||
|
||||
**Step 27: Attach Trigger**
|
||||
- Attach the follower trigger with 4 screws.
|
||||
|
||||
<img src="../media/tutorial/img32.jpg" style="height:300px;">
|
||||
|
||||
**Step 28: Mount Controller**
|
||||
- Attach the motor controller on the back.
|
||||
|
||||
<div style="display: flex;">
|
||||
<img src="../media/tutorial/img27.jpg" style="height:300px;">
|
||||
<img src="../media/tutorial/img28.jpg" style="height:300px;">
|
||||
</div>
|
||||
|
||||
*Assembly complete – proceed to calibration.*
|
||||
|
||||
|
||||
## E. Calibrate
|
||||
|
||||
Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one SO-100 robot to work on another.
|
||||
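To make the idea concrete, here is a rough sketch of a linear calibration mapping (an illustrative assumption, not the actual LeRobot implementation): raw motor ticks recorded at two reference positions are mapped onto a shared scale, so two different arms report comparable values for the same pose.

```python
def linear_calibrate(raw: int, start_pos: int, end_pos: int) -> float:
    """Map a raw motor reading onto a shared 0-100 scale between two reference positions."""
    return (raw - start_pos) / (end_pos - start_pos) * 100.0

# Made-up tick values for the same joint on two different arms:
print(linear_calibrate(2500, 2000, 3000))  # arm A -> 50.0
print(linear_calibrate(2600, 2100, 3100))  # arm B -> 50.0, despite different raw ticks
```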
|
||||
#### a. Manual calibration of follower arm
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Contrary to step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724), which illustrates auto calibration, we will actually do manual calibration of the follower for now.
|
||||
|
||||
You will need to move the follower arm to these positions sequentially:
|
||||
|
||||
| 1. Zero position | 2. Rotated position | 3. Rest position |
|
||||
| ------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| <img src="../media/so100/follower_zero.webp?raw=true" alt="SO-100 follower arm zero position" title="SO-100 follower arm zero position" style="width:100%;"> | <img src="../media/so100/follower_rotated.webp?raw=true" alt="SO-100 follower arm rotated position" title="SO-100 follower arm rotated position" style="width:100%;"> | <img src="../media/so100/follower_rest.webp?raw=true" alt="SO-100 follower arm rest position" title="SO-100 follower arm rest position" style="width:100%;"> |
|
||||
|
||||
Make sure both arms are connected and run this script to launch manual calibration:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so100 \
|
||||
--robot.cameras='{}' \
|
||||
--control.type=calibrate \
|
||||
--control.arms='["main_follower"]'
|
||||
```
|
||||
|
||||
#### b. Manual calibration of leader arm
|
||||
Follow step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
|
||||
|
||||
| 1. Zero position | 2. Rotated position | 3. Rest position |
|
||||
| ------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| <img src="../media/so100/leader_zero.webp?raw=true" alt="SO-100 leader arm zero position" title="SO-100 leader arm zero position" style="width:100%;"> | <img src="../media/so100/leader_rotated.webp?raw=true" alt="SO-100 leader arm rotated position" title="SO-100 leader arm rotated position" style="width:100%;"> | <img src="../media/so100/leader_rest.webp?raw=true" alt="SO-100 leader arm rest position" title="SO-100 leader arm rest position" style="width:100%;"> |
|
||||
|
||||
Run this script to launch manual calibration:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so100 \
|
||||
--robot.cameras='{}' \
|
||||
--control.type=calibrate \
|
||||
--control.arms='["main_leader"]'
|
||||
```
|
||||
|
||||
## F. Teleoperate
|
||||
|
||||
**Simple teleop**
|
||||
Then you are ready to teleoperate your robot! Run this simple script (it won't connect to or display the cameras):
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so100 \
|
||||
--robot.cameras='{}' \
|
||||
--control.type=teleoperate
|
||||
```
|
||||
|
||||
|
||||
#### a. Teleop with displaying cameras
|
||||
Follow [this guide to set up your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful for preparing your setup before recording your first dataset.
|
||||
|
||||
> **NOTE:** To visualize the data, enable `--control.display_data=true`. This streams the data using `rerun`.
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so100 \
|
||||
--control.type=teleoperate \
--control.display_data=true
|
||||
```
|
||||
|
||||
## G. Record a dataset
|
||||
|
||||
Once you're familiar with teleoperation, you can record your first dataset with SO-100.
|
||||
|
||||
If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
|
||||
```bash
|
||||
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
|
||||
```
|
||||
|
||||
Store your Hugging Face repository name in a variable to run these commands:
|
||||
```bash
|
||||
HF_USER=$(huggingface-cli whoami | head -n 1)
|
||||
echo $HF_USER
|
||||
```
|
||||
|
||||
Record 2 episodes and upload your dataset to the hub:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so100 \
|
||||
--control.type=record \
|
||||
--control.fps=30 \
|
||||
--control.single_task="Grasp a lego block and put it in the bin." \
|
||||
--control.repo_id=${HF_USER}/so100_test \
|
||||
--control.tags='["so100","tutorial"]' \
|
||||
--control.warmup_time_s=5 \
|
||||
--control.episode_time_s=30 \
|
||||
--control.reset_time_s=30 \
|
||||
--control.num_episodes=2 \
|
||||
--control.push_to_hub=true
|
||||
```
|
||||
|
||||
Note: You can resume recording by adding `--control.resume=true`.
|
||||
|
||||
## H. Visualize a dataset
|
||||
|
||||
If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id, which is given by:
|
||||
```bash
|
||||
echo ${HF_USER}/so100_test
|
||||
```
|
||||
|
||||
If you didn't upload the dataset (i.e. you used `--control.push_to_hub=false`), you can still visualize it locally (a window will open in the browser at `http://127.0.0.1:9090` with the visualization tool):
|
||||
```bash
|
||||
python lerobot/scripts/visualize_dataset_html.py \
|
||||
--repo-id ${HF_USER}/so100_test \
|
||||
--local-files-only 1
|
||||
```
|
||||
|
||||
## I. Replay an episode
|
||||
|
||||
Now try to replay the first episode on your robot:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so100 \
|
||||
--control.type=replay \
|
||||
--control.fps=30 \
|
||||
--control.repo_id=${HF_USER}/so100_test \
|
||||
--control.episode=0
|
||||
```
|
||||
|
||||
## J. Train a policy
|
||||
|
||||
To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
--dataset.repo_id=${HF_USER}/so100_test \
|
||||
--policy.type=act \
|
||||
--output_dir=outputs/train/act_so100_test \
|
||||
--job_name=act_so100_test \
|
||||
--policy.device=cuda \
|
||||
--wandb.enable=true
|
||||
```
|
||||
|
||||
Let's explain it:
|
||||
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so100_test`.
|
||||
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) that have been saved in your dataset.
|
||||
3. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
|
||||
4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.
|
||||
|
||||
Training should take several hours. You will find checkpoints in `outputs/train/act_so100_test/checkpoints`.
|
||||
|
||||
To resume training from a checkpoint, below is an example command to resume from the `last` checkpoint of the `act_so100_test` policy:
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
--config_path=outputs/train/act_so100_test/checkpoints/last/pretrained_model/train_config.json \
|
||||
--resume=true
|
||||
```
|
||||
|
||||
## K. Evaluate your policy
|
||||
|
||||
You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so100 \
|
||||
--control.type=record \
|
||||
--control.fps=30 \
|
||||
--control.single_task="Grasp a lego block and put it in the bin." \
|
||||
--control.repo_id=${HF_USER}/eval_act_so100_test \
|
||||
--control.tags='["tutorial"]' \
|
||||
--control.warmup_time_s=5 \
|
||||
--control.episode_time_s=30 \
|
||||
--control.reset_time_s=30 \
|
||||
--control.num_episodes=10 \
|
||||
--control.push_to_hub=true \
|
||||
--control.policy.path=outputs/train/act_so100_test/checkpoints/last/pretrained_model
|
||||
```
|
||||
|
||||
As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
|
||||
1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/act_so100_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so100_test`).
|
||||
2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so100_test`).
|
||||
|
||||
## L. More Information
|
||||
|
||||
See this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth guide on controlling real robots with LeRobot.
|
||||
|
||||
> [!TIP]
|
||||
> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb) in the channel [`#so100-arm`](https://discord.com/channels/1216765309076115607/1237741463832363039).
|
||||
@@ -1,45 +0,0 @@
|
||||
import time
|
||||
import numpy as np
|
||||
|
||||
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus, CalibrationMode
|
||||
|
||||
|
||||
def degps_to_raw(degps: float) -> int:
|
||||
steps_per_deg = 4096.0 / 360.0
|
||||
speed_in_steps = abs(degps) * steps_per_deg
|
||||
speed_int = int(round(speed_in_steps))
|
||||
if speed_int > 0x7FFF:
|
||||
speed_int = 0x7FFF
|
||||
if degps < 0:
|
||||
return speed_int | 0x8000
|
||||
else:
|
||||
return speed_int & 0x7FFF
|
||||
|
||||
|
||||
def raw_to_degps(raw_speed: int) -> float:
|
||||
steps_per_deg = 4096.0 / 360.0
|
||||
magnitude = raw_speed & 0x7FFF
|
||||
degps = magnitude / steps_per_deg
|
||||
if raw_speed & 0x8000:
|
||||
degps = -degps
|
||||
return degps
|
||||
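# Quick sanity check of the two helpers above (values chosen for illustration):
# the sign of the speed is carried in bit 15 of the raw word, so a negative
# speed survives a round trip through the raw encoding.
assert degps_to_raw(90.0) == 1024                   # 90 deg/s -> 1024 steps/s
assert raw_to_degps(degps_to_raw(-90.0)) == -90.0   # sign bit preserved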
|
||||
def main():
|
||||
# Instantiate the bus for a single motor on port /dev/ttyACM0.
|
||||
arm_bus = FeetechMotorsBus(
|
||||
port="/dev/ttyACM0",
|
||||
motors={"wrist_pitch": [1, "scs0009"]},
|
||||
protocol_version=1,
|
||||
group_sync_read=False, # using individual read calls
|
||||
)
|
||||
arm_bus.connect()
|
||||
# Read the current raw motor position.
|
||||
# Note that "Present_Position" is in the raw units.
|
||||
current_raw = arm_bus.read("Present_Position", ["wrist_pitch"])[0]
|
||||
print("Current raw position:", current_raw)
|
||||
arm_bus.write("Goal_Position", 1000)
|
||||
arm_bus.disconnect()
|
||||
exit()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -1,46 +0,0 @@
|
||||
import serial
|
||||
import matplotlib.pyplot as plt
|
||||
import matplotlib.animation as animation
|
||||
from collections import deque
|
||||
|
||||
# Adjust this to match your actual serial port and baud rate
|
||||
SERIAL_PORT = '/dev/ttyACM0' # or COM3 on Windows
|
||||
BAUD_RATE = 115200
|
||||
|
||||
# Set up serial connection
|
||||
ser = serial.Serial(SERIAL_PORT, BAUD_RATE)
|
||||
|
||||
# Buffers for real-time plot
|
||||
buffer_len = 200
|
||||
val1_buffer = deque([0]*buffer_len, maxlen=buffer_len)
|
||||
val2_buffer = deque([0]*buffer_len, maxlen=buffer_len)
|
||||
|
||||
# Setup the plot
|
||||
fig, ax = plt.subplots()
|
||||
line1, = ax.plot([], [], label='Sensor 0')
|
||||
line2, = ax.plot([], [], label='Sensor 1')
|
||||
ax.set_ylim(0, 4096)
|
||||
ax.set_xlim(0, buffer_len)
|
||||
ax.legend()
|
||||
|
||||
def update(frame):
|
||||
while ser.in_waiting:
|
||||
line = ser.readline().decode('utf-8').strip()
|
||||
parts = line.split()
|
||||
if len(parts) >= 2:
|
||||
try:
|
||||
val1 = int(parts[0])
|
||||
val2 = int(parts[1])
|
||||
val1_buffer.append(val1)
|
||||
val2_buffer.append(val2)
|
||||
except ValueError:
|
||||
pass # skip malformed lines
|
||||
|
||||
line1.set_ydata(val1_buffer)
|
||||
line1.set_xdata(range(len(val1_buffer)))
|
||||
line2.set_ydata(val2_buffer)
|
||||
line2.set_xdata(range(len(val2_buffer)))
|
||||
return line1, line2
|
||||
|
||||
ani = animation.FuncAnimation(fig, update, interval=50)
|
||||
plt.show()
|
||||
@@ -1,64 +0,0 @@
|
||||
import serial
|
||||
import matplotlib.pyplot as plt
|
||||
import matplotlib.animation as animation
|
||||
from collections import deque
|
||||
|
||||
# Config
|
||||
SERIAL_PORT = '/dev/ttyACM1' # Change as needed
|
||||
BAUD_RATE = 115200
|
||||
BUFFER_LEN = 200
|
||||
|
||||
# Sensor names in order
|
||||
sensor_names = [
|
||||
"wrist_roll",
|
||||
"wrist_pitch",
|
||||
"wrist_yaw",
|
||||
"elbow_flex",
|
||||
"shoulder_roll",
|
||||
"shoulder_yaw",
|
||||
"shoulder_pitch"
|
||||
]
|
||||
|
||||
# Initialize buffers
|
||||
sensor_data = {
|
||||
name: deque([0]*BUFFER_LEN, maxlen=BUFFER_LEN)
|
||||
for name in sensor_names
|
||||
}
|
||||
|
||||
# Setup plot
|
||||
fig, axes = plt.subplots(len(sensor_names), 1, figsize=(8, 12), sharex=True)
|
||||
fig.tight_layout(pad=3.0)
|
||||
|
||||
lines = {}
|
||||
for i, name in enumerate(sensor_names):
|
||||
axes[i].set_title(name)
|
||||
axes[i].set_xlim(0, BUFFER_LEN)
|
||||
axes[i].set_ylim(0, 4096)
|
||||
line, = axes[i].plot([], [], label=name)
|
||||
axes[i].legend()
|
||||
lines[name] = line
|
||||
|
||||
# Connect to serial
|
||||
ser = serial.Serial(SERIAL_PORT, BAUD_RATE)
|
||||
|
||||
# Update function
|
||||
def update(frame):
|
||||
while ser.in_waiting:
|
||||
line = ser.readline().decode().strip()
|
||||
parts = line.split()
|
||||
if len(parts) != 7:
|
||||
continue
|
||||
try:
|
||||
values = list(map(int, parts))
|
||||
except ValueError:
|
||||
continue
|
||||
for i, name in enumerate(sensor_names):
|
||||
sensor_data[name].append(values[i])
|
||||
for name in sensor_names:
|
||||
x = range(len(sensor_data[name]))
|
||||
lines[name].set_data(x, sensor_data[name])
|
||||
return lines.values()
|
||||
|
||||
# Animate
|
||||
ani = animation.FuncAnimation(fig, update, interval=50, blit=False)
|
||||
plt.show()
|
||||
@@ -1,161 +0,0 @@
|
||||
import serial
|
||||
import matplotlib.pyplot as plt
|
||||
import matplotlib.animation as animation
|
||||
from collections import deque
|
||||
|
||||
# Adjust this to match your actual serial port and baud rate
|
||||
SERIAL_PORT = '/dev/ttyACM0' # or COM3 on Windows
|
||||
BAUD_RATE = 115200
|
||||
|
||||
# Set up serial connection
|
||||
ser = serial.Serial(SERIAL_PORT, BAUD_RATE)
|
||||
|
||||
# How many data points to keep in the scrolling buffer
|
||||
buffer_len = 200
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# 1) Sensor buffers for existing sensors + new wrist_pitch, wrist_yaw
|
||||
# -------------------------------------------------------------------
|
||||
sensor_buffers = {
|
||||
'wrist_roll': {
|
||||
'val1': deque([0]*buffer_len, maxlen=buffer_len),
|
||||
'val2': deque([0]*buffer_len, maxlen=buffer_len)
|
||||
},
|
||||
'elbow_pitch': {
|
||||
'val1': deque([0]*buffer_len, maxlen=buffer_len),
|
||||
'val2': deque([0]*buffer_len, maxlen=buffer_len)
|
||||
},
|
||||
'shoulder_pitch': {
|
||||
'val1': deque([0]*buffer_len, maxlen=buffer_len),
|
||||
'val2': deque([0]*buffer_len, maxlen=buffer_len)
|
||||
},
|
||||
'shoulder_yaw': {
|
||||
'val1': deque([0]*buffer_len, maxlen=buffer_len),
|
||||
'val2': deque([0]*buffer_len, maxlen=buffer_len)
|
||||
},
|
||||
'shoulder_roll': {
|
||||
'val1': deque([0]*buffer_len, maxlen=buffer_len),
|
||||
'val2': deque([0]*buffer_len, maxlen=buffer_len)
|
||||
},
|
||||
# --- New single-valued sensors ---
|
||||
'wrist_pitch': {
|
||||
'val1': deque([0]*buffer_len, maxlen=buffer_len) # Only one line
|
||||
},
|
||||
'wrist_yaw': {
|
||||
'val1': deque([0]*buffer_len, maxlen=buffer_len) # Only one line
|
||||
},
|
||||
}
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# 2) Figure with 7 subplots (was 5). We keep the original 5 + 2 new.
|
||||
# -------------------------------------------------------------------
|
||||
fig, axes = plt.subplots(7, 1, figsize=(8, 14), sharex=True)
|
||||
fig.tight_layout(pad=3.0)
|
||||
|
||||
# We'll store line references in a dict so we can update them in update().
|
||||
lines = {}
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# 3) Define each subplot, including new ones at the end.
|
||||
# -------------------------------------------------------------------
|
||||
subplot_info = [
|
||||
('wrist_roll', 'Wrist Roll (2,3)', axes[0]),
|
||||
('elbow_pitch', 'Elbow Pitch (0,1)', axes[1]),
|
||||
('shoulder_pitch', 'Shoulder Pitch (10,11)', axes[2]),
|
||||
('shoulder_yaw', 'Shoulder Yaw (12,13)', axes[3]),
|
||||
('shoulder_roll', 'Shoulder Roll (14,15)', axes[4]),
|
||||
('wrist_pitch', 'Wrist Pitch (0)', axes[5]), # new
|
||||
('wrist_yaw', 'Wrist Yaw (1)', axes[6]), # new
|
||||
]
|
||||
|
||||
# Set up each subplot
|
||||
for (sensor_name, label, ax) in subplot_info:
|
||||
ax.set_title(label)
|
||||
ax.set_xlim(0, buffer_len)
|
||||
ax.set_ylim(0, 4096) # adjust if needed
|
||||
|
||||
# For existing sensors, plot 2 lines (val1, val2)
|
||||
# For the new single-line sensors, plot just 1 line
|
||||
if sensor_name in ['wrist_pitch', 'wrist_yaw']:
|
||||
# Single-valued
|
||||
line, = ax.plot([], [], label=f"{sensor_name}")
|
||||
lines[sensor_name] = line
|
||||
else:
|
||||
# Pair of values
|
||||
line1, = ax.plot([], [], label=f"{sensor_name} - val1")
|
||||
line2, = ax.plot([], [], label=f"{sensor_name} - val2")
|
||||
lines[sensor_name] = [line1, line2]
|
||||
|
||||
ax.legend()
|
||||
|
||||
def update(frame):
|
||||
# Read all available lines from the serial buffer
|
||||
while ser.in_waiting:
|
||||
raw_line = ser.readline().decode('utf-8').strip()
|
||||
parts = raw_line.split()
|
||||
|
||||
# We expect at least 16 values if all sensors are present
|
||||
if len(parts) < 16:
|
||||
continue
|
||||
|
||||
try:
|
||||
values = list(map(int, parts))
|
||||
except ValueError:
|
||||
# If there's a parsing error, skip this line
|
||||
continue
|
||||
|
||||
# Original code: extract the relevant values and append to the correct buffer
|
||||
sensor_buffers['elbow_pitch']['val1'].append(values[13])
|
||||
sensor_buffers['elbow_pitch']['val2'].append(values[13])
|
||||
|
||||
sensor_buffers['wrist_roll']['val1'].append(values[3])
|
||||
sensor_buffers['wrist_roll']['val2'].append(values[3])
|
||||
|
||||
sensor_buffers['shoulder_pitch']['val1'].append(values[14])
|
||||
sensor_buffers['shoulder_pitch']['val2'].append(values[14])
|
||||
|
||||
sensor_buffers['shoulder_yaw']['val1'].append(values[8])
|
||||
sensor_buffers['shoulder_yaw']['val2'].append(values[8])
|
||||
|
||||
sensor_buffers['shoulder_roll']['val1'].append(values[10])
|
||||
sensor_buffers['shoulder_roll']['val2'].append(values[10])
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# 4) New code: also read wrist_pitch (index 0) and wrist_yaw (index 1)
|
||||
# -------------------------------------------------------------------
|
||||
sensor_buffers['wrist_yaw']['val1'].append(values[0])
|
||||
sensor_buffers['wrist_pitch']['val1'].append(values[1])
|
||||
|
||||
# Update each line's data in each subplot
|
||||
all_lines = []
|
||||
for (sensor_name, _, ax) in subplot_info:
|
||||
# x-values are just the index range of the buffer for val1
|
||||
x_data = range(len(sensor_buffers[sensor_name]['val1']))
|
||||
|
||||
# If this sensor has two lines
|
||||
if isinstance(lines[sensor_name], list):
|
||||
# First line
|
||||
lines[sensor_name][0].set_data(
|
||||
x_data,
|
||||
sensor_buffers[sensor_name]['val1']
|
||||
)
|
||||
# Second line
|
||||
lines[sensor_name][1].set_data(
|
||||
x_data,
|
||||
sensor_buffers[sensor_name]['val2']
|
||||
)
|
||||
all_lines.extend(lines[sensor_name])
|
||||
else:
|
||||
# Single line only (wrist_pitch, wrist_yaw)
|
||||
lines[sensor_name].set_data(
|
||||
x_data,
|
||||
sensor_buffers[sensor_name]['val1']
|
||||
)
|
||||
all_lines.append(lines[sensor_name])
|
||||
|
||||
return all_lines
|
||||
|
||||
# Create the animation
|
||||
ani = animation.FuncAnimation(fig, update, interval=50, blit=False)
|
||||
|
||||
plt.show()
|
||||
@@ -1,186 +0,0 @@
|
||||
from lerobot.common.robot_devices.motors.feetech import (
|
||||
CalibrationMode,
|
||||
FeetechMotorsBus,
|
||||
)
|
||||
import yaml
|
||||
|
||||
class HopeJuniorRobot:
|
||||
def __init__(self):
|
||||
self.arm_port = "/dev/tty.usbserial-140"
|
||||
self.hand_port = "/dev/tty.usbmodem58760436961"
|
||||
self.arm_bus = FeetechMotorsBus(
|
||||
port = self.arm_port,
|
||||
motors={
|
||||
# "motor1": (1, "sts3250"),
|
||||
# "motor2": (2, "sts3250"),
|
||||
# "motor3": (3, "sts3250"),
|
||||
|
||||
#"shoulder_pitch": [1, "sts3215"],
|
||||
"shoulder_pitch": [1, "sm8512bl"],
|
||||
"shoulder_yaw": [2, "sts3250"], # TODO: sts3250
|
||||
"shoulder_roll": [3, "sts3250"], # TODO: sts3250
|
||||
"elbow_flex": [4, "sts3250"],
|
||||
"wrist_roll": [5, "sts3215"],
|
||||
"wrist_yaw": [6, "sts3215"],
|
||||
"wrist_pitch": [7, "sts3215"],
|
||||
},
|
||||
protocol_version=0,
|
||||
)
|
||||
self.hand_bus = FeetechMotorsBus(
|
||||
port=self.hand_port,
|
||||
|
||||
motors = {
|
||||
# Thumb
|
||||
"thumb_basel_rotation": [1, "scs0009"],
|
||||
"thumb_mcp": [3, "scs0009"],
|
||||
"thumb_pip": [4, "scs0009"],
|
||||
"thumb_dip": [13, "scs0009"],
|
||||
|
||||
# Index
|
||||
"index_thumb_side": [5, "scs0009"],
|
||||
"index_pinky_side": [6, "scs0009"],
|
||||
"index_flexor": [16, "scs0009"],
|
||||
|
||||
# Middle
|
||||
"middle_thumb_side": [8, "scs0009"],
|
||||
"middle_pinky_side": [9, "scs0009"],
|
||||
"middle_flexor": [2, "scs0009"],
|
||||
|
||||
# Ring
|
||||
"ring_thumb_side": [11, "scs0009"],
|
||||
"ring_pinky_side": [12, "scs0009"],
|
||||
"ring_flexor": [7, "scs0009"],
|
||||
|
||||
# Pinky
|
||||
"pinky_thumb_side": [14, "scs0009"],
|
||||
"pinky_pinky_side": [15, "scs0009"],
|
||||
"pinky_flexor": [10, "scs0009"],
|
||||
},
|
||||
protocol_version=1,#1
|
||||
group_sync_read=False,
|
||||
)
|
||||
|
||||
self.arm_calib_dict = self.get_arm_calibration()
|
||||
self.hand_calib_dict = self.get_hand_calibration()
|
||||
|
||||
|
||||
def apply_arm_config(self, config_file):
|
||||
with open(config_file, "r") as file:
|
||||
config = yaml.safe_load(file)
|
||||
for param, value in config.get("robot", {}).get("arm_bus", {}).items():
|
||||
self.arm_bus.write(param, value)
|
||||
|
||||
def apply_hand_config(self, config_file):
|
||||
with open(config_file, "r") as file:
|
||||
config = yaml.safe_load(file)
|
||||
|
||||
for param, value in config.get("robot", {}).get("hand_bus", {}).items():
|
||||
self.hand_bus.write(param, value)
|
||||
|
||||
def get_hand_calibration(self):
|
||||
homing_offset = [0] * len(self.hand_bus.motor_names)
|
||||
drive_mode = [0] * len(self.hand_bus.motor_names)
|
||||
|
||||
start_pos = [
|
||||
750, # thumb_basel_rotation
|
||||
100, # thumb_mcp
|
||||
700, # thumb_pip
|
||||
100, # thumb_dip
|
||||
|
||||
800, # index_thumb_side
|
||||
950, # index_pinky_side
|
||||
0, # index_flexor
|
||||
|
||||
250, # middle_thumb_side
|
||||
850, # middle_pinky_side
|
||||
0, # middle_flexor
|
||||
|
||||
850, # ring_thumb_side
|
||||
900, # ring_pinky_side
|
||||
0, # ring_flexor
|
||||
|
||||
0, # pinky_thumb_side
|
||||
950, # pinky_pinky_side
|
||||
0, # pinky_flexor
|
||||
]
|
||||
|
||||
end_pos = [
|
||||
start_pos[0] - 550, # thumb_basel_rotation
|
||||
start_pos[1] + 400, # thumb_mcp
|
||||
start_pos[2] + 300, # thumb_pip
|
||||
start_pos[3] + 200, # thumb_dip
|
||||
|
||||
start_pos[4] - 700, # index_thumb_side
|
||||
start_pos[5] - 300, # index_pinky_side
|
||||
start_pos[6] + 600, # index_flexor
|
||||
|
||||
start_pos[7] + 700, # middle_thumb_side
|
||||
start_pos[8] - 400, # middle_pinky_side
|
||||
start_pos[9] + 600, # middle_flexor
|
||||
|
||||
start_pos[10] - 600, # ring_thumb_side
|
||||
start_pos[11] - 400, # ring_pinky_side
|
||||
start_pos[12] + 600, # ring_flexor
|
||||
|
||||
start_pos[13] + 400, # pinky_thumb_side
|
||||
start_pos[14] - 450, # pinky_pinky_side
|
||||
start_pos[15] + 600, # pinky_flexor
|
||||
]
|
||||
|
||||
|
||||
|
||||
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": start_pos,
|
||||
"end_pos": end_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.hand_bus.motor_names,
|
||||
}
|
||||
return calib_dict
|
||||
|
||||
def get_arm_calibration(self):
|
||||
|
||||
homing_offset = [0] * len(self.arm_bus.motor_names)
|
||||
drive_mode = [0] * len(self.arm_bus.motor_names)
|
||||
|
||||
start_pos = [
|
||||
1800, # shoulder_up
|
||||
2800, # shoulder_forward
|
||||
1800, # shoulder_roll
|
||||
1200, # bend_elbow
|
||||
700, # wrist_roll
|
||||
1850, # wrist_yaw
|
||||
1700, # wrist_pitch
|
||||
]
|
||||
|
||||
end_pos = [
|
||||
2800, # shoulder_up
|
||||
3150, # shoulder_forward
|
||||
400, #shoulder_roll
|
||||
2300, # bend_elbow
|
||||
2300, # wrist_roll
|
||||
2150, # wrist_yaw
|
||||
2300, # wrist_pitch
|
||||
]
|
||||
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * len(self.arm_bus.motor_names)
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": start_pos,
|
||||
"end_pos": end_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.arm_bus.motor_names,
|
||||
}
|
||||
return calib_dict
|
||||
|
||||
def connect_arm(self):
|
||||
self.arm_bus.connect()
|
||||
|
||||
def connect_hand(self):
|
||||
self.hand_bus.connect()
|
||||
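# Hypothetical usage sketch (not part of the original file): the port values
# hard-coded in __init__ above are assumed to match your machine.
if __name__ == "__main__":
    robot = HopeJuniorRobot()
    robot.connect_arm()
    robot.connect_hand()
    # Read the raw present positions of all arm joints, as done elsewhere in this repo.
    print(robot.arm_bus.read("Present_Position"))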
@@ -1,730 +0,0 @@
|
||||
from lerobot.common.robot_devices.motors.feetech import (
|
||||
CalibrationMode,
|
||||
FeetechMotorsBus,
|
||||
)
|
||||
import serial
|
||||
import threading
|
||||
import time
|
||||
from typing import Callable
|
||||
import pickle
|
||||
import cv2
|
||||
import numpy as np
|
||||
from collections import deque
|
||||
import json
|
||||
import os
|
||||
LOWER_BOUND_LINEAR = -100
|
||||
UPPER_BOUND_LINEAR = 200
|
||||
|
||||
class HomonculusArm:
|
||||
def __init__(self, serial_port: str = "/dev/ttyACM1", baud_rate: int = 115200):
|
||||
self.serial_port = serial_port
|
||||
self.baud_rate = baud_rate
|
||||
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
|
||||
|
||||
# Number of past values to keep in memory
|
||||
self.buffer_size = 10
|
||||
|
||||
# Initialize a buffer (deque) for each joint
|
||||
self.joint_buffer = {
|
||||
"wrist_roll": deque(maxlen=self.buffer_size),
|
||||
"wrist_pitch": deque(maxlen=self.buffer_size),
|
||||
"wrist_yaw": deque(maxlen=self.buffer_size),
|
||||
"elbow_flex": deque(maxlen=self.buffer_size),
|
||||
"shoulder_roll": deque(maxlen=self.buffer_size),
|
||||
"shoulder_yaw": deque(maxlen=self.buffer_size),
|
||||
"shoulder_pitch": deque(maxlen=self.buffer_size),
|
||||
}
|
||||
|
||||
# Start the reading thread
|
||||
self.thread = threading.Thread(target=self.async_read, daemon=True)
|
||||
self.thread.start()
|
||||
|
||||
# Last read dictionary
|
||||
self.last_d = {
|
||||
"wrist_roll": 100,
|
||||
"wrist_pitch": 100,
|
||||
"wrist_yaw": 100,
|
||||
"elbow_flex": 100,
|
||||
"shoulder_roll": 100,
|
||||
"shoulder_yaw": 100,
|
||||
"shoulder_pitch": 100,
|
||||
}
|
||||
self.calibration = None
|
||||
|
||||
# For adaptive EMA, we store a "previous smoothed" state per joint
|
||||
self.adaptive_ema_state = {
|
||||
"wrist_roll": None,
|
||||
"wrist_pitch": None,
|
||||
"wrist_yaw": None,
|
||||
"elbow_flex": None,
|
||||
"shoulder_roll": None,
|
||||
"shoulder_yaw": None,
|
||||
"shoulder_pitch": None,
|
||||
}
|
||||
|
||||
self.kalman_state = {
|
||||
joint: {"x": None, "P": None} for joint in self.joint_buffer.keys()
|
||||
}
|
||||
|
||||
@property
|
||||
def joint_names(self):
|
||||
return list(self.last_d.keys())
|
||||
|
||||
def read(self, motor_names: list[str] | None = None):
|
||||
"""
|
||||
Return the most recent (single) values from self.last_d,
|
||||
optionally applying calibration.
|
||||
"""
|
||||
if motor_names is None:
|
||||
motor_names = self.joint_names
|
||||
|
||||
# Get raw (last) values
|
||||
values = np.array([self.last_d[k] for k in motor_names])
|
||||
|
||||
#print(motor_names)
|
||||
print(values)
|
||||
|
||||
# Apply calibration if available
|
||||
if self.calibration is not None:
|
||||
values = self.apply_calibration(values, motor_names)
|
||||
print(values)
|
||||
return values
|
||||
|
||||
def read_running_average(self, motor_names: list[str] | None = None, linearize=False):
|
||||
"""
|
||||
Return the AVERAGE of the most recent self.buffer_size (or fewer, if not enough data) readings
|
||||
for each joint, optionally applying calibration.
|
||||
"""
|
||||
if motor_names is None:
|
||||
motor_names = self.joint_names
|
||||
|
||||
# Gather averaged readings from buffers
|
||||
smoothed_vals = []
|
||||
for name in motor_names:
|
||||
buf = self.joint_buffer[name]
|
||||
if len(buf) == 0:
|
||||
# If no data has been read yet, fall back to last_d
|
||||
smoothed_vals.append(self.last_d[name])
|
||||
else:
|
||||
# Otherwise, average over the existing buffer
|
||||
smoothed_vals.append(np.mean(buf))
|
||||
|
||||
smoothed_vals = np.array(smoothed_vals, dtype=np.float32)
|
||||
|
||||
# Apply calibration if available
|
||||
if self.calibration is not None:
|
||||
|
||||
if False:
|
||||
for i, joint_name in enumerate(motor_names):
|
||||
# Re-use the same raw_min / raw_max from the calibration
|
||||
calib_idx = self.calibration["motor_names"].index(joint_name)
|
||||
min_reading = self.calibration["start_pos"][calib_idx]
|
||||
max_reading = self.calibration["end_pos"][calib_idx]
|
||||
|
||||
B_value = smoothed_vals[i]
|
||||
print(joint_name)
|
||||
if joint_name == "elbow_flex":
|
||||
print('elbow')
|
||||
try:
|
||||
smoothed_vals[i] = int(min_reading+(max_reading - min_reading)*np.arcsin((B_value-min_reading)/(max_reading-min_reading))/(np.pi / 2))
|
||||
except:
|
||||
print('not working')
|
||||
print(smoothed_vals)
|
||||
print('not working')
|
||||
smoothed_vals = self.apply_calibration(smoothed_vals, motor_names)
|
||||
return smoothed_vals
|
||||
|
||||
def read_kalman_filter(
|
||||
self,
|
||||
Q: float = 1.0,
|
||||
R: float = 100.0,
|
||||
motor_names: list[str] | None = None
|
||||
) -> np.ndarray:
|
||||
"""
|
||||
Return a Kalman-filtered reading for each requested joint.
|
||||
|
||||
We store a separate Kalman filter (x, P) per joint. For each new measurement Z:
|
||||
1) Predict:
|
||||
x_pred = x (assuming no motion model)
|
||||
P_pred = P + Q
|
||||
2) Update:
|
||||
K = P_pred / (P_pred + R)
|
||||
x = x_pred + K * (Z - x_pred)
|
||||
P = (1 - K) * P_pred
|
||||
|
||||
:param Q: Process noise. Larger Q means the estimate can change more freely.
|
||||
:param R: Measurement noise. Larger R means we trust our sensor less.
|
||||
:param motor_names: If not specified, all joints are filtered.
|
||||
:return: Kalman-filtered positions as a numpy array.
|
||||
"""
|
||||
if motor_names is None:
|
||||
motor_names = self.joint_names
|
||||
|
||||
current_vals = np.array([self.last_d[name] for name in motor_names], dtype=np.float32)
|
||||
filtered_vals = np.zeros_like(current_vals)
|
||||
|
||||
for i, name in enumerate(motor_names):
|
||||
# Retrieve the filter state for this joint
|
||||
x = self.kalman_state[name]["x"]
|
||||
P = self.kalman_state[name]["P"]
|
||||
Z = current_vals[i]
|
||||
|
||||
# If this is the first reading, initialize
|
||||
if x is None or P is None:
|
||||
x = Z
|
||||
P = 1.0 # or some large initial uncertainty
|
||||
|
||||
# 1) Predict step
|
||||
x_pred = x # no velocity model, so x_pred = x
|
||||
P_pred = P + Q
|
||||
|
||||
# 2) Update step
|
||||
K = P_pred / (P_pred + R) # Kalman gain
|
||||
x_new = x_pred + K * (Z - x_pred) # new state estimate
|
||||
P_new = (1 - K) * P_pred # new covariance
|
||||
|
||||
# Save back
|
||||
self.kalman_state[name]["x"] = x_new
|
||||
self.kalman_state[name]["P"] = P_new
|
||||
|
||||
filtered_vals[i] = x_new
|
||||
|
||||
if self.calibration is not None:
|
||||
filtered_vals = self.apply_calibration(filtered_vals, motor_names)
|
||||
|
||||
return filtered_vals
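# Usage sketch (assumed, not in the original file; the port name is an example). Larger R trusts
# the sensor less (smoother but laggier), larger Q lets the estimate move faster:
#   arm = HomonculusArm(serial_port="/dev/ttyACM0")
#   while True:
#       pose = arm.read_kalman_filter(Q=1.0, R=100.0)
#       time.sleep(0.01)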
|
||||
|
||||
|
||||
def async_read(self):
|
||||
"""
|
||||
Continuously read from the serial buffer in its own thread,
|
||||
store into `self.last_d` and also append to the rolling buffer (joint_buffer).
|
||||
"""
|
||||
while True:
|
||||
if self.serial.in_waiting > 0:
|
||||
self.serial.flush()
|
||||
vals = self.serial.readline().decode("utf-8").strip()
|
||||
vals = vals.split(" ")
|
||||
|
||||
if len(vals) != 7:
|
||||
continue
|
||||
try:
|
||||
vals = [int(val) for val in vals]
|
||||
except ValueError:
# Malformed line (e.g. a partial read): flush and skip this iteration
self.serial.flush()
continue
|
||||
d = {
|
||||
"wrist_roll": vals[0],
|
||||
"wrist_yaw": vals[1],
|
||||
"wrist_pitch": vals[2],
|
||||
"elbow_flex": vals[3],
|
||||
"shoulder_roll": vals[4],
|
||||
"shoulder_yaw": vals[5],
|
||||
"shoulder_pitch": vals[6],
|
||||
}
|
||||
|
||||
# Update the last_d dictionary
|
||||
self.last_d = d
|
||||
|
||||
# Also push these new values into the rolling buffers
|
||||
for joint_name, joint_val in d.items():
|
||||
self.joint_buffer[joint_name].append(joint_val)
|
||||
|
||||
# Optional: short sleep to avoid busy-loop
|
||||
# time.sleep(0.001)
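# Example of one expected serial line (hypothetical values): seven space-separated integers,
# one per joint, in the order unpacked above, e.g.
#   "2035 3024 3019 979 1982 2166 2124"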
|
||||
|
||||
def run_calibration(self, robot):
|
||||
robot.arm_bus.write("Acceleration", 50)
|
||||
n_joints = len(self.joint_names)
|
||||
|
||||
max_open_all = np.zeros(n_joints, dtype=np.float32)
|
||||
min_open_all = np.zeros(n_joints, dtype=np.float32)
|
||||
max_closed_all = np.zeros(n_joints, dtype=np.float32)
|
||||
min_closed_all = np.zeros(n_joints, dtype=np.float32)
|
||||
|
||||
for i, jname in enumerate(self.joint_names):
|
||||
|
||||
print(f"\n--- Calibrating joint '{jname}' ---")
|
||||
|
||||
joint_idx = robot.arm_calib_dict["motor_names"].index(jname)
|
||||
open_val = robot.arm_calib_dict["start_pos"][joint_idx]
|
||||
print(f"Commanding {jname} to OPEN position {open_val}...")
|
||||
robot.arm_bus.write("Goal_Position", [open_val], [jname])
|
||||
|
||||
input("Physically verify or adjust the joint. Press Enter when ready to capture...")
|
||||
|
||||
open_pos_list = []
|
||||
for _ in range(100):
|
||||
all_joints_vals = self.read() # read entire arm
|
||||
open_pos_list.append(all_joints_vals[i]) # store only this joint
|
||||
time.sleep(0.01)
|
||||
|
||||
# Convert to numpy and track min/max
|
||||
open_array = np.array(open_pos_list, dtype=np.float32)
|
||||
max_open_all[i] = open_array.max()
|
||||
min_open_all[i] = open_array.min()
|
||||
closed_val = robot.arm_calib_dict["end_pos"][joint_idx]
|
||||
if jname == "elbow_flex":
|
||||
closed_val = closed_val - 700
|
||||
closed_val = robot.arm_calib_dict["end_pos"][joint_idx]
|
||||
print(f"Commanding {jname} to CLOSED position {closed_val}...")
|
||||
robot.arm_bus.write("Goal_Position", [closed_val], [jname])
|
||||
|
||||
input("Physically verify or adjust the joint. Press Enter when ready to capture...")
|
||||
|
||||
closed_pos_list = []
|
||||
for _ in range(100):
|
||||
all_joints_vals = self.read()
|
||||
closed_pos_list.append(all_joints_vals[i])
|
||||
time.sleep(0.01)
|
||||
|
||||
closed_array = np.array(closed_pos_list, dtype=np.float32)
|
||||
# Some thresholding for closed positions
|
||||
#closed_array[closed_array < 1000] = 60000
|
||||
|
||||
max_closed_all[i] = closed_array.max()
|
||||
min_closed_all[i] = closed_array.min()
|
||||
|
||||
robot.arm_bus.write("Goal_Position", [int((closed_val+open_val)/2)], [jname])
|
||||
|
||||
open_pos = np.maximum(max_open_all, max_closed_all)
|
||||
closed_pos = np.minimum(min_open_all, min_closed_all)
|
||||
|
||||
for i, jname in enumerate(self.joint_names):
|
||||
if jname not in ["wrist_pitch", "shoulder_pitch"]:
|
||||
# Swap open/closed for these joints
|
||||
tmp_pos = open_pos[i]
|
||||
open_pos[i] = closed_pos[i]
|
||||
closed_pos[i] = tmp_pos
|
||||
|
||||
# Debug prints
|
||||
print("\nFinal open/closed arrays after any swaps/inversions:")
|
||||
print(f"open_pos={open_pos}")
|
||||
print(f"closed_pos={closed_pos}")
|
||||
|
||||
|
||||
homing_offset = [0] * n_joints
|
||||
drive_mode = [0] * n_joints
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * n_joints
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": open_pos,
|
||||
"end_pos": closed_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.joint_names,
|
||||
}
|
||||
file_path = "examples/hopejr/settings/arm_calib.pkl"
|
||||
|
||||
if not os.path.exists(file_path):
|
||||
with open(file_path, "wb") as f:
|
||||
pickle.dump(calib_dict, f)
|
||||
print(f"Dictionary saved to {file_path}")
|
||||
|
||||
self.set_calibration(calib_dict)
|
||||
|
||||
def set_calibration(self, calibration: dict[str, list]):
|
||||
self.calibration = calibration
|
||||
|
||||
def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
|
||||
"""
|
||||
Example calibration that linearly maps [start_pos, end_pos] to [0,100].
|
||||
Extend or modify for your needs.
|
||||
"""
|
||||
if motor_names is None:
|
||||
motor_names = self.joint_names
|
||||
|
||||
values = values.astype(np.float32)
|
||||
|
||||
for i, name in enumerate(motor_names):
|
||||
calib_idx = self.calibration["motor_names"].index(name)
|
||||
calib_mode = self.calibration["calib_mode"][calib_idx]
|
||||
|
||||
if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
|
||||
start_pos = self.calibration["start_pos"][calib_idx]
|
||||
end_pos = self.calibration["end_pos"][calib_idx]
|
||||
|
||||
# Rescale the present position to [0, 100]
|
||||
values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
|
||||
|
||||
# Check boundaries
|
||||
if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
|
||||
# If you want to handle out-of-range differently:
|
||||
# raise JointOutOfRangeError(msg)
|
||||
msg = (
|
||||
f"Wrong motor position range detected for {name}. "
|
||||
f"Value = {values[i]} %, expected within [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}]"
|
||||
)
|
||||
print(msg)
|
||||
|
||||
return values
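# Worked example (hypothetical numbers): with start_pos=2000 and end_pos=1000 for a joint,
# a raw reading of 1500 maps to (1500 - 2000) / (1000 - 2000) * 100 = 50.0 %, i.e. halfway
# through the calibrated range; readings past end_pos map above 100 % and trigger the warning.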
|
||||
|
||||
|
||||
class HomonculusGlove:
|
||||
def __init__(self, serial_port: str = "/dev/ttyACM1", baud_rate: int = 115200):
|
||||
self.serial_port = serial_port
|
||||
self.baud_rate = baud_rate
|
||||
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
|
||||
|
||||
# Number of past values to keep in memory
|
||||
self.buffer_size = 10
|
||||
|
||||
# Initialize a buffer (deque) for each joint
|
||||
self.joint_buffer = {
|
||||
"thumb_0": deque(maxlen=self.buffer_size),
|
||||
"thumb_1": deque(maxlen=self.buffer_size),
|
||||
"thumb_2": deque(maxlen=self.buffer_size),
|
||||
"thumb_3": deque(maxlen=self.buffer_size),
|
||||
"index_0": deque(maxlen=self.buffer_size),
|
||||
"index_1": deque(maxlen=self.buffer_size),
|
||||
"index_2": deque(maxlen=self.buffer_size),
|
||||
"middle_0": deque(maxlen=self.buffer_size),
|
||||
"middle_1": deque(maxlen=self.buffer_size),
|
||||
"middle_2": deque(maxlen=self.buffer_size),
|
||||
"ring_0": deque(maxlen=self.buffer_size),
|
||||
"ring_1": deque(maxlen=self.buffer_size),
|
||||
"ring_2": deque(maxlen=self.buffer_size),
|
||||
"pinky_0": deque(maxlen=self.buffer_size),
|
||||
"pinky_1": deque(maxlen=self.buffer_size),
|
||||
"pinky_2": deque(maxlen=self.buffer_size),
|
||||
"battery_voltage": deque(maxlen=self.buffer_size),
|
||||
}
|
||||
|
||||
# Start the reading thread
|
||||
self.thread = threading.Thread(target=self.async_read, daemon=True)
|
||||
self.thread.start()
|
||||
|
||||
# Last read dictionary
|
||||
self.last_d = {
|
||||
"thumb_0": 100,
|
||||
"thumb_1": 100,
|
||||
"thumb_2": 100,
|
||||
"thumb_3": 100,
|
||||
"index_0": 100,
|
||||
"index_1": 100,
|
||||
"index_2": 100,
|
||||
"middle_0": 100,
|
||||
"middle_1": 100,
|
||||
"middle_2": 100,
|
||||
"ring_0": 100,
|
||||
"ring_1": 100,
|
||||
"ring_2": 100,
|
||||
"pinky_0": 100,
|
||||
"pinky_1": 100,
|
||||
"pinky_2": 100,
|
||||
"battery_voltage": 100,
|
||||
}
|
||||
self.calibration = None
|
||||
|
||||
@property
|
||||
def joint_names(self):
|
||||
return list(self.last_d.keys())
|
||||
|
||||
def read(self, motor_names: list[str] | None = None):
|
||||
"""
|
||||
Return the most recent (single) values from self.last_d,
|
||||
optionally applying calibration.
|
||||
"""
|
||||
if motor_names is None:
|
||||
motor_names = self.joint_names
|
||||
|
||||
# Get raw (last) values
|
||||
values = np.array([self.last_d[k] for k in motor_names])
|
||||
|
||||
print(values)
|
||||
|
||||
# Apply calibration if available
|
||||
if self.calibration is not None:
|
||||
values = self.apply_calibration(values, motor_names)
|
||||
print(values)
|
||||
return values
|
||||
|
||||
def read_running_average(self, motor_names: list[str] | None = None, linearize=False):
|
||||
"""
|
||||
Return the AVERAGE of the most recent self.buffer_size (or fewer, if not enough data) readings
|
||||
for each joint, optionally applying calibration.
|
||||
"""
|
||||
if motor_names is None:
|
||||
motor_names = self.joint_names
|
||||
|
||||
# Gather averaged readings from buffers
|
||||
smoothed_vals = []
|
||||
for name in motor_names:
|
||||
buf = self.joint_buffer[name]
|
||||
if len(buf) == 0:
|
||||
# If no data has been read yet, fall back to last_d
|
||||
smoothed_vals.append(self.last_d[name])
|
||||
else:
|
||||
# Otherwise, average over the existing buffer
|
||||
smoothed_vals.append(np.mean(buf))
|
||||
|
||||
smoothed_vals = np.array(smoothed_vals, dtype=np.float32)
|
||||
|
||||
# Apply calibration if available
|
||||
if self.calibration is not None:
|
||||
smoothed_vals = self.apply_calibration(smoothed_vals, motor_names)
|
||||
|
||||
return smoothed_vals
|
||||
|
||||
def async_read(self):
|
||||
"""
|
||||
Continuously read from the serial buffer in its own thread,
|
||||
store into `self.last_d` and also append to the rolling buffer (joint_buffer).
|
||||
"""
|
||||
while True:
|
||||
if self.serial.in_waiting > 0:
|
||||
self.serial.flush()
|
||||
vals = self.serial.readline().decode("utf-8").strip()
|
||||
vals = vals.split(" ")
|
||||
if len(vals) != 17:
|
||||
continue
|
||||
vals = [int(val) for val in vals]
|
||||
|
||||
d = {
|
||||
"thumb_0": vals[0],
|
||||
"thumb_1": vals[1],
|
||||
"thumb_2": vals[2],
|
||||
"thumb_3": vals[3],
|
||||
"index_0": vals[4],
|
||||
"index_1": vals[5],
|
||||
"index_2": vals[6],
|
||||
"middle_0": vals[7],
|
||||
"middle_1": vals[8],
|
||||
"middle_2": vals[9],
|
||||
"ring_0": vals[10],
|
||||
"ring_1": vals[11],
|
||||
"ring_2": vals[12],
|
||||
"pinky_0": vals[13],
|
||||
"pinky_1": vals[14],
|
||||
"pinky_2": vals[15],
|
||||
"battery_voltage": vals[16],
|
||||
}
|
||||
|
||||
# Update the last_d dictionary
|
||||
self.last_d = d
|
||||
|
||||
# Also push these new values into the rolling buffers
|
||||
for joint_name, joint_val in d.items():
|
||||
self.joint_buffer[joint_name].append(joint_val)
|
||||
|
||||
def run_calibration(self):
|
||||
print("\nMove arm to open position")
|
||||
input("Press Enter to continue...")
|
||||
open_pos_list = []
|
||||
for _ in range(100):
|
||||
open_pos = self.read()
|
||||
open_pos_list.append(open_pos)
|
||||
time.sleep(0.01)
|
||||
open_pos = np.array(open_pos_list)
|
||||
max_open_pos = open_pos.max(axis=0)
|
||||
min_open_pos = open_pos.min(axis=0)
|
||||
|
||||
print(f"{max_open_pos=}")
|
||||
print(f"{min_open_pos=}")
|
||||
|
||||
print("\nMove arm to closed position")
|
||||
input("Press Enter to continue...")
|
||||
closed_pos_list = []
|
||||
for _ in range(100):
|
||||
closed_pos = self.read()
|
||||
closed_pos_list.append(closed_pos)
|
||||
time.sleep(0.01)
|
||||
closed_pos = np.array(closed_pos_list)
|
||||
max_closed_pos = closed_pos.max(axis=0)
|
||||
closed_pos[closed_pos < 1000] = 60000
|
||||
min_closed_pos = closed_pos.min(axis=0)
|
||||
|
||||
print(f"{max_closed_pos=}")
|
||||
print(f"{min_closed_pos=}")
|
||||
|
||||
open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
|
||||
closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)
|
||||
|
||||
# INVERSION: swap open/closed for joints whose raw reading decreases when the joint closes
|
||||
for i, jname in enumerate(self.joint_names):
|
||||
if jname in [
|
||||
"thumb_0",
|
||||
"thumb_3",
|
||||
"index_2",
|
||||
"middle_2",
|
||||
"ring_2",
|
||||
"pinky_2",
|
||||
"index_0",
|
||||
]:
|
||||
tmp_pos = open_pos[i]
|
||||
open_pos[i] = closed_pos[i]
|
||||
closed_pos[i] = tmp_pos
|
||||
|
||||
print()
|
||||
print(f"{open_pos=}")
|
||||
print(f"{closed_pos=}")
|
||||
|
||||
homing_offset = [0] * len(self.joint_names)
|
||||
drive_mode = [0] * len(self.joint_names)
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": open_pos,
|
||||
"end_pos": closed_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.joint_names,
|
||||
}
|
||||
|
||||
file_path = "examples/hopejr/settings/hand_calib.pkl"
|
||||
|
||||
if not os.path.exists(file_path):
|
||||
with open(file_path, "wb") as f:
|
||||
pickle.dump(calib_dict, f)
|
||||
print(f"Dictionary saved to {file_path}")
|
||||
|
||||
# return calib_dict
|
||||
self.set_calibration(calib_dict)
|
||||
|
||||
|
||||
|
||||
def set_calibration(self, calibration: dict[str, list]):
|
||||
self.calibration = calibration
|
||||
|
||||
def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
|
||||
"""Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
|
||||
a "zero position" at 0 degree.
|
||||
|
||||
Note: We say "nominal degree range" since the motors can take values outside this range. For instance, 190 degrees, if the motor
|
||||
rotate more than a half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.
|
||||
|
||||
Joints values are original in [0, 2**32[ (unsigned int32). Each motor are expected to complete a full rotation
|
||||
when given a goal position that is + or - their resolution. For instance, feetech xl330-m077 have a resolution of 4096, and
|
||||
at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
|
||||
or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between each motor.
|
||||
To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
|
||||
in the centered nominal degree range ]-180, 180[.
|
||||
"""
|
||||
if motor_names is None:
|
||||
motor_names = self.motor_names
|
||||
|
||||
# Convert from unsigned int32 original range [0, 2**32] to signed float32 range
|
||||
values = values.astype(np.float32)
|
||||
|
||||
for i, name in enumerate(motor_names):
|
||||
calib_idx = self.calibration["motor_names"].index(name)
|
||||
calib_mode = self.calibration["calib_mode"][calib_idx]
|
||||
|
||||
if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
|
||||
start_pos = self.calibration["start_pos"][calib_idx]
|
||||
end_pos = self.calibration["end_pos"][calib_idx]
|
||||
|
||||
# Rescale the present position to a nominal range [0, 100] %,
|
||||
# useful for joints with linear motions like Aloha gripper
|
||||
values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
|
||||
|
||||
if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
|
||||
if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
|
||||
values[i] = end_pos
|
||||
else:
|
||||
msg = (
|
||||
f"Wrong motor position range detected for {name}. "
|
||||
f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
|
||||
f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
|
||||
f"but present value is {values[i]} %. "
|
||||
"This might be due to a cable connection issue creating an artificial jump in motor values. "
|
||||
"You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
|
||||
)
|
||||
print(msg)
|
||||
# raise JointOutOfRangeError(msg)
|
||||
|
||||
return values
|
||||
|
||||
# def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
|
||||
# """Inverse of `apply_calibration`."""
|
||||
# if motor_names is None:
|
||||
# motor_names = self.motor_names
|
||||
|
||||
# for i, name in enumerate(motor_names):
|
||||
# calib_idx = self.calibration["motor_names"].index(name)
|
||||
# calib_mode = self.calibration["calib_mode"][calib_idx]
|
||||
|
||||
# if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
|
||||
# start_pos = self.calibration["start_pos"][calib_idx]
|
||||
# end_pos = self.calibration["end_pos"][calib_idx]
|
||||
|
||||
# # Convert from nominal linear range of [0, 100] % to
|
||||
# # actual motor range of values which can be arbitrary.
|
||||
# values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
|
||||
|
||||
# values = np.round(values).astype(np.int32)
|
||||
# return values
|
||||
|
||||
class EncoderReader:
|
||||
def __init__(self, serial_port="/dev/ttyUSB1", baud_rate=115200):
|
||||
self.serial_port = serial_port
|
||||
self.baud_rate = baud_rate
|
||||
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
|
||||
|
||||
# Start a background thread to continuously read from the serial port
|
||||
self.thread = threading.Thread(target=self.async_read, daemon=True)
|
||||
self.thread.start()
|
||||
|
||||
# Store the latest encoder reading in this dictionary
|
||||
self.last_d = {"encoder": 500}
|
||||
|
||||
def async_read(self):
|
||||
while True:
|
||||
# Read one line from serial
|
||||
line = self.serial.readline().decode("utf-8").strip()
|
||||
if line:
|
||||
try:
|
||||
val = int(line) # Parse the incoming line as integer
|
||||
self.last_d["encoder"] = val
|
||||
except ValueError:
|
||||
# If we couldn't parse it as an integer, just skip
|
||||
pass
|
||||
|
||||
def read(self):
|
||||
"""
|
||||
Returns the last encoder value that was read.
|
||||
"""
|
||||
return self.last_d["encoder"]
|
||||
|
||||
class Tac_Man:
|
||||
def __init__(self, serial_port="/dev/ttyUSB1", baud_rate=115200):
|
||||
self.serial_port = serial_port
|
||||
self.baud_rate = baud_rate
|
||||
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
|
||||
|
||||
# Start a background thread to continuously read from the serial port
|
||||
self.thread = threading.Thread(target=self.async_read, daemon=True)
|
||||
self.thread.start()
|
||||
|
||||
# Store the latest encoder readings in this list
|
||||
self.last_d = [0, 0, 0] # Default values for three readings
|
||||
|
||||
def async_read(self):
|
||||
while True:
|
||||
# Read one line from serial
|
||||
line = self.serial.readline().decode("utf-8").strip()
|
||||
if line:
|
||||
try:
|
||||
# Parse the incoming line as three comma-separated integers
|
||||
values = [int(val) for val in line.split(",")]
|
||||
if len(values) == 3: # Ensure we have exactly three values
|
||||
self.last_d = values
|
||||
except ValueError:
|
||||
# If parsing fails, skip this line
|
||||
pass
|
||||
|
||||
def read(self):
|
||||
"""
|
||||
Returns the last encoder values that were read as a list of three integers.
|
||||
"""
|
||||
return self.last_d
|
||||
@@ -1,111 +0,0 @@
|
||||
test and test4
|
||||
installed serial and opencv
|
||||
after pip install -e .
|
||||
pip install -e ".[feetech]"
|
||||
|
||||
robot.hand_bus.read("Present_Position")
|
||||
array([ 349, 799, 1000, 1004, 508, 503, 673, 608, 791, 390, 552,
|
||||
506, 600, 565, 428, 379], dtype=int32)
|
||||
|
||||
robot.hand_bus.write("Goal_Position",[349,799,500,500,508,503,673,608,791,390,552,506,600,565,428,379])
|
||||
|
||||
|
||||
robot.arm_bus.write("Goal_Position", [1825, 2045, 2010, 2035, 1414, 1800, 1615])
|
||||
robot.arm_bus.read("Present_Position")
|
||||
|
||||
robot.arm_bus.write("Goal_Position", [1500], ["elbow_flex"])
|
||||
robot.arm_bus.write("Goal_Position", [2000], ["wrist_yaw"])
|
||||
|
||||
ranges: [600-2300, 1500-2300, 1300-2800, 1000-2500, 600-2800, 400-1700, 1300-2300]
|
||||
shoulder_up,
|
||||
shoulder forward,
|
||||
shoulder yaw,
|
||||
elbow_flex
|
||||
wrist_yaw,
|
||||
wrist_pitch,
|
||||
wrist_roll
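a sketch pairing the ranges above with the joints in the listed order (the pairing is assumed, not verified):

joint_ranges = {
    "shoulder_up": (600, 2300),
    "shoulder_forward": (1500, 2300),
    "shoulder_yaw": (1300, 2800),
    "elbow_flex": (1000, 2500),
    "wrist_yaw": (600, 2800),
    "wrist_pitch": (400, 1700),
    "wrist_roll": (1300, 2300),
}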
|
||||
|
||||
COM18
|
||||
|
||||
C:/Users/Lenovo/AppData/Local/Programs/Python/Python310/python.exe c:/Users/Lenovo/Documents/HuggingFace/lerobot/examples/test4.py
|
||||
|
||||
wrist pitch is broken

so the wrist motor was faulty
and we didn't know which one it was, because
if the chain has an issue we don't know how to locate which motor is at fault (cables are hard to remove)
|
||||
|
||||
to calibrate:
|
||||
|
||||
python lerobot/scripts/configure_motor.py \
|
||||
--port /dev/ttyACM1 \
|
||||
--brand feetech \
|
||||
--model sts3250 \
|
||||
--baudrate 1000000 \
|
||||
--ID 2
|
||||
|
||||
|
||||
python lerobot/scripts/configure_motor.py \
|
||||
--port /dev/ttyACM0 \
|
||||
--brand feetech \
|
||||
--model sm8512bl \
|
||||
--baudrate 115200 \
|
||||
--ID 1
|
||||
|
||||
python lerobot/scripts/configure_motor.py \
|
||||
--port /dev/ttyACM1 \
|
||||
--brand feetech \
|
||||
--model scs0009 \
|
||||
--baudrate 1000000 \
|
||||
--ID 30
|
||||
|
||||
why are the motors beeping?
|
||||
|
||||
|
||||
#interpolate between start and end pos
|
||||
robot.arm_bus.write("Goal_Position", [int((i*interp+j*(1-interp))) for i, j in zip(arm_calibration["start_pos"], arm_calibration["end_pos"])])
|
||||
|
||||
Ctrl + Shift + M to look for stuff
|
||||
|
||||
set calibration is useless
|
||||
|
||||
move the joints to that position too
|
||||
|
||||
|
||||
/home/nepyope/Desktop/HuggingFace/lerobot/lerobot/common/robot_devices/motors/feetech.py
|
||||
|
||||
there's clearly some lag, and it's probably because of an out-of-range issue
|
||||
|
||||
|
||||
# hand_calibration = robot.get_hand_calibration()
|
||||
# joint = input("Enter joint name: ")
|
||||
# j1 = f"{joint}_pinky_side"
|
||||
# j2 = f"{joint}_thumb_side"
|
||||
# encoder = EncoderReader("/dev/ttyUSB0", 115200)
|
||||
# start_angle1 = hand_calibration['start_pos'][hand_calibration['motor_names'].index(j1)]
|
||||
# end_angle1 = hand_calibration['end_pos'][hand_calibration['motor_names'].index(j1)]
|
||||
# start_angle2 = hand_calibration['start_pos'][hand_calibration['motor_names'].index(j2)]
|
||||
# end_angle2 = hand_calibration['end_pos'][hand_calibration['motor_names'].index(j2)]
|
||||
# # start_angle = shoulder_calibration['start_pos'][shoulder_calibration['motor_names'].index(joint)]
|
||||
# # end_angle = shoulder_calibration['end_pos'][shoulder_calibration['motor_names'].index(joint)]
|
||||
# while True:
|
||||
# angle1 = int(start_angle1+(end_angle1-start_angle1)*encoder.read()/1000)
|
||||
# angle2 = int(start_angle2+(end_angle2-start_angle2)*encoder.read()/1000)
|
||||
|
||||
# robot.hand_bus.write("Goal_Position",angle1, [j1])
|
||||
# robot.hand_bus.write("Goal_Position",angle2, [j2])
|
||||
# print(angle1, angle2)
|
||||
# time.sleep(0.1)
|
||||
|
||||
# print(robot.hand_bus.find_motor_indices())
|
||||
# exit()
|
||||
|
||||
|
||||
|
||||
maybe divide the 3.3 by 2 and use that as a reference
|
||||
|
||||
https://jlcpcb.com/partdetail/23831236-OPA340UA_UMW/C22365307
|
||||
|
||||
|
||||
-90 is good for the op amp
|
||||
@@ -1,52 +0,0 @@
|
||||
#include <Arduino.h>
|
||||
|
||||
// Define multiplexer input pins
|
||||
#define S0 5
|
||||
#define S1 6
|
||||
#define S2 8
|
||||
#define S3 7
|
||||
#define SENSOR_INPUT 4
|
||||
|
||||
#define SENSOR_COUNT 16
|
||||
|
||||
int rawVals[SENSOR_COUNT];
|
||||
|
||||
void measureRawValues() {
|
||||
for (uint8_t i = 0; i < SENSOR_COUNT; i++) {
|
||||
digitalWrite(S0, (i & 0b1) ^ 0b1);;
|
||||
digitalWrite(S1, (i >> 1 & 0b1) ^ 0b1);;
|
||||
digitalWrite(S2, (i >> 2 & 0b1) ^ 0b1);;
|
||||
digitalWrite(S3, i >> 3 & 0b1);
|
||||
delay(1);
|
||||
|
||||
rawVals[i] = analogRead(SENSOR_INPUT);
|
||||
}
|
||||
}
|
||||
|
||||
void printRawValues() {
|
||||
for (uint8_t i = 0; i < SENSOR_COUNT; i++) {
|
||||
Serial.print(rawVals[i]);
|
||||
if (i < SENSOR_COUNT - 1) Serial.print(" ");
|
||||
}
|
||||
Serial.println();
|
||||
}
|
||||
|
||||
void setup() {
|
||||
Serial.begin(115200);
|
||||
|
||||
pinMode(S0, OUTPUT);
|
||||
pinMode(S1, OUTPUT);
|
||||
pinMode(S2, OUTPUT);
|
||||
pinMode(S3, OUTPUT);
|
||||
|
||||
digitalWrite(S0, LOW);
|
||||
digitalWrite(S1, LOW);
|
||||
digitalWrite(S2, LOW);
|
||||
digitalWrite(S3, LOW);
|
||||
}
|
||||
|
||||
void loop() {
|
||||
measureRawValues();
|
||||
printRawValues();
|
||||
delay(1);
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
#include <Arduino.h>
|
||||
|
||||
// Define multiplexer input pins
|
||||
#define S0 5
|
||||
#define S1 6
|
||||
#define S2 8
|
||||
#define S3 7
|
||||
#define SENSOR_INPUT 4
|
||||
|
||||
#define SENSOR_COUNT 16
|
||||
|
||||
int rawVals[SENSOR_COUNT];
|
||||
|
||||
void measureRawValues() {
|
||||
for (uint8_t i = 0; i < SENSOR_COUNT; i++) {
|
||||
digitalWrite(S0, (i & 0b1) ^ 0b1);;
|
||||
digitalWrite(S1, (i >> 1 & 0b1) ^ 0b1);;
|
||||
digitalWrite(S2, (i >> 2 & 0b1) ^ 0b1);;
|
||||
digitalWrite(S3, i >> 3 & 0b1);
|
||||
delay(1);
|
||||
|
||||
rawVals[i] = analogRead(SENSOR_INPUT);
|
||||
}
|
||||
}
|
||||
|
||||
void printRawValues() {
|
||||
for (uint8_t i = 0; i < SENSOR_COUNT; i++) {
|
||||
Serial.print(rawVals[i]);
|
||||
if (i < SENSOR_COUNT - 1) Serial.print(" ");
|
||||
}
|
||||
Serial.println();
|
||||
}
|
||||
|
||||
void setup() {
|
||||
Serial.begin(115200);
|
||||
|
||||
pinMode(S0, OUTPUT);
|
||||
pinMode(S1, OUTPUT);
|
||||
pinMode(S2, OUTPUT);
|
||||
pinMode(S3, OUTPUT);
|
||||
|
||||
digitalWrite(S0, LOW);
|
||||
digitalWrite(S1, LOW);
|
||||
digitalWrite(S2, LOW);
|
||||
digitalWrite(S3, LOW);
|
||||
}
|
||||
|
||||
void loop() {
|
||||
measureRawValues();
|
||||
printRawValues();
|
||||
delay(1);
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
#include <Arduino.h>
|
||||
|
||||
// Define multiplexer input pins
|
||||
#define S0 5
|
||||
#define S1 6
|
||||
#define S2 8
|
||||
#define S3 7
|
||||
#define SENSOR_INPUT 4
|
||||
|
||||
#define SENSOR_COUNT 16
|
||||
|
||||
int rawVals[SENSOR_COUNT];
|
||||
|
||||
void measureRawValues() {
|
||||
for (uint8_t i = 0; i < SENSOR_COUNT; i++) {
|
||||
digitalWrite(S0, (i & 0b1) ^ 0b1);;
|
||||
digitalWrite(S1, (i >> 1 & 0b1) ^ 0b1);;
|
||||
digitalWrite(S2, (i >> 2 & 0b1) ^ 0b1);;
|
||||
digitalWrite(S3, i >> 3 & 0b1);
|
||||
delay(1);
|
||||
|
||||
rawVals[i] = analogRead(SENSOR_INPUT);
|
||||
}
|
||||
}
|
||||
|
||||
void printRawValues() {
|
||||
for (uint8_t i = 0; i < SENSOR_COUNT; i++) {
|
||||
Serial.print(rawVals[i]);
|
||||
if (i < SENSOR_COUNT - 1) Serial.print(" ");
|
||||
}
|
||||
Serial.println();
|
||||
}
|
||||
|
||||
void setup() {
|
||||
Serial.begin(115200);
|
||||
|
||||
pinMode(S0, OUTPUT);
|
||||
pinMode(S1, OUTPUT);
|
||||
pinMode(S2, OUTPUT);
|
||||
pinMode(S3, OUTPUT);
|
||||
|
||||
digitalWrite(S0, LOW);
|
||||
digitalWrite(S1, LOW);
|
||||
digitalWrite(S2, LOW);
|
||||
digitalWrite(S3, LOW);
|
||||
}
|
||||
|
||||
void loop() {
|
||||
measureRawValues();
|
||||
printRawValues();
|
||||
delay(1);
|
||||
}
|
||||
|
@@ -1,74 +0,0 @@
|
||||
import numpy as np
|
||||
from PIL import Image, ImageSequence
|
||||
|
||||
def coalesce_gif(im):
|
||||
"""
|
||||
Attempt to coalesce frames so each one is a full image.
|
||||
This handles many (though not all) partial-frame GIFs.
|
||||
"""
|
||||
# Convert mode to RGBA
|
||||
im = im.convert("RGBA")
|
||||
|
||||
# Prepare an accumulator the same size as the base frame
|
||||
base = Image.new("RGBA", im.size)
|
||||
frames = []
|
||||
|
||||
# Go through each frame
|
||||
for frame in ImageSequence.Iterator(im):
|
||||
base.alpha_composite(frame.convert("RGBA"))
|
||||
frames.append(base.copy())
|
||||
return frames
|
||||
|
||||
def remove_white_make_black(arr, threshold=250):
|
||||
"""
|
||||
For each pixel in arr (H,W,3), if R,G,B >= threshold, set to black (0,0,0).
|
||||
This effectively 'removes' white so it won't affect the sum.
|
||||
"""
|
||||
mask = (arr[..., 0] >= threshold) & \
|
||||
(arr[..., 1] >= threshold) & \
|
||||
(arr[..., 2] >= threshold)
|
||||
arr[mask] = 0 # set to black
|
||||
|
||||
def main():
|
||||
# Load the animated GIF
|
||||
gif = Image.open("input.gif")
|
||||
|
||||
# Coalesce frames so each is full-size
|
||||
frames = coalesce_gif(gif)
|
||||
if not frames:
|
||||
print("No frames found!")
|
||||
return
|
||||
|
||||
# Convert first frame to RGB array, initialize sum array
|
||||
w, h = frames[0].size
|
||||
sum_array = np.zeros((h, w, 3), dtype=np.uint16) # 16-bit to avoid overflow
|
||||
|
||||
# For each frame:
|
||||
for f in frames:
|
||||
# Convert to RGB
|
||||
rgb = f.convert("RGB")
|
||||
arr = np.array(rgb, dtype=np.uint16) # shape (H, W, 3)
|
||||
|
||||
# Remove near-white by setting it to black
|
||||
remove_white_make_black(arr, threshold=250)
|
||||
|
||||
# Add to sum_array, then clamp to 255
|
||||
sum_array += arr
|
||||
np.clip(sum_array, 0, 255, out=sum_array)
|
||||
|
||||
# Convert sum_array back to 8-bit
|
||||
sum_array = sum_array.astype(np.uint8)
|
||||
|
||||
# Finally, any pixel that stayed black is presumably "empty," so we set it to white
|
||||
black_mask = (sum_array[..., 0] == 0) & \
|
||||
(sum_array[..., 1] == 0) & \
|
||||
(sum_array[..., 2] == 0)
|
||||
sum_array[black_mask] = [255, 255, 255]
|
||||
|
||||
# Create final Pillow image
|
||||
final_img = Image.fromarray(sum_array, mode="RGB")
|
||||
final_img.save("result.png")
|
||||
print("Done! Wrote result.png.")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,31 +0,0 @@
|
||||
import time
|
||||
import serial
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
# Import the motor bus (adjust the import path as needed)
|
||||
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus
|
||||
|
||||
def main():
|
||||
|
||||
bus = FeetechMotorsBus(
|
||||
port="/dev/ttyACM0",
|
||||
motors={
|
||||
"leader": [1, "scs0009"],
|
||||
"follower": [2, "scs0009"]
|
||||
},
|
||||
protocol_version=1,
|
||||
group_sync_read=False
|
||||
)
|
||||
bus.connect()
|
||||
print(bus.read("Present_Position", "leader"))
|
||||
bus.write("Torque_Enable", 0, ["leader"])
|
||||
bus.write("Torque_Enable", 1, ["follower"])
|
||||
for i in range(10000000):
|
||||
time.sleep(0.01)
|
||||
pos = bus.read("Present_Position", "leader")
|
||||
if pos[0] > 1 and pos[0] < 1022:
|
||||
bus.write("Goal_Position", pos, ["follower"])
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,15 +0,0 @@
|
||||
robot:
|
||||
arm_bus:
|
||||
Lock: 0
|
||||
Torque_Limit: 1000
|
||||
Protection_Current: 500
|
||||
Over_Current_Protection_Time: 10
|
||||
Max_Torque_Limit: 1000
|
||||
Overload_Torque: 40 # Play around with this
|
||||
Protection_Time: 1000 # When does it kick in?
|
||||
Protective_Torque: 1
|
||||
Maximum_Acceleration: 100
|
||||
Torque_Enable: 1
|
||||
Acceleration: 30
|
||||
hand_bus:
|
||||
Acceleration: 100
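# (assumption / sketch, not part of the original file) each key above is treated as a Feetech
# control-table register and written verbatim when apply_arm_config() loads this file, e.g.
# robot.arm_bus.write("Torque_Limit", 1000) and robot.hand_bus.write("Acceleration", 100).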
|
||||
@@ -1,61 +0,0 @@
|
||||
import time
|
||||
import numpy as np
|
||||
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus
|
||||
|
||||
def main():
|
||||
# Instantiate the bus for a single motor on port /dev/ttyACM0.
|
||||
arm_bus = FeetechMotorsBus(
|
||||
port="/dev/ttyACM0",
|
||||
motors={"wrist_pitch": [1, "scs0009"]},
|
||||
protocol_version=1,
|
||||
group_sync_read=False, # using individual read calls
|
||||
)
|
||||
arm_bus.connect()
|
||||
|
||||
# Configure continuous rotation mode.
|
||||
arm_bus.write("Min_Angle_Limit", 0)
|
||||
arm_bus.write("Max_Angle_Limit", 1024)
|
||||
|
||||
# For model "scs0009", the raw reading runs from 0 to ~1022.
|
||||
resolution_max = 1022 # use 1022 as the effective maximum raw value
|
||||
|
||||
# Read initial raw motor position.
|
||||
prev_raw = arm_bus.read("Present_Position", ["wrist_pitch"])[0]
|
||||
print("Initial raw position:", prev_raw)
|
||||
|
||||
# Command continuous rotation.
|
||||
arm_bus.write("Goal_Position", 1024)
|
||||
|
||||
# Initialize loop counter.
|
||||
loops_count = 0
|
||||
target_effective = 1780
|
||||
tolerance = 50 # stop when effective position is within ±50 of target
|
||||
|
||||
while True:
|
||||
current_raw = arm_bus.read("Present_Position", ["wrist_pitch"])[0]
|
||||
|
||||
# Detect wrap-around: if the previous reading was near the top (>= 1020)
|
||||
# and current reading is low (< 100), count that as one full loop.
|
||||
if prev_raw >= 1020 and current_raw < 100:
|
||||
loops_count += 1
|
||||
print(f"Wrap detected! loops_count increased to {loops_count}")
|
||||
|
||||
# Compute the effective position.
|
||||
effective_position = loops_count * resolution_max + current_raw
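# e.g. (hypothetical) after one wrap with a raw reading of 760: 1 * 1022 + 760 = 1782,
# which is within the ±50 tolerance of target_effective (1780), so the loop stops below.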
|
||||
print(f"Raw position: {current_raw} | loops_count: {loops_count} | Effective position: {effective_position}")
|
||||
|
||||
# Check if effective position is within tolerance of the target.
|
||||
if abs(effective_position - target_effective) <= tolerance:
|
||||
# Command motor to stop by setting the current raw position as goal.
|
||||
arm_bus.write("Goal_Position", current_raw)
|
||||
print(f"Target reached (effective position: {effective_position}). Stopping motor at raw position {current_raw}.")
|
||||
break
|
||||
|
||||
prev_raw = current_raw
|
||||
time.sleep(0.01) # 10 ms delay
|
||||
|
||||
time.sleep(1)
|
||||
arm_bus.disconnect()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,226 +0,0 @@
|
||||
from follower import HopeJuniorRobot
|
||||
from leader import (
|
||||
HomonculusArm,
|
||||
HomonculusGlove,
|
||||
EncoderReader
|
||||
)
|
||||
from visualizer import value_to_color
|
||||
|
||||
import time
|
||||
import numpy as np
|
||||
|
||||
import pickle
|
||||
import pygame
|
||||
import typer
|
||||
|
||||
def main(
|
||||
calibrate_glove: bool = typer.Option(False, "--calibrate-glove", help="Calibrate the glove"),
|
||||
calibrate_exoskeleton: bool = typer.Option(False, "--calibrate-exoskeleton", help="Calibrate the exoskeleton"),
|
||||
freeze_fingers: bool = typer.Option(False, "--freeze-fingers", help="Freeze the fingers"),
|
||||
freeze_arm: bool = typer.Option(False, "--freeze-arm", help="Freeze the arm"),
show_loads: bool = typer.Option(False, "--show-loads", help="Show the loads in a GUI"),
):
|
||||
robot = HopeJuniorRobot()
|
||||
|
||||
|
||||
robot.connect_hand()
|
||||
robot.connect_arm()
|
||||
#read pos
|
||||
print(robot.hand_bus.read("Present_Position"))
|
||||
print(robot.arm_bus.read("Present_Position", "shoulder_pitch"))
|
||||
print(robot.arm_bus.read("Present_Position",["shoulder_yaw","shoulder_roll","elbow_flex","wrist_roll","wrist_yaw","wrist_pitch"]))
|
||||
#robot.arm_bus.write("Goal_Position", robot.arm_calib_dict["start_pos"][0]*1 +robot.arm_calib_dict["end_pos"][0]*0, ["wrist_roll"])
|
||||
for i in range(10):
|
||||
time.sleep(0.1)
|
||||
robot.apply_arm_config('examples/hopejr/settings/config.yaml')
|
||||
|
||||
# #calibrate arm
|
||||
arm_calibration = robot.get_arm_calibration()
|
||||
exoskeleton = HomonculusArm(serial_port="/dev/tty.usbmodem1201")
|
||||
|
||||
|
||||
if calibrate_exoskeleton:
|
||||
exoskeleton.run_calibration(robot)
|
||||
|
||||
file_path = "examples/hopejr/settings/arm_calib.pkl"
|
||||
with open(file_path, "rb") as f:
|
||||
calib_dict = pickle.load(f)
|
||||
print("Loaded dictionary:", calib_dict)
|
||||
exoskeleton.set_calibration(calib_dict)
|
||||
|
||||
#calibrate hand
|
||||
hand_calibration = robot.get_hand_calibration()
|
||||
glove = HomonculusGlove(serial_port = "/dev/tty.usbmodem1101")
|
||||
|
||||
if calibrate_glove:
|
||||
glove.run_calibration()
|
||||
|
||||
file_path = "examples/hopejr/settings/hand_calib.pkl"
|
||||
with open(file_path, "rb") as f:
|
||||
calib_dict = pickle.load(f)
|
||||
print("Loaded dictionary:", calib_dict)
|
||||
glove.set_calibration(calib_dict)
|
||||
|
||||
robot.hand_bus.set_calibration(hand_calibration)
|
||||
robot.arm_bus.set_calibration(arm_calibration)
|
||||
|
||||
# Initialize Pygame
|
||||
# pygame.init()
|
||||
|
||||
# # Set up the display
|
||||
# screen = pygame.display.set_mode((800, 600))
|
||||
|
||||
# pygame.display.set_caption("Robot Hand Visualization")
|
||||
|
||||
|
||||
# # Create hand structure with 16 squares and initial values
|
||||
# hand_components = []
|
||||
|
||||
# # Add thumb (4 squares in diamond shape)
|
||||
# thumb_positions = [
|
||||
# (150, 300), (125, 350),
|
||||
# (175, 350), (150, 400)
|
||||
# ]
|
||||
# for pos in thumb_positions:
|
||||
# hand_components.append({"pos": pos, "value": 0})
|
||||
|
||||
# # Add fingers (4 fingers with 3 squares each in vertical lines)
|
||||
# finger_positions = [
|
||||
# (200, 100), # Index
|
||||
# (250, 100), # Middle
|
||||
# (300, 100), # Ring
|
||||
# (350, 100) # Pinky
|
||||
# ]
|
||||
|
||||
# for x, y in finger_positions:
|
||||
# for i in range(3):
|
||||
# hand_components.append({"pos": (x, y + i * 50), "value": 0})
|
||||
|
||||
for i in range(1000000000000000):
|
||||
robot.apply_arm_config('examples/hopejr/settings/config.yaml')
|
||||
#robot.arm_bus.write("Acceleration", 50, "shoulder_yaw")
|
||||
joint_names = ["shoulder_pitch", "shoulder_yaw", "shoulder_roll", "elbow_flex", "wrist_roll", "wrist_yaw", "wrist_pitch"]
|
||||
#only wrist roll
|
||||
#joint_names = ["shoulder_pitch"]
|
||||
joint_values = exoskeleton.read(motor_names=joint_names)
|
||||
|
||||
#joint_values = joint_values.round().astype(int)
|
||||
joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}
|
||||
|
||||
motor_values = []
|
||||
motor_names = []
|
||||
motor_names += ["shoulder_pitch", "shoulder_yaw", "shoulder_roll", "elbow_flex", "wrist_roll", "wrist_yaw", "wrist_pitch"]
|
||||
#motor_names += ["shoulder_pitch"]
|
||||
motor_values += [joint_dict[name] for name in motor_names]
|
||||
#remove 50 from shoulder_roll
|
||||
#motor_values += [joint_dict[name] for name in motor_names]
|
||||
|
||||
motor_values = np.array(motor_values)
|
||||
motor_values = np.clip(motor_values, 0, 100)
|
||||
|
||||
print(motor_names, motor_values)
|
||||
if not freeze_arm:
|
||||
robot.arm_bus.write("Goal_Position", motor_values, motor_names)
|
||||
|
||||
if not freeze_fingers:#include hand
|
||||
hand_joint_names = []
|
||||
hand_joint_names += ["thumb_3", "thumb_2", "thumb_1", "thumb_0"]#, "thumb_3"]
|
||||
hand_joint_names += ["index_0", "index_1", "index_2"]
|
||||
hand_joint_names += ["middle_0", "middle_1", "middle_2"]
|
||||
hand_joint_names += ["ring_0", "ring_1", "ring_2"]
|
||||
hand_joint_names += ["pinky_0", "pinky_1", "pinky_2"]
|
||||
hand_joint_values = glove.read(hand_joint_names)
|
||||
hand_joint_values = hand_joint_values.round().astype(int)
|
||||
hand_joint_dict = {k: v for k, v in zip(hand_joint_names, hand_joint_values, strict=False)}
|
||||
|
||||
hand_motor_values = []
|
||||
hand_motor_names = []
|
||||
|
||||
# Thumb
|
||||
hand_motor_names += ["thumb_basel_rotation", "thumb_mcp", "thumb_pip", "thumb_dip"]#, "thumb_MCP"]
|
||||
hand_motor_values += [
|
||||
hand_joint_dict["thumb_3"],
|
||||
hand_joint_dict["thumb_2"],
|
||||
hand_joint_dict["thumb_1"],
|
||||
hand_joint_dict["thumb_0"]
|
||||
]
|
||||
|
||||
# # Index finger
|
||||
index_splay = 0.1
|
||||
hand_motor_names += ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
hand_motor_values += [
|
||||
hand_joint_dict["index_2"],
|
||||
(100 - hand_joint_dict["index_0"]) * index_splay + hand_joint_dict["index_1"] * (1 - index_splay),
|
||||
hand_joint_dict["index_0"] * index_splay + hand_joint_dict["index_1"] * (1 - index_splay),
|
||||
]
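# e.g. (hypothetical) with index_0 = 30 (splay), index_1 = 80 (flex) and index_splay = 0.1:
#   pinky_side = (100 - 30) * 0.1 + 80 * 0.9 = 79.0
#   thumb_side = 30 * 0.1 + 80 * 0.9 = 75.0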
|
||||
|
||||
# Middle finger
|
||||
middle_splay = 0.1
|
||||
hand_motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
hand_motor_values += [
|
||||
hand_joint_dict["middle_2"],
|
||||
hand_joint_dict["middle_0"] * middle_splay + hand_joint_dict["middle_1"] * (1 - middle_splay),
|
||||
(100 - hand_joint_dict["middle_0"]) * middle_splay + hand_joint_dict["middle_1"] * (1 - middle_splay),
|
||||
]
|
||||
|
||||
# # Ring finger
|
||||
ring_splay = 0.1
|
||||
hand_motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
hand_motor_values += [
|
||||
hand_joint_dict["ring_2"],
|
||||
(100 - hand_joint_dict["ring_0"]) * ring_splay + hand_joint_dict["ring_1"] * (1 - ring_splay),
|
||||
hand_joint_dict["ring_0"] * ring_splay + hand_joint_dict["ring_1"] * (1 - ring_splay),
|
||||
]
|
||||
|
||||
# # Pinky finger
|
||||
pinky_splay = -.1
|
||||
hand_motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
hand_motor_values += [
|
||||
hand_joint_dict["pinky_2"],
|
||||
hand_joint_dict["pinky_0"] * pinky_splay + hand_joint_dict["pinky_1"] * (1 - pinky_splay),
|
||||
(100 - hand_joint_dict["pinky_0"]) * pinky_splay + hand_joint_dict["pinky_1"] * (1 - pinky_splay),
|
||||
]
|
||||
|
||||
hand_motor_values = np.array(hand_motor_values)
|
||||
hand_motor_values = np.clip(hand_motor_values, 0, 100)
|
||||
robot.hand_bus.write("Acceleration", 255, hand_motor_names)
|
||||
robot.hand_bus.write("Goal_Position", hand_motor_values, hand_motor_names)
|
||||
|
||||
# if i%20==0 and i > 100:
|
||||
# try:
|
||||
# loads = robot.hand_bus.read("Present_Load")
|
||||
# for i, comp in enumerate(hand_components):
|
||||
# # Wave oscillates between 0 and 2024:
|
||||
# # Center (1012) +/- 1012 * sin(...)
|
||||
# comp["value"] = loads[i]
|
||||
# except:
|
||||
# pass
|
||||
|
||||
|
||||
time.sleep(0.01)
|
||||
|
||||
# for event in pygame.event.get():
|
||||
# if event.type == pygame.QUIT:
|
||||
# robot.hand_bus.disconnect()
|
||||
# robot.arm_bus.disconnect()
|
||||
# exit()
|
||||
# # Check for user pressing 'q' to quit
|
||||
# if event.type == pygame.KEYDOWN:
|
||||
# if event.key == pygame.K_q:
|
||||
# robot.hand_bus.disconnect()
|
||||
# robot.arm_bus.disconnect()
|
||||
# exit()
|
||||
|
||||
# # Draw background
|
||||
# screen.fill((0, 0, 0)) # Black background
|
||||
|
||||
# # Draw hand components
|
||||
# for comp in hand_components:
|
||||
# x, y = comp["pos"]
|
||||
# color = value_to_color(comp["value"])
|
||||
# pygame.draw.rect(screen, color, (x, y, 30, 30))
|
||||
|
||||
# pygame.display.flip()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
typer.run(main)
|
||||
@@ -1,135 +0,0 @@
|
||||
import serial
|
||||
import threading
|
||||
import time
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
# Thread function to read from a serial port continuously until stop_event is set.
|
||||
def read_serial(port, baudrate, stop_event, data_list):
|
||||
try:
|
||||
ser = serial.Serial(port, baudrate, timeout=1)
|
||||
except Exception as e:
|
||||
print(f"Error opening {port}: {e}")
|
||||
return
|
||||
|
||||
while not stop_event.is_set():
|
||||
try:
|
||||
line = ser.readline().decode('utf-8').strip()
|
||||
except Exception as e:
|
||||
print(f"Decode error on {port}: {e}")
|
||||
continue
|
||||
|
||||
if line:
|
||||
try:
|
||||
# Split the line into integer values.
|
||||
values = [int(x) for x in line.split()]
|
||||
# For ACM1, ignore the extra value if present.
|
||||
if len(values) >= 16:
|
||||
if len(values) > 16:
|
||||
values = values[:16]
|
||||
# Save the timestamp (relative to start) with the sensor readings.
|
||||
timestamp = time.time()
|
||||
data_list.append((timestamp, values))
|
||||
except Exception as e:
|
||||
print(f"Error parsing line from {port}: '{line}' -> {e}")
|
||||
ser.close()
|
||||
|
||||
def main():
|
||||
# --- Configuration ---
|
||||
# Set your serial port names here (adjust for your system)
|
||||
acm0_port = "/dev/ttyACM0" # Example for Linux (or "COM3" on Windows)
|
||||
acm1_port = "/dev/ttyACM1" # Example for Linux (or "COM4" on Windows)
|
||||
baudrate = 115200
|
||||
|
||||
# Data storage for each device:
|
||||
data_acm0 = [] # Will hold tuples of (timestamp, [16 sensor values])
|
||||
data_acm1 = []
|
||||
|
||||
# Event to signal threads to stop reading.
|
||||
stop_event = threading.Event()
|
||||
|
||||
# Create and start reader threads.
|
||||
thread_acm0 = threading.Thread(target=read_serial, args=(acm0_port, baudrate, stop_event, data_acm0))
|
||||
thread_acm1 = threading.Thread(target=read_serial, args=(acm1_port, baudrate, stop_event, data_acm1))
|
||||
thread_acm0.start()
|
||||
thread_acm1.start()
|
||||
|
||||
# Record data for 10 seconds.
|
||||
record_duration = 10 # seconds
|
||||
start_time = time.time()
|
||||
time.sleep(record_duration)
|
||||
stop_event.set() # signal threads to stop
|
||||
|
||||
# Wait for both threads to finish.
|
||||
thread_acm0.join()
|
||||
thread_acm1.join()
|
||||
print("Finished recording.")
|
||||
|
||||
# --- Process the Data ---
|
||||
# Convert lists of (timestamp, values) to numpy arrays.
|
||||
# Compute time relative to the start of the recording.
|
||||
times_acm0 = np.array([t - start_time for t, _ in data_acm0])
|
||||
sensor_acm0 = np.array([vals for _, vals in data_acm0]) # shape (N0, 16)
|
||||
|
||||
times_acm1 = np.array([t - start_time for t, _ in data_acm1])
|
||||
sensor_acm1 = np.array([vals for _, vals in data_acm1]) # shape (N1, 16)
|
||||
|
||||
# --- Plot 1: Overlapping Time Series ---
|
||||
plt.figure(figsize=(12, 8))
|
||||
# Plot each sensor from ACM0 in red.
|
||||
for i in range(16):
|
||||
plt.plot(times_acm0, sensor_acm0[:, i], color='red', alpha=0.7,
|
||||
label='ACM0 Sensor 1' if i == 0 else None)
|
||||
# Plot each sensor from ACM1 in blue.
|
||||
for i in range(16):
|
||||
plt.plot(times_acm1, sensor_acm1[:, i], color='blue', alpha=0.7,
|
||||
label='ACM1 Sensor 1' if i == 0 else None)
|
||||
plt.xlabel("Time (s)")
|
||||
plt.ylabel("Sensor Reading")
|
||||
plt.title("Overlapping Sensor Readings (ACM0 in Red, ACM1 in Blue)")
|
||||
plt.legend()
|
||||
plt.tight_layout()
|
||||
plt.savefig("overlapping_sensor_readings.png", dpi=300)
|
||||
plt.close()
|
||||
print("Saved overlapping_sensor_readings.png")
|
||||
|
||||
# --- Plot 2: Variance of Noise for Each Sensor ---
|
||||
# Compute variance (over time) for each sensor channel.
|
||||
variance_acm0 = np.var(sensor_acm0, axis=0)
|
||||
variance_acm1 = np.var(sensor_acm1, axis=0)
|
||||
sensor_numbers = np.arange(1, 17)
|
||||
bar_width = 0.35
|
||||
|
||||
plt.figure(figsize=(12, 6))
|
||||
plt.bar(sensor_numbers - bar_width/2, variance_acm0, bar_width, color='red', label='ACM0')
|
||||
plt.bar(sensor_numbers + bar_width/2, variance_acm1, bar_width, color='blue', label='ACM1')
|
||||
plt.xlabel("Sensor Number")
|
||||
plt.ylabel("Variance")
|
||||
plt.title("Noise Variance per Sensor")
|
||||
plt.xticks(sensor_numbers)
|
||||
plt.legend()
|
||||
plt.tight_layout()
|
||||
plt.savefig("sensor_variance.png", dpi=300)
|
||||
plt.close()
|
||||
print("Saved sensor_variance.png")
|
||||
|
||||
# --- Plot 3: Difference Between ACM0 and ACM1 Readings ---
|
||||
# Since the two devices may not sample at exactly the same time,
|
||||
# we interpolate ACM1's data onto ACM0's time base for each sensor.
|
||||
plt.figure(figsize=(12, 8))
|
||||
for i in range(16):
|
||||
if len(times_acm1) > 1 and len(times_acm0) > 1:
|
||||
interp_acm1 = np.interp(times_acm0, times_acm1, sensor_acm1[:, i])
|
||||
diff = sensor_acm0[:, i] - interp_acm1
|
||||
plt.plot(times_acm0, diff, label=f"Sensor {i+1}")
|
||||
plt.xlabel("Time (s)")
|
||||
plt.ylabel("Difference (ACM0 - ACM1)")
|
||||
plt.title("Difference in Sensor Readings")
|
||||
plt.legend(fontsize='small', ncol=2)
|
||||
plt.tight_layout()
|
||||
plt.savefig("sensor_differences.png", dpi=300)
|
||||
plt.close()
|
||||
print("Saved sensor_differences.png")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -1,84 +0,0 @@
|
||||
import time
|
||||
import serial
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
# Import the motor bus (adjust the import path as needed)
|
||||
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus
|
||||
|
||||
def main():
|
||||
# -------------------------------
|
||||
# Setup the motor bus (ACM0)
|
||||
# -------------------------------
|
||||
arm_bus = FeetechMotorsBus(
|
||||
port="/dev/ttyACM0",
|
||||
motors={
|
||||
"wrist_pitch": [7, "sts3215"],
|
||||
},
|
||||
protocol_version=0,
|
||||
)
|
||||
arm_bus.connect()
|
||||
|
||||
# -------------------------------
|
||||
# Setup the serial connection for sensor (ACM1)
|
||||
# -------------------------------
|
||||
try:
|
||||
ser = serial.Serial("/dev/ttyACM1", 115200, timeout=1)
|
||||
except Exception as e:
|
||||
print(f"Error opening serial port /dev/ttyACM1: {e}")
|
||||
return
|
||||
|
||||
# Lists to store the motor positions and sensor values.
|
||||
positions = []
|
||||
sensor_values = []
|
||||
|
||||
# -------------------------------
|
||||
# Loop: move motor and collect sensor data
|
||||
# -------------------------------
|
||||
# We sweep downward from 2800, decrementing by 10 each step.
for pos in range(2800, 1500, -10):  # 2800 down to 1510 (inclusive)
|
||||
# Command the motor to go to position 'pos'
|
||||
arm_bus.write("Goal_Position", pos, ["wrist_pitch"])
|
||||
|
||||
# Wait a short period for the motor to move and the sensor to update.
|
||||
time.sleep(0.01)
|
||||
|
||||
# Read one line from the sensor device.
|
||||
sensor_val = np.nan # default if reading fails
|
||||
try:
|
||||
line = ser.readline().decode('utf-8').strip()
|
||||
if line:
|
||||
# Split the line into parts and convert each part to int.
|
||||
parts = line.split()
|
||||
# Ensure there are enough values (we expect at least 15 values)
|
||||
if len(parts) >= 15:
|
||||
values = [int(x) for x in parts]
|
||||
# Use the 15th value (index 14)
|
||||
sensor_val = values[14]
|
||||
except Exception as e:
|
||||
print(f"Error parsing sensor data: {e}")
|
||||
|
||||
positions.append(pos)
|
||||
sensor_values.append(sensor_val)
|
||||
print(f"Motor pos: {pos} | Sensor 15th value: {sensor_val}")
|
||||
|
||||
# Move the motor back to its starting position
|
||||
arm_bus.write("Goal_Position", 2800, ["wrist_pitch"])
|
||||
# -------------------------------
|
||||
# Plot the data: Motor Angle vs. Sensor 15th Value
|
||||
# -------------------------------
|
||||
plt.figure(figsize=(10, 6))
|
||||
plt.plot(positions, sensor_values, marker='o', linestyle='-')
|
||||
plt.xlabel("Motor Angle")
|
||||
plt.ylabel("Sensor 15th Value")
|
||||
plt.title("Motor Angle vs Sensor 15th Value")
|
||||
plt.grid(True)
|
||||
plt.savefig("asd.png", dpi=300)
|
||||
plt.close()
|
||||
print("Plot saved as asd.png")
|
||||
|
||||
# Close the serial connection.
|
||||
ser.close()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,682 +0,0 @@
|
||||
import threading
|
||||
import time
|
||||
from typing import Callable
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
# from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp
|
||||
# from qai_hub_models.models.mediapipe_hand.model import (
|
||||
# MediaPipeHand,
|
||||
# )
|
||||
# from qai_hub_models.utils.image_processing import (
|
||||
# app_to_net_image_inputs,
|
||||
# )
|
||||
from lerobot.common.robot_devices.motors.feetech import (
|
||||
CalibrationMode,
|
||||
FeetechMotorsBus,
|
||||
)
|
||||
|
||||
LOWER_BOUND_LINEAR = -100
|
||||
UPPER_BOUND_LINEAR = 200
|
||||
|
||||
import serial
|
||||
|
||||
|
||||
class HomonculusGlove:
|
||||
def __init__(self):
|
||||
self.serial_port = "COM10"
|
||||
self.baud_rate = 115200
|
||||
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
|
||||
self.thread = threading.Thread(target=self.async_read)
|
||||
self.thread.start()
|
||||
self.last_d = {
|
||||
"thumb_0": 100,
|
||||
"thumb_1": 100,
|
||||
"thumb_2": 100,
|
||||
"thumb_3": 100,
|
||||
"index_0": 100,
|
||||
"index_1": 100,
|
||||
"index_2": 100,
|
||||
"middle_0": 100,
|
||||
"middle_1": 100,
|
||||
"middle_2": 100,
|
||||
"ring_0": 100,
|
||||
"ring_1": 100,
|
||||
"ring_2": 100,
|
||||
"pinky_0": 100,
|
||||
"pinky_1": 100,
|
||||
"pinky_2": 100,
|
||||
"battery_voltage": 100,
|
||||
}
|
||||
self.calibration = None
|
||||
|
||||
@property
|
||||
def joint_names(self):
|
||||
return list(self.last_d.keys())
|
||||
|
||||
def read(self, motor_names: list[str] | None = None):
|
||||
if motor_names is None:
|
||||
motor_names = self.joint_names
|
||||
|
||||
values = np.array([self.last_d[k] for k in motor_names])
|
||||
|
||||
print(motor_names)
|
||||
print(values)
|
||||
|
||||
if self.calibration is not None:
|
||||
values = self.apply_calibration(values, motor_names)
|
||||
print(values)
|
||||
return values
|
||||
|
||||
def async_read(self):
|
||||
while True:
|
||||
if self.serial.in_waiting > 0:
|
||||
self.serial.flush()
|
||||
vals = self.serial.readline().decode("utf-8").strip()
|
||||
vals = vals.split(" ")
|
||||
if len(vals) != 17:
|
||||
continue
|
||||
vals = [int(val) for val in vals]
|
||||
|
||||
d = {
|
||||
"thumb_0": vals[0],
|
||||
"thumb_1": vals[1],
|
||||
"thumb_2": vals[2],
|
||||
"thumb_3": vals[3],
|
||||
"index_0": vals[4],
|
||||
"index_1": vals[5],
|
||||
"index_2": vals[6],
|
||||
"middle_0": vals[7],
|
||||
"middle_1": vals[8],
|
||||
"middle_2": vals[9],
|
||||
"ring_0": vals[10],
|
||||
"ring_1": vals[11],
|
||||
"ring_2": vals[12],
|
||||
"pinky_0": vals[13],
|
||||
"pinky_1": vals[14],
|
||||
"pinky_2": vals[15],
|
||||
"battery_voltage": vals[16],
|
||||
}
|
||||
self.last_d = d
|
||||
# print(d.values())
|
||||
|
||||
def run_calibration(self):
|
||||
print("\nMove arm to open position")
|
||||
input("Press Enter to continue...")
|
||||
open_pos_list = []
|
||||
for _ in range(300):
|
||||
open_pos = self.read()
|
||||
open_pos_list.append(open_pos)
|
||||
time.sleep(0.01)
|
||||
open_pos = np.array(open_pos_list)
|
||||
max_open_pos = open_pos.max(axis=0)
|
||||
min_open_pos = open_pos.min(axis=0)
|
||||
|
||||
print(f"{max_open_pos=}")
|
||||
print(f"{min_open_pos=}")
|
||||
|
||||
print("\nMove arm to closed position")
|
||||
input("Press Enter to continue...")
|
||||
closed_pos_list = []
|
||||
for _ in range(300):
|
||||
closed_pos = self.read()
|
||||
closed_pos_list.append(closed_pos)
|
||||
time.sleep(0.01)
|
||||
closed_pos = np.array(closed_pos_list)
|
||||
max_closed_pos = closed_pos.max(axis=0)
|
||||
closed_pos[closed_pos < 1000] = 60000
|
||||
min_closed_pos = closed_pos.min(axis=0)
|
||||
|
||||
print(f"{max_closed_pos=}")
|
||||
print(f"{min_closed_pos=}")
|
||||
|
||||
open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
|
||||
closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)
|
||||
|
||||
# INVERSION: swap open/closed for joints whose raw reading decreases when the joint closes
|
||||
for i, jname in enumerate(self.joint_names):
|
||||
if jname in ["thumb_0", "thumb_3", "index_2", "middle_2", "ring_2", "pinky_0", "pinky_2"]:
|
||||
tmp_pos = open_pos[i]
|
||||
open_pos[i] = closed_pos[i]
|
||||
closed_pos[i] = tmp_pos
|
||||
|
||||
print()
|
||||
print(f"{open_pos=}")
|
||||
print(f"{closed_pos=}")
|
||||
|
||||
homing_offset = [0] * len(self.joint_names)
|
||||
drive_mode = [0] * len(self.joint_names)
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": open_pos,
|
||||
"end_pos": closed_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.joint_names,
|
||||
}
|
||||
# return calib_dict
|
||||
self.set_calibration(calib_dict)
|
||||
|
||||
def set_calibration(self, calibration: dict[str, list]):
|
||||
self.calibration = calibration
|
||||
|
||||
def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
|
||||
"""Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
|
||||
a "zero position" at 0 degree.
|
||||
|
||||
Note: We say "nominal degree range" since the motors can take values outside this range. For instance, 190 degrees, if the motor
|
||||
rotate more than a half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.
|
||||
|
||||
Joints values are original in [0, 2**32[ (unsigned int32). Each motor are expected to complete a full rotation
|
||||
when given a goal position that is + or - their resolution. For instance, feetech xl330-m077 have a resolution of 4096, and
|
||||
at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
|
||||
or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between each motor.
|
||||
To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
|
||||
in the centered nominal degree range ]-180, 180[.
|
||||
"""
|
||||
if motor_names is None:
|
||||
motor_names = self.motor_names
|
||||
|
||||
# Convert from unsigned int32 original range [0, 2**32] to signed float32 range
|
||||
values = values.astype(np.float32)
|
||||
|
||||
for i, name in enumerate(motor_names):
|
||||
calib_idx = self.calibration["motor_names"].index(name)
|
||||
calib_mode = self.calibration["calib_mode"][calib_idx]
|
||||
|
||||
if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
|
||||
start_pos = self.calibration["start_pos"][calib_idx]
|
||||
end_pos = self.calibration["end_pos"][calib_idx]
|
||||
|
||||
# Rescale the present position to a nominal range [0, 100] %,
|
||||
# useful for joints with linear motions like Aloha gripper
|
||||
values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
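# Illustrative numbers (not from a real calibration): with start_pos=1000 and end_pos=3000,
# a raw reading of 1500 maps to (1500 - 1000) / (3000 - 1000) * 100 = 25 %.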
|
||||
|
||||
if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
|
||||
if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
|
||||
values[i] = end_pos
|
||||
else:
|
||||
msg = (
|
||||
f"Wrong motor position range detected for {name}. "
|
||||
f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
|
||||
f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
|
||||
f"but present value is {values[i]} %. "
|
||||
"This might be due to a cable connection issue creating an artificial jump in motor values. "
|
||||
"You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
|
||||
)
|
||||
print(msg)
|
||||
# raise JointOutOfRangeError(msg)
|
||||
|
||||
return values
|
||||
|
||||
# def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
|
||||
# """Inverse of `apply_calibration`."""
|
||||
# if motor_names is None:
|
||||
# motor_names = self.motor_names
|
||||
|
||||
# for i, name in enumerate(motor_names):
|
||||
# calib_idx = self.calibration["motor_names"].index(name)
|
||||
# calib_mode = self.calibration["calib_mode"][calib_idx]
|
||||
|
||||
# if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
|
||||
# start_pos = self.calibration["start_pos"][calib_idx]
|
||||
# end_pos = self.calibration["end_pos"][calib_idx]
|
||||
|
||||
# # Convert from nominal linear range of [0, 100] % to
|
||||
# # actual motor range of values which can be arbitrary.
|
||||
# values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
|
||||
|
||||
# values = np.round(values).astype(np.int32)
|
||||
# return values
|
||||
|
||||
|
||||
class HopeJuniorRobot:
|
||||
def __init__(self):
|
||||
self.arm_bus = FeetechMotorsBus(
|
||||
port="COM14",
|
||||
motors={
|
||||
# "motor1": (2, "sts3250"),
|
||||
# "motor2": (1, "scs0009"),
|
||||
"shoulder_pitch": [1, "sts3250"],
|
||||
"shoulder_yaw": [2, "sts3215"], # TODO: sts3250
|
||||
"shoulder_roll": [3, "sts3215"], # TODO: sts3250
|
||||
"elbow_flex": [4, "sts3250"],
|
||||
"wrist_roll": [5, "sts3215"],
|
||||
"wrist_yaw": [6, "sts3215"],
|
||||
"wrist_pitch": [7, "sts3215"],
|
||||
},
|
||||
protocol_version=0,
|
||||
)
|
||||
self.hand_bus = FeetechMotorsBus(
|
||||
port="COM15",
|
||||
motors={
|
||||
"thumb_basel_rotation": [30, "scs0009"],
|
||||
"thumb_flexor": [27, "scs0009"],
|
||||
"thumb_pinky_side": [26, "scs0009"],
|
||||
"thumb_thumb_side": [28, "scs0009"],
|
||||
"index_flexor": [25, "scs0009"],
|
||||
"index_pinky_side": [31, "scs0009"],
|
||||
"index_thumb_side": [32, "scs0009"],
|
||||
"middle_flexor": [24, "scs0009"],
|
||||
"middle_pinky_side": [33, "scs0009"],
|
||||
"middle_thumb_side": [34, "scs0009"],
|
||||
"ring_flexor": [21, "scs0009"],
|
||||
"ring_pinky_side": [36, "scs0009"],
|
||||
"ring_thumb_side": [35, "scs0009"],
|
||||
"pinky_flexor": [23, "scs0009"],
|
||||
"pinky_pinky_side": [38, "scs0009"],
|
||||
"pinky_thumb_side": [37, "scs0009"],
|
||||
},
|
||||
protocol_version=1,
|
||||
group_sync_read=False,
|
||||
)
|
||||
|
||||
def get_hand_calibration(self):
|
||||
homing_offset = [0] * len(self.hand_bus.motor_names)
|
||||
drive_mode = [0] * len(self.hand_bus.motor_names)
|
||||
|
||||
start_pos = [
|
||||
500,
|
||||
900,
|
||||
1000,
|
||||
0,
|
||||
100,
|
||||
250,
|
||||
750,
|
||||
100,
|
||||
400,
|
||||
150,
|
||||
100,
|
||||
120,
|
||||
980,
|
||||
100,
|
||||
950,
|
||||
750,
|
||||
]
|
||||
|
||||
end_pos = [
|
||||
500 - 250,
|
||||
900 - 300,
|
||||
1000 - 550,
|
||||
0 + 550,
|
||||
1000,
|
||||
250 + 700,
|
||||
750 - 700,
|
||||
1000,
|
||||
400 + 700,
|
||||
150 + 700,
|
||||
1000,
|
||||
120 + 700,
|
||||
980 - 700,
|
||||
1000,
|
||||
950 - 700,
|
||||
750 - 700,
|
||||
]
|
||||
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": start_pos,
|
||||
"end_pos": end_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.hand_bus.motor_names,
|
||||
}
|
||||
return calib_dict
|
||||
|
||||
def connect(self):
|
||||
self.arm_bus.connect()
|
||||
#self.hand_bus.connect()
|
||||
|
||||
|
||||
ESCAPE_KEY_ID = 27
|
||||
|
||||
|
||||
def capture_and_display_processed_frames(
|
||||
frame_processor: Callable[[np.ndarray], np.ndarray],
|
||||
window_display_name: str,
|
||||
cap_device: int = 0,
|
||||
) -> None:
|
||||
"""
|
||||
Capture frames from the given input camera device, run them through
|
||||
the frame processor, and display the outputs in a window with the given name.
|
||||
|
||||
User should press Esc to exit.
|
||||
|
||||
Inputs:
|
||||
frame_processor: Callable[[np.ndarray], np.ndarray]
|
||||
Processes frames.
|
||||
Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte.
|
||||
window_display_name: str
|
||||
Name of the window used to display frames.
|
||||
cap_device: int
|
||||
Identifier for the camera to use to capture frames.
|
||||
"""
|
||||
cv2.namedWindow(window_display_name)
|
||||
capture = cv2.VideoCapture(cap_device)
|
||||
if not capture.isOpened():
|
||||
raise ValueError("Unable to open video capture.")
|
||||
|
||||
frame_count = 0
|
||||
has_frame, frame = capture.read()
|
||||
while has_frame:
|
||||
assert isinstance(frame, np.ndarray)
|
||||
|
||||
frame_count = frame_count + 1
|
||||
# mirror frame
|
||||
frame = np.ascontiguousarray(frame[:, ::-1, ::-1])
|
||||
|
||||
# process & show frame
|
||||
processed_frame = frame_processor(frame)
|
||||
cv2.imshow(window_display_name, processed_frame[:, :, ::-1])
|
||||
|
||||
has_frame, frame = capture.read()
|
||||
key = cv2.waitKey(1)
|
||||
if key == ESCAPE_KEY_ID:
|
||||
break
|
||||
|
||||
capture.release()
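# Example usage (hypothetical identity processor and window name, default webcam):
# capture_and_display_processed_frames(lambda f: f, "preview", cap_device=0)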
|
||||
|
||||
|
||||
def main():
|
||||
robot = HopeJuniorRobot()
|
||||
robot.connect()
|
||||
|
||||
# robot.hand_bus.calibration = None
|
||||
|
||||
# breakpoint()
|
||||
# print(robot.arm_bus.read("Present_Position"))
|
||||
robot.arm_bus.write("Torque_Enable", 1)
|
||||
robot.arm_bus.write("Acceleration", 20)
|
||||
robot.arm_bus.read("Acceleration")
|
||||
robot.arm_bus.write("Goal_Position", calibration["start_pos"])
|
||||
exit()
|
||||
calibration = robot.get_hand_calibration()
|
||||
robot.arm_bus.write("Goal_Position", calibration["start_pos"])
|
||||
# robot.hand_bus.write("Goal_Position", calibration["end_pos"][:4], robot.hand_bus.motor_names[:4])
|
||||
robot.hand_bus.set_calibration(calibration)
|
||||
lol = 1
|
||||
|
||||
# # print(motors_bus.write("Goal_Position", 500))
|
||||
# print(robot.hand_bus.read("Present_Position"))
|
||||
# # pos = hand_bus.read("Present_Position")
|
||||
# # hand_bus.write("Goal_Position", pos[0]+20, hand_bus.motor_names[0])
|
||||
# # hand_bus.write("Goal_Position", pos[i]+delta, hand_bus.motor_names[i])
|
||||
# robot.hand_bus.read("Acceleration")
|
||||
# robot.hand_bus.write("Acceleration", 10)
|
||||
|
||||
# sleep = 1
|
||||
# # robot.hand_bus.write(
|
||||
# # "Goal_Position", [glove.last_d['index_2']-1500,300,300], ["index_pinky_side", "index_flexor", "index_thumb_side"]
|
||||
# # )
|
||||
# #time.sleep(sleep)
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [100, 0, 0], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [200, 100, 600], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
|
||||
# breakpoint()
|
||||
|
||||
glove = HomonculusGlove()
|
||||
glove.run_calibration()
|
||||
# while True:
|
||||
# joint_names = ["index_1", "index_2"]
|
||||
# joint_values = glove.read(joint_names)
|
||||
# print(joint_values)
|
||||
|
||||
input()
|
||||
while True:
|
||||
joint_names = []
|
||||
joint_names += ["thumb_0", "thumb_2", "thumb_3"]
|
||||
joint_names += ["index_1", "index_2"]
|
||||
joint_names += ["middle_1", "middle_2"]
|
||||
joint_names += ["ring_1", "ring_2"]
|
||||
joint_names += ["pinky_1", "pinky_2"]
|
||||
joint_values = glove.read(joint_names)
|
||||
joint_values = joint_values.round().astype(int)
|
||||
joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}
|
||||
|
||||
motor_values = []
|
||||
motor_names = []
|
||||
motor_names += ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"]
|
||||
motor_values += [
|
||||
joint_dict["thumb_3"],
|
||||
joint_dict["thumb_0"],
|
||||
joint_dict["thumb_2"],
|
||||
joint_dict["thumb_2"],
|
||||
]
|
||||
motor_names += ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
motor_values += [joint_dict["index_2"], joint_dict["index_1"], joint_dict["index_1"]]
|
||||
motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
motor_values += [joint_dict["middle_2"], joint_dict["middle_1"], joint_dict["middle_1"]]
|
||||
motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
motor_values += [joint_dict["ring_2"], joint_dict["ring_1"], joint_dict["ring_1"]]
|
||||
motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
|
||||
motor_values += [joint_dict["pinky_2"], joint_dict["pinky_1"], joint_dict["pinky_1"]]
|
||||
|
||||
motor_values = np.array(motor_values)
|
||||
motor_values = np.clip(motor_values, 0, 100)
|
||||
|
||||
robot.hand_bus.write("Goal_Position", motor_values, motor_names)
|
||||
time.sleep(0.02)
|
||||
|
||||
while True:
|
||||
# print(glove.read()['index_2']-1500)
|
||||
glove_index_flexor = glove.read()["index_2"] - 1500
|
||||
glove_index_subflexor = glove.read()["index_1"] - 1500
|
||||
glove_index_side = glove.read()["index_0"] - 2100
|
||||
|
||||
vals = [glove_index_flexor, 1000 - (glove_index_subflexor), glove_index_subflexor]
|
||||
|
||||
keys = ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
|
||||
glove_middle_flexor = glove.read()["middle_2"] - 1500
|
||||
glove_middle_subflexor = 1000 - (glove.read()["middle_1"] - 1700)
|
||||
vals += [glove_middle_flexor, glove_middle_subflexor, glove_middle_subflexor - 200]
|
||||
keys += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
|
||||
glove_ring_flexor = glove.read()["ring_2"] - 1300
|
||||
print(glove_ring_flexor)
|
||||
glove_ring_subflexor = glove.read()["ring_1"] - 1100
|
||||
|
||||
vals += [glove_ring_flexor, 1000 - glove_ring_subflexor, glove_ring_subflexor]
|
||||
keys += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
|
||||
glove_pinky_flexor = glove.read()["pinky_2"] - 1500
|
||||
glove_pinky_subflexor = glove.read()["pinky_1"] - 1300
|
||||
vals += [300 + glove_pinky_flexor, max(1000 - glove_pinky_subflexor - 100, 0), glove_pinky_subflexor]
|
||||
keys += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
|
||||
robot.hand_bus.write("Goal_Position", vals, keys)
|
||||
time.sleep(0.1)
|
||||
|
||||
time.sleep(3)
|
||||
|
||||
def move_arm(loop=10):
|
||||
sleep = 1
|
||||
for i in range(loop):
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1195])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 2195])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1457, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 2357, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 974, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 2674, 1957, 1695])
|
||||
time.sleep(sleep + 2)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 1632, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 1369, 1632, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 1330, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [2381, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1681, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
|
||||
def move_hand(loop=10):
|
||||
sleep = 0.5
|
||||
for i in range(loop):
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[500, 1000, 0, 1000],
|
||||
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [100, 1000, 150], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [200, 100, 700], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[500, 1000 - 250, 0 + 300, 1000 - 200],
|
||||
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[100 + 450, 100 + 400, 100 + 400],
|
||||
["index_flexor", "index_pinky_side", "index_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[100 + 350, 1000 - 450, 150 + 450],
|
||||
["middle_flexor", "middle_pinky_side", "middle_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[200 + 650, 200 + 350, 0 + 350],
|
||||
["ring_flexor", "ring_pinky_side", "ring_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[200 + 450, 100 + 400, 700 - 400],
|
||||
["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
|
||||
move_hand(3)
|
||||
|
||||
move_arm(1)
|
||||
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
with ThreadPoolExecutor() as executor:
|
||||
executor.submit(move_arm)
|
||||
executor.submit(move_hand)
|
||||
|
||||
# initial position
|
||||
for i in range(3):
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [500, 1000, 0, 1000, 100, 950, 100, 100, 1000, 150, 200, 200, 0, 200, 100, 700]
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
# for i in range(3):
|
||||
# robot.hand_bus.write("Goal_Position", [500, 1000-150, 0+250, 1000-150,
|
||||
# 100+300, 950-250, 100+250,
|
||||
# 100+200, 1000-300, 150+300,
|
||||
# 200+500, 200+200, 0+200,
|
||||
# 200+300, 100+200, 700-200])
|
||||
# time.sleep(1)
|
||||
|
||||
# camera = 0
|
||||
# score_threshold = 0.95
|
||||
# iou_threshold = 0.3
|
||||
|
||||
# app = MediaPipeHandApp(MediaPipeHand.from_pretrained(), score_threshold, iou_threshold)
|
||||
|
||||
# def frame_processor(frame: np.ndarray) -> np.ndarray:
|
||||
# # Input Prep
|
||||
# NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs(frame)
|
||||
|
||||
# # Run Bounding Box & Keypoint Detector
|
||||
# batched_selected_boxes, batched_selected_keypoints = app._run_box_detector(NCHW_fp32_torch_frames)
|
||||
|
||||
# # The region of interest ( bounding box of 4 (x, y) corners).
|
||||
# # list[torch.Tensor(shape=[Num Boxes, 4, 2])],
|
||||
# # where 2 == (x, y)
|
||||
# #
|
||||
# # A list element will be None if there is no selected ROI.
|
||||
# batched_roi_4corners = app._compute_object_roi(batched_selected_boxes, batched_selected_keypoints)
|
||||
|
||||
# # selected landmarks for the ROI (if any)
|
||||
# # list[torch.Tensor(shape=[Num Selected Landmarks, K, 3])],
|
||||
# # where K == number of landmark keypoints, 3 == (x, y, confidence)
|
||||
# #
|
||||
# # A list element will be None if there is no ROI.
|
||||
# landmarks_out = app._run_landmark_detector(NHWC_int_numpy_frames, batched_roi_4corners)
|
||||
|
||||
# app._draw_predictions(
|
||||
# NHWC_int_numpy_frames,
|
||||
# batched_selected_boxes,
|
||||
# batched_selected_keypoints,
|
||||
# batched_roi_4corners,
|
||||
# *landmarks_out,
|
||||
# )
|
||||
|
||||
# return NHWC_int_numpy_frames[0]
|
||||
|
||||
# capture_and_display_processed_frames(frame_processor, "QAIHM Mediapipe Hand Demo", camera)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,231 +0,0 @@
|
||||
|
||||
#robot.arm_bus.write("Acceleration", [20], ["shoulder_pitch"])
|
||||
|
||||
####DEBUGGER####################
|
||||
# joint = input("Enter joint name: ")
|
||||
# encoder = EncoderReader("/dev/ttyUSB1", 115200)
|
||||
# start_angle = arm_calibration['start_pos'][arm_calibration['motor_names'].index(joint)]
|
||||
# end_angle = arm_calibration['end_pos'][arm_calibration['motor_names'].index(joint)]
|
||||
# # start_angle = shoulder_calibration['start_pos'][shoulder_calibration['motor_names'].index(joint)]
|
||||
# # end_angle = shoulder_calibration['end_pos'][shoulder_calibration['motor_names'].index(joint)]
|
||||
# while True:
|
||||
# angle = int(start_angle+(end_angle-start_angle)*encoder.read()/1000)
|
||||
# # robot.shoulder_bus.set_bus_baudrate(115200)
|
||||
# # robot.shoulder_bus.write("Goal_Position",angle, [joint])
|
||||
# robot.shoulder_bus.set_bus_baudrate(1000000)
|
||||
# robot.arm_bus.write("Goal_Position",angle, [joint])
|
||||
# print(angle)
|
||||
# time.sleep(0.1)
|
||||
|
||||
|
||||
|
||||
#####SAFETY CHECKS EXPLAINED#####
# There are two built-in safety checks: one based on current and one based on load.
# Current: if Present_Current exceeds Protection_Current, the servo waits Over_Current_Protection_Time (in ms) and then sets Torque_Enable to 0.
# Load: if Present_Load exceeds Max_Torque_Limit * Overload_Torque (expressed as a percentage), the servo waits Protection_Time (in ms)
# and then sets Max_Torque_Limit to Protective_Torque.
# Although Min/Max_Angle_Limit, Max_Temperature_Limit and Min/Max_Voltage_Limit can be specified, no safety checks are implemented for these values.
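# Worked example using the illustrative "method 2" values below: with Max_Torque_Limit = 1000 and
# Overload_Torque = 40 (%), the load check trips once Present_Load exceeds 1000 * 40 % = 400; after
# Protection_Time (10 ms) the servo then clamps Max_Torque_Limit down to Protective_Torque (1).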
|
||||
|
||||
#robot.arm_bus.set_calibration(arm_calibration)
|
||||
|
||||
|
||||
|
||||
|
||||
#method 1
|
||||
# robot.arm_bus.write("Overload_Torque", 80)
|
||||
# robot.arm_bus.write("Protection_Time", 10)
|
||||
# robot.arm_bus.write("Protective_Torque", 1)
|
||||
# robot.arm_bus.write("Protection_Current", 200,["shoulder_pitch"])
|
||||
# robot.arm_bus.write("Over_Current_Protection_Time", 10)
|
||||
|
||||
#method 2
|
||||
# robot.arm_bus.write("Protection_Current", 500,["shoulder_pitch"])
|
||||
# robot.arm_bus.write("Over_Current_Protection_Time", 10)
|
||||
# robot.arm_bus.write("Max_Torque_Limit", 1000)
|
||||
# robot.arm_bus.write("Overload_Torque", 40)
|
||||
# robot.arm_bus.write("Protection_Time", 10)
|
||||
# robot.arm_bus.write("Protective_Torque", 1)
|
||||
|
||||
# robot.shoulder_bus.set_bus_baudrate(115200)
|
||||
# robot.shoulder_bus.write("Goal_Position",2500)
|
||||
# exit()
|
||||
|
||||
######LOGGER####################
|
||||
# from test_torque.log_and_plot_feetech import log_and_plot_params
|
||||
|
||||
# params_to_log = [
|
||||
# "Protection_Current",
|
||||
# "Present_Current",
|
||||
# "Max_Torque_Limit",
|
||||
# "Protection_Time",
|
||||
# "Overload_Torque",
|
||||
# "Present_Load",
|
||||
# "Present_Position",
|
||||
# ]
|
||||
|
||||
# servo_names = ["shoulder_pitch"]
|
||||
|
||||
|
||||
# servo_data, timestamps = log_and_plot_params(robot.shoulder_bus, params_to_log, servo_names, test_id="shoulder_pitch")
|
||||
# exit()
|
||||
|
||||
|
||||
#robot.arm_bus.write("Goal_Position",2300, ["shoulder_pitch"])
|
||||
# dt = 2
|
||||
# steps = 4
|
||||
# max_pos = 1500
|
||||
# min_pos = 2300
|
||||
# increment = (max_pos - min_pos) / steps
|
||||
# # Move from min_pos to max_pos in steps
|
||||
# for i in range(steps + 1): # Include the last step
|
||||
# current_pos = min_pos + int(i * increment)
|
||||
# robot.arm_bus.write("Goal_Position", [current_pos], ["shoulder_pitch"])
|
||||
# time.sleep(dt)
|
||||
|
||||
# # Move back from max_pos to min_pos in steps
|
||||
# for i in range(steps + 1): # Include the last step
|
||||
# current_pos = max_pos - int(i * increment)
|
||||
# robot.arm_bus.write("Goal_Position", [current_pos], ["shoulder_pitch"])
|
||||
# time.sleep(dt)
|
||||
# demo to show how sending a lot of values makes the robot shake
|
||||
|
||||
|
||||
|
||||
# # Step increment
|
||||
#
|
||||
|
||||
# # Move from min_pos to max_pos in steps
|
||||
# for i in range(steps + 1): # Include the last step
|
||||
# current_pos = min_pos + int(i * increment)
|
||||
# robot.arm_bus.write("Goal_Position", [current_pos], ["elbow_flex"])
|
||||
# time.sleep(dt)
|
||||
|
||||
# # Move back from max_pos to min_pos in steps
|
||||
# for i in range(steps + 1): # Include the last step
|
||||
# current_pos = max_pos - int(i * increment)
|
||||
# robot.arm_bus.write("Goal_Position", [current_pos], ["elbow_flex"])
|
||||
# time.sleep(dt)
|
||||
# exit()
|
||||
|
||||
#robot.arm_bus.write("Goal_Position", a # shoulder_calibration = robot.get_shoulder_calibration()
|
||||
# print(shoulder_calibration)m_calibration["start_pos"])
|
||||
# robot.arm_bus.write("Over_Current_Protection_Time", 50)
|
||||
# robot.arm_bus.write("Protection_Current", 310, ["shoulder_pitch"])
|
||||
# robot.arm_bus.write("Overload_Torque", 80, ["shoulder_pitch"])
|
||||
# robot.arm_bus.write("Protection_Time", 100, ["shoulder_pitch"])
|
||||
# robot.arm_bus.write("Over_Current_Protection_Time", 50, ["shoulder_pitch"])
|
||||
|
||||
# robot.arm_bus.write("Protective_Torque", 20, ["shoulder_pitch"])
|
||||
|
||||
|
||||
# robot.arm_bus.write("Goal_Position", [600],["shoulder_pitch"])
|
||||
|
||||
# from test_torque.log_and_plot_feetech import log_and_plot_params
|
||||
|
||||
# params_to_log = [
|
||||
# "Present_Current",
|
||||
# "Protection_Current",
|
||||
# "Overload_Torque",
|
||||
# "Protection_Time",
|
||||
# "Protective_Torque",
|
||||
# "Present_Load",
|
||||
# "Present_Position",
|
||||
# ]
|
||||
|
||||
# servo_names = ["shoulder_pitch"]
|
||||
|
||||
#
|
||||
|
||||
#robot.arm_bus.write("Goal_Position", arm_calibration["start_pos"])
|
||||
|
||||
#robot.hand_bus.set_calibration(hand_calibration)
|
||||
|
||||
#interp = 0.3
|
||||
|
||||
#robot.arm_bus.write("Goal_Position", [int((i*interp+j*(1-interp))) for i, j in zip(arm_calibration["start_pos"], arm_calibration["end_pos"])])
|
||||
#exit()
|
||||
|
||||
# glove = HomonculusGlove()
|
||||
# glove.run_calibration()
|
||||
|
||||
|
||||
|
||||
####GOOD FOR GRASPING
|
||||
# start_pos = [
|
||||
# 500,
|
||||
# 900,
|
||||
# 500,
|
||||
# 1000,
|
||||
# 100,
|
||||
# 450,#250
|
||||
# 950,#750
|
||||
# 100,
|
||||
# 300,#400
|
||||
# 50,#150
|
||||
# 100,
|
||||
# 120,
|
||||
# 980,
|
||||
# 100,
|
||||
# 950,
|
||||
# 750,
|
||||
# ]
|
||||
# end_pos = [
|
||||
# start_pos[0] - 400,
|
||||
# start_pos[1] - 300,
|
||||
# start_pos[2] + 500,
|
||||
# start_pos[3] - 50,
|
||||
# start_pos[4] + 900,
|
||||
# start_pos[5] + 500,
|
||||
# start_pos[6] - 500,
|
||||
# start_pos[7] + 900,
|
||||
# start_pos[8] + 700,
|
||||
# start_pos[9] + 700,
|
||||
# start_pos[10] + 900,
|
||||
# start_pos[11] + 700,
|
||||
# start_pos[12] - 700,
|
||||
# start_pos[13] + 900,
|
||||
# start_pos[14] - 700,
|
||||
# start_pos[15] - 700,
|
||||
# ]
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
SCS_SERIES_CONTROL_TABLE = {
|
||||
|
||||
# "Max_Torque_Limit": (16, 2),
|
||||
# "Phase": (18, 1),
|
||||
# "Unloading_Condition": (19, 1),
|
||||
|
||||
"Protective_Torque": (37, 1),
|
||||
"Protection_Time": (38, 1),
|
||||
#Baud_Rate": (48, 1),
|
||||
|
||||
}
|
||||
|
||||
def read_and_print_scs_values(robot):
|
||||
for param_name in SCS_SERIES_CONTROL_TABLE:
|
||||
value = robot.hand_bus.read(param_name)
|
||||
print(f"{param_name}: {value}")
|
||||
|
||||
motor_1_values = {
|
||||
"Lock" : 255,
|
||||
#"Protection_Time": 20#so if you write to these they turn to 0 for some fucking reason. protection time was 100, procetive to
|
||||
}
|
||||
|
||||
# motor_1_values = {
|
||||
# "Lock": 1,
|
||||
# "Protection_Time": 100,
|
||||
# "Protective_Torque": 20,
|
||||
# "Phase": 1,#thisu is bullshit
|
||||
# "Unloading_Condition": 32,
|
||||
|
||||
# }
|
||||
#bug in writing to specific values of the scs0009
|
||||
|
||||
# Write values to motor 2, there is overload torque there
|
||||
#ok so i can write, the jittering is because of the overload torque which is still being triggered
|
||||
|
||||
# TODO: write a functioning version for the scs0009 (or maybe I don't need to)
|
||||
@@ -1,18 +0,0 @@
|
||||
# Color gradient function (0-2024 scaled to 0-10)
|
||||
def value_to_color(value):
|
||||
# Clamp the value between 0 and 2024
|
||||
value = max(0, min(2024, value))
|
||||
|
||||
# Scale from [0..2024] to [0..10]
|
||||
scaled_value = (value / 2024) * 10
|
||||
|
||||
# Green to Yellow (scaled_value 0..5), then Yellow to Red (scaled_value 5..10)
|
||||
if scaled_value <= 5:
|
||||
r = int(255 * (scaled_value / 5))
|
||||
g = 255
|
||||
else:
|
||||
r = 255
|
||||
g = int(255 * (1 - (scaled_value - 5) / 5))
|
||||
b = 0
|
||||
|
||||
return (r, g, b)
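# Example outputs: value_to_color(0) -> (0, 255, 0) green, value_to_color(1012) -> (255, 255, 0) yellow,
# value_to_color(2024) -> (255, 0, 0) red.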
|
||||
681
examples/test.py
@@ -1,681 +0,0 @@
|
||||
import threading
|
||||
import time
|
||||
from typing import Callable
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
# from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp
|
||||
# from qai_hub_models.models.mediapipe_hand.model import (
|
||||
# MediaPipeHand,
|
||||
# )
|
||||
# from qai_hub_models.utils.image_processing import (
|
||||
# app_to_net_image_inputs,
|
||||
# )
|
||||
from lerobot.common.robot_devices.motors.feetech import (
|
||||
CalibrationMode,
|
||||
FeetechMotorsBus,
|
||||
)
|
||||
|
||||
LOWER_BOUND_LINEAR = -100
|
||||
UPPER_BOUND_LINEAR = 200
|
||||
|
||||
import serial
|
||||
|
||||
|
||||
class HomonculusGlove:
|
||||
def __init__(self):
|
||||
self.serial_port = "/dev/tty.usbmodem21401"
|
||||
self.baud_rate = 115200
|
||||
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
|
||||
self.thread = threading.Thread(target=self.async_read)
|
||||
self.thread.start()
|
||||
self.last_d = {
|
||||
"thumb_0": 100,
|
||||
"thumb_1": 100,
|
||||
"thumb_2": 100,
|
||||
"thumb_3": 100,
|
||||
"index_0": 100,
|
||||
"index_1": 100,
|
||||
"index_2": 100,
|
||||
"middle_0": 100,
|
||||
"middle_1": 100,
|
||||
"middle_2": 100,
|
||||
"ring_0": 100,
|
||||
"ring_1": 100,
|
||||
"ring_2": 100,
|
||||
"pinky_0": 100,
|
||||
"pinky_1": 100,
|
||||
"pinky_2": 100,
|
||||
"battery_voltage": 100,
|
||||
}
|
||||
self.calibration = None
|
||||
|
||||
@property
|
||||
def joint_names(self):
|
||||
return list(self.last_d.keys())
|
||||
|
||||
def read(self, motor_names: list[str] | None = None):
|
||||
if motor_names is None:
|
||||
motor_names = self.joint_names
|
||||
|
||||
values = np.array([self.last_d[k] for k in motor_names])
|
||||
|
||||
print(motor_names)
|
||||
print(values)
|
||||
|
||||
if self.calibration is not None:
|
||||
values = self.apply_calibration(values, motor_names)
|
||||
print(values)
|
||||
return values
|
||||
|
||||
def async_read(self):
|
||||
while True:
|
||||
if self.serial.in_waiting > 0:
|
||||
self.serial.flush()
|
||||
vals = self.serial.readline().decode("utf-8").strip()
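# Each line from the glove firmware is expected to carry 17 space-separated integers:
# 16 joint readings (in the order of the dict below) followed by the battery voltage.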
|
||||
vals = vals.split(" ")
|
||||
if len(vals) != 17:
|
||||
continue
|
||||
vals = [int(val) for val in vals]
|
||||
|
||||
d = {
|
||||
"thumb_0": vals[0],
|
||||
"thumb_1": vals[1],
|
||||
"thumb_2": vals[2],
|
||||
"thumb_3": vals[3],
|
||||
"index_0": vals[4],
|
||||
"index_1": vals[5],
|
||||
"index_2": vals[6],
|
||||
"middle_0": vals[7],
|
||||
"middle_1": vals[8],
|
||||
"middle_2": vals[9],
|
||||
"ring_0": vals[10],
|
||||
"ring_1": vals[11],
|
||||
"ring_2": vals[12],
|
||||
"pinky_0": vals[13],
|
||||
"pinky_1": vals[14],
|
||||
"pinky_2": vals[15],
|
||||
"battery_voltage": vals[16],
|
||||
}
|
||||
self.last_d = d
|
||||
# print(d.values())
|
||||
|
||||
def run_calibration(self):
|
||||
print("\nMove arm to open position")
|
||||
input("Press Enter to continue...")
|
||||
open_pos_list = []
|
||||
for _ in range(300):
|
||||
open_pos = self.read()
|
||||
open_pos_list.append(open_pos)
|
||||
time.sleep(0.01)
|
||||
open_pos = np.array(open_pos_list)
|
||||
max_open_pos = open_pos.max(axis=0)
|
||||
min_open_pos = open_pos.min(axis=0)
|
||||
|
||||
print(f"{max_open_pos=}")
|
||||
print(f"{min_open_pos=}")
|
||||
|
||||
print("\nMove arm to closed position")
|
||||
input("Press Enter to continue...")
|
||||
closed_pos_list = []
|
||||
for _ in range(300):
|
||||
closed_pos = self.read()
|
||||
closed_pos_list.append(closed_pos)
|
||||
time.sleep(0.01)
|
||||
closed_pos = np.array(closed_pos_list)
|
||||
max_closed_pos = closed_pos.max(axis=0)
|
||||
closed_pos[closed_pos < 1000] = 60000
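# Presumably: readings below 1000 during the closed pose are treated as wrap-around / outlier samples,
# so they are pushed to 60000 and cannot win the min() taken just below.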
|
||||
min_closed_pos = closed_pos.min(axis=0)
|
||||
|
||||
print(f"{max_closed_pos=}")
|
||||
print(f"{min_closed_pos=}")
|
||||
|
||||
open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
|
||||
closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)
|
||||
|
||||
# INVERSION: these joints read inverted (raw value decreases as the finger closes), so swap their open/closed endpoints
|
||||
for i, jname in enumerate(self.joint_names):
|
||||
if jname in ["thumb_0", "thumb_3", "index_2", "middle_2", "ring_2", "pinky_0", "pinky_2"]:
|
||||
tmp_pos = open_pos[i]
|
||||
open_pos[i] = closed_pos[i]
|
||||
closed_pos[i] = tmp_pos
|
||||
|
||||
print()
|
||||
print(f"{open_pos=}")
|
||||
print(f"{closed_pos=}")
|
||||
|
||||
homing_offset = [0] * len(self.joint_names)
|
||||
drive_mode = [0] * len(self.joint_names)
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": open_pos,
|
||||
"end_pos": closed_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.joint_names,
|
||||
}
|
||||
# return calib_dict
|
||||
self.set_calibration(calib_dict)
|
||||
|
||||
def set_calibration(self, calibration: dict[str, list]):
|
||||
self.calibration = calibration
|
||||
|
||||
def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
|
||||
"""Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
|
||||
a "zero position" at 0 degree.
|
||||
|
||||
Note: We say "nominal degree range" since the motors can take values outside this range. For instance, 190 degrees, if the motor
|
||||
rotate more than a half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.
|
||||
|
||||
Joints values are original in [0, 2**32[ (unsigned int32). Each motor are expected to complete a full rotation
|
||||
when given a goal position that is + or - their resolution. For instance, feetech xl330-m077 have a resolution of 4096, and
|
||||
at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
|
||||
or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between each motor.
|
||||
To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
|
||||
in the centered nominal degree range ]-180, 180[.
|
||||
"""
|
||||
if motor_names is None:
|
||||
motor_names = self.motor_names
|
||||
|
||||
# Convert from unsigned int32 original range [0, 2**32] to signed float32 range
|
||||
values = values.astype(np.float32)
|
||||
|
||||
for i, name in enumerate(motor_names):
|
||||
calib_idx = self.calibration["motor_names"].index(name)
|
||||
calib_mode = self.calibration["calib_mode"][calib_idx]
|
||||
|
||||
if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
|
||||
start_pos = self.calibration["start_pos"][calib_idx]
|
||||
end_pos = self.calibration["end_pos"][calib_idx]
|
||||
|
||||
# Rescale the present position to a nominal range [0, 100] %,
|
||||
# useful for joints with linear motions like Aloha gripper
|
||||
values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
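# Illustrative numbers (not from a real calibration): with start_pos=1000 and end_pos=3000,
# a raw reading of 1500 maps to (1500 - 1000) / (3000 - 1000) * 100 = 25 %.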
|
||||
|
||||
if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
|
||||
if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
|
||||
values[i] = end_pos
|
||||
else:
|
||||
msg = (
|
||||
f"Wrong motor position range detected for {name}. "
|
||||
f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
|
||||
f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
|
||||
f"but present value is {values[i]} %. "
|
||||
"This might be due to a cable connection issue creating an artificial jump in motor values. "
|
||||
"You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
|
||||
)
|
||||
print(msg)
|
||||
# raise JointOutOfRangeError(msg)
|
||||
|
||||
return values
|
||||
|
||||
# def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
|
||||
# """Inverse of `apply_calibration`."""
|
||||
# if motor_names is None:
|
||||
# motor_names = self.motor_names
|
||||
|
||||
# for i, name in enumerate(motor_names):
|
||||
# calib_idx = self.calibration["motor_names"].index(name)
|
||||
# calib_mode = self.calibration["calib_mode"][calib_idx]
|
||||
|
||||
# if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
|
||||
# start_pos = self.calibration["start_pos"][calib_idx]
|
||||
# end_pos = self.calibration["end_pos"][calib_idx]
|
||||
|
||||
# # Convert from nominal linear range of [0, 100] % to
|
||||
# # actual motor range of values which can be arbitrary.
|
||||
# values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
|
||||
|
||||
# values = np.round(values).astype(np.int32)
|
||||
# return values
|
||||
|
||||
|
||||
class HopeJuniorRobot:
|
||||
def __init__(self):
|
||||
self.arm_bus = FeetechMotorsBus(
|
||||
port="/dev/tty.usbmodem58760429571",
|
||||
motors={
|
||||
# "motor1": (2, "sts3250"),
|
||||
# "motor2": (1, "scs0009"),
|
||||
"shoulder_pitch": [1, "sts3250"],
|
||||
"shoulder_yaw": [2, "sts3215"], # TODO: sts3250
|
||||
"shoulder_roll": [3, "sts3215"], # TODO: sts3250
|
||||
"elbow_flex": [4, "sts3250"],
|
||||
"wrist_roll": [5, "sts3215"],
|
||||
"wrist_yaw": [6, "sts3215"],
|
||||
"wrist_pitch": [7, "sts3215"],
|
||||
},
|
||||
protocol_version=0,
|
||||
)
|
||||
self.hand_bus = FeetechMotorsBus(
|
||||
port="/dev/tty.usbmodem585A0077581",
|
||||
motors={
|
||||
"thumb_basel_rotation": [30, "scs0009"],
|
||||
"thumb_flexor": [27, "scs0009"],
|
||||
"thumb_pinky_side": [26, "scs0009"],
|
||||
"thumb_thumb_side": [28, "scs0009"],
|
||||
"index_flexor": [25, "scs0009"],
|
||||
"index_pinky_side": [31, "scs0009"],
|
||||
"index_thumb_side": [32, "scs0009"],
|
||||
"middle_flexor": [24, "scs0009"],
|
||||
"middle_pinky_side": [33, "scs0009"],
|
||||
"middle_thumb_side": [34, "scs0009"],
|
||||
"ring_flexor": [21, "scs0009"],
|
||||
"ring_pinky_side": [36, "scs0009"],
|
||||
"ring_thumb_side": [35, "scs0009"],
|
||||
"pinky_flexor": [23, "scs0009"],
|
||||
"pinky_pinky_side": [38, "scs0009"],
|
||||
"pinky_thumb_side": [37, "scs0009"],
|
||||
},
|
||||
protocol_version=1,
|
||||
group_sync_read=False,
|
||||
)
|
||||
|
||||
def get_hand_calibration(self):
|
||||
homing_offset = [0] * len(self.hand_bus.motor_names)
|
||||
drive_mode = [0] * len(self.hand_bus.motor_names)
|
||||
|
||||
start_pos = [
|
||||
500,
|
||||
900,
|
||||
1000,
|
||||
0,
|
||||
100,
|
||||
250,
|
||||
750,
|
||||
100,
|
||||
400,
|
||||
150,
|
||||
100,
|
||||
120,
|
||||
980,
|
||||
100,
|
||||
950,
|
||||
750,
|
||||
]
|
||||
|
||||
end_pos = [
|
||||
500 - 250,
|
||||
900 - 300,
|
||||
1000 - 550,
|
||||
0 + 550,
|
||||
1000,
|
||||
250 + 700,
|
||||
750 - 700,
|
||||
1000,
|
||||
400 + 700,
|
||||
150 + 700,
|
||||
1000,
|
||||
120 + 700,
|
||||
980 - 700,
|
||||
1000,
|
||||
950 - 700,
|
||||
750 - 700,
|
||||
]
|
||||
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": start_pos,
|
||||
"end_pos": end_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.hand_bus.motor_names,
|
||||
}
|
||||
return calib_dict
|
||||
|
||||
def connect(self):
|
||||
self.arm_bus.connect()
|
||||
self.hand_bus.connect()
|
||||
|
||||
|
||||
ESCAPE_KEY_ID = 27
|
||||
|
||||
|
||||
def capture_and_display_processed_frames(
|
||||
frame_processor: Callable[[np.ndarray], np.ndarray],
|
||||
window_display_name: str,
|
||||
cap_device: int = 0,
|
||||
) -> None:
|
||||
"""
|
||||
Capture frames from the given input camera device, run them through
|
||||
the frame processor, and display the outputs in a window with the given name.
|
||||
|
||||
User should press Esc to exit.
|
||||
|
||||
Inputs:
|
||||
frame_processor: Callable[[np.ndarray], np.ndarray]
|
||||
Processes frames.
|
||||
Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte.
|
||||
window_display_name: str
|
||||
Name of the window used to display frames.
|
||||
cap_device: int
|
||||
Identifier for the camera to use to capture frames.
|
||||
"""
|
||||
cv2.namedWindow(window_display_name)
|
||||
capture = cv2.VideoCapture(cap_device)
|
||||
if not capture.isOpened():
|
||||
raise ValueError("Unable to open video capture.")
|
||||
|
||||
frame_count = 0
|
||||
has_frame, frame = capture.read()
|
||||
while has_frame:
|
||||
assert isinstance(frame, np.ndarray)
|
||||
|
||||
frame_count = frame_count + 1
|
||||
# mirror frame
|
||||
frame = np.ascontiguousarray(frame[:, ::-1, ::-1])
|
||||
|
||||
# process & show frame
|
||||
processed_frame = frame_processor(frame)
|
||||
cv2.imshow(window_display_name, processed_frame[:, :, ::-1])
|
||||
|
||||
has_frame, frame = capture.read()
|
||||
key = cv2.waitKey(1)
|
||||
if key == ESCAPE_KEY_ID:
|
||||
break
|
||||
|
||||
capture.release()
|
||||
|
||||
|
||||
def main():
|
||||
robot = HopeJuniorRobot()
|
||||
robot.connect()
|
||||
|
||||
# robot.hand_bus.calibration = None
|
||||
|
||||
# breakpoint()
|
||||
# print(robot.arm_bus.read("Present_Position"))
|
||||
robot.arm_bus.write("Torque_Enable", 1)
|
||||
robot.arm_bus.write("Acceleration", 20)
|
||||
robot.arm_bus.read("Acceleration")
|
||||
|
||||
calibration = robot.get_hand_calibration()
|
||||
robot.hand_bus.write("Goal_Position", calibration["start_pos"])
|
||||
# robot.hand_bus.write("Goal_Position", calibration["end_pos"][:4], robot.hand_bus.motor_names[:4])
|
||||
robot.hand_bus.set_calibration(calibration)
|
||||
lol = 1
|
||||
|
||||
# # print(motors_bus.write("Goal_Position", 500))
|
||||
# print(robot.hand_bus.read("Present_Position"))
|
||||
# # pos = hand_bus.read("Present_Position")
|
||||
# # hand_bus.write("Goal_Position", pos[0]+20, hand_bus.motor_names[0])
|
||||
# # hand_bus.write("Goal_Position", pos[i]+delta, hand_bus.motor_names[i])
|
||||
# robot.hand_bus.read("Acceleration")
|
||||
# robot.hand_bus.write("Acceleration", 10)
|
||||
|
||||
# sleep = 1
|
||||
# # robot.hand_bus.write(
|
||||
# # "Goal_Position", [glove.last_d['index_2']-1500,300,300], ["index_pinky_side", "index_flexor", "index_thumb_side"]
|
||||
# # )
|
||||
# #time.sleep(sleep)
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [100, 0, 0], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [200, 100, 600], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
|
||||
# breakpoint()
|
||||
|
||||
glove = HomonculusGlove()
|
||||
glove.run_calibration()
|
||||
# while True:
|
||||
# joint_names = ["index_1", "index_2"]
|
||||
# joint_values = glove.read(joint_names)
|
||||
# print(joint_values)
|
||||
|
||||
input()
|
||||
while True:
|
||||
joint_names = []
|
||||
joint_names += ["thumb_0", "thumb_2", "thumb_3"]
|
||||
joint_names += ["index_1", "index_2"]
|
||||
joint_names += ["middle_1", "middle_2"]
|
||||
joint_names += ["ring_1", "ring_2"]
|
||||
joint_names += ["pinky_1", "pinky_2"]
|
||||
joint_values = glove.read(joint_names)
|
||||
joint_values = joint_values.round().astype(int)
|
||||
joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}
|
||||
|
||||
motor_values = []
|
||||
motor_names = []
|
||||
motor_names += ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"]
|
||||
motor_values += [
|
||||
joint_dict["thumb_3"],
|
||||
joint_dict["thumb_0"],
|
||||
joint_dict["thumb_2"],
|
||||
joint_dict["thumb_2"],
|
||||
]
|
||||
motor_names += ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
motor_values += [joint_dict["index_2"], joint_dict["index_1"], joint_dict["index_1"]]
|
||||
motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
motor_values += [joint_dict["middle_2"], joint_dict["middle_1"], joint_dict["middle_1"]]
|
||||
motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
motor_values += [joint_dict["ring_2"], joint_dict["ring_1"], joint_dict["ring_1"]]
|
||||
motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
|
||||
motor_values += [joint_dict["pinky_2"], joint_dict["pinky_1"], joint_dict["pinky_1"]]
|
||||
|
||||
motor_values = np.array(motor_values)
|
||||
motor_values = np.clip(motor_values, 0, 100)
|
||||
|
||||
robot.hand_bus.write("Goal_Position", motor_values, motor_names)
|
||||
time.sleep(0.02)
|
||||
|
||||
while True:
|
||||
# print(glove.read()['index_2']-1500)
|
||||
glove_index_flexor = glove.read()["index_2"] - 1500
|
||||
glove_index_subflexor = glove.read()["index_1"] - 1500
|
||||
glove_index_side = glove.read()["index_0"] - 2100
|
||||
|
||||
vals = [glove_index_flexor, 1000 - (glove_index_subflexor), glove_index_subflexor]
|
||||
|
||||
keys = ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
|
||||
glove_middle_flexor = glove.read()["middle_2"] - 1500
|
||||
glove_middle_subflexor = 1000 - (glove.read()["middle_1"] - 1700)
|
||||
vals += [glove_middle_flexor, glove_middle_subflexor, glove_middle_subflexor - 200]
|
||||
keys += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
|
||||
glove_ring_flexor = glove.read()["ring_2"] - 1300
|
||||
print(glove_ring_flexor)
|
||||
glove_ring_subflexor = glove.read()["ring_1"] - 1100
|
||||
|
||||
vals += [glove_ring_flexor, 1000 - glove_ring_subflexor, glove_ring_subflexor]
|
||||
keys += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
|
||||
glove_pinky_flexor = glove.read()["pinky_2"] - 1500
|
||||
glove_pinky_subflexor = glove.read()["pinky_1"] - 1300
|
||||
vals += [300 + glove_pinky_flexor, max(1000 - glove_pinky_subflexor - 100, 0), glove_pinky_subflexor]
|
||||
keys += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
|
||||
robot.hand_bus.write("Goal_Position", vals, keys)
|
||||
time.sleep(0.1)
|
||||
|
||||
time.sleep(3)
|
||||
|
||||
def move_arm(loop=10):
|
||||
sleep = 1
|
||||
for i in range(loop):
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1195])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 2195])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1457, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 2357, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 974, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 2674, 1957, 1695])
|
||||
time.sleep(sleep + 2)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 1632, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 1369, 1632, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 1330, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [2381, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1681, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
|
||||
def move_hand(loop=10):
|
||||
sleep = 0.5
|
||||
for i in range(loop):
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[500, 1000, 0, 1000],
|
||||
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [100, 1000, 150], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [200, 100, 700], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[500, 1000 - 250, 0 + 300, 1000 - 200],
|
||||
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[100 + 450, 100 + 400, 100 + 400],
|
||||
["index_flexor", "index_pinky_side", "index_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[100 + 350, 1000 - 450, 150 + 450],
|
||||
["middle_flexor", "middle_pinky_side", "middle_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[200 + 650, 200 + 350, 0 + 350],
|
||||
["ring_flexor", "ring_pinky_side", "ring_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[200 + 450, 100 + 400, 700 - 400],
|
||||
["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
|
||||
move_hand(3)
|
||||
|
||||
move_arm(1)
|
||||
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
with ThreadPoolExecutor() as executor:
|
||||
executor.submit(move_arm)
|
||||
executor.submit(move_hand)
|
||||
|
||||
# initial position
|
||||
for i in range(3):
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [500, 1000, 0, 1000, 100, 950, 100, 100, 1000, 150, 200, 200, 0, 200, 100, 700]
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
# for i in range(3):
|
||||
# robot.hand_bus.write("Goal_Position", [500, 1000-150, 0+250, 1000-150,
|
||||
# 100+300, 950-250, 100+250,
|
||||
# 100+200, 1000-300, 150+300,
|
||||
# 200+500, 200+200, 0+200,
|
||||
# 200+300, 100+200, 700-200])
|
||||
# time.sleep(1)
|
||||
|
||||
# camera = 0
|
||||
# score_threshold = 0.95
|
||||
# iou_threshold = 0.3
|
||||
|
||||
# app = MediaPipeHandApp(MediaPipeHand.from_pretrained(), score_threshold, iou_threshold)
|
||||
|
||||
# def frame_processor(frame: np.ndarray) -> np.ndarray:
|
||||
# # Input Prep
|
||||
# NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs(frame)
|
||||
|
||||
# # Run Bounding Box & Keypoint Detector
|
||||
# batched_selected_boxes, batched_selected_keypoints = app._run_box_detector(NCHW_fp32_torch_frames)
|
||||
|
||||
# # The region of interest ( bounding box of 4 (x, y) corners).
|
||||
# # list[torch.Tensor(shape=[Num Boxes, 4, 2])],
|
||||
# # where 2 == (x, y)
|
||||
# #
|
||||
# # A list element will be None if there is no selected ROI.
|
||||
# batched_roi_4corners = app._compute_object_roi(batched_selected_boxes, batched_selected_keypoints)
|
||||
|
||||
# # selected landmarks for the ROI (if any)
|
||||
# # list[torch.Tensor(shape=[Num Selected Landmarks, K, 3])],
|
||||
# # where K == number of landmark keypoints, 3 == (x, y, confidence)
|
||||
# #
|
||||
# # A list element will be None if there is no ROI.
|
||||
# landmarks_out = app._run_landmark_detector(NHWC_int_numpy_frames, batched_roi_4corners)
|
||||
|
||||
# app._draw_predictions(
|
||||
# NHWC_int_numpy_frames,
|
||||
# batched_selected_boxes,
|
||||
# batched_selected_keypoints,
|
||||
# batched_roi_4corners,
|
||||
# *landmarks_out,
|
||||
# )
|
||||
|
||||
# return NHWC_int_numpy_frames[0]
|
||||
|
||||
# capture_and_display_processed_frames(frame_processor, "QAIHM Mediapipe Hand Demo", camera)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,133 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# ********* Ping Example *********
|
||||
#
|
||||
#
|
||||
# Available SCServo model on this example : All models using Protocol SCS
|
||||
# This example is tested with a SCServo(STS/SMS/SCS), and an URT
|
||||
# Be sure that SCServo(STS/SMS/SCS) properties are already set as %% ID : 1 / Baudnum : 6 (Baudrate : 1000000)
|
||||
#
|
||||
|
||||
import os
|
||||
|
||||
if os.name == "nt":
|
||||
import msvcrt
|
||||
|
||||
def getch():
|
||||
return msvcrt.getch().decode()
|
||||
else:
|
||||
import sys
|
||||
import termios
|
||||
import tty
|
||||
|
||||
fd = sys.stdin.fileno()
|
||||
old_settings = termios.tcgetattr(fd)
|
||||
|
||||
def getch():
|
||||
try:
|
||||
tty.setraw(sys.stdin.fileno())
|
||||
ch = sys.stdin.read(1)
|
||||
finally:
|
||||
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
|
||||
return ch
|
||||
|
||||
|
||||
from scservo_sdk import * # Uses SCServo SDK library
|
||||
|
||||
# Default setting
|
||||
SCS_ID = 1 # SCServo ID : 1
|
||||
BAUDRATE = 1000000 # SCServo default baudrate : 1000000
|
||||
DEVICENAME = "/dev/tty.usbserial-2130" # Check which port is being used on your controller
|
||||
# ex) Windows: "COM1" Linux: "/dev/ttyUSB0" Mac: "/dev/tty.usbserial-*"
|
||||
|
||||
protocol_end = 1 # SCServo bit end(STS/SMS=0, SCS=1)
|
||||
|
||||
# Initialize PortHandler instance
|
||||
# Set the port path
|
||||
# Get methods and members of PortHandlerLinux or PortHandlerWindows
|
||||
portHandler = PortHandler(DEVICENAME)
|
||||
|
||||
# Initialize PacketHandler instance
|
||||
# Get methods and members of Protocol
|
||||
packetHandler = PacketHandler(protocol_end)
|
||||
|
||||
# Open port
|
||||
if portHandler.openPort():
|
||||
print("Succeeded to open the port")
|
||||
else:
|
||||
print("Failed to open the port")
|
||||
print("Press any key to terminate...")
|
||||
getch()
|
||||
quit()
|
||||
|
||||
|
||||
# Set port baudrate
|
||||
if portHandler.setBaudRate(BAUDRATE):
|
||||
print("Succeeded to change the baudrate")
|
||||
else:
|
||||
print("Failed to change the baudrate")
|
||||
print("Press any key to terminate...")
|
||||
getch()
|
||||
quit()
|
||||
|
||||
# Try to ping the SCServo
|
||||
# Get SCServo model number
|
||||
scs_model_number, scs_comm_result, scs_error = packetHandler.ping(portHandler, SCS_ID)
|
||||
if scs_comm_result != COMM_SUCCESS:
|
||||
print("%s" % packetHandler.getTxRxResult(scs_comm_result))
|
||||
elif scs_error != 0:
|
||||
print("%s" % packetHandler.getRxPacketError(scs_error))
|
||||
else:
|
||||
print("[ID:%03d] ping Succeeded. SCServo model number : %d" % (SCS_ID, scs_model_number))
|
||||
|
||||
|
||||
ADDR_SCS_PRESENT_POSITION = 56
|
||||
scs_present_position, scs_comm_result, scs_error = packetHandler.read2ByteTxRx(
|
||||
portHandler, SCS_ID, ADDR_SCS_PRESENT_POSITION
|
||||
)
|
||||
if scs_comm_result != COMM_SUCCESS:
|
||||
print(packetHandler.getTxRxResult(scs_comm_result))
|
||||
elif scs_error != 0:
|
||||
print(packetHandler.getRxPacketError(scs_error))
|
||||
|
||||
|
||||
scs_present_position = SCS_LOWORD(scs_present_position)
|
||||
# scs_present_speed = SCS_HIWORD(scs_present_position_speed)
|
||||
# print("[ID:%03d] PresPos:%03d PresSpd:%03d" % (SCS_ID, scs_present_position, SCS_TOHOST(scs_present_speed, 15)))
|
||||
print("[ID:%03d] PresPos:%03d" % (SCS_ID, scs_present_position))
|
||||
|
||||
groupSyncRead = GroupSyncRead(portHandler, packetHandler, ADDR_SCS_PRESENT_POSITION, 2)
|
||||
|
||||
scs_addparam_result = groupSyncRead.addParam(SCS_ID)
|
||||
if not scs_addparam_result:
|
||||
print("[ID:%03d] groupSyncRead addparam failed" % SCS_ID)
|
||||
quit()
|
||||
|
||||
# Syncread present position
|
||||
scs_comm_result = groupSyncRead.txRxPacket()
|
||||
if scs_comm_result != COMM_SUCCESS:
|
||||
print("%s" % packetHandler.getTxRxResult(scs_comm_result))
|
||||
|
||||
# Check if groupsyncread data of SCServo#1 is available
|
||||
scs_getdata_result = groupSyncRead.isAvailable(SCS_ID, ADDR_SCS_PRESENT_POSITION, 2)
|
||||
if scs_getdata_result:
|
||||
# Get SCServo#1 present position value
|
||||
scs_present_position = groupSyncRead.getData(SCS_ID, ADDR_SCS_PRESENT_POSITION, 2)
|
||||
else:
|
||||
scs_present_position = 0
|
||||
print("[ID:%03d] groupSyncRead getdata failed" % SCS_ID)
|
||||
|
||||
# # Check if groupsyncread data of SCServo#2 is available
|
||||
# scs_getdata_result = groupSyncRead.isAvailable(SCS2_ID, ADDR_SCS_PRESENT_POSITION, 2)
|
||||
# if scs_getdata_result == True:
|
||||
# # Get SCServo#2 present position value
|
||||
# scs2_present_position_speed = groupSyncRead.getData(SCS2_ID, ADDR_SCS_PRESENT_POSITION, 2)
|
||||
# else:
|
||||
# print("[ID:%03d] groupSyncRead getdata failed" % SCS2_ID)
|
||||
|
||||
scs_present_position = SCS_LOWORD(scs_present_position)
|
||||
print("[ID:%03d] PresPos:%03d" % (SCS_ID, scs_present_position))
|
||||
|
||||
|
||||
# Close port
|
||||
portHandler.closePort()
|
||||
@@ -1,45 +0,0 @@
|
||||
import serial
|
||||
|
||||
|
||||
class HomonculusGlove:
|
||||
def __init__(self):
|
||||
self.serial_port = "/dev/tty.usbmodem1101"
|
||||
self.baud_rate = 115200
|
||||
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
|
||||
|
||||
def read(self):
|
||||
while True:
|
||||
if self.serial.in_waiting > 0:
|
||||
vals = self.serial.readline().decode("utf-8").strip()
|
||||
vals = vals.split(" ")
|
||||
vals = [int(val) for val in vals]
|
||||
|
||||
d = {
|
||||
"thumb_0": vals[0],
|
||||
"thumb_1": vals[1],
|
||||
"thumb_2": vals[2],
|
||||
"thumb_3": vals[3],
|
||||
"index_0": vals[4],
|
||||
"index_1": vals[5],
|
||||
"index_2": vals[6],
|
||||
"middle_0": vals[7],
|
||||
"middle_1": vals[8],
|
||||
"middle_2": vals[9],
|
||||
"ring_0": vals[10],
|
||||
"ring_1": vals[11],
|
||||
"ring_2": vals[12],
|
||||
"pinky_0": vals[13],
|
||||
"pinky_1": vals[14],
|
||||
"pinky_2": vals[15],
|
||||
}
|
||||
return d
|
||||
|
||||
# if ser.in_waiting > 0:
|
||||
# line = ser.readline().decode('utf-8').strip()
|
||||
# print(line)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
glove = HomonculusGlove()
|
||||
d = glove.read()
|
||||
lol = 1
|
||||
@@ -1,693 +0,0 @@
|
||||
import threading
|
||||
import time
|
||||
from typing import Callable
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
# from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp
|
||||
# from qai_hub_models.models.mediapipe_hand.model import (
|
||||
# MediaPipeHand,
|
||||
# )
|
||||
# from qai_hub_models.utils.image_processing import (
|
||||
# app_to_net_image_inputs,
|
||||
# )
|
||||
from lerobot.common.robot_devices.motors.feetech import (
|
||||
CalibrationMode,
|
||||
FeetechMotorsBus,
|
||||
)
|
||||
|
||||
LOWER_BOUND_LINEAR = -100
|
||||
UPPER_BOUND_LINEAR = 200
|
||||
|
||||
import serial
|
||||
|
||||
|
||||
class HomonculusGlove:
|
||||
def __init__(self):
|
||||
self.serial_port = "/dev/tty.usbmodem1401"
|
||||
self.baud_rate = 115200
|
||||
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
|
||||
self.thread = threading.Thread(target=self.async_read)
|
||||
self.thread.start()
|
||||
self.last_d = {
|
||||
"thumb_0": 100,
|
||||
"thumb_1": 100,
|
||||
"thumb_2": 100,
|
||||
"thumb_3": 100,
|
||||
"index_0": 100,
|
||||
"index_1": 100,
|
||||
"index_2": 100,
|
||||
"middle_0": 100,
|
||||
"middle_1": 100,
|
||||
"middle_2": 100,
|
||||
"ring_0": 100,
|
||||
"ring_1": 100,
|
||||
"ring_2": 100,
|
||||
"pinky_0": 100,
|
||||
"pinky_1": 100,
|
||||
"pinky_2": 100,
|
||||
"battery_voltage": 100,
|
||||
}
|
||||
self.calibration = None
|
||||
|
||||
@property
|
||||
def joint_names(self):
|
||||
return list(self.last_d.keys())
|
||||
|
||||
def read(self, motor_names: list[str] | None = None):
|
||||
if motor_names is None:
|
||||
motor_names = self.joint_names
|
||||
|
||||
values = np.array([self.last_d[k] for k in motor_names])
|
||||
|
||||
print(motor_names)
|
||||
print(values)
|
||||
|
||||
if self.calibration is not None:
|
||||
values = self.apply_calibration(values, motor_names)
|
||||
print(values)
|
||||
return values
|
||||
|
||||
def async_read(self):
|
||||
while True:
|
||||
if self.serial.in_waiting > 0:
|
||||
self.serial.flush()
|
||||
vals = self.serial.readline().decode("utf-8").strip()
|
||||
vals = vals.split(" ")
|
||||
if len(vals) != 17:
|
||||
continue
|
||||
vals = [int(val) for val in vals]
|
||||
|
||||
d = {
|
||||
"thumb_0": vals[0],
|
||||
"thumb_1": vals[1],
|
||||
"thumb_2": vals[2],
|
||||
"thumb_3": vals[3],
|
||||
"index_0": vals[4],
|
||||
"index_1": vals[5],
|
||||
"index_2": vals[6],
|
||||
"middle_0": vals[7],
|
||||
"middle_1": vals[8],
|
||||
"middle_2": vals[9],
|
||||
"ring_0": vals[10],
|
||||
"ring_1": vals[11],
|
||||
"ring_2": vals[12],
|
||||
"pinky_0": vals[13],
|
||||
"pinky_1": vals[14],
|
||||
"pinky_2": vals[15],
|
||||
"battery_voltage": vals[16],
|
||||
}
|
||||
self.last_d = d
|
||||
# print(d.values())
|
||||
|
||||
def run_calibration(self):
|
||||
print("\nMove arm to open position")
|
||||
input("Press Enter to continue...")
|
||||
open_pos_list = []
|
||||
for _ in range(300):
|
||||
open_pos = self.read()
|
||||
open_pos_list.append(open_pos)
|
||||
time.sleep(0.01)
|
||||
open_pos = np.array(open_pos_list)
|
||||
max_open_pos = open_pos.max(axis=0)
|
||||
min_open_pos = open_pos.min(axis=0)
|
||||
|
||||
print(f"{max_open_pos=}")
|
||||
print(f"{min_open_pos=}")
|
||||
|
||||
print("\nMove arm to closed position")
|
||||
input("Press Enter to continue...")
|
||||
closed_pos_list = []
|
||||
for _ in range(300):
|
||||
closed_pos = self.read()
|
||||
closed_pos_list.append(closed_pos)
|
||||
time.sleep(0.01)
|
||||
closed_pos = np.array(closed_pos_list)
|
||||
max_closed_pos = closed_pos.max(axis=0)
|
||||
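# Assumed intent: readings below 1000 are treated as glitches and pushed to a large
# value so they don't become the minimum used as the closed position.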
closed_pos[closed_pos < 1000] = 60000
|
||||
min_closed_pos = closed_pos.min(axis=0)
|
||||
|
||||
print(f"{max_closed_pos=}")
|
||||
print(f"{min_closed_pos=}")
|
||||
|
||||
open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
|
||||
closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)
|
||||
|
||||
# INVERSION: swap the open/closed endpoints for joints whose raw reading moves in the opposite direction
|
||||
for i, jname in enumerate(self.joint_names):
|
||||
if jname in [
|
||||
"thumb_0",
|
||||
"thumb_3",
|
||||
"index_2",
|
||||
"middle_2",
|
||||
"ring_2",
|
||||
"pinky_0",
|
||||
"pinky_2",
|
||||
"index_0",
|
||||
]:
|
||||
tmp_pos = open_pos[i]
|
||||
open_pos[i] = closed_pos[i]
|
||||
closed_pos[i] = tmp_pos
|
||||
|
||||
print()
|
||||
print(f"{open_pos=}")
|
||||
print(f"{closed_pos=}")
|
||||
|
||||
homing_offset = [0] * len(self.joint_names)
|
||||
drive_mode = [0] * len(self.joint_names)
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": open_pos,
|
||||
"end_pos": closed_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.joint_names,
|
||||
}
|
||||
# return calib_dict
|
||||
self.set_calibration(calib_dict)
|
||||
|
||||
def set_calibration(self, calibration: dict[str, list]):
|
||||
self.calibration = calibration
|
||||
|
||||
def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
|
||||
"""Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
|
||||
a "zero position" at 0 degree.
|
||||
|
||||
Note: We say "nominal degree range" since the motors can take values outside this range. For instance, 190 degrees, if the motor
|
||||
rotates more than half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.
|
||||
|
||||
Joint values are originally in [0, 2**32[ (unsigned int32). Each motor is expected to complete a full rotation
|
||||
when given a goal position that is + or - its resolution. For instance, Feetech STS3215 motors have a resolution of 4096, and
|
||||
at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
|
||||
or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between each motor.
|
||||
To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
|
||||
in the centered nominal degree range ]-180, 180[.
|
||||
"""
|
||||
if motor_names is None:
|
||||
motor_names = self.joint_names
|
||||
|
||||
# Convert from unsigned int32 original range [0, 2**32] to signed float32 range
|
||||
values = values.astype(np.float32)
|
||||
|
||||
for i, name in enumerate(motor_names):
|
||||
calib_idx = self.calibration["motor_names"].index(name)
|
||||
calib_mode = self.calibration["calib_mode"][calib_idx]
|
||||
|
||||
if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
|
||||
start_pos = self.calibration["start_pos"][calib_idx]
|
||||
end_pos = self.calibration["end_pos"][calib_idx]
|
||||
|
||||
# Rescale the present position to a nominal range [0, 100] %,
|
||||
# useful for joints with linear motions like Aloha gripper
|
||||
values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
|
||||
|
||||
if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
|
||||
if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
|
||||
values[i] = end_pos
|
||||
else:
|
||||
msg = (
|
||||
f"Wrong motor position range detected for {name}. "
|
||||
f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
|
||||
f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
|
||||
f"but present value is {values[i]} %. "
|
||||
"This might be due to a cable connection issue creating an artificial jump in motor values. "
|
||||
"You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
|
||||
)
|
||||
print(msg)
|
||||
# raise JointOutOfRangeError(msg)
|
||||
|
||||
return values
|
||||
|
||||
# def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
|
||||
# """Inverse of `apply_calibration`."""
|
||||
# if motor_names is None:
|
||||
# motor_names = self.motor_names
|
||||
|
||||
# for i, name in enumerate(motor_names):
|
||||
# calib_idx = self.calibration["motor_names"].index(name)
|
||||
# calib_mode = self.calibration["calib_mode"][calib_idx]
|
||||
|
||||
# if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
|
||||
# start_pos = self.calibration["start_pos"][calib_idx]
|
||||
# end_pos = self.calibration["end_pos"][calib_idx]
|
||||
|
||||
# # Convert from nominal linear range of [0, 100] % to
|
||||
# # actual motor range of values which can be arbitrary.
|
||||
# values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
|
||||
|
||||
# values = np.round(values).astype(np.int32)
|
||||
# return values
|
||||
|
||||
|
||||
class HopeJuniorRobot:
|
||||
def __init__(self):
|
||||
self.arm_bus = FeetechMotorsBus(
|
||||
port="/dev/tty.usbmodem58760429571",
|
||||
motors={
|
||||
# "motor1": (2, "sts3250"),
|
||||
# "motor2": (1, "scs0009"),
|
||||
"shoulder_pitch": [1, "sts3250"],
|
||||
"shoulder_yaw": [2, "sts3215"], # TODO: sts3250
|
||||
"shoulder_roll": [3, "sts3215"], # TODO: sts3250
|
||||
"elbow_flex": [4, "sts3250"],
|
||||
"wrist_roll": [5, "sts3215"],
|
||||
"wrist_yaw": [6, "sts3215"],
|
||||
"wrist_pitch": [7, "sts3215"],
|
||||
},
|
||||
protocol_version=0,
|
||||
)
|
||||
self.hand_bus = FeetechMotorsBus(
|
||||
port="/dev/tty.usbmodem585A0077581",
|
||||
motors={
|
||||
"thumb_basel_rotation": [30, "scs0009"],
|
||||
"thumb_flexor": [27, "scs0009"],
|
||||
"thumb_pinky_side": [26, "scs0009"],
|
||||
"thumb_thumb_side": [28, "scs0009"],
|
||||
"index_flexor": [25, "scs0009"],
|
||||
"index_pinky_side": [31, "scs0009"],
|
||||
"index_thumb_side": [32, "scs0009"],
|
||||
"middle_flexor": [24, "scs0009"],
|
||||
"middle_pinky_side": [33, "scs0009"],
|
||||
"middle_thumb_side": [34, "scs0009"],
|
||||
"ring_flexor": [21, "scs0009"],
|
||||
"ring_pinky_side": [36, "scs0009"],
|
||||
"ring_thumb_side": [35, "scs0009"],
|
||||
"pinky_flexor": [23, "scs0009"],
|
||||
"pinky_pinky_side": [38, "scs0009"],
|
||||
"pinky_thumb_side": [37, "scs0009"],
|
||||
},
|
||||
protocol_version=1,
|
||||
group_sync_read=False,
|
||||
)
|
||||
|
||||
def get_hand_calibration(self):
|
||||
homing_offset = [0] * len(self.hand_bus.motor_names)
|
||||
drive_mode = [0] * len(self.hand_bus.motor_names)
|
||||
|
||||
start_pos = [
|
||||
500,
|
||||
900,
|
||||
1000,
|
||||
0,
|
||||
100,
|
||||
250,
|
||||
750,
|
||||
100,
|
||||
400,
|
||||
150,
|
||||
100,
|
||||
120,
|
||||
980,
|
||||
100,
|
||||
950,
|
||||
750,
|
||||
]
|
||||
|
||||
end_pos = [
|
||||
500 - 250,
|
||||
900 - 300,
|
||||
1000 - 550,
|
||||
0 + 550,
|
||||
1000,
|
||||
start_pos[5] + 500,
|
||||
start_pos[6] - 500,
|
||||
1000,
|
||||
400 + 700,
|
||||
150 + 700,
|
||||
1000,
|
||||
120 + 700,
|
||||
980 - 700,
|
||||
1000,
|
||||
950 - 700,
|
||||
750 - 700,
|
||||
]
|
||||
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": start_pos,
|
||||
"end_pos": end_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.hand_bus.motor_names,
|
||||
}
|
||||
return calib_dict
|
||||
|
||||
def connect(self):
|
||||
self.arm_bus.connect()
|
||||
self.hand_bus.connect()
|
||||
|
||||
|
||||
ESCAPE_KEY_ID = 27
|
||||
|
||||
|
||||
def capture_and_display_processed_frames(
|
||||
frame_processor: Callable[[np.ndarray], np.ndarray],
|
||||
window_display_name: str,
|
||||
cap_device: int = 0,
|
||||
) -> None:
|
||||
"""
|
||||
Capture frames from the given input camera device, run them through
|
||||
the frame processor, and display the outputs in a window with the given name.
|
||||
|
||||
User should press Esc to exit.
|
||||
|
||||
Inputs:
|
||||
frame_processor: Callable[[np.ndarray], np.ndarray]
|
||||
Processes frames.
|
||||
Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte.
|
||||
window_display_name: str
|
||||
Name of the window used to display frames.
|
||||
cap_device: int
|
||||
Identifier for the camera to use to capture frames.
|
||||
"""
|
||||
cv2.namedWindow(window_display_name)
|
||||
capture = cv2.VideoCapture(cap_device)
|
||||
if not capture.isOpened():
|
||||
raise ValueError("Unable to open video capture.")
|
||||
|
||||
frame_count = 0
|
||||
has_frame, frame = capture.read()
|
||||
while has_frame:
|
||||
assert isinstance(frame, np.ndarray)
|
||||
|
||||
frame_count = frame_count + 1
|
||||
# mirror frame
|
||||
frame = np.ascontiguousarray(frame[:, ::-1, ::-1])
|
||||
|
||||
# process & show frame
|
||||
processed_frame = frame_processor(frame)
|
||||
cv2.imshow(window_display_name, processed_frame[:, :, ::-1])
|
||||
|
||||
has_frame, frame = capture.read()
|
||||
key = cv2.waitKey(1)
|
||||
if key == ESCAPE_KEY_ID:
|
||||
break
|
||||
|
||||
capture.release()
|
||||
|
||||
|
||||
def main():
|
||||
robot = HopeJuniorRobot()
|
||||
robot.connect()
|
||||
|
||||
# robot.hand_bus.calibration = None
|
||||
|
||||
# breakpoint()
|
||||
# print(robot.arm_bus.read("Present_Position"))
|
||||
robot.arm_bus.write("Torque_Enable", 1)
|
||||
robot.arm_bus.write("Acceleration", 20)
|
||||
robot.arm_bus.read("Acceleration")
|
||||
|
||||
calibration = robot.get_hand_calibration()
|
||||
robot.hand_bus.write("Goal_Position", calibration["start_pos"])
|
||||
# robot.hand_bus.write("Goal_Position", calibration["end_pos"][:4], robot.hand_bus.motor_names[:4])
|
||||
robot.hand_bus.set_calibration(calibration)
|
||||
lol = 1
|
||||
|
||||
# # print(motors_bus.write("Goal_Position", 500))
|
||||
# print(robot.hand_bus.read("Present_Position"))
|
||||
# # pos = hand_bus.read("Present_Position")
|
||||
# # hand_bus.write("Goal_Position", pos[0]+20, hand_bus.motor_names[0])
|
||||
# # hand_bus.write("Goal_Position", pos[i]+delta, hand_bus.motor_names[i])
|
||||
# robot.hand_bus.read("Acceleration")
|
||||
# robot.hand_bus.write("Acceleration", 10)
|
||||
|
||||
# sleep = 1
|
||||
# # robot.hand_bus.write(
|
||||
# # "Goal_Position", [glove.last_d['index_2']-1500,300,300], ["index_pinky_side", "index_flexor", "index_thumb_side"]
|
||||
# # )
|
||||
# #time.sleep(sleep)
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [100, 0, 0], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [200, 100, 600], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
|
||||
# breakpoint()
|
||||
|
||||
glove = HomonculusGlove()
|
||||
glove.run_calibration()
|
||||
# while True:
|
||||
# joint_names = ["index_1", "index_2"]
|
||||
# joint_values = glove.read(joint_names)
|
||||
# print(joint_values)
|
||||
|
||||
input()
|
||||
while True:
|
||||
joint_names = []
|
||||
# joint_names += ["thumb_0", "thumb_2", "thumb_3"]
|
||||
joint_names += ["index_0", "index_1"]
|
||||
# joint_names += ["middle_1", "middle_2"]
|
||||
# joint_names += ["ring_1", "ring_2"]
|
||||
# joint_names += ["pinky_0", "pinky_2"]
|
||||
joint_values = glove.read(joint_names)
|
||||
joint_values = joint_values.round().astype(int)
|
||||
joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}
|
||||
|
||||
motor_values = []
|
||||
motor_names = []
|
||||
# motor_names += ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"]
|
||||
# motor_values += [joint_dict["thumb_3"], joint_dict["thumb_0"], joint_dict["thumb_2"], joint_dict["thumb_2"]]
|
||||
motor_names += ["index_pinky_side", "index_thumb_side"]
|
||||
# if joint_dict["index_0"] -2100 > 0:
|
||||
splayamount = 0.5
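# Descriptive note (assumed intent): blend spread and flex for the two index servos.
# index_0 (spread) pushes the pinky-side and thumb-side servos in opposite directions,
# while index_1 (flex) drives both equally; splayamount weights spread vs. flex.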
|
||||
motor_values += [
|
||||
(100 - joint_dict["index_0"]) * splayamount + joint_dict["index_1"] * (1 - splayamount),
|
||||
(joint_dict["index_0"]) * splayamount + joint_dict["index_1"] * (1 - splayamount),
|
||||
]
|
||||
# else:
|
||||
# motor_values += [100-joint_dict["index_0"], joint_dict["index_0"]]
|
||||
|
||||
# motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
# motor_values += [joint_dict["middle_2"], joint_dict["middle_1"], joint_dict["middle_1"]]
|
||||
# motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
# motor_values += [joint_dict["ring_2"], joint_dict["ring_1"], joint_dict["ring_1"]]
|
||||
# motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
|
||||
# motor_values += [joint_dict["pinky_2"], joint_dict["pinky_0"], joint_dict["pinky_0"]]
|
||||
|
||||
motor_values = np.array(motor_values)
|
||||
motor_values = np.clip(motor_values, 0, 100)
|
||||
|
||||
robot.hand_bus.write("Goal_Position", motor_values, motor_names)
|
||||
time.sleep(0.02)
|
||||
|
||||
while True:
|
||||
# print(glove.read()['index_2']-1500)
|
||||
glove_index_flexor = glove.read()["index_2"] - 1500
|
||||
glove_index_subflexor = glove.read()["index_1"] - 1500
|
||||
glove_index_side = glove.read()["index_0"] - 2100
|
||||
|
||||
vals = [glove_index_flexor, 1000 - (glove_index_subflexor), glove_index_subflexor]
|
||||
|
||||
keys = ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
|
||||
glove_middle_flexor = glove.read()["middle_2"] - 1500
|
||||
glove_middle_subflexor = 1000 - (glove.read()["middle_1"] - 1700)
|
||||
vals += [glove_middle_flexor, glove_middle_subflexor, glove_middle_subflexor - 200]
|
||||
keys += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
|
||||
glove_ring_flexor = glove.read()["ring_2"] - 1300
|
||||
print(glove_ring_flexor)
|
||||
glove_ring_subflexor = glove.read()["ring_1"] - 1100
|
||||
|
||||
vals += [glove_ring_flexor, 1000 - glove_ring_subflexor, glove_ring_subflexor]
|
||||
keys += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
|
||||
glove_pinky_flexor = glove.read()["pinky_2"] - 1500
|
||||
glove_pinky_subflexor = glove.read()["pinky_1"] - 1300
|
||||
vals += [300 + glove_pinky_flexor, max(1000 - glove_pinky_subflexor - 100, 0), glove_pinky_subflexor]
|
||||
keys += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
|
||||
robot.hand_bus.write("Goal_Position", vals, keys)
|
||||
time.sleep(0.1)
|
||||
|
||||
time.sleep(3)
|
||||
|
||||
def move_arm(loop=10):
|
||||
sleep = 1
|
||||
for i in range(loop):
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1195])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 2195])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1457, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 2357, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 974, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 2674, 1957, 1695])
|
||||
time.sleep(sleep + 2)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 1632, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 1369, 1632, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 1330, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [2381, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1681, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
|
||||
def move_hand(loop=10):
|
||||
sleep = 0.5
|
||||
for i in range(loop):
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[500, 1000, 0, 1000],
|
||||
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [100, 1000, 150], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [200, 100, 700], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[500, 1000 - 250, 0 + 300, 1000 - 200],
|
||||
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[100 + 450, 100 + 400, 100 + 400],
|
||||
["index_flexor", "index_pinky_side", "index_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[100 + 350, 1000 - 450, 150 + 450],
|
||||
["middle_flexor", "middle_pinky_side", "middle_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[200 + 650, 200 + 350, 0 + 350],
|
||||
["ring_flexor", "ring_pinky_side", "ring_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[200 + 450, 100 + 400, 700 - 400],
|
||||
["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
|
||||
move_hand(3)
|
||||
|
||||
move_arm(1)
|
||||
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
with ThreadPoolExecutor() as executor:
|
||||
executor.submit(move_arm)
|
||||
executor.submit(move_hand)
|
||||
|
||||
# initial position
|
||||
for i in range(3):
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [500, 1000, 0, 1000, 100, 950, 100, 100, 1000, 150, 200, 200, 0, 200, 100, 700]
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
# for i in range(3):
|
||||
# robot.hand_bus.write("Goal_Position", [500, 1000-150, 0+250, 1000-150,
|
||||
# 100+300, 950-250, 100+250,
|
||||
# 100+200, 1000-300, 150+300,
|
||||
# 200+500, 200+200, 0+200,
|
||||
# 200+300, 100+200, 700-200])
|
||||
# time.sleep(1)
|
||||
|
||||
# camera = 0
|
||||
# score_threshold = 0.95
|
||||
# iou_threshold = 0.3
|
||||
|
||||
# app = MediaPipeHandApp(MediaPipeHand.from_pretrained(), score_threshold, iou_threshold)
|
||||
|
||||
# def frame_processor(frame: np.ndarray) -> np.ndarray:
|
||||
# # Input Prep
|
||||
# NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs(frame)
|
||||
|
||||
# # Run Bounding Box & Keypoint Detector
|
||||
# batched_selected_boxes, batched_selected_keypoints = app._run_box_detector(NCHW_fp32_torch_frames)
|
||||
|
||||
# # The region of interest ( bounding box of 4 (x, y) corners).
|
||||
# # list[torch.Tensor(shape=[Num Boxes, 4, 2])],
|
||||
# # where 2 == (x, y)
|
||||
# #
|
||||
# # A list element will be None if there is no selected ROI.
|
||||
# batched_roi_4corners = app._compute_object_roi(batched_selected_boxes, batched_selected_keypoints)
|
||||
|
||||
# # selected landmarks for the ROI (if any)
|
||||
# # list[torch.Tensor(shape=[Num Selected Landmarks, K, 3])],
|
||||
# # where K == number of landmark keypoints, 3 == (x, y, confidence)
|
||||
# #
|
||||
# # A list element will be None if there is no ROI.
|
||||
# landmarks_out = app._run_landmark_detector(NHWC_int_numpy_frames, batched_roi_4corners)
|
||||
|
||||
# app._draw_predictions(
|
||||
# NHWC_int_numpy_frames,
|
||||
# batched_selected_boxes,
|
||||
# batched_selected_keypoints,
|
||||
# batched_roi_4corners,
|
||||
# *landmarks_out,
|
||||
# )
|
||||
|
||||
# return NHWC_int_numpy_frames[0]
|
||||
|
||||
# capture_and_display_processed_frames(frame_processor, "QAIHM Mediapipe Hand Demo", camera)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,97 +0,0 @@
|
||||
# Faulty servo
|
||||
Model = [777]
|
||||
ID = [7]
|
||||
Baud_Rate = [0]
|
||||
Return_Delay = [0]
|
||||
Response_Status_Level = [1]
|
||||
Min_Angle_Limit = [1140]
|
||||
Max_Angle_Limit = [2750]
|
||||
Max_Temperature_Limit = [70]
|
||||
Max_Voltage_Limit = [140]
|
||||
Min_Voltage_Limit = [40]
|
||||
Max_Torque_Limit = [1000]
|
||||
Phase = [12]
|
||||
Unloading_Condition = [44]
|
||||
LED_Alarm_Condition = [47]
|
||||
P_Coefficient = [32]
|
||||
D_Coefficient = [32]
|
||||
I_Coefficient = [0]
|
||||
Minimum_Startup_Force = [16]
|
||||
CW_Dead_Zone = [1]
|
||||
CCW_Dead_Zone = [1]
|
||||
Protection_Current = [310]
|
||||
Angular_Resolution = [1]
|
||||
Offset = [1047]
|
||||
Mode = [0]
|
||||
Protective_Torque = [20]
|
||||
Protection_Time = [200]
|
||||
Overload_Torque = [80]
|
||||
Speed_closed_loop_P_proportional_coefficient = [10]
|
||||
Over_Current_Protection_Time = [200]
|
||||
Velocity_closed_loop_I_integral_coefficient = [200]
|
||||
Torque_Enable = [1]
|
||||
Acceleration = [20]
|
||||
Goal_Position = [0]
|
||||
Goal_Time = [0]
|
||||
Goal_Speed = [0]
|
||||
Torque_Limit = [1000]
|
||||
Lock = [1]
|
||||
Present_Position = [1494]
|
||||
Present_Speed = [0]
|
||||
Present_Load = [0]
|
||||
Present_Voltage = [123]
|
||||
Present_Temperature = [24]
|
||||
Status = [0]
|
||||
Moving = [0]
|
||||
Present_Current = [0]
|
||||
Maximum_Acceleration = [306]
|
||||
|
||||
|
||||
|
||||
# All servos of HopeJr
|
||||
Model = [2825 777 777 2825 777 777 777]
|
||||
ID = [1 2 3 4 5 6 7]
|
||||
Baud_Rate = [0 0 0 0 0 0 0]
|
||||
Return_Delay = [0 0 0 0 0 0 0]
|
||||
Response_Status_Level = [1 1 1 1 1 1 1]
|
||||
Min_Angle_Limit = [ 650 1300 1300 1200 600 1725 0]
|
||||
Max_Angle_Limit = [2600 2050 2800 2500 4096 2250 4095]
|
||||
Max_Temperature_Limit = [80 70 70 80 70 70 70]
|
||||
Max_Voltage_Limit = [160 140 140 160 140 140 80]
|
||||
Min_Voltage_Limit = [60 40 40 60 40 40 40]
|
||||
Max_Torque_Limit = [1000 1000 1000 1000 1000 1000 1000]
|
||||
Phase = [12 12 12 12 12 12 12]
|
||||
Unloading_Condition = [45 44 44 45 44 44 44]
|
||||
LED_Alarm_Condition = [45 47 47 45 47 47 47]
|
||||
P_Coefficient = [32 32 32 32 32 32 32]
|
||||
D_Coefficient = [32 32 32 32 32 32 32]
|
||||
I_Coefficient = [0 0 0 0 0 0 0]
|
||||
Minimum_Startup_Force = [15 16 16 12 16 16 16]
|
||||
CW_Dead_Zone = [0 1 1 0 1 1 1]
|
||||
CCW_Dead_Zone = [0 1 1 0 1 1 1]
|
||||
Protection_Current = [310 310 310 310 310 310 500]
|
||||
Angular_Resolution = [1 1 1 1 1 1 1]
|
||||
Offset = [ 0 1047 1024 1047 1024 1024 0]
|
||||
Mode = [0 0 0 0 0 0 0]
|
||||
Protective_Torque = [20 20 20 20 20 20 20]
|
||||
Protection_Time = [200 200 200 200 200 200 200]
|
||||
Overload_Torque = [80 80 80 80 80 80 80]
|
||||
Speed_closed_loop_P_proportional_coefficient = [10 10 10 10 10 10 10]
|
||||
Over_Current_Protection_Time = [250 200 200 250 200 200 200]
|
||||
Velocity_closed_loop_I_integral_coefficient = [200 200 200 200 200 200 200]
|
||||
Torque_Enable = [1 1 1 1 1 1 1]
|
||||
Acceleration = [20 20 20 20 20 20 20]
|
||||
Goal_Position = [1909 1977 1820 1000 707 1941 1036]
|
||||
Goal_Time = [0 0 0 0 0 0 0]
|
||||
Goal_Speed = [0 0 0 0 0 0 0]
|
||||
Torque_Limit = [1000 1000 1000 200 1000 1000 1000]
|
||||
Lock = [1 1 1 1 1 1 1]
|
||||
Present_Position = [1909 1982 1821 1200 710 1941 1036]
|
||||
Present_Speed = [0 0 0 0 0 0 0]
|
||||
Present_Load = [ 0 48 0 0 32 0 0]
|
||||
Present_Voltage = [122 123 122 123 122 122 122]
|
||||
Present_Temperature = [23 28 28 26 29 28 28]
|
||||
Status = [0 0 0 0 0 0 1]
|
||||
Moving = [0 0 0 0 0 0 0]
|
||||
Present_Current = [0 1 0 1 1 0 1]
|
||||
Maximum_Acceleration = [1530 306 306 1530 306 306 254]
|
||||
@@ -1,192 +0,0 @@
|
||||
import threading
|
||||
import time
|
||||
from typing import Callable
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import serial
|
||||
|
||||
from lerobot.common.robot_devices.motors.feetech import (
|
||||
CalibrationMode,
|
||||
FeetechMotorsBus,
|
||||
)
|
||||
|
||||
LOWER_BOUND_LINEAR = -100
|
||||
UPPER_BOUND_LINEAR = 200
|
||||
|
||||
ESCAPE_KEY_ID = 27
|
||||
|
||||
|
||||
class HopeJuniorRobot:
|
||||
def __init__(self):
|
||||
self.arm_bus = FeetechMotorsBus(
|
||||
port="/dev/ttyACM1",
|
||||
motors={
|
||||
# "motor1": (2, "sts3250"),
|
||||
# "motor2": (1, "scs0009"),
|
||||
#"shoulder_pitch": [1, "sts3250"],
|
||||
#"shoulder_yaw": [2, "sts3215"], # TODO: sts3250
|
||||
#"shoulder_roll": [3, "sts3215"], # TODO: sts3250
|
||||
#"elbow_flex": [4, "sts3250"],
|
||||
#"wrist_roll": [5, "sts3215"],
|
||||
#"wrist_yaw": [6, "sts3215"],
|
||||
"wrist_pitch": [7, "sts3215"],
|
||||
},
|
||||
protocol_version=0,
|
||||
)
|
||||
self.hand_bus = FeetechMotorsBus(
|
||||
port="/dev/ttyACM1",
|
||||
motors={
|
||||
"thumb_basel_rotation": [30, "scs0009"],
|
||||
"thumb_flexor": [27, "scs0009"],
|
||||
"thumb_pinky_side": [26, "scs0009"],
|
||||
"thumb_thumb_side": [28, "scs0009"],
|
||||
"index_flexor": [25, "scs0009"],
|
||||
"index_pinky_side": [31, "scs0009"],
|
||||
"index_thumb_side": [32, "scs0009"],
|
||||
"middle_flexor": [24, "scs0009"],
|
||||
"middle_pinky_side": [33, "scs0009"],
|
||||
"middle_thumb_side": [34, "scs0009"],
|
||||
"ring_flexor": [21, "scs0009"],
|
||||
"ring_pinky_side": [36, "scs0009"],
|
||||
"ring_thumb_side": [35, "scs0009"],
|
||||
"pinky_flexor": [23, "scs0009"],
|
||||
"pinky_pinky_side": [38, "scs0009"],
|
||||
"pinky_thumb_side": [37, "scs0009"],
|
||||
},
|
||||
protocol_version=1,
|
||||
group_sync_read=False,
|
||||
)
|
||||
|
||||
def get_hand_calibration(self):
|
||||
"""
|
||||
Returns a dictionary containing calibration settings for each motor
|
||||
on the hand bus.
|
||||
"""
|
||||
homing_offset = [0] * len(self.hand_bus.motor_names)
|
||||
drive_mode = [0] * len(self.hand_bus.motor_names)
|
||||
|
||||
start_pos = [
|
||||
500, 900, 0, 1000, 100, 250, 750, 100, 400, 150, 100, 120, 980, 100, 950, 750,
|
||||
]
|
||||
|
||||
end_pos = [
|
||||
start_pos[0] - 400, # 500 - 400 = 100
|
||||
start_pos[1] - 300, # 900 - 300 = 600
|
||||
start_pos[2] + 550, # 0 + 550 = 550
|
||||
start_pos[3] - 550, # 1000 - 550 = 450
|
||||
start_pos[4] + 900, # 100 + 900 = 1000
|
||||
start_pos[5] + 500, # 250 + 500 = 750
|
||||
start_pos[6] - 500, # 750 - 500 = 250
|
||||
start_pos[7] + 900, # 100 + 900 = 1000
|
||||
start_pos[8] + 700, # 400 + 700 = 1100
|
||||
start_pos[9] + 700, # 150 + 700 = 850
|
||||
start_pos[10] + 900, # 100 + 900 = 1000
|
||||
start_pos[11] + 700, # 120 + 700 = 820
|
||||
start_pos[12] - 700, # 980 - 700 = 280
|
||||
start_pos[13] + 900, # 100 + 900 = 1000
|
||||
start_pos[14] - 700, # 950 - 700 = 250
|
||||
start_pos[15] - 700, # 750 - 700 = 50
|
||||
]
|
||||
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": start_pos,
|
||||
"end_pos": end_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.hand_bus.motor_names,
|
||||
}
|
||||
return calib_dict
|
||||
|
||||
def get_arm_calibration(self):
|
||||
"""
|
||||
Returns a dictionary containing calibration settings for each motor
|
||||
on the arm bus.
|
||||
"""
|
||||
homing_offset = [0] * len(self.arm_bus.motor_names)
|
||||
drive_mode = [0] * len(self.arm_bus.motor_names)
|
||||
|
||||
# Example placeholders
|
||||
start_pos = [
|
||||
600, # shoulder_up
|
||||
1500, # shoulder_forward
|
||||
1300, # shoulder_yaw
|
||||
1000, # bend_elbow
|
||||
1600, # wrist_roll
|
||||
1700, # wrist_yaw
|
||||
600, # wrist_pitch
|
||||
]
|
||||
|
||||
end_pos = [
|
||||
2300, # shoulder_up
|
||||
2300, # shoulder_forward
|
||||
2800, # shoulder_yaw
|
||||
2500, # bend_elbow
|
||||
2800, # wrist_roll
|
||||
2200, # wrist_yaw
|
||||
1700, # wrist_pitch
|
||||
]
|
||||
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * len(self.arm_bus.motor_names)
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": start_pos,
|
||||
"end_pos": end_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.arm_bus.motor_names,
|
||||
}
|
||||
return calib_dict
|
||||
|
||||
def connect(self):
|
||||
"""Connect to the Feetech buses."""
|
||||
self.arm_bus.connect()
|
||||
# self.hand_bus.connect()
|
||||
|
||||
|
||||
def capture_and_display_processed_frames(
|
||||
frame_processor: Callable[[np.ndarray], np.ndarray],
|
||||
window_display_name: str,
|
||||
cap_device: int = 0,
|
||||
) -> None:
|
||||
"""
|
||||
Capture frames from the given input camera device, run them through
|
||||
the frame processor, and display the outputs in a window with the given name.
|
||||
|
||||
User should press Esc to exit.
|
||||
|
||||
Inputs:
|
||||
frame_processor: Callable[[np.ndarray], np.ndarray]
|
||||
Processes frames.
|
||||
Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte.
|
||||
window_display_name: str
|
||||
Name of the window used to display frames.
|
||||
cap_device: int
|
||||
Identifier for the camera to use to capture frames.
|
||||
"""
|
||||
cv2.namedWindow(window_display_name)
|
||||
capture = cv2.VideoCapture(cap_device)
|
||||
if not capture.isOpened():
|
||||
raise ValueError("Unable to open video capture.")
|
||||
|
||||
frame_count = 0
|
||||
has_frame, frame = capture.read()
|
||||
while has_frame:
|
||||
frame_count = frame_count + 1
|
||||
# Mirror frame horizontally and flip color for demonstration
|
||||
frame = np.ascontiguousarray(frame[:, ::-1, ::-1])
|
||||
|
||||
# process & show frame
|
||||
processed_frame = frame_processor(frame)
|
||||
cv2.imshow(window_display_name, processed_frame[:, :, ::-1])
|
||||
|
||||
has_frame, frame = capture.read()
|
||||
key = cv2.waitKey(1)
|
||||
if key == ESCAPE_KEY_ID:
|
||||
break
|
||||
|
||||
capture.release()
|
||||
|
Before Width: | Height: | Size: 56 KiB |
|
Before Width: | Height: | Size: 54 KiB |
|
Before Width: | Height: | Size: 56 KiB |
@@ -1,44 +0,0 @@
|
||||
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import time
|
||||
from typing import List, Tuple
|
||||
def log_and_plot_params(bus, params_to_log: list, servo_names: list,
|
||||
test_id="servo_log", interval=0.1, duration=5, save_plot=True) -> Tuple[dict, List[float]]:
|
||||
|
||||
"""
|
||||
Logs specific servo parameters for a given duration and generates a plot.
|
||||
"""
|
||||
|
||||
servo_data = {servo_name: {param: [] for param in params_to_log} for servo_name in servo_names}
|
||||
timestamps = []
|
||||
|
||||
start_time = time.time()
|
||||
|
||||
while time.time() - start_time < duration:
|
||||
timestamp = time.time() - start_time
|
||||
timestamps.append(timestamp)
|
||||
for param in params_to_log:
|
||||
values = bus.read(param, servo_names)
|
||||
for servo_name, value in zip(servo_names, values):
|
||||
servo_data[servo_name][param].append(value)
|
||||
|
||||
time.sleep(interval)
|
||||
|
||||
if save_plot:
|
||||
for servo_name, data in servo_data.items():
|
||||
plt.figure(figsize=(10, 6))
|
||||
for param in params_to_log:
|
||||
if all(v is not None for v in data[param]):
|
||||
plt.plot(timestamps, data[param], label=param)
|
||||
plt.xlabel("Time (s)")
|
||||
plt.ylabel("Parameter Values")
|
||||
plt.title(f"Parameter Trends for Servo: {servo_name}")
|
||||
plt.legend()
|
||||
plt.grid(True)
|
||||
plt.tight_layout()
|
||||
plot_filename = f"{test_id}_{servo_name}.png"
|
||||
plt.savefig(plot_filename)
|
||||
print(f"Plot saved as {plot_filename}")
|
||||
|
||||
return servo_data, timestamps
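# Hedged usage sketch (assumes a robot like the HopeJuniorRobot defined in the scripts
# above has already been constructed and connected):
#
#   servo_data, timestamps = log_and_plot_params(
#       robot.arm_bus,
#       params_to_log=["Present_Position", "Present_Current"],
#       servo_names=["elbow_flex"],
#       test_id="elbow_current_test",
#       interval=0.1,
#       duration=5,
#   )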
|
||||
|
Before Width: | Height: | Size: 65 KiB |
|
Before Width: | Height: | Size: 60 KiB |
|
Before Width: | Height: | Size: 54 KiB |
@@ -1,68 +0,0 @@
|
||||
STS_SERIES_CONTROL_TABLE = {
|
||||
"Model": (3, 2),
|
||||
"ID": (5, 1),
|
||||
"Baud_Rate": (6, 1),
|
||||
"Return_Delay": (7, 1),
|
||||
"Response_Status_Level": (8, 1),
|
||||
"Min_Angle_Limit": (9, 2),
|
||||
"Max_Angle_Limit": (11, 2),
|
||||
"Max_Temperature_Limit": (13, 1),
|
||||
"Max_Voltage_Limit": (14, 1),
|
||||
"Min_Voltage_Limit": (15, 1),
|
||||
"Max_Torque_Limit": (16, 2),
|
||||
"Phase": (18, 1),
|
||||
"Unloading_Condition": (19, 1),
|
||||
"LED_Alarm_Condition": (20, 1),
|
||||
"P_Coefficient": (21, 1),
|
||||
"D_Coefficient": (22, 1),
|
||||
"I_Coefficient": (23, 1),
|
||||
"Minimum_Startup_Force": (24, 2),
|
||||
"CW_Dead_Zone": (26, 1),
|
||||
"CCW_Dead_Zone": (27, 1),
|
||||
"Protection_Current": (28, 2),
|
||||
"Angular_Resolution": (30, 1),
|
||||
"Offset": (31, 2),
|
||||
"Mode": (33, 1),
|
||||
"Protective_Torque": (34, 1),
|
||||
"Protection_Time": (35, 1),
|
||||
"Overload_Torque": (36, 1),
|
||||
"Speed_closed_loop_P_proportional_coefficient": (37, 1),
|
||||
"Over_Current_Protection_Time": (38, 1),
|
||||
"Velocity_closed_loop_I_integral_coefficient": (39, 1),
|
||||
"Torque_Enable": (40, 1),
|
||||
"Acceleration": (41, 1),
|
||||
"Goal_Position": (42, 2),
|
||||
"Goal_Time": (44, 2),
|
||||
"Goal_Speed": (46, 2),
|
||||
"Torque_Limit": (48, 2),
|
||||
"Lock": (55, 1),
|
||||
"Present_Position": (56, 2),
|
||||
"Present_Speed": (58, 2),
|
||||
"Present_Load": (60, 2),
|
||||
"Present_Voltage": (62, 1),
|
||||
"Present_Temperature": (63, 1),
|
||||
"Status": (65, 1),
|
||||
"Moving": (66, 1),
|
||||
"Present_Current": (69, 2),
|
||||
# Not in the Memory Table
|
||||
"Maximum_Acceleration": (85, 2),
|
||||
}
|
||||
|
||||
import time
|
||||
|
||||
# Assuming STS_SERIES_CONTROL_TABLE is defined globally
|
||||
|
||||
def print_all_params(robot):
|
||||
"""
|
||||
Reads all parameters from the STS_SERIES_CONTROL_TABLE and prints their values.
|
||||
"""
|
||||
for param in STS_SERIES_CONTROL_TABLE.keys():
|
||||
try:
|
||||
val = robot.arm_bus.read(param)
|
||||
print(f"{param} = {val}")
|
||||
except Exception as e:
|
||||
print(f"{param} read failed: {e}")
|
||||
|
||||
|
||||
# Example usage:
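# (assumes `robot` is an already-connected HopeJuniorRobot, as in the scripts above)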
|
||||
print_all_params(robot)
|
||||
@@ -1,26 +0,0 @@
|
||||
#include <DFRobot_VisualRotaryEncoder.h>
|
||||
|
||||
DFRobot_VisualRotaryEncoder_I2C sensor(0x54, &Wire);
|
||||
|
||||
void setup()
|
||||
{
|
||||
Serial.begin(115200);
|
||||
|
||||
// Attempt to initialize the sensor
|
||||
while (NO_ERR != sensor.begin()) {
|
||||
// Failed? Just wait a bit and try again
|
||||
delay(3000);
|
||||
}
|
||||
}
|
||||
|
||||
void loop()
|
||||
{
|
||||
// Read the encoder value
|
||||
uint16_t encoderValue = sensor.getEncoderValue();
|
||||
|
||||
// Print it followed by a newline
|
||||
Serial.println(encoderValue);
|
||||
|
||||
// Delay 10ms between readings
|
||||
delay(10);
|
||||
}
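# A minimal Python-side reader for the sketch above (hedged sketch: the port name is
# an assumption; the baud rate matches Serial.begin(115200) and the HomonculusGlove
# class earlier in this diff):
#
#   import serial
#
#   ser = serial.Serial("/dev/tty.usbmodem1101", 115200, timeout=1)
#   while True:
#       if ser.in_waiting > 0:
#           # each line is one encoder value followed by a newline
#           value = int(ser.readline().decode("utf-8").strip())
#           print(value)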
|
||||
@@ -1,544 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"what are the actual interest values on the hopejr? make like a map"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"can change these dynamically so if the arm is moving down we can relax it instead of tensing it? so for example decreasing torque if the target position is lower than the actual position, for example. "
|
||||
]
|
||||
},
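{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hedged sketch of the idea above (not part of the original notebook):\n",
"# lower Torque_Limit when the elbow's goal is below its present position,\n",
"# so the arm relaxes on the way down instead of actively driving the load.\n",
"goal = 1200  # illustrative target, in raw ticks\n",
"present = robot.arm_bus.read(\"Present_Position\", [\"elbow_flex\"])[0]\n",
"torque = 200 if goal < present else 1000  # weaker when moving with gravity\n",
"robot.arm_bus.write(\"Torque_Limit\", torque, [\"elbow_flex\"])\n",
"robot.arm_bus.write(\"Goal_Position\", [goal], [\"elbow_flex\"])"
]
},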
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"arm_calibration = robot.get_arm_calibration()\n",
|
||||
"robot.arm_bus.write(\"Goal_Position\", arm_calibration[\"start_pos\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Present Position: [1494]\n",
|
||||
"Acceleration Read: [20]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import time\n",
|
||||
"from hopejr import HopeJuniorRobot\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"robot = HopeJuniorRobot()\n",
|
||||
"robot.connect()\n",
|
||||
"\n",
|
||||
"# Example read of the current position\n",
|
||||
"print(\"Present Position:\", robot.arm_bus.read(\"Present_Position\"))\n",
|
||||
"\n",
|
||||
"# Enable torque and set acceleration\n",
|
||||
"robot.arm_bus.write(\"Torque_Enable\", 1)\n",
|
||||
"robot.arm_bus.write(\"Acceleration\", 20)\n",
|
||||
"print(\"Acceleration Read:\", robot.arm_bus.read(\"Acceleration\"))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"robot.arm_bus.write(\"Torque_Limit\", 100,[\"elbow_flex\"])\n",
|
||||
"robot.arm_bus.write(\"Protective_Torque\", 0, [\"elbow_flex\"])\n",
|
||||
"robot.arm_bus.write(\"Acceleration\", 20)\n",
|
||||
"robot.arm_bus.write(\"Goal_Position\", [2000], [\"elbow_flex\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"array([1000, 1000, 1000, 1000, 1000, 1000, 1000])"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"robot.arm_bus.read(\"Max_Torque_Limit\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"robot.arm_bus.write(\"Goal_Position\", [1909, 1977, 1820, 1000, 707, 1941, 1036]) #"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"robot.arm_bus.write(\"Max_Voltage_Limit\", [160, 140, 140, 160, 140, 140, 140]) #so its not torque limit nor max torque limit, , no protective torque or overload torque\n",
|
||||
"#it's 1) max voltage limit, min-max angle limits are arbitrairly set for all the motors, max temp is only set for the shoulder\n",
|
||||
"#max acceleration is also set, we could lower that in the elbow to make it less responsive to commands basically\n",
|
||||
"#so we limit speed and temperature, maybe we should limit torque thouhg, minimum startup force is also important. protection current as well\n",
|
||||
"#changed that to 310.\n",
|
||||
"#\"Max_Voltage_Limit\" also needs to be changed, different from torque_limit"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Model = [777]\n",
|
||||
"ID = [7]\n",
|
||||
"Baud_Rate = [0]\n",
|
||||
"Return_Delay = [0]\n",
|
||||
"Response_Status_Level = [1]\n",
|
||||
"Min_Angle_Limit = [1140]\n",
|
||||
"Max_Angle_Limit = [2750]\n",
|
||||
"Max_Temperature_Limit = [70]\n",
|
||||
"Max_Voltage_Limit = [140]\n",
|
||||
"Min_Voltage_Limit = [40]\n",
|
||||
"Max_Torque_Limit = [1000]\n",
|
||||
"Phase = [12]\n",
|
||||
"Unloading_Condition = [44]\n",
|
||||
"LED_Alarm_Condition = [47]\n",
|
||||
"P_Coefficient = [32]\n",
|
||||
"D_Coefficient = [32]\n",
|
||||
"I_Coefficient = [0]\n",
|
||||
"Minimum_Startup_Force = [16]\n",
|
||||
"CW_Dead_Zone = [1]\n",
|
||||
"CCW_Dead_Zone = [1]\n",
|
||||
"Protection_Current = [310]\n",
|
||||
"Angular_Resolution = [1]\n",
|
||||
"Offset = [1047]\n",
|
||||
"Mode = [0]\n",
|
||||
"Protective_Torque = [20]\n",
|
||||
"Protection_Time = [200]\n",
|
||||
"Overload_Torque = [80]\n",
|
||||
"Speed_closed_loop_P_proportional_coefficient = [10]\n",
|
||||
"Over_Current_Protection_Time = [200]\n",
|
||||
"Velocity_closed_loop_I_integral_coefficient = [200]\n",
|
||||
"Torque_Enable = [1]\n",
|
||||
"Acceleration = [20]\n",
|
||||
"Goal_Position = [0]\n",
|
||||
"Goal_Time = [0]\n",
|
||||
"Goal_Speed = [0]\n",
|
||||
"Torque_Limit = [1000]\n",
|
||||
"Lock = [1]\n",
|
||||
"Present_Position = [1494]\n",
|
||||
"Present_Speed = [0]\n",
|
||||
"Present_Load = [0]\n",
|
||||
"Present_Voltage = [123]\n",
|
||||
"Present_Temperature = [24]\n",
|
||||
"Status = [0]\n",
|
||||
"Moving = [0]\n",
|
||||
"Present_Current = [0]\n",
|
||||
"Maximum_Acceleration = [306]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"STS_SERIES_CONTROL_TABLE = {\n",
|
||||
" \"Model\": (3, 2),\n",
|
||||
" \"ID\": (5, 1),\n",
|
||||
" \"Baud_Rate\": (6, 1),\n",
|
||||
" \"Return_Delay\": (7, 1),\n",
|
||||
" \"Response_Status_Level\": (8, 1),\n",
|
||||
" \"Min_Angle_Limit\": (9, 2),\n",
|
||||
" \"Max_Angle_Limit\": (11, 2),\n",
|
||||
" \"Max_Temperature_Limit\": (13, 1),\n",
|
||||
" \"Max_Voltage_Limit\": (14, 1),\n",
|
||||
" \"Min_Voltage_Limit\": (15, 1),\n",
|
||||
" \"Max_Torque_Limit\": (16, 2),\n",
|
||||
" \"Phase\": (18, 1),\n",
|
||||
" \"Unloading_Condition\": (19, 1),\n",
|
||||
" \"LED_Alarm_Condition\": (20, 1),\n",
|
||||
" \"P_Coefficient\": (21, 1),\n",
|
||||
" \"D_Coefficient\": (22, 1),\n",
|
||||
" \"I_Coefficient\": (23, 1),\n",
|
||||
" \"Minimum_Startup_Force\": (24, 2),\n",
|
||||
" \"CW_Dead_Zone\": (26, 1),\n",
|
||||
" \"CCW_Dead_Zone\": (27, 1),\n",
|
||||
" \"Protection_Current\": (28, 2),\n",
|
||||
" \"Angular_Resolution\": (30, 1),\n",
|
||||
" \"Offset\": (31, 2),\n",
|
||||
" \"Mode\": (33, 1),\n",
|
||||
" \"Protective_Torque\": (34, 1),\n",
|
||||
" \"Protection_Time\": (35, 1),\n",
|
||||
" \"Overload_Torque\": (36, 1),\n",
|
||||
" \"Speed_closed_loop_P_proportional_coefficient\": (37, 1),\n",
|
||||
" \"Over_Current_Protection_Time\": (38, 1),\n",
|
||||
" \"Velocity_closed_loop_I_integral_coefficient\": (39, 1),\n",
|
||||
" \"Torque_Enable\": (40, 1),\n",
|
||||
" \"Acceleration\": (41, 1),\n",
|
||||
" \"Goal_Position\": (42, 2),\n",
|
||||
" \"Goal_Time\": (44, 2),\n",
|
||||
" \"Goal_Speed\": (46, 2),\n",
|
||||
" \"Torque_Limit\": (48, 2),\n",
|
||||
" \"Lock\": (55, 1),\n",
|
||||
" \"Present_Position\": (56, 2),\n",
|
||||
" \"Present_Speed\": (58, 2),\n",
|
||||
" \"Present_Load\": (60, 2),\n",
|
||||
" \"Present_Voltage\": (62, 1),\n",
|
||||
" \"Present_Temperature\": (63, 1),\n",
|
||||
" \"Status\": (65, 1),\n",
|
||||
" \"Moving\": (66, 1),\n",
|
||||
" \"Present_Current\": (69, 2),\n",
|
||||
" # Not in the Memory Table\n",
|
||||
" \"Maximum_Acceleration\": (85, 2),\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"# Assuming STS_SERIES_CONTROL_TABLE is defined globally\n",
|
||||
"\n",
|
||||
"def print_all_params(robot):\n",
|
||||
" \"\"\"\n",
|
||||
" Reads all parameters from the STS_SERIES_CONTROL_TABLE and prints their values.\n",
|
||||
" \"\"\"\n",
|
||||
" for param in STS_SERIES_CONTROL_TABLE.keys():\n",
|
||||
" try:\n",
|
||||
" val = robot.arm_bus.read(param)\n",
|
||||
" print(f\"{param} = {val}\")\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"{param} read failed: {e}\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Example usage:\n",
|
||||
"print_all_params(robot)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"probably max input voltage, we can also look at temperature and "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Acceleration Read: [20 20]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\n",
|
||||
"print(\"Acceleration Read:\", robot.arm_bus.read(\"Acceleration\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"robot.arm_bus.write(\"LED_Alarm_Condition\", 2, [\"elbow_flex\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"robot.arm_bus.write(\"Acceleration\", 20, [\"elbow_flex\"])\n",
|
||||
"robot.arm_bus.write(\"Acceleration\", 100, [\"wrist_yaw\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 73,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"robot.arm_bus.write(\"Goal_Position\", [1000, 1000], [\"elbow_flex\", \"wrist_yaw\"])\n",
|
||||
"time.sleep(2)\n",
|
||||
"robot.arm_bus.write(\"Goal_Position\", [2000, 2000], [\"elbow_flex\", \"wrist_yaw\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 68,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Elbow Flex Current: [1]\n",
|
||||
"Elbow Flex Current: [0]\n",
|
||||
"Elbow Flex Current: [3]\n",
|
||||
"Elbow Flex Current: [1]\n",
|
||||
"Elbow Flex Current: [1]\n",
|
||||
"Elbow Flex Current: [2]\n",
|
||||
"Elbow Flex Current: [1]\n",
|
||||
"Elbow Flex Current: [1]\n",
|
||||
"Elbow Flex Current: [2]\n",
|
||||
"Elbow Flex Current: [1]\n",
|
||||
"Elbow Flex Current: [1]\n",
|
||||
"Elbow Flex Current: [0]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ename": "KeyboardInterrupt",
|
||||
"evalue": "",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[68], line 25\u001b[0m\n\u001b[1;32m 22\u001b[0m time\u001b[38;5;241m.\u001b[39msleep(\u001b[38;5;241m2\u001b[39m)\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 24\u001b[0m \u001b[38;5;66;03m# If current is zero, hold at pos_a for a bit\u001b[39;00m\n\u001b[0;32m---> 25\u001b[0m \u001b[43mtime\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msleep\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n",
|
||||
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import time\n",
|
||||
"\n",
|
||||
"# Enable torque on elbow_flex\n",
|
||||
"robot.arm_bus.write(\"Torque_Enable\", 1, [\"elbow_flex\"])\n",
|
||||
"\n",
|
||||
"pos_a = 2500\n",
|
||||
"pos_b = 1000\n",
|
||||
"\n",
|
||||
"robot.arm_bus.write(\"Goal_Position\", pos_a, [\"elbow_flex\"])\n",
|
||||
"time.sleep(2)\n",
|
||||
"\n",
|
||||
"while True:\n",
|
||||
" current_val = robot.arm_bus.read(\"Present_Current\", \"elbow_flex\")\n",
|
||||
" print(\"Elbow Flex Current:\", current_val)\n",
|
||||
" \n",
|
||||
" # If the servo is under non-zero load/current, switch position\n",
|
||||
" if current_val > 1:\n",
|
||||
" robot.arm_bus.write(\"Goal_Position\", pos_b, [\"elbow_flex\"])\n",
|
||||
" time.sleep(2)\n",
|
||||
" # Go back to pos_a again\n",
|
||||
" robot.arm_bus.write(\"Goal_Position\", pos_a, [\"elbow_flex\"])\n",
|
||||
" time.sleep(2)\n",
|
||||
" else:\n",
|
||||
" # If current is zero, hold at pos_a for a bit\n",
|
||||
" time.sleep(1)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\"Acceleration\" = 0, infinitely fast\n",
|
||||
"\"Acceleration\" = 20 slow\n",
|
||||
"elbow_flex is the LED one (4)\n",
|
||||
"\n",
|
||||
"robot.arm_bus.write(\"LED_Alarm_Condition\", 2, [\"elbow_flex\"]) #on constantly\n",
|
||||
"robot.arm_bus.write(\"LED_Alarm_Condition\", 1, [\"elbow_flex\"]) #beeping\n",
|
||||
"robot.arm_bus.write(\"LED_Alarm_Condition\", 0, [\"elbow_flex\"]) #off\n",
|
||||
"\n",
|
||||
"\"Max_Torque_Limit\": (16, 2), is what i have o play around with or \"Protective_Torque\": (37, 1), maybe\n",
|
||||
"\n",
|
||||
"robot.arm_bus.write(\"Torque_Enable\", 1, [\"elbow_flex\"]) 1 can move 0 cant move\n",
|
||||
"\n",
|
||||
"robot.arm_bus.write(\"Torque_Limit\", 300, [\"elbow_flex\"]) #how strong/weak the servo is. 0 makes it so that it cannot move basically, but i'd like to have that value change honestly and for it to be waeaker\n",
|
||||
"\n",
|
||||
"robot.arm_bus.write(\"Torque_Limit\", 20,[\"elbow_flex\"]) 20 in ordre to get some motion\n",
|
||||
"\n",
|
||||
"default is 1000\n",
|
||||
"\n",
|
||||
"robot.arm_bus.write(\"Goal_Speed\", -s, [\"elbow_flex\"]) #changes how fast the servo moves when going to the target, does not make it move with a specific speed "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"import time\n",
|
||||
"\n",
|
||||
"# Enable torque on elbow_flex\n",
|
||||
"robot.arm_bus.write(\"Torque_Enable\", 1, [\"elbow_flex\"])\n",
|
||||
"\n",
|
||||
"pos_a = 1000\n",
|
||||
"pos_b = 2500\n",
|
||||
"\n",
|
||||
"robot.arm_bus.write(\"Goal_Position\", pos_a, [\"elbow_flex\"])\n",
|
||||
"time.sleep(2)\n",
|
||||
"\n",
|
||||
"while True:\n",
|
||||
" current_val = robot.arm_bus.read(\"Present_Current\", \"elbow_flex\")\n",
|
||||
" print(\"Elbow Flex Current:\", current_val)\n",
|
||||
" \n",
|
||||
" # If the servo is under non-zero load/current, switch position\n",
|
||||
" if current_val > 1:\n",
|
||||
" robot.arm_bus.write(\"Goal_Position\", pos_b, [\"elbow_flex\"])\n",
|
||||
" time.sleep(2)\n",
|
||||
" # Go back to pos_a again\n",
|
||||
" robot.arm_bus.write(\"Goal_Position\", pos_a, [\"elbow_flex\"])\n",
|
||||
" time.sleep(2)\n",
|
||||
" else:\n",
|
||||
" # If current is zero, hold at pos_a for a bit\n",
|
||||
" time.sleep(1)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"so if current is larger than x then you disable it \n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"robot.arm_bus.write(\"LED_Alarm_Condition\", 2, [\"elbow_flex\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 43,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0]\n",
|
||||
"[0]\n",
|
||||
"[2]\n",
|
||||
"[4]\n",
|
||||
"[0]\n",
|
||||
"[0]\n",
|
||||
"[0]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ename": "KeyboardInterrupt",
|
||||
"evalue": "",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[43], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28mprint\u001b[39m(robot\u001b[38;5;241m.\u001b[39marm_bus\u001b[38;5;241m.\u001b[39mread(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mPresent_Current\u001b[39m\u001b[38;5;124m\"\u001b[39m, [\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124melbow_flex\u001b[39m\u001b[38;5;124m\"\u001b[39m]))\n\u001b[0;32m----> 3\u001b[0m \u001b[43mtime\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msleep\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n",
|
||||
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"while True:\n",
|
||||
" print(robot.arm_bus.read(\"Present_Current\", [\"elbow_flex\"]))\n",
|
||||
" time.sleep(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 47,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Max_Voltage_Limit = [160 140 140 160 140 140 80]\n",
|
||||
"Min_Angle_Limit = [ 650 1300 1300 1200 600 1725 0]\n",
|
||||
"Max_Angle_Limit = [2600 2050 2800 2500 4096 2250 4095]\n",
|
||||
"Max_Temperature_Limit = [80 70 70 80 70 70 70]\n",
|
||||
"Acceleration = [20 20 20 20 20 20 20]\n",
|
||||
"Torque_Limit = [1000 1000 1000 200 1000 1000 1000]\n",
|
||||
"Minimum_Startup_Force = [15 16 16 12 16 16 16]\n",
|
||||
"Protection_Current = [310 310 310 310 310 310 500]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import time\n",
|
||||
"\n",
|
||||
"def print_important_params(robot):\n",
|
||||
"\n",
|
||||
" # Example parameters you mentioned; adjust as needed\n",
|
||||
" param_list = [\n",
|
||||
" \"Max_Voltage_Limit\",\n",
|
||||
" \"Min_Angle_Limit\",\n",
|
||||
" \"Max_Angle_Limit\",\n",
|
||||
" \"Max_Temperature_Limit\",\n",
|
||||
" \"Acceleration\", # or \"Maximum_Acceleration\" if you prefer that register\n",
|
||||
" \"Torque_Limit\", # or \"Max_Torque_Limit\" if your table uses that\n",
|
||||
" \"Minimum_Startup_Force\",\n",
|
||||
" \"Protection_Current\",\n",
|
||||
" ]\n",
|
||||
" \n",
|
||||
" for param in param_list:\n",
|
||||
" try:\n",
|
||||
" val = robot.arm_bus.read(param)\n",
|
||||
" print(f\"{param} = {val}\")\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"{param} read failed: {e}\")\n",
|
||||
"\n",
|
||||
"# -------------------------------\n",
|
||||
"# Example usage\n",
|
||||
"\n",
|
||||
"print_important_params(robot)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "lerobot",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
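The notebook above ends on the idea "if the current is larger than x, then you disable it". Here is a minimal sketch of that cut-off, reusing the `robot.arm_bus` read/write calls and register names from the cells above; the threshold value is an assumption for illustration, not a measured limit.

import time

CURRENT_CUTOFF = 5  # hypothetical threshold "x"; the readings above hover around 0-4

robot.arm_bus.write("Torque_Enable", 1, ["elbow_flex"])
robot.arm_bus.write("Goal_Position", 2500, ["elbow_flex"])

while True:
    current_val = robot.arm_bus.read("Present_Current", ["elbow_flex"])
    print("Elbow Flex Current:", current_val)
    if current_val[0] > CURRENT_CUTOFF:
        # Disable torque so the joint goes limp instead of fighting the load.
        robot.arm_bus.write("Torque_Enable", 0, ["elbow_flex"])
        break
    time.sleep(0.5)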
@@ -1,27 +0,0 @@
|
||||
import time
|
||||
from hopejr import HopeJuniorRobot
|
||||
|
||||
|
||||
def main():
|
||||
# Instantiate and connect to the robot
|
||||
robot = HopeJuniorRobot()
|
||||
robot.connect()
|
||||
|
||||
# Example read of the current position
|
||||
print("Present Position:", robot.arm_bus.read("Present_Position"))
|
||||
|
||||
# Enable torque and set acceleration
|
||||
robot.arm_bus.write("Torque_Enable", 1)
|
||||
robot.arm_bus.write("Acceleration", 20)
|
||||
print("Acceleration Read:", robot.arm_bus.read("Acceleration"))
|
||||
|
||||
# Move elbow_flex and wrist_yaw a few times
|
||||
robot.arm_bus.write("Goal_Position", [1000, 1000], ["elbow_flex", "wrist_yaw"])
|
||||
time.sleep(2)
|
||||
robot.arm_bus.write("Goal_Position", [1500, 1500], ["elbow_flex", "wrist_yaw"])
|
||||
time.sleep(2)
|
||||
robot.arm_bus.write("Goal_Position", [1000, 1000], ["elbow_flex", "wrist_yaw"])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,49 +0,0 @@
|
||||
STS_SERIES_CONTROL_TABLE = {
|
||||
"Model": (3, 2),
|
||||
"ID": (5, 1),
|
||||
"Baud_Rate": (6, 1),
|
||||
"Return_Delay": (7, 1),
|
||||
"Response_Status_Level": (8, 1),
|
||||
"Min_Angle_Limit": (9, 2),
|
||||
"Max_Angle_Limit": (11, 2),
|
||||
"Max_Temperature_Limit": (13, 1),
|
||||
"Max_Voltage_Limit": (14, 1),
|
||||
"Min_Voltage_Limit": (15, 1),
|
||||
"Max_Torque_Limit": (16, 2),
|
||||
"Phase": (18, 1),
|
||||
"Unloading_Condition": (19, 1),
|
||||
"LED_Alarm_Condition": (20, 1),
|
||||
"P_Coefficient": (21, 1),
|
||||
"D_Coefficient": (22, 1),
|
||||
"I_Coefficient": (23, 1),
|
||||
"Minimum_Startup_Force": (24, 2),
|
||||
"CW_Dead_Zone": (26, 1),
|
||||
"CCW_Dead_Zone": (27, 1),
|
||||
"Protection_Current": (28, 2),
|
||||
"Angular_Resolution": (30, 1),
|
||||
"Offset": (31, 2),
|
||||
"Mode": (33, 1),
|
||||
"Protective_Torque": (34, 1),
|
||||
"Protection_Time": (35, 1),
|
||||
"Overload_Torque": (36, 1),
|
||||
"Speed_closed_loop_P_proportional_coefficient": (37, 1),
|
||||
"Over_Current_Protection_Time": (38, 1),
|
||||
"Velocity_closed_loop_I_integral_coefficient": (39, 1),
|
||||
"Torque_Enable": (40, 1),
|
||||
"Acceleration": (41, 1),
|
||||
"Goal_Position": (42, 2),
|
||||
"Goal_Time": (44, 2),
|
||||
"Goal_Speed": (46, 2),
|
||||
"Torque_Limit": (48, 2),
|
||||
"Lock": (55, 1),
|
||||
"Present_Position": (56, 2),
|
||||
"Present_Speed": (58, 2),
|
||||
"Present_Load": (60, 2),
|
||||
"Present_Voltage": (62, 1),
|
||||
"Present_Temperature": (63, 1),
|
||||
"Status": (65, 1),
|
||||
"Moving": (66, 1),
|
||||
"Present_Current": (69, 2),
|
||||
# Not in the Memory Table
|
||||
"Maximum_Acceleration": (85, 2),
|
||||
}
|
||||
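Each entry in the control table above maps a register name to an (address, size-in-bytes) pair; a bus implementation reads `size` bytes starting at `address` and decodes them into an integer. Below is a small sketch of just the decoding step, assuming the 2-byte STS registers are packed little-endian (low byte first); the raw bytes are made up for illustration.

def decode_register(name, raw_bytes):
    address, size = STS_SERIES_CONTROL_TABLE[name]
    assert len(raw_bytes) == size, f"{name} expects {size} byte(s)"
    # Assumed little-endian packing for multi-byte registers; check the servo datasheet.
    return int.from_bytes(raw_bytes, byteorder="little")

# Example: a raw read of Present_Position (address 56, 2 bytes) returning 0xD0 0x07 decodes to 2000.
print(decode_register("Present_Position", bytes([0xD0, 0x07])))  # 2000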
@@ -1,16 +0,0 @@
|
||||
|
||||
The first check that kicks in is current:
|
||||
Protection_Current (310), in amperes or some similar raw current unit
|
||||
Present_Current, compared against the protection current
|
||||
Over_Current_Protection_Time, how long the over-current condition must last before the servo shuts itself down
|
||||
|
||||
make a quick update about this
|
||||
|
||||
variables of interest are
|
||||
Max_Torque_Limit = 1000,
|
||||
Present_Load = 1000-something, which triggered the overload torque mechanism
|
||||
Overload_Torque = 80, how much of the max torque limit do we allow?
|
||||
Protection_Time = 200, after how long do we set Torque_Enable to 1? (*turns out this is not actually true)
|
||||
Protective_Torque = 20, after we trigger the safety mechanism, how much torque do we allow the motor to have?
|
||||
|
||||
There's actually no temperature or voltage check that the Feetech servos perform; the only two protections are current and torque, which work as described above.
|
||||
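A hedged sketch of the recovery path these notes describe: the `robot.arm_bus` API and register names come from the notebook earlier in this diff, the 310/1000/20 values are the ones quoted above, and the pre-emptive back-off and restore logic is an assumption rather than the servo's documented behaviour.

import time

OVER_CURRENT_THRESHOLD = 310  # Protection_Current from the notes
NORMAL_TORQUE_LIMIT = 1000    # the default Torque_Limit quoted above
BACKOFF_TORQUE_LIMIT = 20     # roughly the Protective_Torque value

def watch_and_recover(robot, motor="elbow_flex"):
    while True:
        current = robot.arm_bus.read("Present_Current", [motor])
        load = robot.arm_bus.read("Present_Load", [motor])
        print("current:", current, "load:", load)
        if current[0] >= OVER_CURRENT_THRESHOLD:
            # The firmware would trip its own protection after Over_Current_Protection_Time;
            # here we back off pre-emptively, then re-enable and restore the normal limit.
            robot.arm_bus.write("Torque_Limit", BACKOFF_TORQUE_LIMIT, [motor])
            time.sleep(1.0)
            robot.arm_bus.write("Torque_Enable", 1, [motor])
            robot.arm_bus.write("Torque_Limit", NORMAL_TORQUE_LIMIT, [motor])
        time.sleep(0.2)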
@@ -1,236 +1 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
This file contains lists of available environments, datasets and policies to reflect the current state of the LeRobot library.
|
||||
We do not want to import all the dependencies, but instead we keep it lightweight to ensure fast access to these variables.
|
||||
|
||||
Example:
|
||||
```python
|
||||
import lerobot
|
||||
print(lerobot.available_envs)
|
||||
print(lerobot.available_tasks_per_env)
|
||||
print(lerobot.available_datasets)
|
||||
print(lerobot.available_datasets_per_env)
|
||||
print(lerobot.available_real_world_datasets)
|
||||
print(lerobot.available_policies)
|
||||
print(lerobot.available_policies_per_env)
|
||||
print(lerobot.available_robots)
|
||||
print(lerobot.available_cameras)
|
||||
print(lerobot.available_motors)
|
||||
```
|
||||
|
||||
When implementing a new dataset loadable with LeRobotDataset follow these steps:
|
||||
- Update `available_datasets_per_env` in `lerobot/__init__.py`
|
||||
|
||||
When implementing a new environment (e.g. `gym_aloha`), follow these steps:
|
||||
- Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py`
|
||||
|
||||
When implementing a new policy class (e.g. `DiffusionPolicy`) follow these steps:
|
||||
- Update `available_policies` and `available_policies_per_env`, in `lerobot/__init__.py`
|
||||
- Set the required `name` class attribute.
|
||||
- Update variables in `tests/test_available.py` by importing your new Policy class
|
||||
"""
|
||||
|
||||
import itertools
|
||||
|
||||
from lerobot.__version__ import __version__ # noqa: F401
|
||||
|
||||
# TODO(rcadene): Improve policies and envs. As of now, an item in `available_policies`
|
||||
# refers to a yaml file AND a modeling name. Same for `available_envs` which refers to
|
||||
# a yaml file AND an environment name. The difference should be more obvious.
|
||||
available_tasks_per_env = {
|
||||
"aloha": [
|
||||
"AlohaInsertion-v0",
|
||||
"AlohaTransferCube-v0",
|
||||
],
|
||||
"pusht": ["PushT-v0"],
|
||||
"xarm": ["XarmLift-v0"],
|
||||
"dora_aloha_real": ["DoraAloha-v0", "DoraKoch-v0", "DoraReachy2-v0"],
|
||||
}
|
||||
available_envs = list(available_tasks_per_env.keys())
|
||||
|
||||
available_datasets_per_env = {
|
||||
"aloha": [
|
||||
"lerobot/aloha_sim_insertion_human",
|
||||
"lerobot/aloha_sim_insertion_scripted",
|
||||
"lerobot/aloha_sim_transfer_cube_human",
|
||||
"lerobot/aloha_sim_transfer_cube_scripted",
|
||||
"lerobot/aloha_sim_insertion_human_image",
|
||||
"lerobot/aloha_sim_insertion_scripted_image",
|
||||
"lerobot/aloha_sim_transfer_cube_human_image",
|
||||
"lerobot/aloha_sim_transfer_cube_scripted_image",
|
||||
],
|
||||
# TODO(alexander-soare): Add "lerobot/pusht_keypoints". Right now we can't because this is too tightly
|
||||
# coupled with tests.
|
||||
"pusht": ["lerobot/pusht", "lerobot/pusht_image"],
|
||||
"xarm": [
|
||||
"lerobot/xarm_lift_medium",
|
||||
"lerobot/xarm_lift_medium_replay",
|
||||
"lerobot/xarm_push_medium",
|
||||
"lerobot/xarm_push_medium_replay",
|
||||
"lerobot/xarm_lift_medium_image",
|
||||
"lerobot/xarm_lift_medium_replay_image",
|
||||
"lerobot/xarm_push_medium_image",
|
||||
"lerobot/xarm_push_medium_replay_image",
|
||||
],
|
||||
"dora_aloha_real": [
|
||||
"lerobot/aloha_static_battery",
|
||||
"lerobot/aloha_static_candy",
|
||||
"lerobot/aloha_static_coffee",
|
||||
"lerobot/aloha_static_coffee_new",
|
||||
"lerobot/aloha_static_cups_open",
|
||||
"lerobot/aloha_static_fork_pick_up",
|
||||
"lerobot/aloha_static_pingpong_test",
|
||||
"lerobot/aloha_static_pro_pencil",
|
||||
"lerobot/aloha_static_screw_driver",
|
||||
"lerobot/aloha_static_tape",
|
||||
"lerobot/aloha_static_thread_velcro",
|
||||
"lerobot/aloha_static_towel",
|
||||
"lerobot/aloha_static_vinh_cup",
|
||||
"lerobot/aloha_static_vinh_cup_left",
|
||||
"lerobot/aloha_static_ziploc_slide",
|
||||
],
|
||||
}
|
||||
|
||||
available_real_world_datasets = [
|
||||
"lerobot/aloha_mobile_cabinet",
|
||||
"lerobot/aloha_mobile_chair",
|
||||
"lerobot/aloha_mobile_elevator",
|
||||
"lerobot/aloha_mobile_shrimp",
|
||||
"lerobot/aloha_mobile_wash_pan",
|
||||
"lerobot/aloha_mobile_wipe_wine",
|
||||
"lerobot/aloha_static_battery",
|
||||
"lerobot/aloha_static_candy",
|
||||
"lerobot/aloha_static_coffee",
|
||||
"lerobot/aloha_static_coffee_new",
|
||||
"lerobot/aloha_static_cups_open",
|
||||
"lerobot/aloha_static_fork_pick_up",
|
||||
"lerobot/aloha_static_pingpong_test",
|
||||
"lerobot/aloha_static_pro_pencil",
|
||||
"lerobot/aloha_static_screw_driver",
|
||||
"lerobot/aloha_static_tape",
|
||||
"lerobot/aloha_static_thread_velcro",
|
||||
"lerobot/aloha_static_towel",
|
||||
"lerobot/aloha_static_vinh_cup",
|
||||
"lerobot/aloha_static_vinh_cup_left",
|
||||
"lerobot/aloha_static_ziploc_slide",
|
||||
"lerobot/umi_cup_in_the_wild",
|
||||
"lerobot/unitreeh1_fold_clothes",
|
||||
"lerobot/unitreeh1_rearrange_objects",
|
||||
"lerobot/unitreeh1_two_robot_greeting",
|
||||
"lerobot/unitreeh1_warehouse",
|
||||
"lerobot/nyu_rot_dataset",
|
||||
"lerobot/utokyo_saytap",
|
||||
"lerobot/imperialcollege_sawyer_wrist_cam",
|
||||
"lerobot/utokyo_xarm_bimanual",
|
||||
"lerobot/tokyo_u_lsmo",
|
||||
"lerobot/utokyo_pr2_opening_fridge",
|
||||
"lerobot/cmu_franka_exploration_dataset",
|
||||
"lerobot/cmu_stretch",
|
||||
"lerobot/asu_table_top",
|
||||
"lerobot/utokyo_pr2_tabletop_manipulation",
|
||||
"lerobot/utokyo_xarm_pick_and_place",
|
||||
"lerobot/ucsd_kitchen_dataset",
|
||||
"lerobot/austin_buds_dataset",
|
||||
"lerobot/dlr_sara_grid_clamp",
|
||||
"lerobot/conq_hose_manipulation",
|
||||
"lerobot/columbia_cairlab_pusht_real",
|
||||
"lerobot/dlr_sara_pour",
|
||||
"lerobot/dlr_edan_shared_control",
|
||||
"lerobot/ucsd_pick_and_place_dataset",
|
||||
"lerobot/berkeley_cable_routing",
|
||||
"lerobot/nyu_franka_play_dataset",
|
||||
"lerobot/austin_sirius_dataset",
|
||||
"lerobot/cmu_play_fusion",
|
||||
"lerobot/berkeley_gnm_sac_son",
|
||||
"lerobot/nyu_door_opening_surprising_effectiveness",
|
||||
"lerobot/berkeley_fanuc_manipulation",
|
||||
"lerobot/jaco_play",
|
||||
"lerobot/viola",
|
||||
"lerobot/kaist_nonprehensile",
|
||||
"lerobot/berkeley_mvp",
|
||||
"lerobot/uiuc_d3field",
|
||||
"lerobot/berkeley_gnm_recon",
|
||||
"lerobot/austin_sailor_dataset",
|
||||
"lerobot/utaustin_mutex",
|
||||
"lerobot/roboturk",
|
||||
"lerobot/stanford_hydra_dataset",
|
||||
"lerobot/berkeley_autolab_ur5",
|
||||
"lerobot/stanford_robocook",
|
||||
"lerobot/toto",
|
||||
"lerobot/fmb",
|
||||
"lerobot/droid_100",
|
||||
"lerobot/berkeley_rpt",
|
||||
"lerobot/stanford_kuka_multimodal_dataset",
|
||||
"lerobot/iamlab_cmu_pickup_insert",
|
||||
"lerobot/taco_play",
|
||||
"lerobot/berkeley_gnm_cory_hall",
|
||||
"lerobot/usc_cloth_sim",
|
||||
]
|
||||
|
||||
available_datasets = list(
|
||||
itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets)
|
||||
)
|
||||
|
||||
# lists all available policies from `lerobot/common/policies`
|
||||
available_policies = [
|
||||
"act",
|
||||
"diffusion",
|
||||
"tdmpc",
|
||||
"vqbet",
|
||||
]
|
||||
|
||||
# lists all available robots from `lerobot/common/robot_devices/robots`
|
||||
available_robots = [
|
||||
"koch",
|
||||
"koch_bimanual",
|
||||
"aloha",
|
||||
"so100",
|
||||
"moss",
|
||||
]
|
||||
|
||||
# lists all available cameras from `lerobot/common/robot_devices/cameras`
|
||||
available_cameras = [
|
||||
"opencv",
|
||||
"intelrealsense",
|
||||
]
|
||||
|
||||
# lists all available motors from `lerobot/common/robot_devices/motors`
|
||||
available_motors = [
|
||||
"dynamixel",
|
||||
"feetech",
|
||||
]
|
||||
|
||||
# keys and values refer to yaml files
|
||||
available_policies_per_env = {
|
||||
"aloha": ["act"],
|
||||
"pusht": ["diffusion", "vqbet"],
|
||||
"xarm": ["tdmpc"],
|
||||
"koch_real": ["act_koch_real"],
|
||||
"aloha_real": ["act_aloha_real"],
|
||||
"dora_aloha_real": ["act_aloha_real"],
|
||||
}
|
||||
|
||||
env_task_pairs = [(env, task) for env, tasks in available_tasks_per_env.items() for task in tasks]
|
||||
env_dataset_pairs = [
|
||||
(env, dataset) for env, datasets in available_datasets_per_env.items() for dataset in datasets
|
||||
]
|
||||
env_dataset_policy_triplets = [
|
||||
(env, dataset, policy)
|
||||
for env, datasets in available_datasets_per_env.items()
|
||||
for dataset in datasets
|
||||
for policy in available_policies_per_env[env]
|
||||
]
|
||||
|
||||
@@ -1,19 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""To enable `lerobot.__version__`"""
|
||||
""" To enable `lerobot.__version__` """
|
||||
|
||||
from importlib.metadata import PackageNotFoundError, version
|
||||
|
||||
|
||||
159
lerobot/common/datasets/abstract.py
Normal file
@@ -0,0 +1,159 @@
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Callable
|
||||
|
||||
import einops
|
||||
import torch
|
||||
import torchrl
|
||||
import tqdm
|
||||
from huggingface_hub import snapshot_download
|
||||
from tensordict import TensorDict
|
||||
from torchrl.data.replay_buffers.replay_buffers import TensorDictReplayBuffer
|
||||
from torchrl.data.replay_buffers.samplers import SliceSampler
|
||||
from torchrl.data.replay_buffers.storages import TensorStorage, _collate_id
|
||||
from torchrl.data.replay_buffers.writers import ImmutableDatasetWriter, Writer
|
||||
from torchrl.envs.transforms.transforms import Compose
|
||||
|
||||
|
||||
class AbstractExperienceReplay(TensorDictReplayBuffer):
|
||||
def __init__(
|
||||
self,
|
||||
dataset_id: str,
|
||||
batch_size: int = None,
|
||||
*,
|
||||
shuffle: bool = True,
|
||||
root: Path | None = None,
|
||||
pin_memory: bool = False,
|
||||
prefetch: int = None,
|
||||
sampler: SliceSampler = None,
|
||||
collate_fn: Callable = None,
|
||||
writer: Writer = None,
|
||||
transform: "torchrl.envs.Transform" = None,
|
||||
):
|
||||
self.dataset_id = dataset_id
|
||||
self.shuffle = shuffle
|
||||
self.root = root
|
||||
storage = self._download_or_load_dataset()
|
||||
|
||||
super().__init__(
|
||||
storage=storage,
|
||||
sampler=sampler,
|
||||
writer=ImmutableDatasetWriter() if writer is None else writer,
|
||||
collate_fn=_collate_id if collate_fn is None else collate_fn,
|
||||
pin_memory=pin_memory,
|
||||
prefetch=prefetch,
|
||||
batch_size=batch_size,
|
||||
transform=transform,
|
||||
)
|
||||
|
||||
@property
|
||||
def stats_patterns(self) -> dict:
|
||||
return {
|
||||
("observation", "state"): "b c -> 1 c",
|
||||
("observation", "image"): "b c h w -> 1 c 1 1",
|
||||
("action",): "b c -> 1 c",
|
||||
}
|
||||
|
||||
@property
|
||||
def image_keys(self) -> list:
|
||||
return [("observation", "image")]
|
||||
|
||||
@property
|
||||
def num_cameras(self) -> int:
|
||||
return len(self.image_keys)
|
||||
|
||||
@property
|
||||
def num_samples(self) -> int:
|
||||
return len(self)
|
||||
|
||||
@property
|
||||
def num_episodes(self) -> int:
|
||||
return len(self._storage._storage["episode"].unique())
|
||||
|
||||
@property
|
||||
def transform(self):
|
||||
return self._transform
|
||||
|
||||
def set_transform(self, transform):
|
||||
if not isinstance(transform, Compose):
|
||||
# required since torchrl calls `len(self._transform)` downstream
|
||||
if isinstance(transform, list):
|
||||
self._transform = Compose(*transform)
|
||||
else:
|
||||
self._transform = Compose(transform)
|
||||
else:
|
||||
self._transform = transform
|
||||
|
||||
def compute_or_load_stats(self, num_batch=100, batch_size=32) -> TensorDict:
|
||||
stats_path = Path(self.data_dir) / "stats.pth"
|
||||
if stats_path.exists():
|
||||
stats = torch.load(stats_path)
|
||||
else:
|
||||
logging.info(f"compute_stats and save to {stats_path}")
|
||||
stats = self._compute_stats(num_batch, batch_size)
|
||||
torch.save(stats, stats_path)
|
||||
return stats
|
||||
|
||||
def _download_or_load_dataset(self) -> torch.StorageBase:
|
||||
if self.root is None:
|
||||
self.data_dir = snapshot_download(repo_id=f"cadene/{self.dataset_id}", repo_type="dataset")
|
||||
else:
|
||||
self.data_dir = self.root / self.dataset_id
|
||||
return TensorStorage(TensorDict.load_memmap(self.data_dir))
|
||||
|
||||
def _compute_stats(self, num_batch=100, batch_size=32):
|
||||
rb = TensorDictReplayBuffer(
|
||||
storage=self._storage,
|
||||
batch_size=batch_size,
|
||||
prefetch=True,
|
||||
)
|
||||
|
||||
mean, std, max, min = {}, {}, {}, {}
|
||||
|
||||
# compute mean, min, max
|
||||
for _ in tqdm.tqdm(range(num_batch)):
|
||||
batch = rb.sample()
|
||||
for key, pattern in self.stats_patterns.items():
|
||||
batch[key] = batch[key].float()
|
||||
if key not in mean:
|
||||
# first batch initialize mean, min, max
|
||||
mean[key] = einops.reduce(batch[key], pattern, "mean")
|
||||
max[key] = einops.reduce(batch[key], pattern, "max")
|
||||
min[key] = einops.reduce(batch[key], pattern, "min")
|
||||
else:
|
||||
mean[key] += einops.reduce(batch[key], pattern, "mean")
|
||||
max[key] = torch.maximum(max[key], einops.reduce(batch[key], pattern, "max"))
|
||||
min[key] = torch.minimum(min[key], einops.reduce(batch[key], pattern, "min"))
|
||||
batch = rb.sample()
|
||||
|
||||
for key in self.stats_patterns:
|
||||
mean[key] /= num_batch
|
||||
|
||||
# compute std, min, max
|
||||
for _ in tqdm.tqdm(range(num_batch)):
|
||||
batch = rb.sample()
|
||||
for key, pattern in self.stats_patterns.items():
|
||||
batch[key] = batch[key].float()
|
||||
batch_mean = einops.reduce(batch[key], pattern, "mean")
|
||||
if key not in std:
|
||||
# first batch initialize std
|
||||
std[key] = (batch_mean - mean[key]) ** 2
|
||||
else:
|
||||
std[key] += (batch_mean - mean[key]) ** 2
|
||||
max[key] = torch.maximum(max[key], einops.reduce(batch[key], pattern, "max"))
|
||||
min[key] = torch.minimum(min[key], einops.reduce(batch[key], pattern, "min"))
|
||||
|
||||
for key in self.stats_patterns:
|
||||
std[key] = torch.sqrt(std[key] / num_batch)
|
||||
|
||||
stats = TensorDict({}, batch_size=[])
|
||||
for key in self.stats_patterns:
|
||||
stats[(*key, "mean")] = mean[key]
|
||||
stats[(*key, "std")] = std[key]
|
||||
stats[(*key, "max")] = max[key]
|
||||
stats[(*key, "min")] = min[key]
|
||||
|
||||
if key[0] == "observation":
|
||||
# use same stats for the next observations
|
||||
stats[("next", *key)] = stats[key]
|
||||
return stats
|
||||
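The `stats_patterns` property above tells `_compute_stats` how to reduce each sampled batch; here is a standalone sketch of what those einops patterns actually do, with made-up tensor shapes for illustration.

import einops
import torch

batch_images = torch.rand(8, 3, 64, 64)  # b c h w
per_channel_mean = einops.reduce(batch_images, "b c h w -> 1 c 1 1", "mean")
per_channel_max = einops.reduce(batch_images, "b c h w -> 1 c 1 1", "max")
print(per_channel_mean.shape)  # torch.Size([1, 3, 1, 1])

batch_states = torch.rand(8, 6)  # b c
state_mean = einops.reduce(batch_states, "b c -> 1 c", "mean")
print(state_mean.shape)  # torch.Size([1, 6])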
183
lerobot/common/datasets/aloha.py
Normal file
@@ -0,0 +1,183 @@
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Callable
|
||||
|
||||
import einops
|
||||
import gdown
|
||||
import h5py
|
||||
import torch
|
||||
import torchrl
|
||||
import tqdm
|
||||
from tensordict import TensorDict
|
||||
from torchrl.data.replay_buffers.samplers import SliceSampler
|
||||
from torchrl.data.replay_buffers.storages import TensorStorage
|
||||
from torchrl.data.replay_buffers.writers import Writer
|
||||
|
||||
from lerobot.common.datasets.abstract import AbstractExperienceReplay
|
||||
|
||||
DATASET_IDS = [
|
||||
"aloha_sim_insertion_human",
|
||||
"aloha_sim_insertion_scripted",
|
||||
"aloha_sim_transfer_cube_human",
|
||||
"aloha_sim_transfer_cube_scripted",
|
||||
]
|
||||
|
||||
FOLDER_URLS = {
|
||||
"aloha_sim_insertion_human": "https://drive.google.com/drive/folders/1RgyD0JgTX30H4IM5XZn8I3zSV_mr8pyF",
|
||||
"aloha_sim_insertion_scripted": "https://drive.google.com/drive/folders/1TsojQQSXtHEoGnqgJ3gmpPQR2DPLtS2N",
|
||||
"aloha_sim_transfer_cube_human": "https://drive.google.com/drive/folders/1sc-E4QYW7A0o23m1u2VWNGVq5smAsfCo",
|
||||
"aloha_sim_transfer_cube_scripted": "https://drive.google.com/drive/folders/1aRyoOhQwxhyt1J8XgEig4s6kzaw__LXj",
|
||||
}
|
||||
|
||||
EP48_URLS = {
|
||||
"aloha_sim_insertion_human": "https://drive.google.com/file/d/18Cudl6nikDtgRolea7je8iF_gGKzynOP/view?usp=drive_link",
|
||||
"aloha_sim_insertion_scripted": "https://drive.google.com/file/d/1wfMSZ24oOh5KR_0aaP3Cnu_c4ZCveduB/view?usp=drive_link",
|
||||
"aloha_sim_transfer_cube_human": "https://drive.google.com/file/d/18smMymtr8tIxaNUQ61gW6dG50pt3MvGq/view?usp=drive_link",
|
||||
"aloha_sim_transfer_cube_scripted": "https://drive.google.com/file/d/1pnGIOd-E4-rhz2P3VxpknMKRZCoKt6eI/view?usp=drive_link",
|
||||
}
|
||||
|
||||
EP49_URLS = {
|
||||
"aloha_sim_insertion_human": "https://drive.google.com/file/d/1C1kZYyROzs-PrLc0SkDgUgMi4-L3lauE/view?usp=drive_link",
|
||||
"aloha_sim_insertion_scripted": "https://drive.google.com/file/d/17EuCUWS6uCCr6yyNzpXdcdE-_TTNCKtf/view?usp=drive_link",
|
||||
"aloha_sim_transfer_cube_human": "https://drive.google.com/file/d/1Nk7l53d9sJoGDBKAOnNrExX5nLacATc6/view?usp=drive_link",
|
||||
"aloha_sim_transfer_cube_scripted": "https://drive.google.com/file/d/1GKReZHrXU73NMiC5zKCq_UtqPVtYq8eo/view?usp=drive_link",
|
||||
}
|
||||
|
||||
NUM_EPISODES = {
|
||||
"aloha_sim_insertion_human": 50,
|
||||
"aloha_sim_insertion_scripted": 50,
|
||||
"aloha_sim_transfer_cube_human": 50,
|
||||
"aloha_sim_transfer_cube_scripted": 50,
|
||||
}
|
||||
|
||||
EPISODE_LEN = {
|
||||
"aloha_sim_insertion_human": 500,
|
||||
"aloha_sim_insertion_scripted": 400,
|
||||
"aloha_sim_transfer_cube_human": 400,
|
||||
"aloha_sim_transfer_cube_scripted": 400,
|
||||
}
|
||||
|
||||
CAMERAS = {
|
||||
"aloha_sim_insertion_human": ["top"],
|
||||
"aloha_sim_insertion_scripted": ["top"],
|
||||
"aloha_sim_transfer_cube_human": ["top"],
|
||||
"aloha_sim_transfer_cube_scripted": ["top"],
|
||||
}
|
||||
|
||||
|
||||
def download(data_dir, dataset_id):
|
||||
assert dataset_id in DATASET_IDS
|
||||
assert dataset_id in FOLDER_URLS
|
||||
assert dataset_id in EP48_URLS
|
||||
assert dataset_id in EP49_URLS
|
||||
|
||||
data_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
gdown.download_folder(FOLDER_URLS[dataset_id], output=str(data_dir))
|
||||
|
||||
# because of the 50-file limit per directory, two files (episodes 48 and 49) were missing
|
||||
gdown.download(EP48_URLS[dataset_id], output=str(data_dir / "episode_48.hdf5"), fuzzy=True)
|
||||
gdown.download(EP49_URLS[dataset_id], output=str(data_dir / "episode_49.hdf5"), fuzzy=True)
|
||||
|
||||
|
||||
class AlohaExperienceReplay(AbstractExperienceReplay):
|
||||
def __init__(
|
||||
self,
|
||||
dataset_id: str,
|
||||
batch_size: int = None,
|
||||
*,
|
||||
shuffle: bool = True,
|
||||
root: Path | None = None,
|
||||
pin_memory: bool = False,
|
||||
prefetch: int = None,
|
||||
sampler: SliceSampler = None,
|
||||
collate_fn: Callable = None,
|
||||
writer: Writer = None,
|
||||
transform: "torchrl.envs.Transform" = None,
|
||||
):
|
||||
assert dataset_id in DATASET_IDS
|
||||
|
||||
super().__init__(
|
||||
dataset_id,
|
||||
batch_size,
|
||||
shuffle=shuffle,
|
||||
root=root,
|
||||
pin_memory=pin_memory,
|
||||
prefetch=prefetch,
|
||||
sampler=sampler,
|
||||
collate_fn=collate_fn,
|
||||
writer=writer,
|
||||
transform=transform,
|
||||
)
|
||||
|
||||
@property
|
||||
def stats_patterns(self) -> dict:
|
||||
d = {
|
||||
("observation", "state"): "b c -> 1 c",
|
||||
("action",): "b c -> 1 c",
|
||||
}
|
||||
for cam in CAMERAS[self.dataset_id]:
|
||||
d[("observation", "image", cam)] = "b c h w -> 1 c 1 1"
|
||||
return d
|
||||
|
||||
@property
|
||||
def image_keys(self) -> list:
|
||||
return [("observation", "image", cam) for cam in CAMERAS[self.dataset_id]]
|
||||
|
||||
def _download_and_preproc_obsolete(self):
|
||||
assert self.root is not None
|
||||
raw_dir = self.root / f"{self.dataset_id}_raw"
|
||||
if not raw_dir.is_dir():
|
||||
download(raw_dir, self.dataset_id)
|
||||
|
||||
total_num_frames = 0
|
||||
logging.info("Compute total number of frames to initialize offline buffer")
|
||||
for ep_id in range(NUM_EPISODES[self.dataset_id]):
|
||||
ep_path = raw_dir / f"episode_{ep_id}.hdf5"
|
||||
with h5py.File(ep_path, "r") as ep:
|
||||
total_num_frames += ep["/action"].shape[0] - 1
|
||||
logging.info(f"{total_num_frames=}")
|
||||
|
||||
logging.info("Initialize and feed offline buffer")
|
||||
idxtd = 0
|
||||
for ep_id in tqdm.tqdm(range(NUM_EPISODES[self.dataset_id])):
|
||||
ep_path = raw_dir / f"episode_{ep_id}.hdf5"
|
||||
with h5py.File(ep_path, "r") as ep:
|
||||
ep_num_frames = ep["/action"].shape[0]
|
||||
|
||||
# last step of demonstration is considered done
|
||||
done = torch.zeros(ep_num_frames, 1, dtype=torch.bool)
|
||||
done[-1] = True
|
||||
|
||||
state = torch.from_numpy(ep["/observations/qpos"][:])
|
||||
action = torch.from_numpy(ep["/action"][:])
|
||||
|
||||
ep_td = TensorDict(
|
||||
{
|
||||
("observation", "state"): state[:-1],
|
||||
"action": action[:-1],
|
||||
"episode": torch.tensor([ep_id] * (ep_num_frames - 1)),
|
||||
"frame_id": torch.arange(0, ep_num_frames - 1, 1),
|
||||
("next", "observation", "state"): state[1:],
|
||||
# TODO: compute reward and success
|
||||
# ("next", "reward"): reward[1:],
|
||||
("next", "done"): done[1:],
|
||||
# ("next", "success"): success[1:],
|
||||
},
|
||||
batch_size=ep_num_frames - 1,
|
||||
)
|
||||
|
||||
for cam in CAMERAS[self.dataset_id]:
|
||||
image = torch.from_numpy(ep[f"/observations/images/{cam}"][:])
|
||||
image = einops.rearrange(image, "b h w c -> b c h w").contiguous()
|
||||
ep_td["observation", "image", cam] = image[:-1]
|
||||
ep_td["next", "observation", "image", cam] = image[1:]
|
||||
|
||||
if ep_id == 0:
|
||||
# hack to initialize tensordict data structure to store episodes
|
||||
td_data = ep_td[0].expand(total_num_frames).memmap_like(self.root / f"{self.dataset_id}")
|
||||
|
||||
td_data[idxtd : idxtd + len(ep_td)] = ep_td
|
||||
idxtd = idxtd + len(ep_td)
|
||||
|
||||
return TensorStorage(td_data.lock_())
|
||||
@@ -1,213 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from copy import deepcopy
|
||||
from math import ceil
|
||||
|
||||
import einops
|
||||
import torch
|
||||
import tqdm
|
||||
from datasets import Image
|
||||
|
||||
from lerobot.common.datasets.video_utils import VideoFrame
|
||||
|
||||
|
||||
def get_stats_einops_patterns(dataset, num_workers=0):
|
||||
"""These einops patterns will be used to aggregate batches and compute statistics.
|
||||
|
||||
Note: We assume the images are in channel first format
|
||||
"""
|
||||
|
||||
dataloader = torch.utils.data.DataLoader(
|
||||
dataset,
|
||||
num_workers=num_workers,
|
||||
batch_size=2,
|
||||
shuffle=False,
|
||||
)
|
||||
batch = next(iter(dataloader))
|
||||
|
||||
stats_patterns = {}
|
||||
for key, feats_type in dataset.features.items():
|
||||
# NOTE: skip language_instruction embedding in stats computation
|
||||
if key == "language_instruction":
|
||||
continue
|
||||
|
||||
# sanity check that tensors are not float64
|
||||
assert batch[key].dtype != torch.float64
|
||||
|
||||
if isinstance(feats_type, (VideoFrame, Image)):
|
||||
# sanity check that images are channel first
|
||||
_, c, h, w = batch[key].shape
|
||||
assert c < h and c < w, f"expect channel first images, but instead {batch[key].shape}"
|
||||
|
||||
# sanity check that images are float32 in range [0,1]
|
||||
assert batch[key].dtype == torch.float32, f"expect torch.float32, but instead {batch[key].dtype=}"
|
||||
assert batch[key].max() <= 1, f"expect pixels lower than 1, but instead {batch[key].max()=}"
|
||||
assert batch[key].min() >= 0, f"expect pixels greater than or equal to 0, but instead {batch[key].min()=}"
|
||||
|
||||
stats_patterns[key] = "b c h w -> c 1 1"
|
||||
elif batch[key].ndim == 2:
|
||||
stats_patterns[key] = "b c -> c "
|
||||
elif batch[key].ndim == 1:
|
||||
stats_patterns[key] = "b -> 1"
|
||||
else:
|
||||
raise ValueError(f"{key}, {feats_type}, {batch[key].shape}")
|
||||
|
||||
return stats_patterns
|
||||
|
||||
|
||||
def compute_stats(dataset, batch_size=8, num_workers=8, max_num_samples=None):
|
||||
"""Compute mean/std and min/max statistics of all data keys in a LeRobotDataset."""
|
||||
if max_num_samples is None:
|
||||
max_num_samples = len(dataset)
|
||||
|
||||
# for more info on why we need to set the same number of workers, see `load_from_videos`
|
||||
stats_patterns = get_stats_einops_patterns(dataset, num_workers)
|
||||
|
||||
# mean and std will be computed incrementally while max and min will track the running value.
|
||||
mean, std, max, min = {}, {}, {}, {}
|
||||
for key in stats_patterns:
|
||||
mean[key] = torch.tensor(0.0).float()
|
||||
std[key] = torch.tensor(0.0).float()
|
||||
max[key] = torch.tensor(-float("inf")).float()
|
||||
min[key] = torch.tensor(float("inf")).float()
|
||||
|
||||
def create_seeded_dataloader(dataset, batch_size, seed):
|
||||
generator = torch.Generator()
|
||||
generator.manual_seed(seed)
|
||||
dataloader = torch.utils.data.DataLoader(
|
||||
dataset,
|
||||
num_workers=num_workers,
|
||||
batch_size=batch_size,
|
||||
shuffle=True,
|
||||
drop_last=False,
|
||||
generator=generator,
|
||||
)
|
||||
return dataloader
|
||||
|
||||
# Note: Due to be refactored soon. The point of storing `first_batch` is to make sure we don't get
|
||||
# surprises when rerunning the sampler.
|
||||
first_batch = None
|
||||
running_item_count = 0 # for online mean computation
|
||||
dataloader = create_seeded_dataloader(dataset, batch_size, seed=1337)
|
||||
for i, batch in enumerate(
|
||||
tqdm.tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc="Compute mean, min, max")
|
||||
):
|
||||
this_batch_size = len(batch["index"])
|
||||
running_item_count += this_batch_size
|
||||
if first_batch is None:
|
||||
first_batch = deepcopy(batch)
|
||||
for key, pattern in stats_patterns.items():
|
||||
batch[key] = batch[key].float()
|
||||
# Numerically stable update step for mean computation.
|
||||
batch_mean = einops.reduce(batch[key], pattern, "mean")
|
||||
# Hint: to update the mean we need x̄ₙ = (Nₙ₋₁x̄ₙ₋₁ + Bₙxₙ) / Nₙ, where the subscript represents
|
||||
# the update step, N is the running item count, B is this batch size, x̄ is the running mean,
|
||||
# and x is the current batch mean. Some rearrangement is then required to avoid risking
|
||||
# numerical overflow. Another hint: Nₙ₋₁ = Nₙ - Bₙ. Rearrangement yields
|
||||
# x̄ₙ = x̄ₙ₋₁ + Bₙ * (xₙ - x̄ₙ₋₁) / Nₙ
|
||||
mean[key] = mean[key] + this_batch_size * (batch_mean - mean[key]) / running_item_count
|
||||
max[key] = torch.maximum(max[key], einops.reduce(batch[key], pattern, "max"))
|
||||
min[key] = torch.minimum(min[key], einops.reduce(batch[key], pattern, "min"))
|
||||
|
||||
if i == ceil(max_num_samples / batch_size) - 1:
|
||||
break
|
||||
|
||||
first_batch_ = None
|
||||
running_item_count = 0 # for online std computation
|
||||
dataloader = create_seeded_dataloader(dataset, batch_size, seed=1337)
|
||||
for i, batch in enumerate(
|
||||
tqdm.tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc="Compute std")
|
||||
):
|
||||
this_batch_size = len(batch["index"])
|
||||
running_item_count += this_batch_size
|
||||
# Sanity check to make sure the batches are still in the same order as before.
|
||||
if first_batch_ is None:
|
||||
first_batch_ = deepcopy(batch)
|
||||
for key in stats_patterns:
|
||||
assert torch.equal(first_batch_[key], first_batch[key])
|
||||
for key, pattern in stats_patterns.items():
|
||||
batch[key] = batch[key].float()
|
||||
# Numerically stable update step for mean computation (where the mean is over squared
|
||||
# residuals). See notes in the mean computation loop above.
|
||||
batch_std = einops.reduce((batch[key] - mean[key]) ** 2, pattern, "mean")
|
||||
std[key] = std[key] + this_batch_size * (batch_std - std[key]) / running_item_count
|
||||
|
||||
if i == ceil(max_num_samples / batch_size) - 1:
|
||||
break
|
||||
|
||||
for key in stats_patterns:
|
||||
std[key] = torch.sqrt(std[key])
|
||||
|
||||
stats = {}
|
||||
for key in stats_patterns:
|
||||
stats[key] = {
|
||||
"mean": mean[key],
|
||||
"std": std[key],
|
||||
"max": max[key],
|
||||
"min": min[key],
|
||||
}
|
||||
return stats
|
||||
|
||||
|
||||
def aggregate_stats(ls_datasets) -> dict[str, torch.Tensor]:
|
||||
"""Aggregate stats of multiple LeRobot datasets into one set of stats without recomputing from scratch.
|
||||
|
||||
The final stats will have the union of all data keys from each of the datasets. For instance:
|
||||
- new_max = max(max_dataset_0, max_dataset_1, ...)
|
||||
- new_min = min(min_dataset_0, min_dataset_1, ...)
|
||||
- new_mean = (mean of all data)
|
||||
- new_std = (std of all data)
|
||||
"""
|
||||
data_keys = set()
|
||||
for dataset in ls_datasets:
|
||||
data_keys.update(dataset.stats.keys())
|
||||
stats = {k: {} for k in data_keys}
|
||||
for data_key in data_keys:
|
||||
for stat_key in ["min", "max"]:
|
||||
# compute `max(dataset_0["max"], dataset_1["max"], ...)`
|
||||
stats[data_key][stat_key] = einops.reduce(
|
||||
torch.stack([d.stats[data_key][stat_key] for d in ls_datasets if data_key in d.stats], dim=0),
|
||||
"n ... -> ...",
|
||||
stat_key,
|
||||
)
|
||||
total_samples = sum(d.num_samples for d in ls_datasets if data_key in d.stats)
|
||||
# Compute the "sum" statistic by multiplying each mean by the number of samples in the respective
|
||||
# dataset, then divide by total_samples to get the overall "mean".
|
||||
# NOTE: the brackets around (d.num_samples / total_samples) are needed to minimize the risk of
|
||||
# numerical overflow!
|
||||
stats[data_key]["mean"] = sum(
|
||||
d.stats[data_key]["mean"] * (d.num_samples / total_samples)
|
||||
for d in ls_datasets
|
||||
if data_key in d.stats
|
||||
)
|
||||
# The derivation for standard deviation is a little more involved but is much in the same spirit as
|
||||
# the computation of the mean.
|
||||
# Given two sets of data where the statistics are known:
|
||||
# σ_combined = sqrt[ (n1 * (σ1^2 + d1^2) + n2 * (σ2^2 + d2^2)) / (n1 + n2) ]
|
||||
# where d1 = μ1 - μ_combined, d2 = μ2 - μ_combined
|
||||
# NOTE: the brackets around (d.num_samples / total_samples) are needed to minimize the risk of
|
||||
# numerical overflow!
|
||||
stats[data_key]["std"] = torch.sqrt(
|
||||
sum(
|
||||
(d.stats[data_key]["std"] ** 2 + (d.stats[data_key]["mean"] - stats[data_key]["mean"]) ** 2)
|
||||
* (d.num_samples / total_samples)
|
||||
for d in ls_datasets
|
||||
if data_key in d.stats
|
||||
)
|
||||
)
|
||||
return stats
|
||||
@@ -1,117 +1,131 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
from omegaconf import ListConfig, OmegaConf
|
||||
from torchrl.data.replay_buffers import PrioritizedSliceSampler, SliceSampler
|
||||
|
||||
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, MultiLeRobotDataset
|
||||
from lerobot.common.datasets.transforms import get_image_transforms
|
||||
from lerobot.common.envs.transforms import NormalizeTransform, Prod
|
||||
|
||||
# DATA_DIR specifies to location where datasets are loaded. By default, DATA_DIR is None and
|
||||
# we load from `$HOME/.cache/huggingface/hub/datasets`. For our unit tests, we set `DATA_DIR=tests/data`
|
||||
# to load a subset of our datasets for faster continuous integration.
|
||||
DATA_DIR = Path(os.environ["DATA_DIR"]) if "DATA_DIR" in os.environ else None
|
||||
|
||||
|
||||
def resolve_delta_timestamps(cfg):
|
||||
"""Resolves delta_timestamps config key (in-place) by using `eval`.
|
||||
|
||||
Doesn't do anything if delta_timestamps is not specified or has already been resolved (as evidenced by
|
||||
the data type of its values).
|
||||
"""
|
||||
delta_timestamps = cfg.training.get("delta_timestamps")
|
||||
if delta_timestamps is not None:
|
||||
for key in delta_timestamps:
|
||||
if isinstance(delta_timestamps[key], str):
|
||||
# TODO(rcadene, alexander-soare): remove `eval` to avoid exploit
|
||||
cfg.training.delta_timestamps[key] = eval(delta_timestamps[key])
|
||||
|
||||
|
||||
def make_dataset(cfg, split: str = "train") -> LeRobotDataset | MultiLeRobotDataset:
|
||||
"""
|
||||
Args:
|
||||
cfg: A Hydra config as per the LeRobot config scheme.
|
||||
split: Select the data subset used to create an instance of LeRobotDataset.
|
||||
All datasets hosted on [lerobot](https://huggingface.co/lerobot) contain only one subset: "train".
|
||||
Thus, by default, `split="train"` selects all the available data. `split` aims to work like the
|
||||
slicer in the hugging face datasets:
|
||||
https://huggingface.co/docs/datasets/v2.19.0/loading#slice-splits
|
||||
As of now, it only supports `split="train[:n]"` to load the first n frames of the dataset or
|
||||
`split="train[n:]"` to load the last n frames. For instance `split="train[:1000]"`.
|
||||
Returns:
|
||||
The LeRobotDataset.
|
||||
"""
|
||||
if not isinstance(cfg.dataset_repo_id, (str, ListConfig)):
|
||||
raise ValueError(
|
||||
"Expected cfg.dataset_repo_id to be either a single string to load one dataset or a list of "
|
||||
"strings to load multiple datasets."
|
||||
)
|
||||
|
||||
# A soft check to warn if the environment doesn't match the dataset. Don't check if we are using a real world env (dora).
|
||||
if cfg.env.name != "dora":
|
||||
if isinstance(cfg.dataset_repo_id, str):
|
||||
dataset_repo_ids = [cfg.dataset_repo_id] # single dataset
|
||||
else:
|
||||
dataset_repo_ids = cfg.dataset_repo_id # multiple datasets
|
||||
|
||||
for dataset_repo_id in dataset_repo_ids:
|
||||
if cfg.env.name not in dataset_repo_id:
|
||||
logging.warning(
|
||||
f"There might be a mismatch between your training dataset ({dataset_repo_id=}) and your "
|
||||
f"environment ({cfg.env.name=})."
|
||||
)
|
||||
|
||||
resolve_delta_timestamps(cfg)
|
||||
|
||||
image_transforms = None
|
||||
if cfg.training.image_transforms.enable:
|
||||
cfg_tf = cfg.training.image_transforms
|
||||
image_transforms = get_image_transforms(
|
||||
brightness_weight=cfg_tf.brightness.weight,
|
||||
brightness_min_max=cfg_tf.brightness.min_max,
|
||||
contrast_weight=cfg_tf.contrast.weight,
|
||||
contrast_min_max=cfg_tf.contrast.min_max,
|
||||
saturation_weight=cfg_tf.saturation.weight,
|
||||
saturation_min_max=cfg_tf.saturation.min_max,
|
||||
hue_weight=cfg_tf.hue.weight,
|
||||
hue_min_max=cfg_tf.hue.min_max,
|
||||
sharpness_weight=cfg_tf.sharpness.weight,
|
||||
sharpness_min_max=cfg_tf.sharpness.min_max,
|
||||
max_num_transforms=cfg_tf.max_num_transforms,
|
||||
random_order=cfg_tf.random_order,
|
||||
)
|
||||
|
||||
if isinstance(cfg.dataset_repo_id, str):
|
||||
dataset = LeRobotDataset(
|
||||
cfg.dataset_repo_id,
|
||||
split=split,
|
||||
delta_timestamps=cfg.training.get("delta_timestamps"),
|
||||
image_transforms=image_transforms,
|
||||
video_backend=cfg.video_backend,
|
||||
)
|
||||
def make_offline_buffer(
|
||||
cfg, overwrite_sampler=None, normalize=True, overwrite_batch_size=None, overwrite_prefetch=None
|
||||
):
|
||||
if cfg.policy.balanced_sampling:
|
||||
assert cfg.online_steps > 0
|
||||
batch_size = None
|
||||
pin_memory = False
|
||||
prefetch = None
|
||||
else:
|
||||
dataset = MultiLeRobotDataset(
|
||||
cfg.dataset_repo_id,
|
||||
split=split,
|
||||
delta_timestamps=cfg.training.get("delta_timestamps"),
|
||||
image_transforms=image_transforms,
|
||||
video_backend=cfg.video_backend,
|
||||
)
|
||||
assert cfg.online_steps == 0
|
||||
num_slices = cfg.policy.batch_size
|
||||
batch_size = cfg.policy.horizon * num_slices
|
||||
pin_memory = cfg.device == "cuda"
|
||||
prefetch = cfg.prefetch
|
||||
|
||||
if cfg.get("override_dataset_stats"):
|
||||
for key, stats_dict in cfg.override_dataset_stats.items():
|
||||
for stats_type, listconfig in stats_dict.items():
|
||||
# example of stats_type: min, max, mean, std
|
||||
stats = OmegaConf.to_container(listconfig, resolve=True)
|
||||
dataset.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32)
|
||||
if overwrite_batch_size is not None:
|
||||
batch_size = overwrite_batch_size
|
||||
|
||||
return dataset
|
||||
if overwrite_prefetch is not None:
|
||||
prefetch = overwrite_prefetch
|
||||
|
||||
if overwrite_sampler is None:
|
||||
# TODO(rcadene): move batch_size outside
|
||||
num_traj_per_batch = cfg.policy.batch_size # // cfg.horizon
|
||||
# TODO(rcadene): Sampler outputs a batch_size <= cfg.batch_size.
|
||||
# We would need to add a transform to pad the tensordict to ensure batch_size == cfg.batch_size.
|
||||
|
||||
if cfg.offline_prioritized_sampler:
|
||||
logging.info("use prioritized sampler for offline dataset")
|
||||
sampler = PrioritizedSliceSampler(
|
||||
max_capacity=100_000,
|
||||
alpha=cfg.policy.per_alpha,
|
||||
beta=cfg.policy.per_beta,
|
||||
num_slices=num_traj_per_batch,
|
||||
strict_length=False,
|
||||
)
|
||||
else:
|
||||
logging.info("use simple sampler for offline dataset")
|
||||
sampler = SliceSampler(
|
||||
num_slices=num_traj_per_batch,
|
||||
strict_length=False,
|
||||
)
|
||||
else:
|
||||
sampler = overwrite_sampler
|
||||
|
||||
if cfg.env.name == "simxarm":
|
||||
from lerobot.common.datasets.simxarm import SimxarmExperienceReplay
|
||||
|
||||
clsfunc = SimxarmExperienceReplay
|
||||
dataset_id = f"xarm_{cfg.env.task}_medium"
|
||||
|
||||
elif cfg.env.name == "pusht":
|
||||
from lerobot.common.datasets.pusht import PushtExperienceReplay
|
||||
|
||||
clsfunc = PushtExperienceReplay
|
||||
dataset_id = "pusht"
|
||||
|
||||
elif cfg.env.name == "aloha":
|
||||
from lerobot.common.datasets.aloha import AlohaExperienceReplay
|
||||
|
||||
clsfunc = AlohaExperienceReplay
|
||||
dataset_id = f"aloha_{cfg.env.task}"
|
||||
else:
|
||||
raise ValueError(cfg.env.name)
|
||||
|
||||
offline_buffer = clsfunc(
|
||||
dataset_id=dataset_id,
|
||||
sampler=sampler,
|
||||
batch_size=batch_size,
|
||||
root=DATA_DIR,
|
||||
pin_memory=pin_memory,
|
||||
prefetch=prefetch if isinstance(prefetch, int) else None,
|
||||
)
|
||||
|
||||
if cfg.policy.name == "tdmpc":
|
||||
img_keys = []
|
||||
for key in offline_buffer.image_keys:
|
||||
img_keys.append(("next", *key))
|
||||
img_keys += offline_buffer.image_keys
|
||||
else:
|
||||
img_keys = offline_buffer.image_keys
|
||||
|
||||
transforms = [Prod(in_keys=img_keys, prod=1 / 255)]
|
||||
|
||||
if normalize:
|
||||
# TODO(rcadene): make normalization strategy configurable between mean_std, min_max, manual_min_max, min_max_from_spec
|
||||
stats = offline_buffer.compute_or_load_stats()
|
||||
|
||||
# we only normalize the state and action, since the images are usually normalized inside the model for now (except for tdmpc: see the following)
|
||||
in_keys = [("observation", "state"), ("action")]
|
||||
|
||||
if cfg.policy.name == "tdmpc":
|
||||
# TODO(rcadene): we add img_keys to the keys to normalize for tdmpc only, since diffusion and act policies normalize the image inside the model for now
|
||||
in_keys += img_keys
|
||||
# TODO(rcadene): since we use next observations in tdmpc, we also add them to the normalization. We are wasting a bit of compute on this for now.
|
||||
in_keys += [("next", *key) for key in img_keys]
|
||||
in_keys.append(("next", "observation", "state"))
|
||||
|
||||
if cfg.policy.name == "diffusion" and cfg.env.name == "pusht":
|
||||
# TODO(rcadene): we overwrite stats to have the same as pretrained model, but we should remove this
|
||||
stats["observation", "state", "min"] = torch.tensor([13.456424, 32.938293], dtype=torch.float32)
|
||||
stats["observation", "state", "max"] = torch.tensor([496.14618, 510.9579], dtype=torch.float32)
|
||||
stats["action", "min"] = torch.tensor([12.0, 25.0], dtype=torch.float32)
|
||||
stats["action", "max"] = torch.tensor([511.0, 511.0], dtype=torch.float32)
|
||||
|
||||
# TODO(rcadene): remove this and put it in config. Ideally we want to reproduce SOTA results just with mean_std
|
||||
normalization_mode = "mean_std" if cfg.env.name == "aloha" else "min_max"
|
||||
transforms.append(NormalizeTransform(stats, in_keys, mode=normalization_mode))
|
||||
|
||||
offline_buffer.set_transform(transforms)
|
||||
|
||||
if not overwrite_sampler:
|
||||
index = torch.arange(0, offline_buffer.num_samples, 1)
|
||||
sampler.extend(index)
|
||||
|
||||
return offline_buffer
|
||||
|
||||
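A hedged usage sketch of the split slicing described in the `make_dataset` docstring above; `lerobot/pusht` is one of the repo ids listed earlier in this diff, and the printed frame counts depend on what the dataset actually contains.

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

full = LeRobotDataset("lerobot/pusht", split="train")         # all frames
head = LeRobotDataset("lerobot/pusht", split="train[:1000]")  # first 1000 frames
tail = LeRobotDataset("lerobot/pusht", split="train[1000:]")  # frames from index 1000 onward
print(len(full), len(head), len(tail))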
@@ -1,401 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Callable
|
||||
|
||||
import datasets
|
||||
import torch
|
||||
import torch.utils
|
||||
|
||||
from lerobot.common.datasets.compute_stats import aggregate_stats
|
||||
from lerobot.common.datasets.utils import (
|
||||
calculate_episode_data_index,
|
||||
load_episode_data_index,
|
||||
load_hf_dataset,
|
||||
load_info,
|
||||
load_previous_and_future_frames,
|
||||
load_stats,
|
||||
load_videos,
|
||||
reset_episode_index,
|
||||
)
|
||||
from lerobot.common.datasets.video_utils import VideoFrame, load_from_videos
|
||||
|
||||
# For maintainers, see lerobot/common/datasets/push_dataset_to_hub/CODEBASE_VERSION.md
|
||||
CODEBASE_VERSION = "v1.6"
|
||||
DATA_DIR = Path(os.environ["DATA_DIR"]) if "DATA_DIR" in os.environ else None
|
||||
|
||||
|
||||
class LeRobotDataset(torch.utils.data.Dataset):
|
||||
def __init__(
|
||||
self,
|
||||
repo_id: str,
|
||||
root: Path | None = DATA_DIR,
|
||||
split: str = "train",
|
||||
image_transforms: Callable | None = None,
|
||||
delta_timestamps: dict[list[float]] | None = None,
|
||||
video_backend: str | None = None,
|
||||
):
|
||||
super().__init__()
|
||||
self.repo_id = repo_id
|
||||
self.root = root
|
||||
self.split = split
|
||||
self.image_transforms = image_transforms
|
||||
self.delta_timestamps = delta_timestamps
|
||||
# load data from hub or locally when root is provided
|
||||
# TODO(rcadene, aliberts): implement faster transfer
|
||||
# https://huggingface.co/docs/huggingface_hub/en/guides/download#faster-downloads
|
||||
self.hf_dataset = load_hf_dataset(repo_id, CODEBASE_VERSION, root, split)
|
||||
if split == "train":
|
||||
self.episode_data_index = load_episode_data_index(repo_id, CODEBASE_VERSION, root)
|
||||
else:
|
||||
self.episode_data_index = calculate_episode_data_index(self.hf_dataset)
|
||||
self.hf_dataset = reset_episode_index(self.hf_dataset)
|
||||
self.stats = load_stats(repo_id, CODEBASE_VERSION, root)
|
||||
self.info = load_info(repo_id, CODEBASE_VERSION, root)
|
||||
if self.video:
|
||||
self.videos_dir = load_videos(repo_id, CODEBASE_VERSION, root)
|
||||
self.video_backend = video_backend if video_backend is not None else "pyav"
|
||||
|
||||
@property
|
||||
def fps(self) -> int:
|
||||
"""Frames per second used during data collection."""
|
||||
return self.info["fps"]
|
||||
|
||||
@property
|
||||
def video(self) -> bool:
|
||||
"""Returns True if this dataset loads video frames from mp4 files.
|
||||
Returns False if it only loads images from png files.
|
||||
"""
|
||||
return self.info.get("video", False)
|
||||
|
||||
@property
|
||||
def features(self) -> datasets.Features:
|
||||
return self.hf_dataset.features
|
||||
|
||||
@property
|
||||
def camera_keys(self) -> list[str]:
|
||||
"""Keys to access image and video stream from cameras."""
|
||||
keys = []
|
||||
for key, feats in self.hf_dataset.features.items():
|
||||
if isinstance(feats, (datasets.Image, VideoFrame)):
|
||||
keys.append(key)
|
||||
return keys
|
||||
|
||||
@property
|
||||
def video_frame_keys(self) -> list[str]:
|
||||
"""Keys to access video frames that requires to be decoded into images.
|
||||
|
||||
Note: It is empty if the dataset contains images only,
|
||||
or equal to `self.cameras` if the dataset contains videos only,
|
||||
or can even be a subset of `self.cameras` in a case of a mixed image/video dataset.
|
||||
"""
|
||||
video_frame_keys = []
|
||||
for key, feats in self.hf_dataset.features.items():
|
||||
if isinstance(feats, VideoFrame):
|
||||
video_frame_keys.append(key)
|
||||
return video_frame_keys
|
||||
|
||||

    @property
    def num_samples(self) -> int:
        """Number of samples/frames."""
        return len(self.hf_dataset)

    @property
    def num_episodes(self) -> int:
        """Number of episodes."""
        return len(self.hf_dataset.unique("episode_index"))

    @property
    def tolerance_s(self) -> float:
        """Tolerance in seconds used to discard loaded frames when their timestamps
        are not close enough to the requested frames. It is only used when `delta_timestamps`
        is provided or when loading video frames from mp4 files.
        """
        # 1e-4 to account for possible numerical error
        return 1 / self.fps - 1e-4
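
    # Worked example (illustrative, not part of the original file): at fps=30,
    # tolerance_s = 1 / 30 - 1e-4 ≈ 0.0332 s, i.e. just under one frame period, so a
    # frame is accepted only if its timestamp lies within one frame of the query time.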

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        item = self.hf_dataset[idx]

        if self.delta_timestamps is not None:
            item = load_previous_and_future_frames(
                item,
                self.hf_dataset,
                self.episode_data_index,
                self.delta_timestamps,
                self.tolerance_s,
            )

        if self.video:
            item = load_from_videos(
                item,
                self.video_frame_keys,
                self.videos_dir,
                self.tolerance_s,
                self.video_backend,
            )

        if self.image_transforms is not None:
            for cam in self.camera_keys:
                item[cam] = self.image_transforms(item[cam])

        return item
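
    # Illustrative sketch (assumed keys, not part of the original file): with
    # delta_timestamps={"action": [0.0, 1 / fps]}, `load_previous_and_future_frames` is
    # expected to return item["action"] stacked along a new leading dimension of size 2
    # (the current and next action), matching each requested timestamp to the closest
    # recorded frame within `tolerance_s`.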

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(\n"
            f" Repository ID: '{self.repo_id}',\n"
            f" Split: '{self.split}',\n"
            f" Number of Samples: {self.num_samples},\n"
            f" Number of Episodes: {self.num_episodes},\n"
            f" Type: {'video (.mp4)' if self.video else 'image (.png)'},\n"
            f" Recorded Frames per Second: {self.fps},\n"
            f" Camera Keys: {self.camera_keys},\n"
            f" Video Frame Keys: {self.video_frame_keys if self.video else 'N/A'},\n"
            f" Transformations: {self.image_transforms},\n"
            f" Codebase Version: {self.info.get('codebase_version', '< v1.6')},\n"
            f")"
        )

    @classmethod
    def from_preloaded(
        cls,
        repo_id: str = "from_preloaded",
        root: Path | None = None,
        split: str = "train",
        transform: Callable | None = None,
        delta_timestamps: dict[str, list[float]] | None = None,
        # additional preloaded attributes
        hf_dataset=None,
        episode_data_index=None,
        stats=None,
        info=None,
        videos_dir=None,
        video_backend=None,
    ) -> "LeRobotDataset":
        """Create a LeRobotDataset from existing data and attributes instead of loading from the filesystem.

        It is especially useful when converting raw data into a LeRobotDataset before saving the dataset
        on the filesystem or uploading it to the hub.

        Note: Meta-data attributes like `repo_id`, `version`, `root`, etc. are optional and potentially
        meaningless depending on the downstream usage of the returned dataset.
        """
        # Create an empty object of type LeRobotDataset.
        obj = cls.__new__(cls)
        obj.repo_id = repo_id
        obj.root = root
        obj.split = split
        obj.image_transforms = transform
        obj.delta_timestamps = delta_timestamps
        obj.hf_dataset = hf_dataset
        obj.episode_data_index = episode_data_index
        obj.stats = stats
        obj.info = info if info is not None else {}
        obj.videos_dir = videos_dir
        obj.video_backend = video_backend if video_backend is not None else "pyav"
        return obj
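
# Usage sketch (illustrative, not part of the original file; the repo_id and keys below are
# assumptions): build a dataset that also returns a short action chunk per frame, then feed
# it to a standard PyTorch DataLoader.
#
#   delta_timestamps = {"action": [t / 30 for t in range(8)]}  # 8 future actions at 30 fps
#   dataset = LeRobotDataset("lerobot/some_dataset", delta_timestamps=delta_timestamps)
#   dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
#   batch = next(iter(dataloader))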


class MultiLeRobotDataset(torch.utils.data.Dataset):
    """A dataset consisting of multiple underlying `LeRobotDataset`s.

    The underlying `LeRobotDataset`s are effectively concatenated, and this class adopts much of the API
    structure of `LeRobotDataset`.
    """

    def __init__(
        self,
        repo_ids: list[str],
        root: Path | None = DATA_DIR,
        split: str = "train",
        image_transforms: Callable | None = None,
        delta_timestamps: dict[str, list[float]] | None = None,
        video_backend: str | None = None,
    ):
        super().__init__()
        self.repo_ids = repo_ids
        # Construct the underlying datasets, passing `image_transforms` and `delta_timestamps` through
        # to each of them.
        self._datasets = [
            LeRobotDataset(
                repo_id,
                root=root,
                split=split,
                delta_timestamps=delta_timestamps,
                image_transforms=image_transforms,
                video_backend=video_backend,
            )
            for repo_id in repo_ids
        ]
        # Check that some properties are consistent across datasets. Note: We may relax some of these
        # consistency requirements in future iterations of this class.
        for repo_id, dataset in zip(self.repo_ids, self._datasets, strict=True):
            if dataset.info != self._datasets[0].info:
                raise ValueError(
                    f"Detected a mismatch in dataset info between {self.repo_ids[0]} and {repo_id}. This is "
                    "not yet supported."
                )
        # Disable any data keys that are not common across all of the datasets. Note: we may relax this
        # restriction in future iterations of this class. For now, this is necessary at least for being able
        # to use PyTorch's default DataLoader collate function.
        self.disabled_data_keys = set()
        intersection_data_keys = set(self._datasets[0].hf_dataset.features)
        for dataset in self._datasets:
            intersection_data_keys.intersection_update(dataset.hf_dataset.features)
        if len(intersection_data_keys) == 0:
            raise RuntimeError(
                "Multiple datasets were provided but they had no keys common to all of them. The "
                "multi-dataset functionality currently only keeps common keys."
            )
        for repo_id, dataset in zip(self.repo_ids, self._datasets, strict=True):
            extra_keys = set(dataset.hf_dataset.features).difference(intersection_data_keys)
            logging.warning(
                f"keys {extra_keys} of {repo_id} were disabled as they are not contained in all the "
                "other datasets."
            )
            self.disabled_data_keys.update(extra_keys)

        self.root = root
        self.split = split
        self.image_transforms = image_transforms
        self.delta_timestamps = delta_timestamps
        self.stats = aggregate_stats(self._datasets)
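
    # Illustrative example (not part of the original file; repo_ids and keys are assumptions):
    # if dataset "user/ds_a" exposes {"action", "observation.state", "observation.images.top"}
    # and "user/ds_b" only exposes {"action", "observation.state"}, then
    # "observation.images.top" ends up in `disabled_data_keys` and is dropped from every item,
    # so the default DataLoader collate function sees a consistent set of keys.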

    @property
    def repo_id_to_index(self):
        """Return a mapping from dataset repo_id to a dataset index automatically created by this class.

        This index is incorporated as a data key in the dictionary returned by `__getitem__`.
        """
        return {repo_id: i for i, repo_id in enumerate(self.repo_ids)}

    @property
    def repo_index_to_id(self):
        """Return the inverse mapping of `repo_id_to_index`."""
        return {v: k for k, v in self.repo_id_to_index.items()}
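
    # Illustrative example (not part of the original file; repo_ids are assumptions): with
    # repo_ids=["user/ds_a", "user/ds_b"], repo_id_to_index == {"user/ds_a": 0, "user/ds_b": 1}
    # and repo_index_to_id == {0: "user/ds_a", 1: "user/ds_b"}.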

    @property
    def fps(self) -> int:
        """Frames per second used during data collection.

        NOTE: For now, this relies on a check in __init__ to make sure all sub-datasets have the same info.
        """
        return self._datasets[0].info["fps"]

    @property
    def video(self) -> bool:
        """Returns True if this dataset loads video frames from mp4 files.

        Returns False if it only loads images from png files.

        NOTE: For now, this relies on a check in __init__ to make sure all sub-datasets have the same info.
        """
        return self._datasets[0].info.get("video", False)

    @property
    def features(self) -> datasets.Features:
        features = {}
        for dataset in self._datasets:
            features.update({k: v for k, v in dataset.features.items() if k not in self.disabled_data_keys})
        return features

    @property
    def camera_keys(self) -> list[str]:
        """Keys to access image and video streams from cameras."""
        keys = []
        for key, feats in self.features.items():
            if isinstance(feats, (datasets.Image, VideoFrame)):
                keys.append(key)
        return keys

    @property
    def video_frame_keys(self) -> list[str]:
        """Keys to access video frames that need to be decoded into images.

        Note: It is empty if the dataset contains images only,
        or equal to `self.camera_keys` if the dataset contains videos only,
        or can even be a subset of `self.camera_keys` in the case of a mixed image/video dataset.
        """
        video_frame_keys = []
        for key, feats in self.features.items():
            if isinstance(feats, VideoFrame):
                video_frame_keys.append(key)
        return video_frame_keys

    @property
    def num_samples(self) -> int:
        """Number of samples/frames."""
        return sum(d.num_samples for d in self._datasets)

    @property
    def num_episodes(self) -> int:
        """Number of episodes."""
        return sum(d.num_episodes for d in self._datasets)

    @property
    def tolerance_s(self) -> float:
        """Tolerance in seconds used to discard loaded frames when their timestamps
        are not close enough to the requested frames. It is only used when `delta_timestamps`
        is provided or when loading video frames from mp4 files.
        """
        # 1e-4 to account for possible numerical error
        return 1 / self.fps - 1e-4

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx: int) -> dict[str, torch.Tensor]:
        if idx >= len(self):
            raise IndexError(f"Index {idx} out of bounds.")
        # Determine which dataset to get an item from based on the index.
        start_idx = 0
        dataset_idx = 0
        for dataset in self._datasets:
            if idx >= start_idx + dataset.num_samples:
                start_idx += dataset.num_samples
                dataset_idx += 1
                continue
            break
        else:
            raise AssertionError("We expect the loop to break out as long as the index is within bounds.")
        item = self._datasets[dataset_idx][idx - start_idx]
        item["dataset_index"] = torch.tensor(dataset_idx)
        for data_key in self.disabled_data_keys:
            if data_key in item:
                del item[data_key]

        return item
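
    # Worked example (illustrative, not part of the original file): with two sub-datasets of
    # 100 and 50 samples, idx=120 skips the first dataset (start_idx becomes 100, dataset_idx 1)
    # and returns self._datasets[1][20], with item["dataset_index"] == tensor(1).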

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(\n"
            f" Repository IDs: '{self.repo_ids}',\n"
            f" Split: '{self.split}',\n"
            f" Number of Samples: {self.num_samples},\n"
            f" Number of Episodes: {self.num_episodes},\n"
            f" Type: {'video (.mp4)' if self.video else 'image (.png)'},\n"
            f" Recorded Frames per Second: {self.fps},\n"
            f" Camera Keys: {self.camera_keys},\n"
            f" Video Frame Keys: {self.video_frame_keys if self.video else 'N/A'},\n"
            f" Transformations: {self.image_transforms},\n"
            f")"
        )
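
# Usage sketch (illustrative, not part of the original file; the repo_ids are assumptions):
# concatenate two compatible datasets and iterate over them with a single DataLoader; each
# item carries a "dataset_index" tensor identifying its source dataset.
#
#   multi_dataset = MultiLeRobotDataset(["user/ds_a", "user/ds_b"])
#   dataloader = torch.utils.data.DataLoader(multi_dataset, batch_size=16, shuffle=True)
#   batch = next(iter(dataloader))
#   source = batch["dataset_index"]  # maps back to repo ids via multi_dataset.repo_index_to_id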