From ce3b9f627e55223d6d1c449d348c6b351b35d082 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Thu, 7 Aug 2025 14:25:44 +0200 Subject: [PATCH] chore(docs): prioritize use of entry points in docs + fix nightly badge (#1692) * chore(docs): fix typo in nightly badge * chore(docs): prioritize the use of entrypoints for consistency --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- Makefile | 18 ++++++------ README.md | 12 ++++---- docs/source/cameras.mdx | 2 +- docs/source/hilserl.mdx | 4 +-- docs/source/hope_jr.mdx | 22 +++++++-------- docs/source/il_robots.mdx | 16 +++++------ docs/source/il_sim.mdx | 4 +-- docs/source/koch.mdx | 10 +++---- docs/source/lekiwi.mdx | 8 +++--- docs/source/smolvla.mdx | 6 ++-- docs/source/so100.mdx | 10 +++---- docs/source/so101.mdx | 10 +++---- examples/4_train_policy_with_script.md | 28 +++++++++---------- examples/backward_compatibility/replay.py | 2 +- src/lerobot/calibrate.py | 2 +- src/lerobot/cameras/opencv/camera_opencv.py | 5 ++-- .../cameras/realsense/camera_realsense.py | 5 ++-- src/lerobot/find_cameras.py | 2 +- src/lerobot/find_port.py | 2 +- src/lerobot/motors/motors_bus.py | 4 +-- src/lerobot/policies/pi0/modeling_pi0.py | 4 +-- .../policies/pi0fast/modeling_pi0fast.py | 4 +-- .../policies/smolvla/modeling_smolvla.py | 4 +-- src/lerobot/record.py | 4 +-- src/lerobot/replay.py | 4 +-- src/lerobot/robots/viperx/README.md | 4 +-- src/lerobot/scripts/eval.py | 4 +-- src/lerobot/setup_motors.py | 2 +- src/lerobot/teleoperate.py | 4 +-- .../templates/lerobot_modelcard_template.md | 4 +-- 31 files changed, 105 insertions(+), 107 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 22f1ee3d..d37b1a92 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -30,7 +30,7 @@ pytest -sx tests/test_stuff.py::test_something ``` ```bash -python -m lerobot.scripts.train --some.option=true +lerobot-train --some.option=true ``` ## SECTION TO REMOVE BEFORE SUBMITTING YOUR 
PR diff --git a/Makefile b/Makefile index 5bfbe76a..fbe8a5ba 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ test-end-to-end: ${MAKE} DEVICE=$(DEVICE) test-smolvla-ete-eval test-act-ete-train: - python -m lerobot.scripts.train \ + lerobot-train \ --policy.type=act \ --policy.dim_model=64 \ --policy.n_action_steps=20 \ @@ -68,12 +68,12 @@ test-act-ete-train: --output_dir=tests/outputs/act/ test-act-ete-train-resume: - python -m lerobot.scripts.train \ + lerobot-train \ --config_path=tests/outputs/act/checkpoints/000002/pretrained_model/train_config.json \ --resume=true test-act-ete-eval: - python -m lerobot.scripts.eval \ + lerobot-eval \ --policy.path=tests/outputs/act/checkpoints/000004/pretrained_model \ --policy.device=$(DEVICE) \ --env.type=aloha \ @@ -82,7 +82,7 @@ test-act-ete-eval: --eval.batch_size=1 test-diffusion-ete-train: - python -m lerobot.scripts.train \ + lerobot-train \ --policy.type=diffusion \ --policy.down_dims='[64,128,256]' \ --policy.diffusion_step_embed_dim=32 \ @@ -106,7 +106,7 @@ test-diffusion-ete-train: --output_dir=tests/outputs/diffusion/ test-diffusion-ete-eval: - python -m lerobot.scripts.eval \ + lerobot-eval \ --policy.path=tests/outputs/diffusion/checkpoints/000002/pretrained_model \ --policy.device=$(DEVICE) \ --env.type=pusht \ @@ -115,7 +115,7 @@ test-diffusion-ete-eval: --eval.batch_size=1 test-tdmpc-ete-train: - python -m lerobot.scripts.train \ + lerobot-train \ --policy.type=tdmpc \ --policy.device=$(DEVICE) \ --policy.push_to_hub=false \ @@ -137,7 +137,7 @@ test-tdmpc-ete-train: --output_dir=tests/outputs/tdmpc/ test-tdmpc-ete-eval: - python -m lerobot.scripts.eval \ + lerobot-eval \ --policy.path=tests/outputs/tdmpc/checkpoints/000002/pretrained_model \ --policy.device=$(DEVICE) \ --env.type=xarm \ @@ -148,7 +148,7 @@ test-tdmpc-ete-eval: test-smolvla-ete-train: - python -m lerobot.scripts.train \ + lerobot-train \ --policy.type=smolvla \ --policy.n_action_steps=20 \ --policy.chunk_size=20 \ @@ -171,7 +171,7 @@ 
test-smolvla-ete-train: --output_dir=tests/outputs/smolvla/ test-smolvla-ete-eval: - python -m lerobot.scripts.eval \ + lerobot-eval \ --policy.path=tests/outputs/smolvla/checkpoints/000004/pretrained_model \ --policy.device=$(DEVICE) \ --env.type=aloha \ diff --git a/README.md b/README.md index 7255ed3e..b5e666aa 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@
-[![Tests](https://github.com/huggingface/lerobot/actions/workflows/nightly.yml/badge.svg?branch=main)](https://github.com/huggingface/lerobot/actions/workflows/nighty.yml?query=branch%3Amain) +[![Tests](https://github.com/huggingface/lerobot/actions/workflows/nightly.yml/badge.svg?branch=main)](https://github.com/huggingface/lerobot/actions/workflows/nightly.yml?query=branch%3Amain) [![Python versions](https://img.shields.io/pypi/pyversions/lerobot)](https://www.python.org/downloads/) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/huggingface/lerobot/blob/main/LICENSE) [![Status](https://img.shields.io/pypi/status/lerobot)](https://pypi.org/project/lerobot/) @@ -276,7 +276,7 @@ Check out [example 2](https://github.com/huggingface/lerobot/blob/main/examples/ We also provide a more capable script to parallelize the evaluation over multiple environments during the same rollout. Here is an example with a pretrained model hosted on [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht): ```bash -python -m lerobot.scripts.eval \ +lerobot-eval \ --policy.path=lerobot/diffusion_pusht \ --env.type=pusht \ --eval.batch_size=10 \ @@ -288,10 +288,10 @@ python -m lerobot.scripts.eval \ Note: After training your own policy, you can re-evaluate the checkpoints with: ```bash -python -m lerobot.scripts.eval --policy.path={OUTPUT_DIR}/checkpoints/last/pretrained_model +lerobot-eval --policy.path={OUTPUT_DIR}/checkpoints/last/pretrained_model ``` -See `python -m lerobot.scripts.eval --help` for more instructions. +See `lerobot-eval --help` for more instructions. ### Train your own policy @@ -303,7 +303,7 @@ A link to the wandb logs for the run will also show up in yellow in your termina \WandB logs example -Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `--eval.n_episodes=500` to evaluate on more episodes than the default. 
Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `python -m lerobot.scripts.eval --help` for more instructions. +Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `--eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `lerobot-eval --help` for more instructions. #### Reproduce state-of-the-art (SOTA) @@ -311,7 +311,7 @@ We provide some pretrained policies on our [hub page](https://huggingface.co/ler You can reproduce their training by loading the config from their run. Simply running: ```bash -python -m lerobot.scripts.train --config_path=lerobot/diffusion_pusht +lerobot-train --config_path=lerobot/diffusion_pusht ``` reproduces SOTA results for Diffusion Policy on the PushT task. diff --git a/docs/source/cameras.mdx b/docs/source/cameras.mdx index 604863d7..5c35be0b 100644 --- a/docs/source/cameras.mdx +++ b/docs/source/cameras.mdx @@ -9,7 +9,7 @@ To instantiate a camera, you need a camera identifier. 
This identifier might cha To find the camera indices of the cameras plugged into your system, run the following script: ```bash -python -m lerobot.find_cameras opencv # or realsense for Intel Realsense cameras +lerobot-find-cameras opencv # or realsense for Intel Realsense cameras ``` The output will look something like this if you have two cameras connected: diff --git a/docs/source/hilserl.mdx b/docs/source/hilserl.mdx index 2f73d096..f8a5c69b 100644 --- a/docs/source/hilserl.mdx +++ b/docs/source/hilserl.mdx @@ -412,7 +412,7 @@ Example configuration for training the [reward classifier](https://huggingface.c To train the classifier, use the `train.py` script with your configuration: ```bash -python -m lerobot.scripts.train --config_path path/to/reward_classifier_train_config.json +lerobot-train --config_path path/to/reward_classifier_train_config.json ``` **Deploying and Testing the Model** @@ -458,7 +458,7 @@ The reward classifier will automatically provide rewards based on the visual inp 3. **Train the classifier**: ```bash - python -m lerobot.scripts.train --config_path src/lerobot/configs/reward_classifier_train_config.json + lerobot-train --config_path src/lerobot/configs/reward_classifier_train_config.json ``` 4. **Test the classifier**: diff --git a/docs/source/hope_jr.mdx b/docs/source/hope_jr.mdx index 72aa8f92..856febb9 100644 --- a/docs/source/hope_jr.mdx +++ b/docs/source/hope_jr.mdx @@ -19,7 +19,7 @@ pip install -e ".[hopejr]" Before starting calibration and operation, you need to identify the USB ports for each HopeJR component. Run this script to find the USB ports for the arm, hand, glove, and exoskeleton: ```bash -python -m lerobot.find_port +lerobot-find-port ``` This will display the available USB ports and their associated devices. 
Make note of the port paths (e.g., `/dev/tty.usbmodem58760433331`, `/dev/tty.usbmodem11301`) as you'll need to specify them in the `--robot.port` and `--teleop.port` parameters when recording data, replaying episodes, or running teleoperation scripts. @@ -31,7 +31,7 @@ Before performing teleoperation, HopeJR's limbs need to be calibrated. Calibrati ### 1.1 Calibrate Robot Hand ```bash -python -m lerobot.calibrate \ +lerobot-calibrate \ --robot.type=hope_jr_hand \ --robot.port=/dev/tty.usbmodem58760432281 \ --robot.id=blue \ @@ -81,7 +81,7 @@ Once you have set the appropriate boundaries for all joints, click "Save" to sav ### 1.2 Calibrate Teleoperator Glove ```bash -python -m lerobot.calibrate \ +lerobot-calibrate \ --teleop.type=homunculus_glove \ --teleop.port=/dev/tty.usbmodem11201 \ --teleop.id=red \ @@ -120,7 +120,7 @@ Once calibration is complete, the system will save the calibration to `/Users/yo ### 1.3 Calibrate Robot Arm ```bash -python -m lerobot.calibrate \ +lerobot-calibrate \ --robot.type=hope_jr_arm \ --robot.port=/dev/tty.usbserial-1110 \ --robot.id=white @@ -146,7 +146,7 @@ Use the calibration interface to set the range boundaries for each joint. Move e ### 1.4 Calibrate Teleoperator Exoskeleton ```bash -python -m lerobot.calibrate \ +lerobot-calibrate \ --teleop.type=homunculus_arm \ --teleop.port=/dev/tty.usbmodem11201 \ --teleop.id=black @@ -178,7 +178,7 @@ Due to global variable conflicts in the Feetech middleware, teleoperation for ar ### Hand ```bash -python -m lerobot.teleoperate \ +lerobot-teleoperate \ --robot.type=hope_jr_hand \ --robot.port=/dev/tty.usbmodem58760432281 \ --robot.id=blue \ @@ -194,7 +194,7 @@ python -m lerobot.teleoperate \ ### Arm ```bash -python -m lerobot.teleoperate \ +lerobot-teleoperate \ --robot.type=hope_jr_arm \ --robot.port=/dev/tty.usbserial-1110 \ --robot.id=white \ @@ -214,7 +214,7 @@ Record, Replay and Train with Hope-JR is still experimental. 
This step records the dataset, which can be seen as an example [here](https://huggingface.co/datasets/nepyope/hand_record_test_with_video_data/settings). ```bash -python -m lerobot.record \ +lerobot-record \ --robot.type=hope_jr_hand \ --robot.port=/dev/tty.usbmodem58760432281 \ --robot.id=right \ @@ -236,7 +236,7 @@ python -m lerobot.record \ ### Replay ```bash -python -m lerobot.replay \ +lerobot-replay \ --robot.type=hope_jr_hand \ --robot.port=/dev/tty.usbmodem58760432281 \ --robot.id=right \ @@ -248,7 +248,7 @@ python -m lerobot.replay \ ### Train ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --dataset.repo_id=nepyope/hand_record_test_with_video_data \ --policy.type=act \ --output_dir=outputs/train/hopejr_hand \ @@ -263,7 +263,7 @@ python -m lerobot.scripts.train \ This training run can be viewed as an example [here](https://wandb.ai/tino/lerobot/runs/rp0k8zvw?nw=nwusertino). ```bash -python -m lerobot.record \ +lerobot-record \ --robot.type=hope_jr_hand \ --robot.port=/dev/tty.usbmodem58760432281 \ --robot.id=right \ diff --git a/docs/source/il_robots.mdx b/docs/source/il_robots.mdx index ec5491b2..905046be 100644 --- a/docs/source/il_robots.mdx +++ b/docs/source/il_robots.mdx @@ -45,7 +45,7 @@ Note that the `id` associated with a robot is used to store the calibration file ```bash -python -m lerobot.teleoperate \ +lerobot-teleoperate \ --robot.type=so101_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.id=my_awesome_follower_arm \ @@ -101,7 +101,7 @@ With `rerun`, you can teleoperate again while simultaneously visualizing the cam ```bash -python -m lerobot.teleoperate \ +lerobot-teleoperate \ --robot.type=koch_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.id=my_awesome_follower_arm \ @@ -174,7 +174,7 @@ Now you can record a dataset. 
To record 5 episodes and upload your dataset to th ```bash -python -m lerobot.record \ +lerobot-record \ --robot.type=so101_follower \ --robot.port=/dev/tty.usbmodem585A0076841 \ --robot.id=my_awesome_follower_arm \ @@ -376,7 +376,7 @@ You can replay the first episode on your robot with either the command below or ```bash -python -m lerobot.replay \ +lerobot-replay \ --robot.type=so101_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.id=my_awesome_follower_arm \ @@ -428,10 +428,10 @@ Your robot should replicate movements similar to those you recorded. For example ## Train a policy -To train a policy to control your robot, use the [`python -m lerobot.scripts.train`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: +To train a policy to control your robot, use the [`lerobot-train`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --dataset.repo_id=${HF_USER}/so101_test \ --policy.type=act \ --output_dir=outputs/train/act_so101_test \ @@ -453,7 +453,7 @@ Training should take several hours. 
You will find checkpoints in `outputs/train/ To resume training from a checkpoint, below is an example command to resume from `last` checkpoint of the `act_so101_test` policy: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --config_path=outputs/train/act_so101_test/checkpoints/last/pretrained_model/train_config.json \ --resume=true ``` @@ -490,7 +490,7 @@ You can use the `record` script from [`lerobot/record.py`](https://github.com/hu ```bash -python -m lerobot.record \ +lerobot-record \ --robot.type=so100_follower \ --robot.port=/dev/ttyACM1 \ --robot.cameras="{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}, side: {type: intelrealsense, serial_number_or_name: 233522074606, width: 640, height: 480, fps: 30}}" \ diff --git a/docs/source/il_sim.mdx b/docs/source/il_sim.mdx index 761e24e0..3dd80dc4 100644 --- a/docs/source/il_sim.mdx +++ b/docs/source/il_sim.mdx @@ -96,10 +96,10 @@ If you uploaded your dataset to the hub you can [visualize your dataset online]( ## Train a policy -To train a policy to control your robot, use the [`python -m lerobot.scripts.train`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: +To train a policy to control your robot, use the [`lerobot-train`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/train.py) script. A few arguments are required. 
Here is an example command: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --dataset.repo_id=${HF_USER}/il_gym \ --policy.type=act \ --output_dir=outputs/train/il_sim_test \ diff --git a/docs/source/koch.mdx b/docs/source/koch.mdx index d0b991e7..3e94899a 100644 --- a/docs/source/koch.mdx +++ b/docs/source/koch.mdx @@ -31,7 +31,7 @@ pip install -e ".[dynamixel]" To find the port for each bus servo adapter, run this script: ```bash -python -m lerobot.find_port +lerobot-find-port ``` @@ -98,7 +98,7 @@ For a visual reference on how to set the motor ids please refer to [this video]( ```bash -python -m lerobot.setup_motors \ +lerobot-setup-motors \ --robot.type=koch_follower \ --robot.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step ``` @@ -174,7 +174,7 @@ Do the same steps for the leader arm but modify the command or script accordingl ```bash -python -m lerobot.setup_motors \ +lerobot-setup-motors \ --teleop.type=koch_leader \ --teleop.port=/dev/tty.usbmodem575E0031751 \ # <- paste here the port found at previous step ``` @@ -211,7 +211,7 @@ Run the following command or API example to calibrate the follower arm: ```bash -python -m lerobot.calibrate \ +lerobot-calibrate \ --robot.type=koch_follower \ --robot.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot --robot.id=my_awesome_follower_arm # <- Give the robot a unique name @@ -249,7 +249,7 @@ Do the same steps to calibrate the leader arm, run the following command or API ```bash -python -m lerobot.calibrate \ +lerobot-calibrate \ --teleop.type=koch_leader \ --teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot --teleop.id=my_awesome_leader_arm # <- Give the robot a unique name diff --git a/docs/source/lekiwi.mdx b/docs/source/lekiwi.mdx index a5bdb19c..14c06e44 100644 --- a/docs/source/lekiwi.mdx +++ b/docs/source/lekiwi.mdx @@ -60,7 +60,7 @@ First, we will assemble the two SO100/SO101 arms. 
One to attach to the mobile ba To find the port for each bus servo adapter, run this script: ```bash -python -m lerobot.find_port +lerobot-find-port ``` @@ -116,7 +116,7 @@ The instructions for configuring the motors can be found in the SO101 [docs](./s You can run this command to setup motors for LeKiwi. It will first setup the motors for arm (id 6..1) and then setup motors for wheels (9,8,7) ```bash -python -m lerobot.setup_motors \ +lerobot-setup-motors \ --robot.type=lekiwi \ --robot.port=/dev/tty.usbmodem58760431551 # <- paste here the port found at previous step ``` @@ -174,7 +174,7 @@ The calibration process is very important because it allows a neural network tra Make sure the arm is connected to the Raspberry Pi and run this script or API example (on the Raspberry Pi via SSH) to launch calibration of the follower arm: ```bash -python -m lerobot.calibrate \ +lerobot-calibrate \ --robot.type=lekiwi \ --robot.id=my_awesome_kiwi # <- Give the robot a unique name ``` @@ -193,7 +193,7 @@ Then, to calibrate the leader arm (which is attached to the laptop/pc). Run the ```bash -python -m lerobot.calibrate \ +lerobot-calibrate \ --teleop.type=so100_leader \ --teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot --teleop.id=my_awesome_leader_arm # <- Give the robot a unique name diff --git a/docs/source/smolvla.mdx b/docs/source/smolvla.mdx index 880beaa1..89c475a9 100644 --- a/docs/source/smolvla.mdx +++ b/docs/source/smolvla.mdx @@ -54,7 +54,7 @@ If you don't have a gpu device, you can train using our notebook on [![Google Co Pass your dataset to the training script using `--dataset.repo_id`. If you want to test your installation, run the following command where we use one of the datasets we collected for the [SmolVLA Paper](https://huggingface.co/papers/2506.01844). 
```bash -cd lerobot && python -m lerobot.scripts.train \ +cd lerobot && lerobot-train \ --policy.path=lerobot/smolvla_base \ --dataset.repo_id=${HF_USER}/mydataset \ --batch_size=64 \ @@ -73,7 +73,7 @@ cd lerobot && python -m lerobot.scripts.train \ Fine-tuning is an art. For a complete overview of the options for finetuning, run ```bash -python -m lerobot.scripts.train --help +lerobot-train --help ```

@@ -97,7 +97,7 @@ Similarly for when recording an episode, it is recommended that you are logged i Once you are logged in, you can run inference in your setup by doing: ```bash -python -m lerobot.record \ +lerobot-record \ --robot.type=so101_follower \ --robot.port=/dev/ttyACM0 \ # <- Use your port --robot.id=my_blue_follower_arm \ # <- Use your robot id diff --git a/docs/source/so100.mdx b/docs/source/so100.mdx index d9ff922c..8578e1e8 100644 --- a/docs/source/so100.mdx +++ b/docs/source/so100.mdx @@ -26,7 +26,7 @@ Unlike the SO-101, the motor connectors are not easily accessible once the arm i To find the port for each bus servo adapter, run this script: ```bash -python -m lerobot.find_port +lerobot-find-port ``` @@ -93,7 +93,7 @@ For a visual reference on how to set the motor ids please refer to [this video]( ```bash -python -m lerobot.setup_motors \ +lerobot-setup-motors \ --robot.type=so100_follower \ --robot.port=/dev/tty.usbmodem585A0076841 # <- paste here the port found at previous step ``` @@ -168,7 +168,7 @@ Do the same steps for the leader arm. 
```bash -python -m lerobot.setup_motors \ +lerobot-setup-motors \ --teleop.type=so100_leader \ --teleop.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step ``` @@ -568,7 +568,7 @@ Run the following command or API example to calibrate the follower arm: ```bash -python -m lerobot.calibrate \ +lerobot-calibrate \ --robot.type=so100_follower \ --robot.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot --robot.id=my_awesome_follower_arm # <- Give the robot a unique name @@ -606,7 +606,7 @@ Do the same steps to calibrate the leader arm, run the following command or API ```bash -python -m lerobot.calibrate \ +lerobot-calibrate \ --teleop.type=so100_leader \ --teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot --teleop.id=my_awesome_leader_arm # <- Give the robot a unique name diff --git a/docs/source/so101.mdx b/docs/source/so101.mdx index a20a3fa9..b9fb9cab 100644 --- a/docs/source/so101.mdx +++ b/docs/source/so101.mdx @@ -162,7 +162,7 @@ It is advisable to install one 3-pin cable in the motor after placing them befor To find the port for each bus servo adapter, connect MotorBus to your computer via USB and power. Run the following script and disconnect the MotorBus when prompted: ```bash -python -m lerobot.find_port +lerobot-find-port ``` @@ -240,7 +240,7 @@ Connect the usb cable from your computer and the power supply to the follower ar ```bash -python -m lerobot.setup_motors \ +lerobot-setup-motors \ --robot.type=so101_follower \ --robot.port=/dev/tty.usbmodem585A0076841 # <- paste here the port found at previous step ``` @@ -316,7 +316,7 @@ Do the same steps for the leader arm. 
```bash -python -m lerobot.setup_motors \ +lerobot-setup-motors \ --teleop.type=so101_leader \ --teleop.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step ``` @@ -353,7 +353,7 @@ Run the following command or API example to calibrate the follower arm: ```bash -python -m lerobot.calibrate \ +lerobot-calibrate \ --robot.type=so101_follower \ --robot.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot --robot.id=my_awesome_follower_arm # <- Give the robot a unique name @@ -402,7 +402,7 @@ Do the same steps to calibrate the leader arm, run the following command or API ```bash -python -m lerobot.calibrate \ +lerobot-calibrate \ --teleop.type=so101_leader \ --teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot --teleop.id=my_awesome_leader_arm # <- Give the robot a unique name diff --git a/examples/4_train_policy_with_script.md b/examples/4_train_policy_with_script.md index d6cd6cc2..ffa7de66 100644 --- a/examples/4_train_policy_with_script.md +++ b/examples/4_train_policy_with_script.md @@ -62,7 +62,7 @@ By default, every field takes its default value specified in the dataclass. If a Let's say that we want to train [Diffusion Policy](../src/lerobot/policies/diffusion) on the [pusht](https://huggingface.co/datasets/lerobot/pusht) dataset, using the [gym_pusht](https://github.com/huggingface/gym-pusht) environment for evaluation. The command to do so would look like this: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --dataset.repo_id=lerobot/pusht \ --policy.type=diffusion \ --env.type=pusht @@ -77,7 +77,7 @@ Let's break this down: Let's see another example. 
Let's say you've been training [ACT](../src/lerobot/policies/act) on [lerobot/aloha_sim_insertion_human](https://huggingface.co/datasets/lerobot/aloha_sim_insertion_human) using the [gym-aloha](https://github.com/huggingface/gym-aloha) environment for evaluation with: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --policy.type=act \ --dataset.repo_id=lerobot/aloha_sim_insertion_human \ --env.type=aloha \ @@ -90,7 +90,7 @@ We now want to train a different policy for aloha on another task. We'll change Looking at the [`AlohaEnv`](../src/lerobot/envs/configs.py) config, the task is `"AlohaInsertion-v0"` by default, which corresponds to the task we trained on in the command above. The [gym-aloha](https://github.com/huggingface/gym-aloha?tab=readme-ov-file#description) environment also has the `AlohaTransferCube-v0` task which corresponds to this other task we want to train on. Putting this together, we can train this new policy on this different task using: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --policy.type=act \ --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \ --env.type=aloha \ @@ -127,7 +127,7 @@ Now, let's assume that we want to reproduce the run just above. 
That run has pro We can then simply load the config values from this file using: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/ \ --output_dir=outputs/train/act_aloha_transfer_2 ``` @@ -137,7 +137,7 @@ python -m lerobot.scripts.train \ Similarly to Hydra, we can still override some parameters in the CLI if we want to, e.g.: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/ \ --output_dir=outputs/train/act_aloha_transfer_2 --policy.n_action_steps=80 @@ -148,7 +148,7 @@ python -m lerobot.scripts.train \ `--config_path` can also accept the repo_id of a repo on the hub that contains a `train_config.json` file, e.g. running: ```bash -python -m lerobot.scripts.train --config_path=lerobot/diffusion_pusht +lerobot-train --config_path=lerobot/diffusion_pusht ``` will start a training run with the same configuration used for training [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht) @@ -160,7 +160,7 @@ Being able to resume a training run is important in case it crashed or aborted f Let's reuse the command from the previous run and add a few more options: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --policy.type=act \ --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \ --env.type=aloha \ @@ -179,7 +179,7 @@ INFO 2025-01-24 16:10:56 ts/train.py:263 Checkpoint policy after step 100 Now let's simulate a crash by killing the process (hit `ctrl`+`c`). 
We can then simply resume this run from the last checkpoint available with: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/ \ --resume=true ``` @@ -190,7 +190,7 @@ Another reason for which you might want to resume a run is simply to extend trai You could double the number of steps of the previous run with: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/ \ --resume=true \ --steps=200000 @@ -224,7 +224,7 @@ In addition to the features currently in Draccus, we've added a special `.path` For example, we could fine-tune a [policy pre-trained on the aloha transfer task](https://huggingface.co/lerobot/act_aloha_sim_transfer_cube_human) on the aloha insertion task. We can achieve this with: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --policy.path=lerobot/act_aloha_sim_transfer_cube_human \ --dataset.repo_id=lerobot/aloha_sim_insertion_human \ --env.type=aloha \ @@ -270,7 +270,7 @@ We'll summarize here the main use cases to remember from this tutorial. 
#### Train a policy from scratch – CLI ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --policy.type=act \ # <- select 'act' policy --env.type=pusht \ # <- select 'pusht' environment --dataset.repo_id=lerobot/pusht # <- train on this dataset @@ -279,7 +279,7 @@ python -m lerobot.scripts.train \ #### Train a policy from scratch - config file + CLI ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --config_path=path/to/pretrained_model \ # <- can also be a repo_id --policy.n_action_steps=80 # <- you may still override values ``` @@ -287,7 +287,7 @@ python -m lerobot.scripts.train \ #### Resume/continue a training run ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --config_path=checkpoint/pretrained_model/ \ --resume=true \ --steps=200000 # <- you can change some training parameters @@ -296,7 +296,7 @@ python -m lerobot.scripts.train \ #### Fine-tuning ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --policy.path=lerobot/act_aloha_sim_transfer_cube_human \ # <- can also be a local path to a checkpoint --dataset.repo_id=lerobot/aloha_sim_insertion_human \ --env.type=aloha \ diff --git a/examples/backward_compatibility/replay.py b/examples/backward_compatibility/replay.py index cc339754..6c680f20 100644 --- a/examples/backward_compatibility/replay.py +++ b/examples/backward_compatibility/replay.py @@ -18,7 +18,7 @@ Replays the actions of an episode from a dataset on a robot. Example: ```shell -python -m lerobot.replay \ +lerobot-replay \ --robot.type=so100_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.id=black \ diff --git a/src/lerobot/calibrate.py b/src/lerobot/calibrate.py index 0dda80ba..0aa61a2f 100644 --- a/src/lerobot/calibrate.py +++ b/src/lerobot/calibrate.py @@ -18,7 +18,7 @@ Helper to recalibrate your device (robot or teleoperator). 
Example: ```shell -python -m lerobot.calibrate \ +lerobot-calibrate \ --teleop.type=so100_leader \ --teleop.port=/dev/tty.usbmodem58760431551 \ --teleop.id=blue diff --git a/src/lerobot/cameras/opencv/camera_opencv.py b/src/lerobot/cameras/opencv/camera_opencv.py index aad19819..3665a909 100644 --- a/src/lerobot/cameras/opencv/camera_opencv.py +++ b/src/lerobot/cameras/opencv/camera_opencv.py @@ -60,7 +60,7 @@ class OpenCVCamera(Camera): or port changes, especially on Linux. Use the provided utility script to find available camera indices or paths: ```bash - python -m lerobot.find_cameras opencv + lerobot-find-cameras opencv ``` The camera's default settings (FPS, resolution, color mode) are used unless @@ -165,8 +165,7 @@ class OpenCVCamera(Camera): self.videocapture.release() self.videocapture = None raise ConnectionError( - f"Failed to open {self}." - f"Run `python -m lerobot.find_cameras opencv` to find available cameras." + f"Failed to open {self}. Run `lerobot-find-cameras opencv` to find available cameras." ) self._configure_capture_settings() diff --git a/src/lerobot/cameras/realsense/camera_realsense.py b/src/lerobot/cameras/realsense/camera_realsense.py index 918c5592..12ce89c9 100644 --- a/src/lerobot/cameras/realsense/camera_realsense.py +++ b/src/lerobot/cameras/realsense/camera_realsense.py @@ -51,7 +51,7 @@ class RealSenseCamera(Camera): Use the provided utility script to find available camera indices and default profiles: ```bash - python -m lerobot.find_cameras realsense + lerobot-find-cameras realsense ``` A `RealSenseCamera` instance requires a configuration object specifying the @@ -176,8 +176,7 @@ class RealSenseCamera(Camera): self.rs_profile = None self.rs_pipeline = None raise ConnectionError( - f"Failed to open {self}." - "Run `python -m lerobot.find_cameras realsense` to find available cameras." + f"Failed to open {self}. Run `lerobot-find-cameras realsense` to find available cameras."
) from e self._configure_capture_settings() diff --git a/src/lerobot/find_cameras.py b/src/lerobot/find_cameras.py index 8f88d310..ec8f5ff3 100644 --- a/src/lerobot/find_cameras.py +++ b/src/lerobot/find_cameras.py @@ -20,7 +20,7 @@ Helper to find the camera devices available in your system. Example: ```shell -python -m lerobot.find_cameras +lerobot-find-cameras ``` """ diff --git a/src/lerobot/find_port.py b/src/lerobot/find_port.py index babe0288..e32b9cb9 100644 --- a/src/lerobot/find_port.py +++ b/src/lerobot/find_port.py @@ -18,7 +18,7 @@ Helper to find the USB port associated with your MotorsBus. Example: ```shell -python -m lerobot.find_port +lerobot-find-port ``` """ diff --git a/src/lerobot/motors/motors_bus.py b/src/lerobot/motors/motors_bus.py index 597bcd3c..97830fc3 100644 --- a/src/lerobot/motors/motors_bus.py +++ b/src/lerobot/motors/motors_bus.py @@ -222,7 +222,7 @@ class MotorsBus(abc.ABC): A MotorsBus subclass instance requires a port (e.g. `FeetechMotorsBus(port="/dev/tty.usbmodem575E0031751"`)). To find the port, you can run our utility script: ```bash - python -m lerobot.find_port.py + lerobot-find-port >>> Finding all available ports for the MotorsBus. >>> ["/dev/tty.usbmodem575E0032081", "/dev/tty.usbmodem575E0031751"] >>> Remove the usb cable from your MotorsBus and press Enter when done. @@ -446,7 +446,7 @@ class MotorsBus(abc.ABC): except (FileNotFoundError, OSError, serial.SerialException) as e: raise ConnectionError( f"\nCould not connect on port '{self.port}'. Make sure you are using the correct port."
- "\nTry running `python -m lerobot.find_port`\n" + "\nTry running `lerobot-find-port`\n" ) from e @abc.abstractmethod diff --git a/src/lerobot/policies/pi0/modeling_pi0.py b/src/lerobot/policies/pi0/modeling_pi0.py index e56946ac..de41e2bd 100644 --- a/src/lerobot/policies/pi0/modeling_pi0.py +++ b/src/lerobot/policies/pi0/modeling_pi0.py @@ -30,7 +30,7 @@ pip install -e ".[pi0]" Example of finetuning the pi0 pretrained model (`pi0_base` in `openpi`): ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --policy.path=lerobot/pi0 \ --dataset.repo_id=danaaubakirova/koch_test ``` @@ -38,7 +38,7 @@ python -m lerobot.scripts.train \ Example of finetuning the pi0 neural network with PaliGemma and expert Gemma pretrained with VLM default parameters before pi0 finetuning: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --policy.type=pi0 \ --dataset.repo_id=danaaubakirova/koch_test ``` diff --git a/src/lerobot/policies/pi0fast/modeling_pi0fast.py b/src/lerobot/policies/pi0fast/modeling_pi0fast.py index 80e10bc0..88727b58 100644 --- a/src/lerobot/policies/pi0fast/modeling_pi0fast.py +++ b/src/lerobot/policies/pi0fast/modeling_pi0fast.py @@ -25,14 +25,14 @@ Disclaimer: It is not expected to perform as well as the original implementation Example of finetuning the pi0+FAST pretrained model (`pi0_fast_base` in `openpi`): ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --policy.path=lerobot/pi0fast_base \ --dataset.repo_id=danaaubakirova/koch_test ``` Example of training the pi0+FAST neural network with from scratch: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --policy.type=pi0fast \ --dataset.repo_id=danaaubakirova/koch_test ``` diff --git a/src/lerobot/policies/smolvla/modeling_smolvla.py b/src/lerobot/policies/smolvla/modeling_smolvla.py index 469645e8..18f2fc58 100644 --- a/src/lerobot/policies/smolvla/modeling_smolvla.py +++ b/src/lerobot/policies/smolvla/modeling_smolvla.py @@ -28,7 +28,7 @@ pip install -e ".[smolvla]" Example 
of finetuning the smolvla pretrained model (`smolvla_base`): ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --policy.path=lerobot/smolvla_base \ --dataset.repo_id=danaaubakirova/svla_so100_task1_v3 \ --batch_size=64 \ @@ -38,7 +38,7 @@ python -m lerobot.scripts.train \ Example of finetuning a smolVLA. SmolVLA is composed of a pretrained VLM, and an action expert. ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --policy.type=smolvla \ --dataset.repo_id=danaaubakirova/svla_so100_task1_v3 \ --batch_size=64 \ diff --git a/src/lerobot/record.py b/src/lerobot/record.py index 575fcb94..09fa33fe 100644 --- a/src/lerobot/record.py +++ b/src/lerobot/record.py @@ -18,7 +18,7 @@ Records a dataset. Actions for the robot can be either generated by teleoperatio Example: ```shell -python -m lerobot.record \ +lerobot-record \ --robot.type=so100_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.cameras="{laptop: {type: opencv, camera_index: 0, width: 640, height: 480}}" \ @@ -36,7 +36,7 @@ python -m lerobot.record \ Example recording with bimanual so100: ```shell -python -m lerobot.record \ +lerobot-record \ --robot.type=bi_so100_follower \ --robot.left_arm_port=/dev/tty.usbmodem5A460851411 \ --robot.right_arm_port=/dev/tty.usbmodem5A460812391 \ diff --git a/src/lerobot/replay.py b/src/lerobot/replay.py index a9dceb74..2b62fd67 100644 --- a/src/lerobot/replay.py +++ b/src/lerobot/replay.py @@ -18,7 +18,7 @@ Replays the actions of an episode from a dataset on a robot. 
Examples: ```shell -python -m lerobot.replay \ +lerobot-replay \ --robot.type=so100_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.id=black \ @@ -28,7 +28,7 @@ python -m lerobot.replay \ Example replay with bimanual so100: ```shell -python -m lerobot.replay \ +lerobot-replay \ --robot.type=bi_so100_follower \ --robot.left_arm_port=/dev/tty.usbmodem5A460851411 \ --robot.right_arm_port=/dev/tty.usbmodem5A460812391 \ diff --git a/src/lerobot/robots/viperx/README.md b/src/lerobot/robots/viperx/README.md index 5cdb152a..bbc9f722 100644 --- a/src/lerobot/robots/viperx/README.md +++ b/src/lerobot/robots/viperx/README.md @@ -141,10 +141,10 @@ python lerobot/scripts/control_robot.py \ ## Train a policy -To train a policy to control your robot, use the [`python -m lerobot.scripts.train`](../src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: +To train a policy to control your robot, use the [`lerobot-train`](../src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --dataset.repo_id=${HF_USER}/aloha_test \ --policy.type=act \ --output_dir=outputs/train/act_aloha_test \ diff --git a/src/lerobot/scripts/eval.py b/src/lerobot/scripts/eval.py index 6a6c02a2..13d30c68 100644 --- a/src/lerobot/scripts/eval.py +++ b/src/lerobot/scripts/eval.py @@ -21,7 +21,7 @@ You want to evaluate a model from the hub (eg: https://huggingface.co/lerobot/di for 10 episodes. ``` -python -m lerobot.scripts.eval \ +lerobot-eval \ --policy.path=lerobot/diffusion_pusht \ --env.type=pusht \ --eval.batch_size=10 \ @@ -32,7 +32,7 @@ python -m lerobot.scripts.eval \ OR, you want to evaluate a model checkpoint from the LeRobot training script for 10 episodes. 
``` -python -m lerobot.scripts.eval \ +lerobot-eval \ --policy.path=outputs/train/diffusion_pusht/checkpoints/005000/pretrained_model \ --env.type=pusht \ --eval.batch_size=10 \ diff --git a/src/lerobot/setup_motors.py b/src/lerobot/setup_motors.py index 76cdca56..c1d256c2 100644 --- a/src/lerobot/setup_motors.py +++ b/src/lerobot/setup_motors.py @@ -18,7 +18,7 @@ Helper to set motor ids and baudrate. Example: ```shell -python -m lerobot.setup_motors \ +lerobot-setup-motors \ --teleop.type=so100_leader \ --teleop.port=/dev/tty.usbmodem575E0031751 ``` diff --git a/src/lerobot/teleoperate.py b/src/lerobot/teleoperate.py index 3c72caf7..e7be6967 100644 --- a/src/lerobot/teleoperate.py +++ b/src/lerobot/teleoperate.py @@ -18,7 +18,7 @@ Simple script to control a robot from teleoperation. Example: ```shell -python -m lerobot.teleoperate \ +lerobot-teleoperate \ --robot.type=so101_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \ @@ -32,7 +32,7 @@ python -m lerobot.teleoperate \ Example teleoperation with bimanual so100: ```shell -python -m lerobot.teleoperate \ +lerobot-teleoperate \ --robot.type=bi_so100_follower \ --robot.left_arm_port=/dev/tty.usbmodem5A460851411 \ --robot.right_arm_port=/dev/tty.usbmodem5A460812391 \ diff --git a/src/lerobot/templates/lerobot_modelcard_template.md b/src/lerobot/templates/lerobot_modelcard_template.md index 7b7aaa84..9293d6ba 100644 --- a/src/lerobot/templates/lerobot_modelcard_template.md +++ b/src/lerobot/templates/lerobot_modelcard_template.md @@ -44,7 +44,7 @@ Below is the short version on how to train and run inference/eval: ### Train from scratch ```bash -python -m lerobot.scripts.train \ +lerobot-train \ --dataset.repo_id=${HF_USER}/ \ --policy.type=act \ --output_dir=outputs/train/ \ @@ -59,7 +59,7 @@ _Writes checkpoints to `outputs/train//checkpoints/`._ ### Evaluate the policy/run inference ```bash -python -m 
lerobot.record \ +lerobot-record \ --robot.type=so100_follower \ --dataset.repo_id=/eval_ \ --policy.path=/ \