forked from tangger/lerobot

Compare commits: smolvla_do...my-fix-bas (37 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 97f061fca6 | |
| | f01c6d06cf | |
| | b41362a4aa | |
| | 209f89a274 | |
| | efeef4eef2 | |
| | 812feac7d7 | |
| | 31c30cce82 | |
| | 345bc8b2d0 | |
| | c2d645717a | |
| | 1fd53b441a | |
| | 73fd12dea1 | |
| | 60389f70ca | |
| | 64cc94479a | |
| | 093773f850 | |
| | 17625ca4ff | |
| | 492b1812dd | |
| | 2d98bf63a7 | |
| | 825e0e1ab1 | |
| | 7f3fe83465 | |
| | 95a16c605d | |
| | 5fd40b188e | |
| | 4027d97c70 | |
| | 496c9d0037 | |
| | 3ae55831c8 | |
| | 6d776f7eb9 | |
| | 312d45b7fe | |
| | 302bdd51a6 | |
| | 8097f66248 | |
| | ac0f2e9dd9 | |
| | a3a73f6822 | |
| | a9d425f470 | |
| | 1c5a3585e7 | |
| | 44b457852e | |
| | 97d609feb4 | |
| | 4719f41a43 | |
| | 8d6acb3a4f | |
| | d89d0884e3 | |
@@ -10,8 +10,3 @@
  - local: getting_started_real_world_robot
    title: Getting Started with Real-World Robots
  title: "Tutorials"
- sections:
  - local: smolvla
    title: Use SmolVLA
  title: "Policies"
@@ -1,91 +0,0 @@
# Use SmolVLA

SmolVLA is designed to be easy to use and integrate, whether you're finetuning it on your own data or plugging it into an existing robotics stack.

<p align="center">
  <img src="https://cdn-uploads.huggingface.co/production/uploads/640e21ef3c82bd463ee5a76d/aooU0a3DMtYmy_1IWMaIM.png" alt="SmolVLA architecture." width="500"/>
  <br/>
  <em>Figure 2. SmolVLA takes as input a sequence of RGB images from multiple cameras, the robot's current sensorimotor state, and a natural language instruction. The VLM encodes these into contextual features, which condition the action expert to generate a continuous sequence of actions.</em>
</p>

### Install

First, install the required dependencies:

```bash
git clone https://github.com/huggingface/lerobot.git
cd lerobot
pip install -e ".[smolvla]"
```
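
To sanity-check the install, you can import the package and print the policy registry (the same `available_policies` list edited further down in this diff). This is only a minimal check and assumes you run it from the environment where the editable install succeeded:

```python
# Minimal post-install check: "smolvla" should appear among the registered policies.
import lerobot

print(lerobot.available_policies)  # e.g. ["act", "diffusion", "tdmpc", "vqbet", "smolvla"]
```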
### Finetune the pretrained model

Use [`smolvla_base`](https://hf.co/lerobot/smolvla_base), our pretrained 450M-parameter model, with the lerobot training framework:

```bash
python lerobot/scripts/train.py \
--policy.path=lerobot/smolvla_base \
--dataset.repo_id=lerobot/svla_so100_stacking \
--batch_size=64 \
--steps=200000
```

<p align="center">
  <img src="https://cdn-uploads.huggingface.co/production/uploads/640e21ef3c82bd463ee5a76d/S-3vvVCulChREwHDkquoc.gif" alt="Comparison of SmolVLA across task variations." width="500"/>
  <br/>
  <em>Figure 1: Comparison of SmolVLA across task variations. From left to right: (1) asynchronous pick-place cube counting, (2) synchronous pick-place cube counting, (3) pick-place cube counting under perturbations, and (4) generalization on pick-and-place of the lego block with a real-world SO101.</em>
</p>

### Train from scratch

If you'd like to train from the architecture (pretrained VLM + action expert) rather than from the pretrained checkpoint:

```bash
python lerobot/scripts/train.py \
--policy.type=smolvla \
--dataset.repo_id=lerobot/svla_so100_stacking \
--batch_size=64 \
--steps=200000
```

You can also load `SmolVLAPolicy` directly:

```python
from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy

policy = SmolVLAPolicy.from_pretrained("lerobot/smolvla_base")
```
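
Once loaded, the policy exposes the usual LeRobot inference interface. The snippet below is only a sketch: the camera key, state dimension, and image resolution are placeholder assumptions that must match your dataset and robot configuration, and `select_action` is assumed to behave like the other LeRobot policies (returning the action for the current observation):

```python
import torch

from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy

policy = SmolVLAPolicy.from_pretrained("lerobot/smolvla_base")
policy.eval()

# Placeholder observation batch; key names and shapes depend on your robot/dataset setup.
batch = {
    "observation.state": torch.zeros(1, 6),                 # proprioceptive state
    "observation.images.top": torch.zeros(1, 3, 512, 512),  # RGB frame from one camera
    "task": ["Grasp a lego block and put it in the bin."],  # language instruction
}

with torch.no_grad():
    action = policy.select_action(batch)  # action for the current timestep
print(action.shape)
```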
## Evaluate the pretrained policy and run it in real time

If you want to record the evaluation episodes and save the videos to the Hub, log in to your Hugging Face account by running:

```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```

Store your Hugging Face username in a variable so you can use it in the commands below:

```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```
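
If you prefer to resolve the username from Python instead of the shell, `huggingface_hub` offers an equivalent (assuming you are already logged in):

```python
from huggingface_hub import whoami

# Resolve the logged-in user and build the evaluation dataset repo id used below.
hf_user = whoami()["name"]
eval_repo_id = f"{hf_user}/eval_svla_base_test"
print(eval_repo_id)
```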
Now, point `--control.policy.path` to the policy you want to evaluate, `lerobot/smolvla_base` in this case, and run:

```bash
python lerobot/scripts/control_robot.py \
--robot.type=so100 \
--control.type=record \
--control.fps=30 \
--control.single_task="Grasp a lego block and put it in the bin." \
--control.repo_id=${HF_USER}/eval_svla_base_test \
--control.tags='["tutorial"]' \
--control.warmup_time_s=5 \
--control.episode_time_s=30 \
--control.reset_time_s=30 \
--control.num_episodes=10 \
--control.push_to_hub=true \
--control.policy.path=lerobot/smolvla_base
```

Depending on your evaluation setup, you can configure the duration and the number of episodes to record for your evaluation suite.
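
As a rough sanity check on session length, the timing flags above imply about ten minutes of wall-clock recording (assuming every episode and reset uses its full time budget):

```python
# Back-of-the-envelope session-length estimate for the recording command above.
warmup_time_s, episode_time_s, reset_time_s, num_episodes = 5, 30, 30, 10

total_s = warmup_time_s + num_episodes * (episode_time_s + reset_time_s)
print(f"{total_s} s (~{total_s / 60:.1f} min)")  # 605 s (~10.1 min)
```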
@@ -168,7 +168,7 @@ available_datasets = sorted(
 )
 
 # lists all available policies from `lerobot/common/policies`
-available_policies = ["act", "diffusion", "tdmpc", "vqbet"]
+available_policies = ["act", "diffusion", "tdmpc", "vqbet", "smolvla"]
 
 # lists all available robots from `lerobot/common/robot_devices/robots`
 available_robots = [
@@ -662,6 +662,7 @@ class VLAFlowMatching(nn.Module):
            self.config.max_period,
            device=device,
        )

        time_emb = time_emb.type(dtype=dtype)

        time_emb = time_emb[:, None, :].expand_as(action_emb)
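
For context, these lines cast a sinusoidal time embedding to the action expert's compute dtype and broadcast it over the action sequence. The standalone sketch below illustrates that pattern; it is not the library's actual helper, and the hidden size, chunk length, and period bounds are made-up values:

```python
import math

import torch


def sinusoidal_time_embedding(t: torch.Tensor, dim: int, min_period: float, max_period: float) -> torch.Tensor:
    """Illustrative sinusoidal embedding of a flow-matching time t in [0, 1]; maps shape (B,) to (B, dim)."""
    half = dim // 2
    # Geometrically spaced periods between min_period and max_period.
    periods = min_period * (max_period / min_period) ** (torch.arange(half, device=t.device) / (half - 1))
    angles = 2 * math.pi * t[:, None] / periods[None, :]
    return torch.cat([torch.sin(angles), torch.cos(angles)], dim=-1)


# Made-up shapes: batch of 2, action chunk of 50 steps, hidden size 960, bfloat16 action expert.
time = torch.rand(2)
action_emb = torch.zeros(2, 50, 960, dtype=torch.bfloat16)

time_emb = sinusoidal_time_embedding(time, dim=960, min_period=4e-3, max_period=4.0)
time_emb = time_emb.type(dtype=action_emb.dtype)        # match the compute dtype, as in the diff above
time_emb = time_emb[:, None, :].expand_as(action_emb)   # broadcast over the action sequence
```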
@@ -272,7 +272,6 @@ def control_loop(
        action = {"action": action}

        if dataset is not None:
            observation = {k: v for k, v in observation.items() if k not in ["task", "robot_type"]}
            frame = {**observation, **action, "task": single_task}
            dataset.add_frame(frame)
@@ -21,6 +21,7 @@ import pytest
 import lerobot
 from lerobot.common.policies.act.modeling_act import ACTPolicy
 from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
+from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy
 from lerobot.common.policies.tdmpc.modeling_tdmpc import TDMPCPolicy
 from lerobot.common.policies.vqbet.modeling_vqbet import VQBeTPolicy
 from tests.utils import require_env
@@ -45,7 +46,7 @@ def test_available_policies():
     This test verifies that the class attribute `name` for all policies is
     consistent with those listed in `lerobot/__init__.py`.
     """
-    policy_classes = [ACTPolicy, DiffusionPolicy, TDMPCPolicy, VQBeTPolicy]
+    policy_classes = [ACTPolicy, DiffusionPolicy, TDMPCPolicy, VQBeTPolicy, SmolVLAPolicy]
     policies = [pol_cls.name for pol_cls in policy_classes]
     assert set(policies) == set(lerobot.available_policies), policies