diff --git a/README.md b/README.md
index 35911869..d71fad79 100644
--- a/README.md
+++ b/README.md
@@ -135,11 +135,7 @@ hydra.run.dir=outputs/visualize_dataset/example
 
 ### Evaluate a pretrained policy
 
-You can import our environment class, download pretrained policies from the HuggingFace hub, and use our rollout utilities with rendering:
-```python
-""" Copy pasted from `examples/2_evaluate_pretrained_policy.py`
-# TODO
-```
+Check out [example 2](./examples/2_evaluate_pretrained_policy.py) to see how you can download a pretrained policy from the HuggingFace hub, load the corresponding environment and model, and run an evaluation.
 
 Or you can achieve the same result by executing our script from the command line:
 ```bash
@@ -150,7 +146,7 @@ eval_episodes=10 \
 hydra.run.dir=outputs/eval/example_hub
 ```
 
-After launching training of your own policy, you can also re-evaluate the checkpoints with:
+After training your own policy, you can also re-evaluate the checkpoints with:
 ```bash
 python lerobot/scripts/eval.py \
 --config PATH/TO/FOLDER/config.yaml \
@@ -163,19 +159,9 @@ See `python lerobot/scripts/eval.py --help` for more instructions.
 
 ### Train your own policy
 
-You can import our dataset, environment, policy classes, and use our training utilities (if some data is missing, it will be automatically downloaded from HuggingFace hub):
-```python
-""" Copy pasted from `examples/3_train_policy.py`
-# TODO
-```
+You can import our dataset, environment, and policy classes, and use our training utilities (if some data is missing, it will be automatically downloaded from the HuggingFace hub): check out [example 3](./examples/3_train_policy.py). After you run it, you may want to revisit [example 2](./examples/2_evaluate_pretrained_policy.py) to evaluate your training output!
 
-Or you can achieve the same result by executing our script from the command line:
-```bash
-python lerobot/scripts/train.py \
-hydra.run.dir=outputs/train/example
-```
-
-You can easily train any policy on any environment:
+In general, you can use our training script to easily train any policy on any environment:
 ```bash
 python lerobot/scripts/train.py \
 env=aloha \
diff --git a/examples/2_evaluate_pretrained_policy.py b/examples/2_evaluate_pretrained_policy.py
index bb73167b..be6abd1b 100644
--- a/examples/2_evaluate_pretrained_policy.py
+++ b/examples/2_evaluate_pretrained_policy.py
@@ -14,7 +14,7 @@ from lerobot.scripts.eval import eval
 hub_id = "lerobot/diffusion_policy_pusht_image"
 folder = Path(snapshot_download(hub_id))
 # OR uncomment the following to evaluate a policy from the local outputs/train folder.
-folder = Path("outputs/train/example_pusht_diffusion")
+# folder = Path("outputs/train/example_pusht_diffusion")
 
 config_path = folder / "config.yaml"
 weights_path = folder / "model.pt"
diff --git a/tests/test_examples.py b/tests/test_examples.py
index 9da7a663..4263e452 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -46,11 +46,25 @@ def test_examples_3_and_2():
 
     with open(path, "r") as file:
         file_contents = file.read()
 
-    # Do less evals and use CPU.
+    # Do fewer evals, use CPU, and use the local model.
     file_contents = _find_and_replace(
         file_contents,
-        ['"eval_episodes=10"', '"rollout_batch_size=10"', '"device=cuda"'],
-        ['"eval_episodes=1"', '"rollout_batch_size=1"','"device=cpu"'],
+        [
+            '"eval_episodes=10"',
+            '"rollout_batch_size=10"',
+            '"device=cuda"',
+            '# folder = Path("outputs/train/example_pusht_diffusion")',
+            'hub_id = "lerobot/diffusion_policy_pusht_image"',
+            "folder = Path(snapshot_download(hub_id))",
+        ],
+        [
+            '"eval_episodes=1"',
+            '"rollout_batch_size=1"',
+            '"device=cpu"',
+            'folder = Path("outputs/train/example_pusht_diffusion")',
+            "",
+            "",
+        ],
     )
-    assert Path(f"outputs/train/example_pusht_diffusion").exists()
\ No newline at end of file
+    assert Path("outputs/train/example_pusht_diffusion").exists()
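For reference, the test above patches the example's source text before executing it. The `_find_and_replace` helper itself is not part of this diff, only its call site; here is a minimal sketch of what such a helper might look like (the signature and the assertion are assumptions, not the repository's actual implementation):

```python
# Hypothetical sketch of `_find_and_replace`: sequentially swap each find
# string for its replacement, failing loudly if a target string is absent.
# Only the call site appears in this diff, so the real helper may differ.
def _find_and_replace(text: str, finds: list[str], replaces: list[str]) -> str:
    for find, replace in zip(finds, replaces):
        assert find in text, f"{find} not found in text"
        text = text.replace(find, replace)
    return text
```

With a helper like this, the test rewrites the example to uncomment the local `outputs/train/example_pusht_diffusion` path and blank out the `hub_id`/`snapshot_download` lines, so it evaluates the locally trained checkpoint instead of downloading from the hub, matching the updated comment above.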