From e1763aa90656da4839dfb3c113dc9bf9436618c0 Mon Sep 17 00:00:00 2001
From: Remi Cadene
Date: Tue, 10 Sep 2024 19:45:59 +0200
Subject: [PATCH] Clean + Add act_aloha_real.yaml + Add act_real.yaml

---
 lerobot/__init__.py                           |   4 +-
 lerobot/common/datasets/compute_stats.py      |   2 +-
 .../{act_real.yaml => act_aloha_real.yaml}    |  33 +++---
 lerobot/configs/policy/act_real_no_state.yaml | 110 ------------------
 4 files changed, 24 insertions(+), 125 deletions(-)
 rename lerobot/configs/policy/{act_real.yaml => act_aloha_real.yaml} (74%)
 delete mode 100644 lerobot/configs/policy/act_real_no_state.yaml

diff --git a/lerobot/__init__.py b/lerobot/__init__.py
index aeae31008..c75c67cc7 100644
--- a/lerobot/__init__.py
+++ b/lerobot/__init__.py
@@ -203,7 +203,9 @@ available_policies_per_env = {
     "aloha": ["act"],
     "pusht": ["diffusion", "vqbet"],
     "xarm": ["tdmpc"],
-    "dora_aloha_real": ["act_real"],
+    "koch_real": ["act_koch_real"],
+    "aloha_real": ["act_aloha_real"],
+    "dora_aloha_real": ["act_aloha_real"],
 }
 
 env_task_pairs = [(env, task) for env, tasks in available_tasks_per_env.items() for task in tasks]
diff --git a/lerobot/common/datasets/compute_stats.py b/lerobot/common/datasets/compute_stats.py
index 208284465..bafac2e1e 100644
--- a/lerobot/common/datasets/compute_stats.py
+++ b/lerobot/common/datasets/compute_stats.py
@@ -68,7 +68,7 @@ def get_stats_einops_patterns(dataset, num_workers=0):
     return stats_patterns
 
 
-def compute_stats(dataset, batch_size=32, num_workers=16, max_num_samples=None):
+def compute_stats(dataset, batch_size=8, num_workers=8, max_num_samples=None):
     """Compute mean/std and min/max statistics of all data keys in a LeRobotDataset."""
     if max_num_samples is None:
         max_num_samples = len(dataset)
diff --git a/lerobot/configs/policy/act_real.yaml b/lerobot/configs/policy/act_aloha_real.yaml
similarity index 74%
rename from lerobot/configs/policy/act_real.yaml
rename to lerobot/configs/policy/act_aloha_real.yaml
index 058104f4d..dd0b461b5 100644
--- a/lerobot/configs/policy/act_real.yaml
+++ b/lerobot/configs/policy/act_aloha_real.yaml
@@ -1,21 +1,27 @@
 # @package _global_
 
-# Use `act_real.yaml` to train on real-world Aloha/Aloha2 datasets.
-# Compared to `act.yaml`, it contains 4 cameras (i.e. cam_right_wrist, cam_left_wrist, images,
-# cam_low) instead of 1 camera (i.e. top). Also, `training.eval_freq` is set to -1. This config is used
-# to evaluate checkpoints at a certain frequency of training steps. When it is set to -1, it deactivates evaluation.
-# This is because real-world evaluation is done through [dora-lerobot](https://github.com/dora-rs/dora-lerobot).
-# Look at its README for more information on how to evaluate a checkpoint in the real-world.
+# Use `act_aloha_real.yaml` to train on real-world datasets collected on Aloha or Aloha-2 robots.
+# Compared to `act.yaml`, it contains 4 cameras (i.e. cam_right_wrist, cam_left_wrist, cam_high, cam_low) instead of 1 camera (i.e. top).
+# Also, `training.eval_freq` is set to -1. This parameter controls how often checkpoints are evaluated during training;
+# setting it to -1 deactivates evaluation. This is because real-world evaluation is done through our `control_robot.py` script.
+# Look at the documentation in the header of `control_robot.py` for more information on how to collect data, train and evaluate a policy.
 #
-# Example of usage for training:
+# Example of usage for training and inference with `control_robot.py`:
 # ```bash
 # python lerobot/scripts/train.py \
-#   policy=act_real \
+#   policy=act_aloha_real \
+#   env=aloha_real
+# ```
+#
+# Example of usage for training and inference with [Dora-rs](https://github.com/dora-rs/dora-lerobot):
+# ```bash
+# python lerobot/scripts/train.py \
+#   policy=act_aloha_real \
 #   env=dora_aloha_real
 # ```
 
 seed: 1000
-dataset_repo_id: lerobot/aloha_static_vinh_cup
+dataset_repo_id: cadene/aloha_lace_shoe
 
 override_dataset_stats:
   observation.images.cam_right_wrist:
@@ -36,10 +42,11 @@ override_dataset_stats:
     std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
 
 training:
-  offline_steps: 100000
+  offline_steps: 80000
   online_steps: 0
   eval_freq: -1
-  save_freq: 20000
+  save_freq: 10000
+  log_freq: 100
   save_checkpoint: true
 
   batch_size: 8
@@ -62,7 +69,7 @@ policy:
 
   # Input / output structure.
   n_obs_steps: 1
-  chunk_size: 100 # chunk_size
+  chunk_size: 100
   n_action_steps: 100
 
   input_shapes:
@@ -107,7 +114,7 @@ policy:
   n_vae_encoder_layers: 4
 
   # Inference.
-  temporal_ensemble_coeff: null
+  temporal_ensemble_momentum: null
 
   # Training and loss computation.
   dropout: 0.1
diff --git a/lerobot/configs/policy/act_real_no_state.yaml b/lerobot/configs/policy/act_real_no_state.yaml
deleted file mode 100644
index 082610503..000000000
--- a/lerobot/configs/policy/act_real_no_state.yaml
+++ /dev/null
@@ -1,110 +0,0 @@
-# @package _global_
-
-# Use `act_real_no_state.yaml` to train on real-world Aloha/Aloha2 datasets when cameras are moving (e.g. wrist cameras)
-# Compared to `act_real.yaml`, it is camera only and does not use the state as input which is vector of robot joint positions.
-# We validated experimentaly that not using state reaches better success rate. Our hypothesis is that `act_real.yaml` might
-# overfits to the state, because the images are more complex to learn from since they are moving.
-#
-# Example of usage for training:
-# ```bash
-# python lerobot/scripts/train.py \
-#   policy=act_real_no_state \
-#   env=dora_aloha_real
-# ```
-
-seed: 1000
-dataset_repo_id: lerobot/aloha_static_vinh_cup
-
-override_dataset_stats:
-  observation.images.cam_right_wrist:
-    # stats from imagenet, since we use a pretrained vision model
-    mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
-    std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-  observation.images.cam_left_wrist:
-    # stats from imagenet, since we use a pretrained vision model
-    mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
-    std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-  observation.images.cam_high:
-    # stats from imagenet, since we use a pretrained vision model
-    mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
-    std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-  observation.images.cam_low:
-    # stats from imagenet, since we use a pretrained vision model
-    mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
-    std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-
-training:
-  offline_steps: 100000
-  online_steps: 0
-  eval_freq: -1
-  save_freq: 20000
-  save_checkpoint: true
-
-  batch_size: 8
-  lr: 1e-5
-  lr_backbone: 1e-5
-  weight_decay: 1e-4
-  grad_clip_norm: 10
-  online_steps_between_rollouts: 1
-
-  delta_timestamps:
-    action: "[i / ${fps} for i in range(${policy.chunk_size})]"
-
-eval:
-  n_episodes: 50
-  batch_size: 50
-
-# See `configuration_act.py` for more details.
-policy:
-  name: act
-
-  # Input / output structure.
-  n_obs_steps: 1
-  chunk_size: 100 # chunk_size
-  n_action_steps: 100
-
-  input_shapes:
-    # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
-    observation.images.cam_right_wrist: [3, 480, 640]
-    observation.images.cam_left_wrist: [3, 480, 640]
-    observation.images.cam_high: [3, 480, 640]
-    observation.images.cam_low: [3, 480, 640]
-  output_shapes:
-    action: ["${env.action_dim}"]
-
-  # Normalization / Unnormalization
-  input_normalization_modes:
-    observation.images.cam_right_wrist: mean_std
-    observation.images.cam_left_wrist: mean_std
-    observation.images.cam_high: mean_std
-    observation.images.cam_low: mean_std
-  output_normalization_modes:
-    action: mean_std
-
-  # Architecture.
-  # Vision backbone.
-  vision_backbone: resnet18
-  pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
-  replace_final_stride_with_dilation: false
-  # Transformer layers.
-  pre_norm: false
-  dim_model: 512
-  n_heads: 8
-  dim_feedforward: 3200
-  feedforward_activation: relu
-  n_encoder_layers: 4
-  # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
-  # that means only the first layer is used. Here we match the original implementation by setting this to 1.
-  # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
-  n_decoder_layers: 1
-  # VAE.
-  use_vae: true
-  latent_dim: 32
-  n_vae_encoder_layers: 4
-
-  # Inference.
-  temporal_ensemble_coeff: null
-
-  # Training and loss computation.
-  dropout: 0.1
-  kl_weight: 10.0
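A minimal sketch of how the retuned `compute_stats` defaults from this patch (`batch_size=8`, `num_workers=8`) can be exercised, assuming lerobot at this revision is installed, that the import paths are `lerobot.common.datasets.lerobot_dataset.LeRobotDataset` and `lerobot.common.datasets.compute_stats.compute_stats`, and that the `cadene/aloha_lace_shoe` dataset referenced by `act_aloha_real.yaml` is reachable on the Hugging Face Hub; swap in your own repo id as needed.

```python
# Sketch only: recompute dataset statistics with the lighter defaults patched above.
from lerobot.common.datasets.compute_stats import compute_stats
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

# Dataset referenced by act_aloha_real.yaml; replace with your own repo id if needed.
dataset = LeRobotDataset("cadene/aloha_lace_shoe")

# batch_size=8 and num_workers=8 now match the patched defaults; pass larger values
# explicitly if your machine has spare CPU and RAM.
stats = compute_stats(dataset, batch_size=8, num_workers=8)

# Each data key maps to its mean/std/min/max tensors.
print(stats["observation.state"]["mean"])
```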