forked from tangger/lerobot
Clean + Add act_aloha_real.yaml + Add act_real.yaml
@@ -203,7 +203,9 @@ available_policies_per_env = {
     "aloha": ["act"],
     "pusht": ["diffusion", "vqbet"],
     "xarm": ["tdmpc"],
-    "dora_aloha_real": ["act_real"],
+    "koch_real": ["act_koch_real"],
+    "aloha_real": ["act_aloha_real"],
+    "dora_aloha_real": ["act_aloha_real"],
 }

 env_task_pairs = [(env, task) for env, tasks in available_tasks_per_env.items() for task in tasks]
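In other words, the real-robot environment keys now resolve to the renamed ACT configs, with `aloha_real` added alongside `dora_aloha_real`. A minimal lookup sketch, assuming the mapping is exposed as a plain dict at the package top level (as the hunk context `available_policies_per_env = {` suggests):

```python
# Sketch only; `lerobot` and `available_policies_per_env` come from the
# hunk context above, the loop itself is illustrative.
import lerobot

for env in ("aloha_real", "dora_aloha_real", "koch_real"):
    # Each env key lists the policy config names that can be trained on it,
    # e.g. "aloha_real" -> ["act_aloha_real"].
    print(env, "->", lerobot.available_policies_per_env[env])
```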
@@ -68,7 +68,7 @@ def get_stats_einops_patterns(dataset, num_workers=0):
     return stats_patterns


-def compute_stats(dataset, batch_size=32, num_workers=16, max_num_samples=None):
+def compute_stats(dataset, batch_size=8, num_workers=8, max_num_samples=None):
     """Compute mean/std and min/max statistics of all data keys in a LeRobotDataset."""
     if max_num_samples is None:
         max_num_samples = len(dataset)
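The only change here is the defaults: statistics are now computed with a smaller batch and fewer workers, which is gentler on RAM and CPU at the cost of speed. A hedged usage sketch, assuming the usual lerobot import paths (they are not shown in this diff):

```python
# Assumed import paths; adjust to your checkout if they differ.
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.datasets.compute_stats import compute_stats

dataset = LeRobotDataset("cadene/aloha_lace_shoe")

# batch_size=8 / num_workers=8 are the new defaults; pass larger values
# explicitly to recover the old 32/16 behaviour on a beefier machine.
stats = compute_stats(dataset, batch_size=8, num_workers=8)
print(sorted(stats.keys()))  # one mean/std/min/max entry per data key
```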
@@ -1,21 +1,27 @@
|
|||||||
# @package _global_
|
# @package _global_
|
||||||
|
|
||||||
# Use `act_real.yaml` to train on real-world Aloha/Aloha2 datasets.
|
# Use `act_aloha_real.yaml` to train on real-world datasets collected on Aloha or Aloha-2 robots.
|
||||||
# Compared to `act.yaml`, it contains 4 cameras (i.e. cam_right_wrist, cam_left_wrist, images,
|
# Compared to `act.yaml`, it contains 4 cameras (i.e. cam_right_wrist, cam_left_wrist, cam_high, cam_low) instead of 1 camera (i.e. top).
|
||||||
# cam_low) instead of 1 camera (i.e. top). Also, `training.eval_freq` is set to -1. This config is used
|
# Also, `training.eval_freq` is set to -1. This config is used to evaluate checkpoints at a certain frequency of training steps.
|
||||||
# to evaluate checkpoints at a certain frequency of training steps. When it is set to -1, it deactivates evaluation.
|
# When it is set to -1, it deactivates evaluation. This is because real-world evaluation is done through our `control_robot.py` script.
|
||||||
# This is because real-world evaluation is done through [dora-lerobot](https://github.com/dora-rs/dora-lerobot).
|
# Look at the documentation in header of `control_robot.py` for more information on how to collect data , train and evaluate a policy.
|
||||||
# Look at its README for more information on how to evaluate a checkpoint in the real-world.
|
|
||||||
#
|
#
|
||||||
# Example of usage for training:
|
# Example of usage for training and inference with `control_robot.py`:
|
||||||
# ```bash
|
# ```bash
|
||||||
# python lerobot/scripts/train.py \
|
# python lerobot/scripts/train.py \
|
||||||
# policy=act_real \
|
# policy=act_aloha_real \
|
||||||
|
# env=aloha_real
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# Example of usage for training and inference with [Dora-rs](https://github.com/dora-rs/dora-lerobot):
|
||||||
|
# ```bash
|
||||||
|
# python lerobot/scripts/train.py \
|
||||||
|
# policy=act_aloha_real \
|
||||||
# env=dora_aloha_real
|
# env=dora_aloha_real
|
||||||
# ```
|
# ```
|
||||||
|
|
||||||
seed: 1000
|
seed: 1000
|
||||||
dataset_repo_id: lerobot/aloha_static_vinh_cup
|
dataset_repo_id: cadene/aloha_lace_shoe
|
||||||
|
|
||||||
override_dataset_stats:
|
override_dataset_stats:
|
||||||
observation.images.cam_right_wrist:
|
observation.images.cam_right_wrist:
|
||||||
@@ -36,10 +42,11 @@ override_dataset_stats:
     std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)

 training:
-  offline_steps: 100000
+  offline_steps: 80000
   online_steps: 0
   eval_freq: -1
-  save_freq: 20000
+  save_freq: 10000
+  log_freq: 100
   save_checkpoint: true

   batch_size: 8
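Net effect of the schedule change: the run shrinks from 100k to 80k offline steps while checkpoints are written twice as often, so a full run still yields 80000 / 10000 = 8 checkpoints, with metrics logged every 100 steps via the newly added `log_freq: 100`.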
@@ -62,7 +69,7 @@ policy:

   # Input / output structure.
   n_obs_steps: 1
-  chunk_size: 100 # chunk_size
+  chunk_size: 100
   n_action_steps: 100

   input_shapes:
@@ -107,7 +114,7 @@ policy:
   n_vae_encoder_layers: 4

   # Inference.
-  temporal_ensemble_coeff: null
+  temporal_ensemble_momentum: null

   # Training and loss computation.
   dropout: 0.1
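The inference setting is renamed from `temporal_ensemble_coeff` to `temporal_ensemble_momentum` and stays disabled (`null`). For orientation, a hedged sketch of the exponential weighting that this kind of parameter typically controls in ACT-style temporal ensembling; the exact formula and sign convention live in the ACT modeling code, not in this diff:

```python
import numpy as np

def ensemble_weights(n_overlapping: int, coeff: float) -> np.ndarray:
    """Exponential weights over the predictions that overlap at the current
    timestep; coeff (a.k.a. momentum) sets how quickly they decay."""
    w = np.exp(-coeff * np.arange(n_overlapping))
    return w / w.sum()

# With the yaml value at null/None, no ensembling happens and the policy
# simply executes each predicted chunk of n_action_steps actions.
print(ensemble_weights(4, 0.01))
```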
@@ -1,110 +0,0 @@
-# @package _global_
-
-# Use `act_real_no_state.yaml` to train on real-world Aloha/Aloha2 datasets when cameras are moving (e.g. wrist cameras)
-# Compared to `act_real.yaml`, it is camera only and does not use the state as input which is vector of robot joint positions.
-# We validated experimentaly that not using state reaches better success rate. Our hypothesis is that `act_real.yaml` might
-# overfits to the state, because the images are more complex to learn from since they are moving.
-#
-# Example of usage for training:
-# ```bash
-# python lerobot/scripts/train.py \
-#   policy=act_real_no_state \
-#   env=dora_aloha_real
-# ```
-
-seed: 1000
-dataset_repo_id: lerobot/aloha_static_vinh_cup
-
-override_dataset_stats:
-  observation.images.cam_right_wrist:
-    # stats from imagenet, since we use a pretrained vision model
-    mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
-    std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-  observation.images.cam_left_wrist:
-    # stats from imagenet, since we use a pretrained vision model
-    mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
-    std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-  observation.images.cam_high:
-    # stats from imagenet, since we use a pretrained vision model
-    mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
-    std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-  observation.images.cam_low:
-    # stats from imagenet, since we use a pretrained vision model
-    mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
-    std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
-
-training:
-  offline_steps: 100000
-  online_steps: 0
-  eval_freq: -1
-  save_freq: 20000
-  save_checkpoint: true
-
-  batch_size: 8
-  lr: 1e-5
-  lr_backbone: 1e-5
-  weight_decay: 1e-4
-  grad_clip_norm: 10
-  online_steps_between_rollouts: 1
-
-  delta_timestamps:
-    action: "[i / ${fps} for i in range(${policy.chunk_size})]"
-
-eval:
-  n_episodes: 50
-  batch_size: 50
-
-# See `configuration_act.py` for more details.
-policy:
-  name: act
-
-  # Input / output structure.
-  n_obs_steps: 1
-  chunk_size: 100 # chunk_size
-  n_action_steps: 100
-
-  input_shapes:
-    # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
-    observation.images.cam_right_wrist: [3, 480, 640]
-    observation.images.cam_left_wrist: [3, 480, 640]
-    observation.images.cam_high: [3, 480, 640]
-    observation.images.cam_low: [3, 480, 640]
-  output_shapes:
-    action: ["${env.action_dim}"]
-
-  # Normalization / Unnormalization
-  input_normalization_modes:
-    observation.images.cam_right_wrist: mean_std
-    observation.images.cam_left_wrist: mean_std
-    observation.images.cam_high: mean_std
-    observation.images.cam_low: mean_std
-  output_normalization_modes:
-    action: mean_std
-
-  # Architecture.
-  # Vision backbone.
-  vision_backbone: resnet18
-  pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
-  replace_final_stride_with_dilation: false
-  # Transformer layers.
-  pre_norm: false
-  dim_model: 512
-  n_heads: 8
-  dim_feedforward: 3200
-  feedforward_activation: relu
-  n_encoder_layers: 4
-  # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
-  # that means only the first layer is used. Here we match the original implementation by setting this to 1.
-  # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
-  n_decoder_layers: 1
-  # VAE.
-  use_vae: true
-  latent_dim: 32
-  n_vae_encoder_layers: 4
-
-  # Inference.
-  temporal_ensemble_coeff: null
-
-  # Training and loss computation.
-  dropout: 0.1
-  kl_weight: 10.0
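One line of the deleted file worth decoding is `delta_timestamps`: the quoted list comprehension is interpolated by Hydra/OmegaConf (`${fps}`, `${policy.chunk_size}`) and then evaluated as Python when the dataset is built, producing the per-action time offsets of the chunk. A sketch of what it expands to, assuming fps = 30 purely for illustration (the env config defines the real value):

```python
fps = 30          # assumption for illustration; set by the env config
chunk_size = 100  # ${policy.chunk_size} in the config above

# "[i / ${fps} for i in range(${policy.chunk_size})]" resolves to this
# list of time offsets (in seconds) relative to the current frame:
delta_timestamps = [i / fps for i in range(chunk_size)]
print(delta_timestamps[:3], "...", delta_timestamps[-1])
```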