Move normalization to policy for act and diffusion (#90)

Co-authored-by: Alexander Soare <alexander.soare159@gmail.com>
Author: Remi
Date: 2024-04-25 11:47:38 +02:00
Committed by: GitHub
Parent: c1bcf857c5
Commit: e760e4cd63
25 changed files with 543 additions and 288 deletions

lerobot/configs/env/aloha.yaml

@@ -20,7 +20,5 @@ env:
   image_size: [3, 480, 640]
   episode_length: 400
   fps: ${fps}
-
-policy:
   state_dim: 14
   action_dim: 14

lerobot/configs/env/pusht.yaml

@@ -20,7 +20,5 @@ env:
   image_size: 96
   episode_length: 300
   fps: ${fps}
-
-policy:
   state_dim: 2
   action_dim: 2

lerobot/configs/env/xarm.yaml

@@ -19,7 +19,5 @@ env:
   image_size: 84
   episode_length: 25
   fps: ${fps}
-
-policy:
   state_dim: 4
   action_dim: 4
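All three env configs make the same move: `state_dim` and `action_dim` leave the top-level `policy:` key and land under `env:`, so the policy configs below can pull them in through Hydra/OmegaConf interpolation (`${env.state_dim}`, `${env.action_dim}`). A minimal sketch of that resolution, using in-memory stand-ins for the YAML files rather than the repo's actual config tree:

```python
from omegaconf import OmegaConf

# In-memory stand-ins for an env config and a policy config.
env_cfg = OmegaConf.create({"env": {"state_dim": 14, "action_dim": 14}})
policy_cfg = OmegaConf.create({
    "policy": {
        "input_shapes": {"observation.state": ["${env.state_dim}"]},
        "output_shapes": {"action": ["${env.action_dim}"]},
    }
})

# Hydra composes defaults by merging; after the merge, ${env.*} resolves.
cfg = OmegaConf.merge(env_cfg, policy_cfg)
print(OmegaConf.to_container(cfg, resolve=True)["policy"]["output_shapes"]["action"])  # [14]
```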

lerobot/configs/policy/act.yaml

@@ -11,26 +11,36 @@ log_freq: 250
 n_obs_steps: 1
 # when temporal_agg=False, n_action_steps=horizon
 
+override_dataset_stats:
+  observation.images.top:
+    # stats from imagenet, since we use a pretrained vision model
+    mean: [[[0.485]], [[0.456]], [[0.406]]]  # (c,1,1)
+    std: [[[0.229]], [[0.224]], [[0.225]]]  # (c,1,1)
+
+# See `configuration_act.py` for more details.
 policy:
   name: act
   pretrained_model_path:
 
-  # Environment.
-  # Inherit these from the environment config.
-  state_dim: ???
-  action_dim: ???
-
-  # Inputs / output structure.
+  # Input / output structure.
   n_obs_steps: ${n_obs_steps}
   camera_names: [top]  # [top, front_close, left_pillar, right_pillar]
   chunk_size: 100 # chunk_size
   n_action_steps: 100
 
-  # Vision preprocessing.
-  image_normalization_mean: [0.485, 0.456, 0.406]
-  image_normalization_std: [0.229, 0.224, 0.225]
+  input_shapes:
+    # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
+    observation.images.top: [3, 480, 640]
+    observation.state: ["${env.state_dim}"]
+  output_shapes:
+    action: ["${env.action_dim}"]
+
+  # Normalization / Unnormalization
+  normalize_input_modes:
+    observation.images.top: mean_std
+    observation.state: mean_std
+  unnormalize_output_modes:
+    action: mean_std
 
   # Architecture.
   # Vision backbone.
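The `normalize_input_modes` / `unnormalize_output_modes` pairs replace the old `image_normalization_mean/std` fields: normalization now happens inside the policy, keyed per input/output tensor, using dataset stats (optionally pinned via `override_dataset_stats`, e.g. to ImageNet stats for the pretrained backbone). A sketch of what mode-keyed (un)normalization could look like; the helper names and the min_max convention (scaling to [-1, 1]) are assumptions here, not lerobot's actual API:

```python
import torch

def normalize(x: torch.Tensor, stats: dict, mode: str) -> torch.Tensor:
    # Normalize one observation tensor according to its configured mode.
    if mode == "mean_std":
        return (x - stats["mean"]) / (stats["std"] + 1e-8)
    if mode == "min_max":
        x = (x - stats["min"]) / (stats["max"] - stats["min"] + 1e-8)  # -> [0, 1]
        return x * 2 - 1  # -> [-1, 1] (assumed convention)
    raise ValueError(f"unknown normalization mode: {mode}")

def unnormalize(x: torch.Tensor, stats: dict, mode: str) -> torch.Tensor:
    # Invert the above, e.g. to map a predicted action back to its raw range.
    if mode == "mean_std":
        return x * stats["std"] + stats["mean"]
    if mode == "min_max":
        x = (x + 1) / 2  # [-1, 1] -> [0, 1]
        return x * (stats["max"] - stats["min"]) + stats["min"]
    raise ValueError(f"unknown normalization mode: {mode}")
```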

lerobot/configs/policy/diffusion.yaml

@@ -18,27 +18,43 @@ online_steps: 0
 offline_prioritized_sampler: true
 
+override_dataset_stats:
+  # TODO(rcadene, alexander-soare): should we remove image stats as well? do we use a pretrained vision model?
+  observation.image:
+    mean: [[[0.5]], [[0.5]], [[0.5]]]  # (c,1,1)
+    std: [[[0.5]], [[0.5]], [[0.5]]]  # (c,1,1)
+  # TODO(rcadene, alexander-soare): we override state and action stats to use the same as the pretrained model
+  # from the original codebase, but we should remove these and train our own pretrained model
+  observation.state:
+    min: [13.456424, 32.938293]
+    max: [496.14618, 510.9579]
+  action:
+    min: [12.0, 25.0]
+    max: [511.0, 511.0]
+
 policy:
   name: diffusion
   pretrained_model_path:
 
-  # Environment.
-  # Inherit these from the environment config.
-  state_dim: ???
-  action_dim: ???
-  image_size:
-    - ${env.image_size}  # height
-    - ${env.image_size}  # width
-
-  # Inputs / output structure.
+  # Input / output structure.
   n_obs_steps: ${n_obs_steps}
   horizon: ${horizon}
   n_action_steps: ${n_action_steps}
 
-  # Vision preprocessing.
-  image_normalization_mean: [0.5, 0.5, 0.5]
-  image_normalization_std: [0.5, 0.5, 0.5]
+  input_shapes:
+    # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
+    observation.image: [3, 96, 96]
+    observation.state: ["${env.state_dim}"]
+  output_shapes:
+    action: ["${env.action_dim}"]
+
+  # Normalization / Unnormalization
+  normalize_input_modes:
+    observation.image: mean_std
+    observation.state: min_max
+  unnormalize_output_modes:
+    action: min_max
 
   # Architecture / modeling.
   # Vision backbone.
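For diffusion, `override_dataset_stats` does double duty: it pins the image stats to 0.5/0.5 and replaces the state/action min/max with the values the original pretrained model used (see the TODOs in the diff). A plausible reading of how such overrides could be folded into stats computed from the dataset; the merge loop below is a sketch, not the repo's actual implementation:

```python
import torch

# Stats as they might come out of a dataset scan (shapes/values made up).
dataset_stats = {
    "observation.state": {"min": torch.tensor([0.0, 0.0]), "max": torch.tensor([512.0, 512.0])},
    "action": {"min": torch.tensor([0.0, 0.0]), "max": torch.tensor([512.0, 512.0])},
}

# Values from override_dataset_stats in the config above.
override = {
    "observation.state": {"min": [13.456424, 32.938293], "max": [496.14618, 510.9579]},
    "action": {"min": [12.0, 25.0], "max": [511.0, 511.0]},
}

# Overrides win over computed stats, key by key and field by field.
for key, fields in override.items():
    for field, value in fields.items():
        dataset_stats[key][field] = torch.tensor(value)
```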

lerobot/configs/policy/tdmpc.yaml

@@ -16,8 +16,8 @@ policy:
   frame_stack: 1
   num_channels: 32
   img_size: ${env.image_size}
-  state_dim: ???
-  action_dim: ???
+  state_dim: ${env.state_dim}
+  action_dim: ${env.action_dim}
   # planning
   mpc: true
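The `???` values being replaced here are OmegaConf's marker for a mandatory value: the old layout relied on the env configs' top-level `policy:` section to fill them in, while the new layout resolves them directly from `env:`. A small self-contained illustration of the `???` behavior (the config contents are made up):

```python
from omegaconf import OmegaConf
from omegaconf.errors import MissingMandatoryValue

cfg = OmegaConf.create({"policy": {"state_dim": "???"}})
try:
    _ = cfg.policy.state_dim  # accessing a mandatory-missing value raises
except MissingMandatoryValue:
    # Merging in a config that defines the value (as the env configs did) fixes this.
    cfg = OmegaConf.merge(cfg, {"policy": {"state_dim": 4}})
print(cfg.policy.state_dim)  # 4
```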