backup wip

This commit is contained in:
Alexander Soare
2024-04-15 19:06:44 +01:00
parent 14f3ffb412
commit 5608e659e6
14 changed files with 1059 additions and 977 deletions

View File

@@ -18,7 +18,7 @@ policy:
pretrained_model_path:
# Environment.
# Inherit these from the environment.
# Inherit these from the environment config.
state_dim: ???
action_dim: ???

View File

@@ -1,17 +1,5 @@
# @package _global_
shape_meta:
# acceptable types: rgb, low_dim
obs:
image:
shape: [3, 96, 96]
type: rgb
agent_pos:
shape: [2]
type: low_dim
action:
shape: [2]
seed: 100000
horizon: 16
n_obs_steps: 2
@@ -33,75 +21,70 @@ offline_prioritized_sampler: true
policy:
name: diffusion
shape_meta: ${shape_meta}
pretrained_model_path:
horizon: ${horizon}
# Environment.
# Inherit these from the environment config.
state_dim: ???
action_dim: ???
image_size:
- ${env.image_size} # height
- ${env.image_size} # width
# Inputs / output structure.
n_obs_steps: ${n_obs_steps}
horizon: ${horizon}
n_action_steps: ${n_action_steps}
num_inference_steps: 100
# crop_shape: null
diffusion_step_embed_dim: 128
# Vision preprocessing.
image_normalization_mean: [0.5, 0.5, 0.5]
image_normalization_std: [0.5, 0.5, 0.5]
# Architecture / modeling.
# Vision backbone.
vision_backbone: resnet18
crop_shape: [84, 84]
random_crop: True
use_pretrained_backbone: false
use_group_norm: True
spatial_softmax_num_keypoints: 32
# Unet.
down_dims: [512, 1024, 2048]
kernel_size: 5
n_groups: 8
diffusion_step_embed_dim: 128
film_scale_modulation: True
pretrained_model_path:
batch_size: 64
per_alpha: 0.6
per_beta: 0.4
balanced_sampling: false
utd: 1
offline_steps: ${offline_steps}
use_ema: true
lr_scheduler: cosine
lr_warmup_steps: 500
grad_clip_norm: 10
delta_timestamps:
observation.image: [-0.1, 0]
observation.state: [-0.1, 0]
action: [-0.1, 0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1.0, 1.1, 1.2, 1.3, 1.4]
rgb_encoder:
backbone_name: resnet18
pretrained_backbone: false
use_group_norm: True
num_keypoints: 32
relu: true
norm_mean_std: [0.5, 0.5] # for PushT the original impl normalizes to [-1, 1] (this may not be the case for robomimic envs)
crop_shape: [84, 84]
random_crop: True
noise_scheduler:
_target_: diffusers.schedulers.scheduling_ddpm.DDPMScheduler
# Noise scheduler.
num_train_timesteps: 100
beta_schedule: squaredcos_cap_v2
beta_start: 0.0001
beta_end: 0.02
beta_schedule: squaredcos_cap_v2
variance_type: fixed_small # Yilun's paper uses fixed_small_log instead, but it can easily cause NaN
clip_sample: True # required when predict_epsilon=False
prediction_type: epsilon # or sample
variance_type: fixed_small
prediction_type: epsilon # epsilon / sample
clip_sample: True
rgb_model:
pretrained: false
num_keypoints: 32
relu: true
# Inference
num_inference_steps: 100
ema:
_target_: lerobot.common.policies.diffusion.model.ema_model.EMAModel
update_after_step: 0
inv_gamma: 1.0
power: 0.75
min_value: 0.0
max_value: 0.9999
optimizer:
_target_: torch.optim.AdamW
# ---
# TODO(alexander-soare): Remove these from the policy config.
batch_size: 64
grad_clip_norm: 10
lr: 1.0e-4
betas: [0.95, 0.999]
eps: 1.0e-8
weight_decay: 1.0e-6
lr_scheduler: cosine
lr_warmup_steps: 500
adam_betas: [0.95, 0.999]
adam_eps: 1.0e-8
adam_weight_decay: 1.0e-6
utd: 1
use_ema: true
ema_update_after_step: 0
ema_min_rate: 0.0
ema_max_rate: 0.9999
ema_inv_gamma: 1.0
ema_power: 0.75
delta_timestamps:
observation.images: "[i / ${fps} for i in range(1 - ${n_obs_steps}, 1)]"
observation.state: "[i / ${fps} for i in range(1 - ${n_obs_steps}, 1)]"
action: "[i / ${fps} for i in range(1 - ${n_obs_steps}, 1 - ${n_obs_steps} + ${policy.horizon})]"