lerobot_piper/lerobot/configs/policy/diffusion.yaml
# @package _global_
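
# Hydra config for training a Diffusion Policy from image + agent-position
# observations; the 96x96 RGB, 2-D state, and 2-D action shapes below match
# the PushT defaults.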
shape_meta:
  # acceptable types: rgb, low_dim
  obs:
    image:
      shape: [3, 96, 96]
      type: rgb
    agent_pos:
      shape: [2]
      type: low_dim
  action:
    shape: [2]
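
# Receding-horizon control: the policy predicts `horizon` future actions from
# the last `n_obs_steps` observations and executes the first `n_action_steps`
# of them before replanning.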
horizon: 16
n_obs_steps: 2
n_action_steps: 8
n_latency_steps: 0
dataset_obs_steps: ${n_obs_steps}
past_action_visible: False
keypoint_visible_rate: 1.0
obs_as_global_cond: True

eval_episodes: 1
eval_freq: 10000
save_freq: 100000
log_freq: 250

offline_steps: 1344000
online_steps: 0

offline_prioritized_sampler: true
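
# Diffusion head hyperparameters (a 1D conditional UNet over the action
# sequence, as in Chi et al.'s Diffusion Policy).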
policy:
  name: diffusion

  shape_meta: ${shape_meta}

  horizon: ${horizon}
  # n_action_steps: ${eval:'${n_action_steps}+${n_latency_steps}'}
  n_obs_steps: ${n_obs_steps}
  num_inference_steps: 100
  obs_as_global_cond: ${obs_as_global_cond}
  # crop_shape: null
  diffusion_step_embed_dim: 256 # before 128
  down_dims: [256, 512, 1024] # before [512, 1024, 2048]
  kernel_size: 5
  n_groups: 8
  cond_predict_scale: True

  pretrained_model_path:

  batch_size: 64

  per_alpha: 0.6
  per_beta: 0.4

  balanced_sampling: false
  utd: 1
  offline_steps: ${offline_steps}
  use_ema: true
  lr_scheduler: cosine
  lr_warmup_steps: 500
  grad_clip_norm: 10
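
# DDPM noise scheduler (Hugging Face diffusers), which defines the forward
# noising and reverse denoising process over action sequences.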
noise_scheduler:
  _target_: diffusers.schedulers.scheduling_ddpm.DDPMScheduler
  num_train_timesteps: 100
  beta_start: 0.0001
  beta_end: 0.02
  beta_schedule: squaredcos_cap_v2
  variance_type: fixed_small # Yilun's paper uses fixed_small_log instead, but it can easily produce NaNs
  clip_sample: True # required when predict_epsilon=False
  prediction_type: epsilon # or sample
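
# Observation encoder; the commented `_target_` points at diffusion_policy's
# MultiImageObsEncoder, which applies one RGB backbone per camera and
# concatenates the resulting features with the low-dim state.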
obs_encoder:
  # _target_: diffusion_policy.model.vision.multi_image_obs_encoder.MultiImageObsEncoder
  shape_meta: ${shape_meta}
  # resize_shape: null
  # crop_shape: [76, 76]
  # constant center crop
  # random_crop: True
  use_group_norm: True
  share_rgb_model: False
  imagenet_norm: True
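
# RGB backbone used by the encoder above: a torchvision resnet18 trained from
# scratch (weights: null means no pretrained weights).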
rgb_model:
  _target_: diffusion_policy.model.vision.model_getter.get_resnet
  name: resnet18
  weights: null
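
# Exponential moving average of the policy weights; the EMA copy is the one
# used for evaluation when `use_ema: true`.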
ema:
  _target_: diffusion_policy.model.diffusion.ema_model.EMAModel
  update_after_step: 0
  inv_gamma: 1.0
  power: 0.75
  min_value: 0.0
  max_value: 0.9999
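
# Optimizer settings, instantiated via `_target_` (AdamW).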
optimizer:
  _target_: torch.optim.AdamW
  lr: 1.0e-4
  betas: [0.95, 0.999]
  eps: 1.0e-8
  weight_decay: 1.0e-6
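
# Training-loop control: device, epoch count, and rollout/checkpoint/validation
# cadence (in epochs).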
training:
  device: "cuda:0"
  seed: 42
  debug: False
  resume: True
  # optimization
  # lr_scheduler: cosine
  # lr_warmup_steps: 500
  num_epochs: 8000
  # gradient_accumulate_every: 1
  # EMA destroys performance when used with BatchNorm;
  # replace BatchNorm with GroupNorm.
  # use_ema: True
  freeze_encoder: False
  # training loop control
  # in epochs
  rollout_every: 50
  checkpoint_every: 50
  val_every: 1
  sample_every: 5
  # steps per epoch
  max_train_steps: null
  max_val_steps: null
  # misc
  tqdm_interval_sec: 1.0
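
# Reference sketch (a comment, not part of the config): how the `_target_`
# sections above would typically be instantiated, assuming this file is loaded
# standalone with OmegaConf and that hydra-core, diffusers, and torch are
# installed.
#
#   from hydra.utils import instantiate
#   from omegaconf import OmegaConf
#
#   cfg = OmegaConf.load("lerobot/configs/policy/diffusion.yaml")
#
#   # Builds diffusers' DDPMScheduler with the kwargs listed above.
#   noise_scheduler = instantiate(cfg.noise_scheduler)
#
#   # AdamW additionally needs the parameters to optimize, passed at call time
#   # (`policy` is a stand-in for whatever nn.Module the config builds):
#   # optimizer = instantiate(cfg.optimizer, params=policy.parameters())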