From 964f9e86d68fcfea3abd956820387fedd3f247a5 Mon Sep 17 00:00:00 2001
From: Simon Alibert <75076266+aliberts@users.noreply.github.com>
Date: Thu, 4 Jul 2024 11:53:29 +0200
Subject: [PATCH] Cleanup config defaults (#300)

---
 lerobot/configs/default.yaml                  | 2 +-
 lerobot/configs/policy/act.yaml               | 7 +++----
 lerobot/configs/policy/act_real.yaml          | 5 ++---
 lerobot/configs/policy/act_real_no_state.yaml | 5 ++---
 lerobot/configs/policy/diffusion.yaml         | 5 ++---
 lerobot/configs/policy/tdmpc.yaml             | 1 +
 lerobot/configs/policy/vqbet.yaml             | 5 ++---
 7 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/lerobot/configs/default.yaml b/lerobot/configs/default.yaml
index df0dae7d..4bb1508d 100644
--- a/lerobot/configs/default.yaml
+++ b/lerobot/configs/default.yaml
@@ -39,7 +39,7 @@ training:
   # `online_env_seed` is used for environments for online training data rollouts.
   online_env_seed: ???
   eval_freq: ???
-  log_freq: 250
+  log_freq: 200
   save_checkpoint: true
   # Checkpoint is saved every `save_freq` training iterations and after the last training step.
   save_freq: ???
diff --git a/lerobot/configs/policy/act.yaml b/lerobot/configs/policy/act.yaml
index bba2e563..ea2c5b75 100644
--- a/lerobot/configs/policy/act.yaml
+++ b/lerobot/configs/policy/act.yaml
@@ -10,11 +10,10 @@ override_dataset_stats:
     std: [[[0.229]], [[0.224]], [[0.225]]]  # (c,1,1)
 
 training:
-  offline_steps: 80000
+  offline_steps: 100000
   online_steps: 0
-  eval_freq: 10000
-  save_freq: 100000
-  log_freq: 250
+  eval_freq: 20000
+  save_freq: 20000
   save_checkpoint: true
 
   batch_size: 8
diff --git a/lerobot/configs/policy/act_real.yaml b/lerobot/configs/policy/act_real.yaml
index b4942615..c2f7158f 100644
--- a/lerobot/configs/policy/act_real.yaml
+++ b/lerobot/configs/policy/act_real.yaml
@@ -36,11 +36,10 @@ override_dataset_stats:
     std: [[[0.229]], [[0.224]], [[0.225]]]  # (c,1,1)
 
 training:
-  offline_steps: 80000
+  offline_steps: 100000
   online_steps: 0
   eval_freq: -1
-  save_freq: 10000
-  log_freq: 100
+  save_freq: 20000
   save_checkpoint: true
 
   batch_size: 8
diff --git a/lerobot/configs/policy/act_real_no_state.yaml b/lerobot/configs/policy/act_real_no_state.yaml
index a8b1c9b6..5b8a13b4 100644
--- a/lerobot/configs/policy/act_real_no_state.yaml
+++ b/lerobot/configs/policy/act_real_no_state.yaml
@@ -34,11 +34,10 @@ override_dataset_stats:
     std: [[[0.229]], [[0.224]], [[0.225]]]  # (c,1,1)
 
 training:
-  offline_steps: 80000
+  offline_steps: 100000
   online_steps: 0
   eval_freq: -1
-  save_freq: 10000
-  log_freq: 100
+  save_freq: 20000
   save_checkpoint: true
 
   batch_size: 8
diff --git a/lerobot/configs/policy/diffusion.yaml b/lerobot/configs/policy/diffusion.yaml
index 95cc75b6..880819bb 100644
--- a/lerobot/configs/policy/diffusion.yaml
+++ b/lerobot/configs/policy/diffusion.yaml
@@ -24,9 +24,8 @@ override_dataset_stats:
 training:
   offline_steps: 200000
   online_steps: 0
-  eval_freq: 5000
-  save_freq: 5000
-  log_freq: 250
+  eval_freq: 25000
+  save_freq: 25000
   save_checkpoint: true
 
   batch_size: 64
diff --git a/lerobot/configs/policy/tdmpc.yaml b/lerobot/configs/policy/tdmpc.yaml
index 4e55ddf7..379e9320 100644
--- a/lerobot/configs/policy/tdmpc.yaml
+++ b/lerobot/configs/policy/tdmpc.yaml
@@ -11,6 +11,7 @@ training:
   online_steps_between_rollouts: 1
   online_sampling_ratio: 0.5
   online_env_seed: 10000
+  log_freq: 100
 
   batch_size: 256
   grad_clip_norm: 10.0
diff --git a/lerobot/configs/policy/vqbet.yaml b/lerobot/configs/policy/vqbet.yaml
index a8b530e2..cc70d61a 100644
--- a/lerobot/configs/policy/vqbet.yaml
+++ b/lerobot/configs/policy/vqbet.yaml
@@ -22,9 +22,8 @@ override_dataset_stats:
 training:
   offline_steps: 250000
   online_steps: 0
-  eval_freq: 20000
-  save_freq: 20000
-  log_freq: 250
+  eval_freq: 25000
+  save_freq: 25000
   save_checkpoint: true
 
   batch_size: 64
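
For illustration only (not part of the patch): the `training:` section of lerobot/configs/policy/act.yaml would read roughly as follows once this diff is applied, assuming the usual Hydra composition in which fields removed from a policy config, such as `log_freq`, fall back to the value in lerobot/configs/default.yaml (set to 200 by this patch).

training:
  offline_steps: 100000
  online_steps: 0
  eval_freq: 20000
  save_freq: 20000
  # log_freq is no longer set here; assuming standard composition with
  # lerobot/configs/default.yaml, it now resolves to the new default of 200.
  save_checkpoint: true

  batch_size: 8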