Compare commits

9 Commits: recovered-...user/pepij

| Author | SHA1 | Date |
|---|---|---|
| | 1892aa1b08 | |
| | 3b6fff70e1 | |
| | 6e97876e81 | |
| | 4bdbf2f6e0 | |
| | 4e9b4dd380 | |
| | 17d12db7c4 | |
| | 6a8be97bb5 | |
| | 841d54c050 | |
| | e3c3c165aa | |
```diff
@@ -749,6 +749,9 @@ class LeRobotDataset(torch.utils.data.Dataset):
         task_idx = item["task_index"].item()
         item["task"] = self.meta.tasks[task_idx]

+        # Add global index of frame (indices)
+        item["indices"] = torch.tensor(idx)
+
         return item

     def __repr__(self):
```
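The hunk above tags every frame with its global dataset index. Because PyTorch's default collate stacks same-keyed tensors, the index survives batching and shuffling as `batch["indices"]`. A minimal sketch of the idea with a hypothetical toy dataset (not `LeRobotDataset`):

```python
import torch
from torch.utils.data import DataLoader, Dataset


class ToyDataset(Dataset):
    """Hypothetical stand-in: items that carry their own dataset index."""

    def __len__(self) -> int:
        return 8

    def __getitem__(self, idx: int) -> dict:
        item = {"x": torch.randn(3)}
        # Same trick as the patch: remember which frame this item is.
        item["indices"] = torch.tensor(idx)
        return item


batch = next(iter(DataLoader(ToyDataset(), batch_size=4, shuffle=True)))
# Each batch row can now be traced back to its dataset index, which is what
# lets the training loop route per-sample losses to the right priorities.
print(batch["indices"])  # e.g. tensor([5, 0, 7, 2])
```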
```diff
@@ -13,9 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Iterator, Union
+import random
+from typing import Iterator, List, Optional, Union

 import torch
 from torch.utils.data import Sampler


 class EpisodeAwareSampler:
```
```diff
@@ -59,3 +61,123 @@ class EpisodeAwareSampler:

     def __len__(self) -> int:
         return len(self.indices)
+
+
+class SumTree:
+    """
+    A classic sum-tree data structure for storing priorities.
+    Each leaf stores a sample's priority, and internal nodes store sums of children.
+    """
+
+    def __init__(self, capacity: int):
+        """
+        Args:
+            capacity: Maximum number of elements.
+        """
+        self.capacity = capacity
+        self.size = capacity
+        self.tree = [0.0] * (2 * self.size)
+
+    def initialize_tree(self, priorities: List[float]):
+        """
+        Initializes the sum tree.
+        """
+        # Set leaf values
+        for i, priority in enumerate(priorities):
+            self.tree[i + self.size] = priority
+
+        # Compute internal node values
+        for i in range(self.size - 1, 0, -1):
+            self.tree[i] = self.tree[2 * i] + self.tree[2 * i + 1]
+
+    def update(self, idx: int, priority: float):
+        """
+        Update the priority at leaf index `idx` and propagate changes upwards.
+        """
+        tree_idx = idx + self.size
+        self.tree[tree_idx] = priority  # Set new priority
+
+        # Propagate up, explicitly summing children
+        tree_idx //= 2
+        while tree_idx >= 1:
+            self.tree[tree_idx] = self.tree[2 * tree_idx] + self.tree[2 * tree_idx + 1]
+            tree_idx //= 2
+
+    def total_priority(self) -> float:
+        """Returns the sum of all priorities (stored at root)."""
+        return self.tree[1]
+
+    def sample(self, value: float) -> int:
+        """
+        Samples an index where the prefix sum up to that leaf is >= `value`.
+        """
+        value = min(max(value, 0), self.total_priority())  # Clamp value
+        idx = 1
+        while idx < self.size:
+            left = 2 * idx
+            if self.tree[left] >= value:
+                idx = left
+            else:
+                value -= self.tree[left]
+                idx = left + 1
+        return idx - self.size  # Convert tree index to data index
+
+
+class PrioritizedSampler(Sampler[int]):
+    """
+    PyTorch Sampler that draws samples in proportion to their priority using a SumTree.
+    """
+
+    def __init__(
+        self,
+        data_len: int,
+        alpha: float = 0.6,
+        eps: float = 1e-6,
+        num_samples_per_epoch: Optional[int] = None,
+    ):
+        """
+        Args:
+            data_len: Total number of samples in the dataset.
+            alpha: Exponent for priority scaling. Default is 0.6.
+            eps: Small constant to avoid zero priorities.
+            num_samples_per_epoch: Number of samples per epoch (default is data_len).
+        """
+        self.data_len = data_len
+        self.alpha = alpha
+        self.eps = eps
+        self.num_samples_per_epoch = num_samples_per_epoch or data_len
+
+        # Initialize difficulties and sum-tree
+        self.difficulties = [1.0] * data_len
+        self.priorities = [0.0] * data_len
+        initial_priorities = [(1.0 + eps) ** alpha] * data_len
+
+        self.sumtree = SumTree(data_len)
+        self.sumtree.initialize_tree(initial_priorities)
+        for i, p in enumerate(initial_priorities):
+            self.priorities[i] = p
+
+    def update_priorities(self, indices: List[int], difficulties: List[float]):
+        """
+        Updates the priorities in the sum-tree.
+        """
+        for idx, diff in zip(indices, difficulties, strict=False):
+            self.difficulties[idx] = diff
+            new_priority = (diff + self.eps) ** self.alpha
+            self.priorities[idx] = new_priority
+            self.sumtree.update(idx, new_priority)
+
+    def __iter__(self) -> Iterator[int]:
+        """
+        Samples indices based on their priority weights.
+        """
+        total_p = self.sumtree.total_priority()

+        for _ in range(self.num_samples_per_epoch):
+            r = random.random() * total_p
+            idx = self.sumtree.sample(r)
+
+            yield idx
+
+    def __len__(self) -> int:
+        return self.num_samples_per_epoch
```
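A quick illustration of the mechanics, not part of the patch: with leaf priorities `[1, 2, 3, 4]` the root stores 10, and uniform draws over `[0, 10)` select leaf 3 about 40% of the time. The sketch below assumes the patch is applied so both classes are importable from the sampler module (the train.py hunk further down imports from `lerobot.common.datasets.sampler`):

```python
import random
from collections import Counter

# Assumes the patch is applied:
from lerobot.common.datasets.sampler import PrioritizedSampler, SumTree

tree = SumTree(4)
tree.initialize_tree([1.0, 2.0, 3.0, 4.0])
assert tree.total_priority() == 10.0  # root = sum of leaf priorities

# Empirical frequencies approach 0.1 / 0.2 / 0.3 / 0.4 for leaves 0..3.
draws = Counter(tree.sample(random.random() * tree.total_priority()) for _ in range(100_000))
print({leaf: round(n / 100_000, 2) for leaf, n in sorted(draws.items())})

# The sampler wraps the same machinery behind the standard Sampler protocol.
sampler = PrioritizedSampler(data_len=4, alpha=0.6, eps=1e-6, num_samples_per_epoch=1_000)
sampler.update_priorities([3], [10.0])  # mark sample 3 as "hard"
print(Counter(sampler))  # index 3 now accounts for roughly 57% of draws
```

The `(difficulty + eps) ** alpha` transform follows the proportional variant of prioritized experience replay: `alpha = 0` recovers uniform sampling, `alpha = 1` samples in direct proportion to difficulty, and `0.6` is the value commonly used in the PER literature.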
```diff
@@ -155,11 +155,14 @@ class ACTPolicy(PreTrainedPolicy):
         batch = self.normalize_targets(batch)
         actions_hat, (mu_hat, log_sigma_x2_hat) = self.model(batch)

-        l1_loss = (
-            F.l1_loss(batch["action"], actions_hat, reduction="none") * ~batch["action_is_pad"].unsqueeze(-1)
-        ).mean()
+        elementwise_l1 = F.l1_loss(batch["action"], actions_hat, reduction="none") * ~batch[
+            "action_is_pad"
+        ].unsqueeze(-1)
+
+        l1_loss = elementwise_l1.mean()
+
+        l1_per_sample = elementwise_l1.mean(dim=(1, 2))

-        loss_dict = {"l1_loss": l1_loss.item()}
         if self.config.use_vae:
             # Calculate Dₖₗ(latent_pdf || standard_normal). Note: After computing the KL-divergence for
             # each dimension independently, we sum over the latent dimension to get the total
@@ -168,9 +171,17 @@ class ACTPolicy(PreTrainedPolicy):
             mean_kld = (
                 (-0.5 * (1 + log_sigma_x2_hat - mu_hat.pow(2) - (log_sigma_x2_hat).exp())).sum(-1).mean()
             )
-            loss_dict["kld_loss"] = mean_kld.item()
+            loss_dict = {
+                "l1_loss": l1_loss.item(),
+                "kld_loss": mean_kld.item(),
+                "per_sample_l1": l1_per_sample,
+            }
             loss = l1_loss + mean_kld * self.config.kl_weight
         else:
+            loss_dict = {
+                "l1_loss": l1_loss.item(),
+                "per_sample_l1": l1_per_sample,
+            }
             loss = l1_loss

         return loss, loss_dict
```
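For orientation: `elementwise_l1` has shape `(batch, chunk_size, action_dim)`, so averaging over dims 1 and 2 yields one scalar difficulty per batch element, while `l1_loss` stays numerically identical to the old reduction. A standalone shape check (dimensions are illustrative, not taken from any config):

```python
import torch
import torch.nn.functional as F

B, T, D = 4, 100, 14  # illustrative: batch size, chunk length, action dim
actions = torch.randn(B, T, D)
actions_hat = torch.randn(B, T, D)
action_is_pad = torch.zeros(B, T, dtype=torch.bool)  # no padded steps here

elementwise_l1 = F.l1_loss(actions, actions_hat, reduction="none") * ~action_is_pad.unsqueeze(-1)
l1_loss = elementwise_l1.mean()                  # scalar, identical to the old reduction
l1_per_sample = elementwise_l1.mean(dim=(1, 2))  # shape (B,): one value per sample

assert l1_per_sample.shape == (B,)
# The global mean is the mean of the per-sample means.
assert torch.isclose(l1_per_sample.mean(), l1_loss)
```

Note that `per_sample_l1` enters `loss_dict` as a live tensor rather than a `.item()` scalar; the training loop below detaches it before use.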
```diff
@@ -25,7 +25,7 @@ from torch.amp import GradScaler
 from torch.optim import Optimizer

 from lerobot.common.datasets.factory import make_dataset
-from lerobot.common.datasets.sampler import EpisodeAwareSampler
+from lerobot.common.datasets.sampler import EpisodeAwareSampler, PrioritizedSampler
 from lerobot.common.datasets.utils import cycle
 from lerobot.common.envs.factory import make_env
 from lerobot.common.optim.factory import make_optimizer_and_scheduler
@@ -70,6 +70,7 @@ def update_policy(
     with torch.autocast(device_type=device.type) if use_amp else nullcontext():
         loss, output_dict = policy.forward(batch)
         # TODO(rcadene): policy.unnormalize_outputs(out_dict)
+
     grad_scaler.scale(loss).backward()

     # Unscale the gradient of the optimizer's assigned params in-place **prior to gradient clipping**.
@@ -126,6 +127,7 @@ def train(cfg: TrainPipelineConfig):

     logging.info("Creating dataset")
     dataset = make_dataset(cfg)
+    data_len = len(dataset)

     # Create environment used for evaluating checkpoints during training on simulation data.
     # On real-world data, no need to create an environment as evaluations are done outside train.py,
@@ -174,6 +176,15 @@ def train(cfg: TrainPipelineConfig):
         shuffle = True
         sampler = None

+    # TODO(pepijn): If experiment works integrate this
+    shuffle = False
+    sampler = PrioritizedSampler(
+        data_len=data_len,
+        alpha=0.6,
+        eps=1e-6,
+        num_samples_per_epoch=data_len,
+    )
+
     dataloader = torch.utils.data.DataLoader(
         dataset,
         num_workers=cfg.num_workers,
@@ -220,6 +231,12 @@ def train(cfg: TrainPipelineConfig):
             use_amp=cfg.policy.use_amp,
         )

+        # Update sampler
+        if "indices" in batch and "per_sample_l1" in output_dict:
+            idxs = batch["indices"].cpu().tolist()
+            diffs = output_dict["per_sample_l1"].detach().cpu().tolist()
+            sampler.update_priorities(idxs, diffs)
+
         # Note: eval and checkpoint happens *after* the `step`th training update has completed, so we
         # increment `step` here.
         step += 1
```
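Taken together, the commits form a feedback loop: the dataset tags frames with indices, `ACTPolicy.forward` reports a per-sample L1, and the sampler reweights the next draws. A condensed, self-contained sketch of that loop, with a toy model standing in for the policy (`ToyFrames`, the linear model, and the hyperparameters are illustrative, not from the patch):

```python
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset

# Assumes this patch is applied so the sampler module exposes the new class:
from lerobot.common.datasets.sampler import PrioritizedSampler


class ToyFrames(Dataset):
    """Hypothetical stand-in for a frame dataset; tags each item with its index."""

    def __init__(self, n: int = 64):
        self.x = torch.randn(n, 8)
        self.y = torch.randn(n, 2)

    def __len__(self) -> int:
        return len(self.x)

    def __getitem__(self, idx: int) -> dict:
        return {"x": self.x[idx], "y": self.y[idx], "indices": torch.tensor(idx)}


dataset = ToyFrames()
sampler = PrioritizedSampler(data_len=len(dataset), alpha=0.6, eps=1e-6)
# As in the train.py hunk: a custom sampler replaces shuffle=True.
loader = DataLoader(dataset, batch_size=8, sampler=sampler)

model = torch.nn.Linear(8, 2)
optim = torch.optim.SGD(model.parameters(), lr=1e-2)

for batch in loader:
    elementwise = F.l1_loss(model(batch["x"]), batch["y"], reduction="none")
    loss = elementwise.mean()
    per_sample = elementwise.mean(dim=1)  # one difficulty score per sample

    optim.zero_grad()
    loss.backward()
    optim.step()

    # Close the loop: harder samples get higher priority on the next epoch.
    sampler.update_priorities(batch["indices"].tolist(), per_sample.detach().tolist())
```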