Fixes @torch.no_grad() usage (#1455)

* fix: decorator calls with parentheses

* fix: no_grad for normalize too

Signed-off-by: Francesco Capuano <74058581+fracapuano@users.noreply.github.com>

---------

Signed-off-by: Francesco Capuano <74058581+fracapuano@users.noreply.github.com>
This commit is contained in:
Francesco Capuano
2025-07-08 13:08:32 +02:00
committed by GitHub
parent aec1b29d23
commit a5e0aae13a
9 changed files with 16 additions and 15 deletions

View File

@@ -107,7 +107,7 @@ class ACTPolicy(PreTrainedPolicy):
else:
self._action_queue = deque([], maxlen=self.config.n_action_steps)
@torch.no_grad
@torch.no_grad()
def select_action(self, batch: dict[str, Tensor]) -> Tensor:
"""Select a single action given environment observations.
@@ -132,7 +132,7 @@ class ACTPolicy(PreTrainedPolicy):
self._action_queue.extend(actions.transpose(0, 1))
return self._action_queue.popleft()
@torch.no_grad
@torch.no_grad()
def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
"""Predict a chunk of actions given environment observations."""
self.eval()