From fab037f78d63ba578fffb25548eb37093cf1d7a7 Mon Sep 17 00:00:00 2001
From: Julien Perez
Date: Tue, 13 Aug 2024 17:03:05 +0200
Subject: [PATCH] =?UTF-8?q?feat=20for=20the=20GPU=20poors=20:=20Add=20GPU?=
 =?UTF-8?q?=20availability=20check=20in=20evaluate=5Fpretr=E2=80=A6=20(#35?=
 =?UTF-8?q?9)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Alexander Soare
---
 examples/2_evaluate_pretrained_policy.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/examples/2_evaluate_pretrained_policy.py b/examples/2_evaluate_pretrained_policy.py
index 8481f0f54..5c1932ded 100644
--- a/examples/2_evaluate_pretrained_policy.py
+++ b/examples/2_evaluate_pretrained_policy.py
@@ -18,7 +18,15 @@ from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
 output_directory = Path("outputs/eval/example_pusht_diffusion")
 output_directory.mkdir(parents=True, exist_ok=True)
 
-device = torch.device("cuda")
+# Check if GPU is available
+if torch.cuda.is_available():
+    device = torch.device("cuda")
+    print("GPU is available. Device set to:", device)
+else:
+    device = torch.device("cpu")
+    print(f"GPU is not available. Device set to: {device}. Inference will be slower than on GPU.")
+    # Decrease the number of reverse-diffusion steps (trades off a bit of quality for 10x speed)
+    policy.diffusion.num_inference_steps = 10
 
 # Download the diffusion policy for pusht environment
 pretrained_policy_path = Path(snapshot_download("lerobot/diffusion_pusht"))