from torchrl.envs.transforms import Compose, StepCounter, Transform, TransformedEnv


def make_env(cfg, transform=None):
    """Instantiate the environment selected by `cfg.env.name`.

    The environment is wrapped in a `TransformedEnv` with a `StepCounter` that
    limits rollouts to `cfg.env.episode_length` steps. If `transform` is given
    (a single `Transform` or a `Compose`), it is cloned and appended, which is
    useful for adding normalization.
    """
    kwargs = {
        "frame_skip": cfg.env.action_repeat,
        "from_pixels": cfg.env.from_pixels,
        "pixels_only": cfg.env.pixels_only,
        "image_size": cfg.env.image_size,
        # TODO(rcadene): do we want a specific eval_env_seed?
        "seed": cfg.seed,
        "num_prev_obs": cfg.n_obs_steps - 1,
    }

    if cfg.env.name == "simxarm":
        from lerobot.common.envs.simxarm import SimxarmEnv

        kwargs["task"] = cfg.env.task
        clsfunc = SimxarmEnv
    elif cfg.env.name == "pusht":
        from lerobot.common.envs.pusht.env import PushtEnv

        # assert kwargs["seed"] > 200, "Seed 0-200 are used for the demonstration dataset, so we don't want to seed the eval env with this range."

        clsfunc = PushtEnv
    elif cfg.env.name == "aloha":
        from lerobot.common.envs.aloha.env import AlohaEnv

        kwargs["task"] = cfg.env.task
        clsfunc = AlohaEnv
    else:
        raise ValueError(f"Unsupported environment: {cfg.env.name}")

    env = clsfunc(**kwargs)

    # limit rollout to max_steps
    env = TransformedEnv(env, StepCounter(max_steps=cfg.env.episode_length))

    if transform is not None:
        # useful to add normalization
        if isinstance(transform, Compose):
            for tf in transform:
                env.append_transform(tf.clone())
        elif isinstance(transform, Transform):
            env.append_transform(transform.clone())
        else:
            raise NotImplementedError()

    return env
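

# Minimal usage sketch (illustration only, not part of the factory itself).
# The config values below are assumptions for demonstration, not lerobot
# defaults; `make_env` only reads the fields shown here.
if __name__ == "__main__":
    from omegaconf import OmegaConf

    cfg = OmegaConf.create(
        {
            "seed": 1337,
            "n_obs_steps": 1,
            "env": {
                "name": "pusht",
                "action_repeat": 1,
                "from_pixels": True,
                "pixels_only": False,
                "image_size": 96,  # assumed image size for PushtEnv
                "episode_length": 300,  # assumed rollout limit
            },
        }
    )
    env = make_env(cfg)
    # Roll out a few steps with random actions to sanity-check the env.
    rollout = env.rollout(max_steps=10)
    print(rollout)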


# def make_env(env_name, frame_skip, device, is_test=False):
#     env = GymEnv(
#         env_name,
#         frame_skip=frame_skip,
#         from_pixels=True,
#         pixels_only=False,
#         device=device,
#     )
#     env = TransformedEnv(env)
#     env.append_transform(NoopResetEnv(noops=30, random=True))
#     if not is_test:
#         env.append_transform(EndOfLifeTransform())
#     env.append_transform(RewardClipping(-1, 1))
#     env.append_transform(ToTensorImage())
#     env.append_transform(GrayScale())
#     env.append_transform(Resize(84, 84))
#     env.append_transform(CatFrames(N=4, dim=-3))
#     env.append_transform(RewardSum())
#     env.append_transform(StepCounter(max_steps=4500))
#     env.append_transform(DoubleToFloat())
#     env.append_transform(VecNorm(in_keys=["pixels"]))
#     return env