test_envs.py are passing, remove simxarm and pusht directories
This commit is contained in:
@@ -1,47 +1,47 @@
|
||||
import pytest
|
||||
from tensordict import TensorDict
|
||||
import torch
|
||||
from torchrl.envs.utils import check_env_specs, step_mdp
|
||||
from lerobot.common.datasets.factory import make_dataset
|
||||
import gymnasium as gym
|
||||
from gymnasium.utils.env_checker import check_env
|
||||
|
||||
from lerobot.common.envs.aloha.env import AlohaEnv
|
||||
from lerobot.common.envs.factory import make_env
|
||||
from lerobot.common.envs.pusht.env import PushtEnv
|
||||
from lerobot.common.envs.simxarm.env import SimxarmEnv
|
||||
from lerobot.common.utils import init_hydra_config
|
||||
|
||||
from lerobot.common.envs.utils import preprocess_observation
|
||||
|
||||
# import dmc_aloha # noqa: F401
|
||||
|
||||
from .utils import DEVICE, DEFAULT_CONFIG_PATH
|
||||
|
||||
|
||||
def print_spec_rollout(env):
|
||||
print("observation_spec:", env.observation_spec)
|
||||
print("action_spec:", env.action_spec)
|
||||
print("reward_spec:", env.reward_spec)
|
||||
print("done_spec:", env.done_spec)
|
||||
# def print_spec_rollout(env):
|
||||
# print("observation_spec:", env.observation_spec)
|
||||
# print("action_spec:", env.action_spec)
|
||||
# print("reward_spec:", env.reward_spec)
|
||||
# print("done_spec:", env.done_spec)
|
||||
|
||||
td = env.reset()
|
||||
print("reset tensordict", td)
|
||||
# td = env.reset()
|
||||
# print("reset tensordict", td)
|
||||
|
||||
td = env.rand_step(td)
|
||||
print("random step tensordict", td)
|
||||
# td = env.rand_step(td)
|
||||
# print("random step tensordict", td)
|
||||
|
||||
def simple_rollout(steps=100):
|
||||
# preallocate:
|
||||
data = TensorDict({}, [steps])
|
||||
# reset
|
||||
_data = env.reset()
|
||||
for i in range(steps):
|
||||
_data["action"] = env.action_spec.rand()
|
||||
_data = env.step(_data)
|
||||
data[i] = _data
|
||||
_data = step_mdp(_data, keep_other=True)
|
||||
return data
|
||||
# def simple_rollout(steps=100):
|
||||
# # preallocate:
|
||||
# data = TensorDict({}, [steps])
|
||||
# # reset
|
||||
# _data = env.reset()
|
||||
# for i in range(steps):
|
||||
# _data["action"] = env.action_spec.rand()
|
||||
# _data = env.step(_data)
|
||||
# data[i] = _data
|
||||
# _data = step_mdp(_data, keep_other=True)
|
||||
# return data
|
||||
|
||||
print("data from rollout:", simple_rollout(100))
|
||||
# print("data from rollout:", simple_rollout(100))
|
||||
|
||||
|
||||
@pytest.mark.skip("TODO")
|
||||
@pytest.mark.parametrize(
|
||||
"task,from_pixels,pixels_only",
|
||||
[
|
||||
@@ -63,50 +63,41 @@ def test_aloha(task, from_pixels, pixels_only):
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"task, obs_type",
|
||||
"env_task, obs_type",
|
||||
[
|
||||
("XarmLift-v0", "state"),
|
||||
("XarmLift-v0", "pixels"),
|
||||
("XarmLift-v0", "pixels_agent_pos"),
|
||||
# TODO(aliberts): Add simxarm other tasks
|
||||
# TODO(aliberts): Add gym_xarm other tasks
|
||||
],
|
||||
)
|
||||
def test_xarm(env_task, obs_type):
|
||||
import gym_xarm
|
||||
import gym_xarm # noqa: F401
|
||||
env = gym.make(f"gym_xarm/{env_task}", obs_type=obs_type)
|
||||
# env = SimxarmEnv(
|
||||
# task,
|
||||
# from_pixels=from_pixels,
|
||||
# pixels_only=pixels_only,
|
||||
# image_size=84 if from_pixels else None,
|
||||
# )
|
||||
# print_spec_rollout(env)
|
||||
# check_env_specs(env)
|
||||
check_env(env)
|
||||
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"from_pixels,pixels_only",
|
||||
"env_task, obs_type",
|
||||
[
|
||||
(True, False),
|
||||
("PushTPixels-v0", "state"),
|
||||
("PushTPixels-v0", "pixels"),
|
||||
("PushTPixels-v0", "pixels_agent_pos"),
|
||||
],
|
||||
)
|
||||
def test_pusht(from_pixels, pixels_only):
|
||||
env = PushtEnv(
|
||||
from_pixels=from_pixels,
|
||||
pixels_only=pixels_only,
|
||||
image_size=96 if from_pixels else None,
|
||||
)
|
||||
# print_spec_rollout(env)
|
||||
check_env_specs(env)
|
||||
def test_pusht(env_task, obs_type):
|
||||
import gym_pusht # noqa: F401
|
||||
env = gym.make(f"gym_pusht/{env_task}", obs_type=obs_type)
|
||||
check_env(env)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"env_name",
|
||||
[
|
||||
"simxarm",
|
||||
"pusht",
|
||||
"aloha",
|
||||
"simxarm",
|
||||
# "aloha",
|
||||
],
|
||||
)
|
||||
def test_factory(env_name):
|
||||
@@ -118,15 +109,12 @@ def test_factory(env_name):
|
||||
dataset = make_dataset(cfg)
|
||||
|
||||
env = make_env(cfg)
|
||||
obs, info = env.reset()
|
||||
obs = {key: obs[key][None, ...] for key in obs}
|
||||
obs = preprocess_observation(obs, transform=dataset.transform)
|
||||
for key in dataset.image_keys:
|
||||
assert env.reset().get(key).dtype == torch.uint8
|
||||
check_env_specs(env)
|
||||
|
||||
env = make_env(cfg, transform=dataset.transform)
|
||||
for key in dataset.image_keys:
|
||||
img = env.reset().get(key)
|
||||
img = obs[key]
|
||||
assert img.dtype == torch.float32
|
||||
# TODO(rcadene): we assume for now that image normalization takes place in the model
|
||||
assert img.max() <= 1.0
|
||||
assert img.min() >= 0.0
|
||||
check_env_specs(env)
|
||||
|
||||
Reference in New Issue
Block a user