Eval reproduced! Train running (but not reproduced)
@@ -32,23 +32,25 @@ def eval_policy(
             ep_frames.append(frame)

         tensordict = env.reset()
-        # render first frame before rollout
-        rendering_callback(env)
+        if save_video:
+            # render first frame before rollout
+            rendering_callback(env)

         rollout = env.rollout(
             max_steps=max_steps,
             policy=policy,
-            callback=rendering_callback,
+            callback=rendering_callback if save_video else None,
             auto_reset=False,
             tensordict=tensordict,
         )
+        # print(", ".join([f"{x:.3f}" for x in rollout["next", "reward"][:,0].tolist()]))
         ep_reward = rollout["next", "reward"].sum()
         ep_success = rollout["next", "success"].any()
         rewards.append(ep_reward.item())
         successes.append(ep_success.item())

         if save_video:
-            video_dir.parent.mkdir(parents=True, exist_ok=True)
+            video_dir.mkdir(parents=True, exist_ok=True)
             # TODO(rcadene): make fps configurable
             video_path = video_dir / f"eval_episode_{i}.mp4"
             imageio.mimsave(video_path, np.stack(ep_frames), fps=15)
@@ -82,8 +84,8 @@ def eval(cfg: dict):
     metrics = eval_policy(
         env,
         policy=policy,
-        num_episodes=10,
-        save_video=True,
+        num_episodes=20,
+        save_video=False,
         video_dir=Path("tmp/2023_01_29_xarm_lift_final"),
     )
     print(metrics)
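In the eval_policy hunk above, rendering_callback is referenced but defined outside the lines shown. A minimal sketch consistent with the ep_frames.append(frame) call — the signature and the bare env.render() call are assumptions for illustration, not code from this commit — would be:

def rendering_callback(env, td=None):
    # hypothetical helper: grab an RGB frame from the simulator after each step
    # and collect it so the episode can later be written out as a video
    ep_frames.append(env.render())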
@@ -1,11 +1,24 @@
 import pickle
 import time
 from pathlib import Path

 import hydra
 import imageio
 import numpy as np
 import torch
 from tensordict.nn import TensorDictModule
 from termcolor import colored
 from torchrl.data import LazyMemmapStorage, TensorDictReplayBuffer
 from torchrl.data.datasets.d4rl import D4RLExperienceReplay
 from torchrl.data.datasets.openx import OpenXExperienceReplay
 from torchrl.data.replay_buffers import PrioritizedSliceSampler

 from lerobot.common.datasets.simxarm import SimxarmExperienceReplay
 from lerobot.common.envs.factory import make_env
 from lerobot.common.logger import Logger
 from lerobot.common.tdmpc import TDMPC

-from ..common.utils import set_seed
+from lerobot.common.utils import set_seed
+from lerobot.scripts.eval import eval_policy


 @hydra.main(version_base=None, config_name="default", config_path="../configs")
@@ -15,22 +28,169 @@ def train(cfg: dict):
     print(colored("Work dir:", "yellow", attrs=["bold"]), cfg.log_dir)

     env = make_env(cfg)
-    agent = TDMPC(cfg)
+    policy = TDMPC(cfg)
     # ckpt_path = "/home/rcadene/code/fowm/logs/xarm_lift/all/default/2/models/offline.pt"
     ckpt_path = "/home/rcadene/code/fowm/logs/xarm_lift/all/default/2/models/final.pt"
-    agent.load(ckpt_path)
+    policy.load(ckpt_path)

+    # online training

-    eval_metrics = train_agent(
-        env,
-        agent,
-        num_episodes=10,
-        save_video=True,
-        video_dir=Path("tmp/2023_01_29_xarm_lift_final"),
+    td_policy = TensorDictModule(
+        policy,
+        in_keys=["observation", "step_count"],
+        out_keys=["action"],
     )

-    print(eval_metrics)
+    # initialize offline dataset
+
+    dataset_id = f"xarm_{cfg.task}_medium"
+
+    num_traj_per_batch = cfg.batch_size  # // cfg.horizon
+    # TODO(rcadene): Sampler outputs a batch_size <= cfg.batch_size.
+    # We would need to add a transform to pad the tensordict to ensure batch_size == cfg.batch_size.
+    sampler = PrioritizedSliceSampler(
+        max_capacity=100_000,
+        alpha=0.7,
+        beta=0.9,
+        num_slices=num_traj_per_batch,
+        strict_length=False,
+    )
+
+    # TODO(rcadene): use PrioritizedReplayBuffer
+    offline_buffer = SimxarmExperienceReplay(
+        dataset_id,
+        # download="force",
+        download=True,
+        streaming=False,
+        root="data",
+        sampler=sampler,
+    )
+
+    num_steps = len(offline_buffer)
+    index = torch.arange(0, num_steps, 1)
+    sampler.extend(index)
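The TODO above notes that PrioritizedSliceSampler, with strict_length=False, can hand back fewer transitions than cfg.batch_size when a sampled episode is shorter than the requested slice. A padding transform along the lines sketched below would restore a fixed batch size; the helper name and the repeat-the-last-transition strategy are illustrative assumptions, not code from this commit:

def pad_to_batch_size(batch, batch_size):
    # batch is the TensorDict sampled from offline_buffer; if the slice sampler
    # came up short, repeat the final transition until the requested size is reached
    missing = batch_size - batch.batch_size[0]
    if missing > 0:
        filler = batch[-1:].expand(missing).clone()
        batch = torch.cat([batch, filler], dim=0)
    return batch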
+
+    # offline_buffer._storage.device = torch.device("cuda")
+    # offline_buffer._storage._storage.to(torch.device("cuda"))
+    # TODO(rcadene): add online_buffer
+
+    # Observation encoder
+    # Dynamics predictor
+    # Reward predictor
+    # Policy
+    # Qs state-action value predictor
+    # V state value predictor
+
+    L = Logger(cfg.log_dir, cfg)
+
+    episode_idx = 0
+    start_time = time.time()
+    step = 0
+    last_log_step = 0
+    last_save_step = 0
+
+    while step < cfg.train_steps:
+        is_offline = True
+        num_updates = cfg.episode_length
+        _step = step + num_updates
+        rollout_metrics = {}
+
+        # if step >= cfg.offline_steps:
+        #     is_offline = False
+
+        #     # Collect trajectory
+        #     obs = env.reset()
+        #     episode = Episode(cfg, obs)
+        #     success = False
+        #     while not episode.done:
+        #         action = policy.act(obs, step=step, t0=episode.first)
+        #         obs, reward, done, info = env.step(action.cpu().numpy())
+        #         reward = reward_normalizer(reward)
+        #         mask = 1.0 if (not done or "TimeLimit.truncated" in info) else 0.0
+        #         success = info.get('success', False)
+        #         episode += (obs, action, reward, done, mask, success)
+        #     assert len(episode) <= cfg.episode_length
+        #     buffer += episode
+        #     episode_idx += 1
+        #     rollout_metrics = {
+        #         'episode_reward': episode.cumulative_reward,
+        #         'episode_success': float(success),
+        #         'episode_length': len(episode)
+        #     }
+        #     num_updates = len(episode) * cfg.utd
+        #     _step = min(step + len(episode), cfg.train_steps)
+
+        # Update model
+        train_metrics = {}
+        if is_offline:
+            for i in range(num_updates):
+                train_metrics.update(policy.update(offline_buffer, step + i))
+        # else:
+        #     for i in range(num_updates):
+        #         train_metrics.update(
+        #             policy.update(buffer, step + i // cfg.utd,
+        #                 demo_buffer=offline_buffer if cfg.balanced_sampling else None)
+        #         )
+
+        # Log training metrics
+        env_step = int(_step * cfg.action_repeat)
+        common_metrics = {
+            "episode": episode_idx,
+            "step": _step,
+            "env_step": env_step,
+            "total_time": time.time() - start_time,
+            "is_offline": float(is_offline),
+        }
+        train_metrics.update(common_metrics)
+        train_metrics.update(rollout_metrics)
+        L.log(train_metrics, category="train")
+
+        # Evaluate policy periodically
+        if step == 0 or env_step - last_log_step >= cfg.eval_freq:
+
+            eval_metrics = eval_policy(
+                env,
+                td_policy,
+                num_episodes=cfg.eval_episodes,
+                # TODO(rcadene): add step, env_step, L.video
+            )
+
+            # TODO(rcadene):
+            # if hasattr(env, "get_normalized_score"):
+            #     eval_metrics['normalized_score'] = env.get_normalized_score(eval_metrics["episode_reward"]) * 100.0
+
+            common_metrics.update(eval_metrics)
+
+            L.log(common_metrics, category="eval")
+            last_log_step = env_step - env_step % cfg.eval_freq
+
+        # Save model periodically
+        # if cfg.save_model and env_step - last_save_step >= cfg.save_freq:
+        #     L.save_model(policy, identifier=env_step)
+        #     print(f"Model has been checkpointed at step {env_step}")
+        #     last_save_step = env_step - env_step % cfg.save_freq
+
+        # if cfg.save_model and is_offline and _step >= cfg.offline_steps:
+        #     # save the model after offline training
+        #     L.save_model(policy, identifier="offline")
+
+        step = _step
+
+    # dataset_d4rl = D4RLExperienceReplay(
+    #     dataset_id="maze2d-umaze-v1",
+    #     split_trajs=False,
+    #     batch_size=1,
+    #     sampler=SamplerWithoutReplacement(drop_last=False),
+    #     prefetch=4,
+    #     direct_download=True,
+    # )
+
+    # dataset_openx = OpenXExperienceReplay(
+    #     "cmu_stretch",
+    #     batch_size=1,
+    #     num_slices=1,
+    #     # download="force",
+    #     streaming=False,
+    #     root="data",
+    # )
+

 if __name__ == "__main__":
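One bookkeeping detail in the training loop above: last_log_step is snapped down to the most recent multiple of cfg.eval_freq, so the next evaluation only fires once env_step has advanced by a full eval_freq again. A tiny worked example (the numbers are made up for illustration):

eval_freq = 5000
env_step = 12_340
last_log_step = env_step - env_step % eval_freq  # 12_340 - 2_340 == 10_000
# the next eval triggers when env_step - last_log_step >= eval_freq, i.e. at env_step >= 15_000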
@@ -1,80 +0,0 @@
-import pickle
-from pathlib import Path
-
-import imageio
-import simxarm
-
-if __name__ == "__main__":
-
-    task = "lift"
-    dataset_dir = Path(f"data/xarm_{task}_medium")
-    dataset_path = dataset_dir / f"buffer.pkl"
-    print(f"Using offline dataset '{dataset_path}'")
-    with open(dataset_path, "rb") as f:
-        dataset_dict = pickle.load(f)
-
-    required_keys = [
-        "observations",
-        "next_observations",
-        "actions",
-        "rewards",
-        "dones",
-        "masks",
-    ]
-    for k in required_keys:
-        if k not in dataset_dict and k[:-1] in dataset_dict:
-            dataset_dict[k] = dataset_dict.pop(k[:-1])
-
-    out_dir = Path("tmp/2023_01_26_xarm_lift_medium")
-    out_dir.mkdir(parents=True, exist_ok=True)
-
-    frames = dataset_dict["observations"]["rgb"][:100]
-    frames = frames.transpose(0, 2, 3, 1)
-    imageio.mimsave(out_dir / "test.mp4", frames, fps=30)
-
-    frames = []
-    cfg = {}
-
-    env = simxarm.make(
-        task=task,
-        obs_mode="all",
-        image_size=84,
-        action_repeat=cfg.get("action_repeat", 1),
-        frame_stack=cfg.get("frame_stack", 1),
-        seed=1,
-    )
-
-    obs = env.reset()
-    frame = env.render(mode="rgb_array", width=384, height=384)
-    frames.append(frame)
-
-    # def is_first_obs(obs):
-    #     nonlocal first_obs
-    #     print(((dataset_dict["observations"]["state"][i]-obs["state"])**2).sum())
-    #     print(((dataset_dict["observations"]["rgb"][i]-obs["rgb"])**2).sum())
-
-    for i in range(25):
-        action = dataset_dict["actions"][i]
-
-        print(f"#{i}")
-        # print(obs["state"])
-        # print(dataset_dict["observations"]["state"][i])
-        print(((dataset_dict["observations"]["state"][i] - obs["state"]) ** 2).sum())
-        print(((dataset_dict["observations"]["rgb"][i] - obs["rgb"]) ** 2).sum())
-
-        obs, reward, done, info = env.step(action)
-        frame = env.render(mode="rgb_array", width=384, height=384)
-        frames.append(frame)
-
-        print(reward)
-        print(dataset_dict["rewards"][i])
-
-        print(done)
-        print(dataset_dict["dones"][i])
-
-        if dataset_dict["dones"][i]:
-            obs = env.reset()
-            frame = env.render(mode="rgb_array", width=384, height=384)
-            frames.append(frame)
-
-    # imageio.mimsave(out_dir / 'test_rollout.mp4', frames, fps=60)
lerobot/scripts/visualize_dataset.py (new file, 59 lines)
@@ -0,0 +1,59 @@
+import pickle
+from pathlib import Path
+
+import imageio
+import simxarm
+import torch
+from torchrl.data.replay_buffers import (
+    SamplerWithoutReplacement,
+    SliceSampler,
+    SliceSamplerWithoutReplacement,
+)
+
+from lerobot.common.datasets.simxarm import SimxarmExperienceReplay
+
+
+def visualize_simxarm_dataset(dataset_id="xarm_lift_medium"):
+    sampler = SliceSamplerWithoutReplacement(
+        num_slices=1,
+        strict_length=False,
+        shuffle=False,
+    )
+
+    dataset = SimxarmExperienceReplay(
+        dataset_id,
+        # download="force",
+        download=True,
+        streaming=False,
+        root="data",
+        sampler=sampler,
+    )
+
+    NUM_EPISODES_TO_RENDER = 10
+    MAX_NUM_STEPS = 50
+    FIRST_FRAME = 0
+    for _ in range(NUM_EPISODES_TO_RENDER):
+        episode = dataset.sample(MAX_NUM_STEPS)
+
+        ep_idx = episode["episode"][FIRST_FRAME].item()
+        ep_frames = torch.cat(
+            [
+                episode["observation"]["image"][FIRST_FRAME][None, ...],
+                episode["next", "observation"]["image"],
+            ],
+            dim=0,
+        )
+
+        video_dir = Path("tmp/2024_02_03_xarm_lift_medium")
+        video_dir.mkdir(parents=True, exist_ok=True)
+        # TODO(rcadene): make fps configurable
+        video_path = video_dir / f"eval_episode_{ep_idx}.mp4"
+        imageio.mimsave(video_path, ep_frames.numpy().transpose(0, 2, 3, 1), fps=15)
+
+        # ran out of episodes
+        if dataset._sampler._sample_list.numel() == 0:
+            break
+
+
+if __name__ == "__main__":
+    visualize_simxarm_dataset()
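For reference, the torch.cat in the new file stacks the first observation image with the T "next" observation images, giving a (T+1, C, H, W) tensor, and the transpose in the imageio.mimsave call converts it to the channel-last layout video writers expect. A minimal shape sanity check (the sizes are illustrative, assuming 84x84 RGB frames as elsewhere in this commit):

frames_chw = torch.zeros(51, 3, 84, 84)                # (T+1, C, H, W), e.g. 50 steps plus the first frame
frames_hwc = frames_chw.numpy().transpose(0, 2, 3, 1)
assert frames_hwc.shape == (51, 84, 84, 3)             # what imageio.mimsave consumes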