init commit
This commit is contained in:
2
nimbus_extension/components/__init__.py
Normal file
2
nimbus_extension/components/__init__.py
Normal file
@@ -0,0 +1,2 @@
|
||||
# flake8: noqa: F401
|
||||
from . import dedump, dump, load, plan_with_render, planner, render, store
|
||||
5
nimbus_extension/components/dedump/__init__.py
Normal file
5
nimbus_extension/components/dedump/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
import nimbus.components.dedump as _dedump

from .base_dedumper import Dedumper

# Register the queue-based dedumper with the core registry under the "de" key.
_dedump.register("de", Dedumper)
|
||||
37
nimbus_extension/components/dedump/base_dedumper.py
Normal file
37
nimbus_extension/components/dedump/base_dedumper.py
Normal file
@@ -0,0 +1,37 @@
|
||||
import time
|
||||
|
||||
from nimbus.components.data.iterator import Iterator
|
||||
from nimbus.components.data.package import Package
|
||||
|
||||
|
||||
class Dedumper(Iterator):
    """Iterator that pulls serialized Package objects from an input queue and deserializes them.

    Stops iteration when a stop-signal package is received.

    Args:
        input_queue: Queue-like object whose blocking ``get()`` returns Package
            instances. May be None at construction, but must be set before
            iteration — ``_next`` calls ``self.input_queue.get()`` directly.
    """

    def __init__(self, input_queue=None):
        super().__init__()
        # Source of incoming packages; a blocking .get() is issued per _next call.
        self.input_queue = input_queue

    def __iter__(self):
        return self

    def _next(self) -> Package:
        """Fetch, validate, and deserialize the next package from the queue.

        Returns:
            Package: The deserialized package.

        Raises:
            StopIteration: When a stop-signal package arrives.
        """
        try:
            self.logger.info("Dedumper try to get package from queue")
            package = self.input_queue.get()
            self.logger.info(f"get task {package.task_name} package from queue")
            st = time.time()

            assert isinstance(package, Package), f"the transfered data type must be Package, but it is {type(package)}"
            if package.should_stop():
                self.logger.info("received stop signal")
                raise StopIteration()
            else:
                # Fix: condition is task_id >= 0, so the message must say "non-negative",
                # not "greater than 0".
                assert (
                    package.is_serialized() and package.task_id >= 0
                ), "received data must be deserialized and task id must be non-negative"
                package.deserialize()
                self.collect_compute_frame_info(1, time.time() - st)
                return package
        except StopIteration:
            raise StopIteration("No more packages to process.")
        except Exception as e:
            self.logger.exception(f"Error during dedumping: {e}")
            # Bare raise preserves the original exception and traceback
            # (`raise e` re-raises but resets the raise site).
            raise
|
||||
5
nimbus_extension/components/dump/__init__.py
Normal file
5
nimbus_extension/components/dump/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
import nimbus.components.dump as _dump

from .env_dumper import EnvDumper

# Register the environment dumper with the core registry under the "env" key.
_dump.register("env", EnvDumper)
|
||||
10
nimbus_extension/components/dump/env_dumper.py
Normal file
10
nimbus_extension/components/dump/env_dumper.py
Normal file
@@ -0,0 +1,10 @@
|
||||
from nimbus.components.dump import BaseDumper
|
||||
|
||||
|
||||
class EnvDumper(BaseDumper):
    """Dumper that serializes the current workflow's plan information.

    Args:
        data_iter: Iterator supplying data from the previous pipeline component.
        output_queue: Optional queue the serialized payload is forwarded to.
    """

    def __init__(self, data_iter, output_queue=None):
        super().__init__(data_iter, output_queue=output_queue)

    def dump(self, seq, obs):
        """Serialize plan info from the current scene's workflow (seq/obs are unused here)."""
        serialized = self.scene.wf.dump_plan_info()
        return serialized
|
||||
8
nimbus_extension/components/load/__init__.py
Normal file
8
nimbus_extension/components/load/__init__.py
Normal file
@@ -0,0 +1,8 @@
|
||||
import nimbus.components.load as _load

from .env_loader import EnvLoader
from .env_randomizer import EnvRandomizer

# Register the environment loader and randomizer with the core load registry.
_load.register_loader("env_loader", EnvLoader)
_load.register_randomizer("env_randomizer", EnvRandomizer)
|
||||
180
nimbus_extension/components/load/env_loader.py
Normal file
180
nimbus_extension/components/load/env_loader.py
Normal file
@@ -0,0 +1,180 @@
|
||||
import time
|
||||
from fractions import Fraction
|
||||
|
||||
from nimbus.components.data.iterator import Iterator
|
||||
from nimbus.components.data.package import Package
|
||||
from nimbus.components.data.scene import Scene
|
||||
from nimbus.components.load import SceneLoader
|
||||
from nimbus.daemon import ComponentStatus, StatusReporter
|
||||
from nimbus.daemon.decorators import status_monitor
|
||||
from nimbus.utils.flags import get_random_seed
|
||||
from workflows.base import create_workflow
|
||||
|
||||
|
||||
class EnvLoader(SceneLoader):
    """
    Environment loader that initializes Isaac Sim and loads scenes based on workflow configurations.

    This loader integrates with the workflow system to manage scene loading and task execution.
    It supports two operating modes:
    - Standalone mode (pack_iter=None): Loads tasks directly from workflow configuration
    - Pipeline mode (pack_iter provided): Loads tasks from a package iterator

    It also supports task repetition for data augmentation across different random seeds.

    Args:
        pack_iter (Iterator[Package]): An iterator from the previous component. None for standalone.
        cfg_path (str): Path to the workflow configuration file.
        workflow_type (str): Type of workflow to create (e.g., 'SimBoxDualWorkFlow').
        simulator (dict): Simulator configuration including physics_dt, rendering_dt, headless, etc.
        task_repeat (int): How many times to repeat each task before advancing (-1 means single execution).
        need_preload (bool): Whether to preload assets on scene initialization.
        scene_info (str): Configuration key for scene information in the workflow config.
    """

    def __init__(
        self,
        pack_iter: Iterator[Package],
        cfg_path: str,
        workflow_type: str,
        simulator: dict,
        task_repeat: int = -1,
        need_preload: bool = False,
        scene_info: str = "dining_room_scene_info",
    ):
        init_start_time = time.time()
        super().__init__(pack_iter)

        self.status_reporter = StatusReporter(self.__class__.__name__)
        self.status_reporter.update_status(ComponentStatus.IDLE)
        self.need_preload = need_preload
        self.task_repeat_cnt = task_repeat
        self.task_repeat_idx = 0
        self.workflow_type = workflow_type

        # Parse simulator config: dt values may be fraction strings such as "1/30".
        physics_dt = simulator.get("physics_dt", "1/30")
        rendering_dt = simulator.get("rendering_dt", "1/30")
        if isinstance(physics_dt, str):
            physics_dt = float(Fraction(physics_dt))
        if isinstance(rendering_dt, str):
            rendering_dt = float(Fraction(rendering_dt))

        # NOTE(review): SimulationApp is imported lazily here — presumably it must
        # be created before any omni.isaac import is valid; keep the ordering.
        from isaacsim import SimulationApp

        self.simulation_app = SimulationApp(
            {
                "headless": simulator.get("headless", True),
                "anti_aliasing": simulator.get("anti_aliasing", 3),
                "multi_gpu": simulator.get("multi_gpu", True),
                "renderer": simulator.get("renderer", "RayTracedLighting"),
            }
        )

        self.logger.info(f"simulator params: physics dt={physics_dt}, rendering dt={rendering_dt}")
        from omni.isaac.core import World

        world = World(
            physics_dt=physics_dt,
            rendering_dt=rendering_dt,
            stage_units_in_meters=simulator.get("stage_units_in_meters", 1.0),
        )

        # Import workflow extensions and create workflow
        from workflows import import_extensions

        import_extensions(workflow_type)
        self.workflow = create_workflow(
            workflow_type,
            world,
            cfg_path,
            scene_info=scene_info,
            random_seed=get_random_seed(),
        )

        self.scene = None
        self.task_finish = False
        self.cur_index = 0
        self.record_init_time(time.time() - init_start_time)

        self.status_reporter.update_status(ComponentStatus.READY)

    def _make_scene(self, task_id: int) -> Scene:
        """Build a Scene wrapper for *task_id* using the current repeat index."""
        return Scene(
            name=self.workflow.get_task_name(),
            wf=self.workflow,
            task_id=task_id,
            task_exec_num=self.task_repeat_idx,
            simulation_app=self.simulation_app,
        )

    @status_monitor()
    def _init_next_task(self):
        """
        Internal helper method to initialize and return the next task as a Scene object.

        Handles task repetition logic and advances the task index when all repetitions are complete.

        Returns:
            Scene: Initialized scene object for the next task.

        Raises:
            StopIteration: When all tasks have been exhausted.
        """
        # Repeat the current task while repetitions remain; cur_index already points
        # one past the task being repeated, hence the `- 1`.
        if self.scene is not None and self.task_repeat_cnt > 0 and self.task_repeat_idx < self.task_repeat_cnt:
            self.logger.info(f"Task execute times {self.task_repeat_idx + 1}/{self.task_repeat_cnt}")
            self.workflow.init_task(self.cur_index - 1, self.need_preload)
            self.task_repeat_idx += 1
            return self._make_scene(self.cur_index - 1)
        # Otherwise advance to the next configured task, or stop when exhausted.
        if self.cur_index >= len(self.workflow.task_cfgs):
            self.logger.info("No more tasks to load, stopping iteration.")
            raise StopIteration
        self.logger.info(f"Loading task {self.cur_index + 1}/{len(self.workflow.task_cfgs)}")
        self.workflow.init_task(self.cur_index, self.need_preload)
        self.task_repeat_idx = 1
        scene = self._make_scene(self.cur_index)
        self.cur_index += 1
        return scene

    def load_asset(self) -> Scene:
        """
        Load and initialize the next scene from workflow.

        Supports two modes:
        - Standalone: Iterates through workflow tasks directly
        - Pipeline: Synchronizes with incoming packages and applies plan info to scene

        Returns:
            Scene: The loaded and initialized Scene object.

        Raises:
            StopIteration: When no more scenes are available.
        """
        # NOTE: the previous try/except here only re-raised StopIteration/Exception
        # unchanged (and replaced the StopIteration with a message-less one), so it
        # has been removed — callers observe the same exception types.
        # Standalone mode: load tasks directly from workflow
        if self.pack_iter is None:
            self.scene = self._init_next_task()
        # Pipeline mode: load tasks from package iterator
        else:
            package = next(self.pack_iter)
            self.cur_index = package.task_id

            # Initialize scene if this is the first package or a new task.
            if self.scene is None or self.cur_index > self.scene.task_id:
                self.scene = self._init_next_task()

            # Apply plan information from package to scene
            package.data = self.scene.wf.dedump_plan_info(package.data)
            self.scene.add_plan_info(package.data)

        return self.scene
|
||||
46
nimbus_extension/components/load/env_randomizer.py
Normal file
46
nimbus_extension/components/load/env_randomizer.py
Normal file
@@ -0,0 +1,46 @@
|
||||
import os
|
||||
|
||||
from nimbus.components.data.iterator import Iterator
|
||||
from nimbus.components.data.scene import Scene
|
||||
from nimbus.components.load import LayoutRandomizer
|
||||
|
||||
|
||||
class EnvRandomizer(LayoutRandomizer):
    """
    Environment randomizer that extends the base layout randomizer to include additional randomization
    capabilities specific to the simulation environment.

    This class can be used to randomize various aspects of the environment, such as object placements,
    textures, lighting conditions, and other scene parameters, based on the provided configuration.
    The randomization process can be controlled through the number of randomizations to perform and
    whether to operate in strict mode.

    Args:
        scene_iter (Iterator[Scene]): An iterator that yields scenes to be randomized.
        random_num (int): How many randomizations to perform for each scene.
        strict_mode (bool): Whether to operate in strict mode, which enforces certain constraints
            on the randomization process.
        input_dir (str): Directory from which to load additional randomization data such as object
            placements or textures. If None, randomization is performed without loading additional data.
    """

    def __init__(
        self, scene_iter: Iterator[Scene], random_num: int = 1, strict_mode: bool = False, input_dir: str = None
    ):
        super().__init__(scene_iter, random_num, strict_mode)
        assert self.random_num > 0, "random_num must be greater than 0"
        self.input_dir = input_dir
        if self.input_dir is not None:
            # Fix: sort the listing — os.listdir order is arbitrary and platform
            # dependent, which made the cur_index -> file mapping non-deterministic.
            self.paths_names = sorted(os.listdir(self.input_dir))
            # In directory-driven mode, random_num is the number of available files.
            self.random_num = len(self.paths_names)
            # Guard the override as well: an empty directory would silently reset
            # random_num to 0 despite the assertion above.
            assert self.random_num > 0, f"input_dir {self.input_dir} contains no entries"

    def randomize_scene(self, scene) -> Scene:
        """Randomize *scene* via its workflow.

        Uses file-based randomization (optionally seeded from input_dir) when the
        scene carries no plan info, and in-memory randomization otherwise.

        Returns:
            Scene | None: The randomized scene, or None when randomization failed.
        """
        if scene.plan_info is None:
            path = None
            if self.input_dir is not None:
                path = os.path.join(self.input_dir, self.paths_names[self.cur_index])
            if not scene.wf.randomization(path):
                return None
        else:
            if not scene.wf.randomization_from_mem(scene.plan_info):
                return None
        return scene
|
||||
5
nimbus_extension/components/plan_with_render/__init__.py
Normal file
5
nimbus_extension/components/plan_with_render/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
import nimbus.components.plan_with_render as _pwr

from .plan_with_render import EnvPlanWithRender

# Register the combined plan+render component with the core registry.
_pwr.register("plan_with_render", EnvPlanWithRender)
|
||||
@@ -0,0 +1,80 @@
|
||||
import sys
|
||||
import time
|
||||
|
||||
from nimbus.components.data.iterator import Iterator
|
||||
from nimbus.components.data.observation import Observations
|
||||
from nimbus.components.data.scene import Scene
|
||||
from nimbus.daemon.decorators import status_monitor
|
||||
from nimbus.utils.flags import is_debug_mode
|
||||
|
||||
|
||||
class EnvPlanWithRender(Iterator):
    """
    A component that integrates planning and rendering for a given scene. It takes an iterator of scenes as
    input, performs planning and rendering for each scene, and produces sequences and observations as output.
    The component manages the planning and rendering process, including tracking the current episode and
    collecting performance metrics.

    Args:
        scene_iter (Iterator[Scene]): An iterator that yields scenes to be processed for planning and rendering.
    """

    def __init__(self, scene_iter: Iterator[Scene]):
        super().__init__()
        self.scene_iter = scene_iter
        # Planning attempts allowed per scene.
        self.episodes = 1
        # Sentinel: start above `episodes` so the first _next() call fetches a scene.
        self.current_episode = sys.maxsize
        self.scene = None

    @status_monitor()
    def plan_with_render(self):
        """Run plan-with-render on the current scene's workflow.

        Returns:
            Observations | None: An Observations batch of length obs_num, or None
            when the workflow reports no observations (obs_num <= 0).
        """
        wf = self.scene.wf
        obs_num = wf.plan_with_render()
        if obs_num <= 0:
            return None
        # obs_num is the observation count reported by the workflow for this episode.
        obs = Observations(self.scene.name, str(self.current_episode), length=obs_num)
        return obs

    def _next(self):
        """Produce the next (scene, seq, obs) triple.

        Returns (None, None, None) when the iterator yields a None scene,
        (scene, None, None) when planning failed for this scene, and raises
        StopIteration when the scene iterator is exhausted. In debug mode,
        load/plan errors are re-raised instead of being swallowed.
        """
        try:
            # Fetch a new scene once the current one has used up its episode budget
            # (also true on the very first call, thanks to the sys.maxsize sentinel).
            if self.scene is None or self.current_episode >= self.episodes:
                try:
                    self.scene = next(self.scene_iter)
                    self.current_episode = 0
                    if self.scene is None:
                        return None, None, None
                except StopIteration:
                    raise StopIteration("No more scene to process.")
                except Exception as e:
                    self.logger.exception(f"Error loading next scene: {e}")
                    if is_debug_mode():
                        raise e
                    # Force a fresh scene fetch on the next call.
                    self.current_episode = sys.maxsize
                    return None, None, None

            # Retry planning until it succeeds or the episode budget is spent.
            while True:
                compute_start_time = time.time()
                obs = self.plan_with_render()
                compute_end_time = time.time()
                self.current_episode += 1

                if obs is not None:
                    self.collect_compute_frame_info(obs.get_length(), compute_end_time - compute_start_time)
                    return self.scene, None, obs

                if self.current_episode >= self.episodes:
                    return self.scene, None, None

                self.logger.info(f"Generate seq failed and retry. Current episode id is {self.current_episode}")
        except StopIteration:
            raise StopIteration("No more scene to process.")
        except Exception as e:
            scene_name = getattr(self.scene, "name", "<unknown>")
            self.logger.exception(
                f"Error during idx {self.current_episode} sequence plan with render for scene {scene_name}: {e}"
            )
            if is_debug_mode():
                raise e
            # Count the failed attempt and hand back an empty result for this scene.
            self.current_episode += 1
            return self.scene, None, None
|
||||
7
nimbus_extension/components/planner/__init__.py
Normal file
7
nimbus_extension/components/planner/__init__.py
Normal file
@@ -0,0 +1,7 @@
|
||||
import nimbus.components.planner as _planner

from .env_planner import EnvSeqPlanner
from .env_reader import EnvReader

# Register both planner implementations with the core planner registry.
_planner.register("env_planner", EnvSeqPlanner)
_planner.register("env_reader", EnvReader)
|
||||
25
nimbus_extension/components/planner/env_planner.py
Normal file
25
nimbus_extension/components/planner/env_planner.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from nimbus.components.data.iterator import Iterator
|
||||
from nimbus.components.data.scene import Scene
|
||||
from nimbus.components.data.sequence import Sequence
|
||||
from nimbus.components.planner import SequencePlanner
|
||||
|
||||
|
||||
class EnvSeqPlanner(SequencePlanner):
    """
    Sequence planner that delegates sequence generation to the environment's workflow.

    Args:
        scene_iter (Iterator[Scene]): An iterator that provides scenes to be processed for sequence planning.
        planner_cfg (dict): A dictionary containing configuration parameters for the planner,
            such as the type of planner to use and its arguments.
    """

    def __init__(self, scene_iter: Iterator[Scene], planner_cfg: dict):
        super().__init__(scene_iter, planner_cfg, episodes=1)

    def generate_sequence(self):
        """Ask the workflow for a sequence; return None when it comes back empty."""
        generated = self.scene.wf.generate_seq()
        if len(generated) <= 0:
            return None
        return Sequence(self.scene.name, str(self.current_episode), length=len(generated), data=generated)
|
||||
32
nimbus_extension/components/planner/env_reader.py
Normal file
32
nimbus_extension/components/planner/env_reader.py
Normal file
@@ -0,0 +1,32 @@
|
||||
from nimbus.components.data.iterator import Iterator
|
||||
from nimbus.components.data.scene import Scene
|
||||
from nimbus.components.data.sequence import Sequence
|
||||
from nimbus.components.planner import SequencePlanner
|
||||
|
||||
|
||||
class EnvReader(SequencePlanner):
    """
    Sequence planner that recovers previously planned sequences via the environment's workflow.

    Args:
        scene_iter (Iterator[Scene]): An iterator that provides scenes to be processed for sequence planning.
        planner_cfg (dict): A dictionary containing configuration parameters for the planner,
            such as the type of planner to use and its arguments.
    """

    def __init__(self, scene_iter: Iterator[Scene], planner_cfg: dict):
        super().__init__(scene_iter, planner_cfg, episodes=1)

    def generate_sequence(self):
        """Recover a sequence from disk (no plan info) or from memory (plan info present)."""
        wf = self.scene.wf
        plan_info = self.scene.plan_info
        sequence = wf.recover_seq(None) if plan_info is None else wf.recover_seq_from_mem(plan_info)
        if len(sequence) == 0:
            return None
        return Sequence(self.scene.name, str(self.current_episode), length=len(sequence), data=sequence)

    def _initialize(self, scene):
        # No per-scene initialization is needed when reading recovered sequences.
        pass
|
||||
5
nimbus_extension/components/render/__init__.py
Normal file
5
nimbus_extension/components/render/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
import nimbus.components.render as _render

from .env_renderer import EnvRenderer

# Register the environment renderer with the core render registry.
_render.register("env_renderer", EnvRenderer)
|
||||
25
nimbus_extension/components/render/env_renderer.py
Normal file
25
nimbus_extension/components/render/env_renderer.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from nimbus.components.data.observation import Observations
|
||||
from nimbus.components.render import BaseRenderer
|
||||
|
||||
|
||||
class EnvRenderer(BaseRenderer):
    """
    Renderer for environment simulation: replays planned sequences through the
    scene's workflow and wraps the resulting frames in an Observations object.
    """

    def __init__(self, scene_seq_iter):
        super().__init__(scene_seq_iter)

    def _lazy_init(self):
        # Rendering resources are owned by the workflow; nothing to set up here.
        pass

    def _close_resource(self):
        # Nothing to release: the workflow manages its own resources.
        pass

    def generate_obs(self, seq):
        """Replay *seq* through the workflow; return Observations, or None when empty."""
        frame_count = self.scene.wf.seq_replay(seq.data)
        if frame_count <= 0:
            return None
        return Observations(seq.scene_name, seq.index, length=frame_count)
|
||||
5
nimbus_extension/components/store/__init__.py
Normal file
5
nimbus_extension/components/store/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
import nimbus.components.store as _store

from .env_writer import EnvWriter

# Register the environment writer with the core store registry.
_store.register("env_writer", EnvWriter)
|
||||
58
nimbus_extension/components/store/env_writer.py
Normal file
58
nimbus_extension/components/store/env_writer.py
Normal file
@@ -0,0 +1,58 @@
|
||||
import os
|
||||
|
||||
from nimbus.components.store import BaseWriter
|
||||
|
||||
|
||||
class EnvWriter(BaseWriter):
    """
    A writer that saves generated sequences and observations to disk for environment simulations.
    This class extends the BaseWriter to provide specific implementations for handling data related
    to environment simulations.

    Args:
        data_iter (Iterator): An iterator that provides data to be written, typically containing scenes,
            sequences, and observations.
        seq_output_dir (str): The directory where generated sequences will be saved. Can be None
            if sequence output is not needed.
        output_dir (str): The directory where generated observations will be saved. Can be None
            if observation output is not needed.
        batch_async (bool): If True, the writer will use asynchronous batch writing to improve performance
            when handling large amounts of data. Default is True.
        async_threshold (int): The maximum number of asynchronous write operations that can be in progress
            at the same time. If the threshold is reached, the writer will wait for the oldest operation
            to complete before starting a new one. Default is 1.
        batch_size (int): The number of data items to write in each batch when using asynchronous writing.
            Default is 1.
    """

    def __init__(
        self, data_iter, seq_output_dir=None, output_dir=None, batch_async=True, async_threshold=1, batch_size=1
    ):
        super().__init__(
            data_iter,
            seq_output_dir,
            output_dir,
            batch_async=batch_async,
            async_threshold=async_threshold,
            batch_size=batch_size,
        )

    def flush_to_disk(self, task, scene_name, seq, obs):
        """Persist observations (preferred) or sequences for the current scene.

        Args:
            task: Object exposing ``save(dir)`` / ``save_seq(dir)`` used to write the data.
            scene_name: Ignored on entry; overwritten with the current scene's name.
            seq: Sequence data; written when observations are unavailable.
            obs: Observation data; written when present.

        Returns:
            int: Number of items written (0 when storage was skipped).

        Raises:
            Exception: Re-raises any failure after logging it with traceback.
        """
        # Fix: `length` was unbound when neither branch wrote anything, so the
        # "Skip this storage" path raised UnboundLocalError on `return length`.
        length = 0
        try:
            scene_name = self.scene.name
            if obs is not None and self.obs_output_dir is not None:
                log_dir = os.path.join(self.obs_output_dir, scene_name)
                self.logger.info(f"Try to save obs in {log_dir}")
                length = task.save(log_dir)
                self.logger.info(f"Saved {length} obs output saved in {log_dir}")
            elif seq is not None and self.seq_output_dir is not None:
                log_dir = os.path.join(self.seq_output_dir, scene_name)
                self.logger.info(f"Try to save seq in {log_dir}")
                length = task.save_seq(log_dir)
                self.logger.info(f"Saved {length} seq output saved in {log_dir}")
            else:
                self.logger.info("Skip this storage")
            return length
        except Exception as e:
            # logger.exception records the traceback; bare raise preserves the
            # original exception chain (previously logged at info level).
            self.logger.exception(f"Failed to save data for scene {scene_name}: {e}")
            raise
|
||||
Reference in New Issue
Block a user