init commit

This commit is contained in:
zyhe
2026-03-16 11:44:10 +00:00
commit 94384a93c9
552 changed files with 363038 additions and 0 deletions

View File

@@ -0,0 +1,336 @@
"""General 3D Bounding Box class.
Origin: https://github.com/OasisYang/Wild6D/blob/main/lib/box.py
"""
import numpy as np
from numpy.linalg import lstsq as optimizer
from scipy.spatial.transform import Rotation as rotation_util
# Box edges as pairs of vertex indices into the 9-keypoint layout used
# throughout this module (index 0 is the center, 1-8 are the corners).
# Stored in quadruples: four parallel edges per axis.
EDGES = (
    [1, 5],
    [2, 6],
    [3, 7],
    [4, 8],  # lines along x-axis
    [1, 3],
    [5, 7],
    [2, 4],
    [6, 8],  # lines along y-axis
    [1, 2],
    [3, 4],
    [5, 6],
    [7, 8],  # lines along z-axis
)
# The vertices are ordered according to the left-hand rule, so the normal
# vector of each face will point inward the box.
FACES = np.array(
    [
        [5, 6, 8, 7],  # +x on yz plane
        [1, 3, 4, 2],  # -x on yz plane
        [3, 7, 8, 4],  # +y on xz plane = top
        [1, 2, 6, 5],  # -y on xz plane
        [2, 4, 8, 6],  # +z on xy plane = front
        [1, 5, 7, 3],  # -z on xy plane
    ]
)
# Canonical unit box: the center followed by the 8 corners of a cube of
# side 1 centered at the origin (same ordering as Box vertices).
UNIT_BOX = np.asarray(
    [
        [0.0, 0.0, 0.0],
        [-0.5, -0.5, -0.5],
        [-0.5, -0.5, 0.5],
        [-0.5, 0.5, -0.5],
        [-0.5, 0.5, 0.5],
        [0.5, -0.5, -0.5],
        [0.5, -0.5, 0.5],
        [0.5, 0.5, -0.5],
        [0.5, 0.5, 0.5],
    ]
)
# Number of box keypoints: 1 center + 8 corners.
NUM_KEYPOINTS = 9
# Row indices into FACES: per the comments above, row 4 is the +z ("front")
# face and row 2 is the +y ("top") face.
FRONT_FACE_ID = 4
TOP_FACE_ID = 2
def get_bbox_center_and_corners(bbox):
    """Return a (9, 3) array: the box center followed by its 8 corners.

    The corners are enumerated with x varying slowest and z fastest, i.e.
    (min,min,min), (min,min,max), ..., (max,max,max).

    Args:
        bbox: object exposing indexable ``min`` and ``max`` 3-vectors
            (axis-aligned bounds).

    Returns:
        np.ndarray of shape (9, 3); row 0 is the center, rows 1-8 the corners.
    """
    lo = [bbox.min[0], bbox.min[1], bbox.min[2]]
    hi = [bbox.max[0], bbox.max[1], bbox.max[2]]
    # Midpoint of the bounds along each axis.
    center = np.array([(a + b) / 2 for a, b in zip(lo, hi)])
    # All sign combinations of the bounds, x outermost / z innermost,
    # which reproduces the canonical corner ordering.
    corners = np.array(
        [
            [x, y, z]
            for x in (lo[0], hi[0])
            for y in (lo[1], hi[1])
            for z in (lo[2], hi[2])
        ]
    )
    return np.vstack([center, corners])
class Box(object):
    """General 3D Oriented Bounding Box.

    A box is represented by its 9 keypoints: the center followed by the 8
    corners (see ``scaled_axis_aligned_vertices`` for the canonical
    ordering). The 9-DoF decomposition (rotation, translation, scale) is
    recovered lazily from the vertices via ``fit`` and cached.
    """

    def __init__(self, vertices=None):
        """Create a box from a 9x3 vertex array (default: unit cube at origin)."""
        if vertices is None:
            vertices = self.scaled_axis_aligned_vertices(np.array([1.0, 1.0, 1.0]))
        self._vertices = vertices
        # Lazily computed / cached 9-DoF decomposition of the vertices.
        self._rotation = None
        self._translation = None
        self._scale = None
        self._transformation = None
        self._volume = None

    @classmethod
    def from_transformation(cls, rotation, translation, scale):
        """Constructs an oriented bounding box from transformation and scale.

        Args:
            rotation: 3x1 rotation vector (axis-angle) or 3x3 rotation matrix.
            translation: 3x1 translation vector.
            scale: 3x1 scale vector (box extent along x/y/z).

        Returns:
            A new ``Box`` whose vertices are the scaled axis-aligned box
            rotated and translated by the given transformation.

        Raises:
            ValueError: if ``rotation`` has neither 3 nor 9 elements.
        """
        if rotation.size not in (3, 9):
            raise ValueError("Unsupported rotation, only 3x1 euler angles or 3x3 rotation matrices are supported.")
        if rotation.size == 3:
            # BUGFIX: Rotation.as_dcm() was deprecated in SciPy 1.4 and removed
            # in SciPy 1.6; as_matrix() is the drop-in replacement.
            rotation = rotation_util.from_rotvec(rotation.tolist()).as_matrix()
        scaled_identity_box = cls.scaled_axis_aligned_vertices(scale)
        vertices = np.zeros((NUM_KEYPOINTS, 3))
        for i in range(NUM_KEYPOINTS):
            vertices[i, :] = np.matmul(rotation, scaled_identity_box[i, :]) + translation.flatten()
        return cls(vertices=vertices)

    def __repr__(self):
        representation = "Box: "
        for i in range(NUM_KEYPOINTS):
            representation += f"[{i}: {self.vertices[i, 0]}, {self.vertices[i, 1]}, {self.vertices[i, 2]}]"
        return representation

    def __len__(self):
        return NUM_KEYPOINTS

    def __name__(self):
        return "Box"

    def apply_transformation(self, transformation):
        """Applies transformation on the box.

        Group multiplication is the same as rotation concatenation. Therefore return
        new box with SE3(R * R2, T + R * T2); Where R2 and T2 are existing rotation
        and translation. Note we do not change the scale.

        Args:
            transformation: a 4x4 transformation matrix.

        Returns:
            transformed box.

        Raises:
            ValueError: if ``transformation`` is not 4x4.
        """
        if transformation.shape != (4, 4):
            raise ValueError("Transformation should be a 4x4 matrix.")
        new_rotation = np.matmul(transformation[:3, :3], self.rotation)
        new_translation = transformation[:3, 3] + (np.matmul(transformation[:3, :3], self.translation))
        return Box.from_transformation(new_rotation, new_translation, self.scale)

    @classmethod
    def scaled_axis_aligned_vertices(cls, scale):
        """Returns an axis-aligned set of verticies for a box of the given scale.

        Args:
            scale: A 3*1 vector, specifiying the size of the box in x-y-z dimension.

        Returns:
            A 9x3 array: the origin (center) followed by the 8 corners.
        """
        w = scale[0] / 2.0
        h = scale[1] / 2.0
        d = scale[2] / 2.0
        # Define the local coordinate system, w.r.t. the center of the box
        aabb = np.array(
            [
                [0.0, 0.0, 0.0],
                [-w, -h, -d],
                [-w, -h, +d],
                [-w, +h, -d],
                [-w, +h, +d],
                [+w, -h, -d],
                [+w, -h, +d],
                [+w, +h, -d],
                [+w, +h, +d],
            ]
        )
        return aabb

    @classmethod
    def fit(cls, vertices):
        """Estimates a box 9-dof parameters from the given vertices.

        Directly computes the scale of the box, then solves for orientation and
        translation.

        Args:
            vertices: A 9*3 array of points. Points are arranged as 1 + 8 (center
                keypoint + 8 box vertices) matrix.

        Returns:
            orientation: 3*3 rotation matrix.
            translation: 3*1 translation vector.
            scale: 3*1 scale vector.
        """
        scale = np.zeros(3)
        # The scale would remain invariant under rotation and translation.
        # We can safely estimate the scale from the oriented box.
        for axis in range(3):
            for edge_id in range(4):
                # The edges are stored in quadruples according to each axis
                begin, end = EDGES[axis * 4 + edge_id]
                scale[axis] += np.linalg.norm(vertices[begin, :] - vertices[end, :])
            scale[axis] /= 4.0
        # Least-squares fit of the rigid transform mapping the canonical
        # (axis-aligned, scaled) box onto the observed vertices.
        x = cls.scaled_axis_aligned_vertices(scale)
        system = np.concatenate((x, np.ones((NUM_KEYPOINTS, 1))), axis=1)
        solution, _, _, _ = optimizer(system, vertices, rcond=None)
        orientation = solution[:3, :3].T
        translation = solution[3, :3]
        return orientation, translation, scale

    def inside(self, point):
        """Tests whether a given point is inside the box.

        Brings the 3D point into the local coordinate of the box. In the local
        coordinate, the box looks like an axis-aligned bounding box. Next checks
        if the box contains the point.

        Args:
            point: A 3*1 numpy vector.

        Returns:
            True if the point is inside the box, False otherwise.
        """
        inv_trans = np.linalg.inv(self.transformation)
        scale = self.scale
        point_w = np.matmul(inv_trans[:3, :3], point) + inv_trans[:3, 3]
        for i in range(3):
            if abs(point_w[i]) > scale[i] / 2.0:
                return False
        return True

    def sample(self):
        """Samples a 3D point uniformly inside this box."""
        point = np.random.uniform(-0.5, 0.5, 3) * self.scale
        point = np.matmul(self.rotation, point) + self.translation
        return point

    @property
    def vertices(self):
        # Raw 9x3 keypoint array (center + 8 corners).
        return self._vertices

    @property
    def rotation(self):
        # Lazily fit the 9-DoF decomposition on first access.
        if self._rotation is None:
            self._rotation, self._translation, self._scale = self.fit(self._vertices)
        return self._rotation

    @property
    def translation(self):
        if self._translation is None:
            self._rotation, self._translation, self._scale = self.fit(self._vertices)
        return self._translation

    @property
    def scale(self):
        if self._scale is None:
            self._rotation, self._translation, self._scale = self.fit(self._vertices)
        return self._scale

    @property
    def volume(self):
        """Compute the volume of the parallelpiped or the box.

        For the boxes, this is equivalent to np.prod(self.scale). However for
        parallelpiped, this is more involved. Viewing the box as a linear function
        we can estimate the volume using a determinant. This is equivalent to
        sp.ConvexHull(self._vertices).volume

        Returns:
            volume (float)
        """
        if self._volume is None:
            # Three edge vectors emanating from corner 1 span the parallelepiped.
            i = self._vertices[2, :] - self._vertices[1, :]
            j = self._vertices[3, :] - self._vertices[1, :]
            k = self._vertices[5, :] - self._vertices[1, :]
            sys = np.array([i, j, k])
            self._volume = abs(np.linalg.det(sys))
        return self._volume

    @property
    def transformation(self):
        # 4x4 homogeneous SE(3) transform assembled from rotation + translation.
        if self._rotation is None:
            self._rotation, self._translation, self._scale = self.fit(self._vertices)
        if self._transformation is None:
            self._transformation = np.identity(4)
            self._transformation[:3, :3] = self._rotation
            self._transformation[:3, 3] = self._translation
        return self._transformation

    def get_ground_plane(self, gravity_axis=1):
        """Get ground plane under the box.

        Args:
            gravity_axis: index of the world axis gravity is aligned with.

        Returns:
            (center, normal) of the face chosen as the ground plane.
        """
        gravity = np.zeros(3)
        gravity[gravity_axis] = 1

        def get_face_normal(face, center):
            """Get a normal vector to the given face of the box."""
            v1 = self.vertices[face[0], :] - center
            v2 = self.vertices[face[1], :] - center
            normal = np.cross(v1, v2)
            return normal

        def get_face_center(face):
            """Get the center point of the face of the box."""
            center = np.zeros(3)
            for vertex in face:
                center += self.vertices[vertex, :]
            center /= len(face)
            return center

        ground_plane_id = 0
        # BUGFIX: the alignment error |gravity x normal| is not bounded because
        # the face normal is unnormalized (its magnitude grows with face area),
        # so the previous 10.0 sentinel could reject every candidate for large
        # boxes. Infinity guarantees the minimum is always found.
        ground_plane_error = np.inf
        # The ground plane is defined as a plane aligned with gravity.
        # gravity is the (0, 1, 0) vector in the world coordinate system.
        for i in [0, 2, 4]:
            face = FACES[i, :]
            center = get_face_center(face)
            normal = get_face_normal(face, center)
            # |gravity x normal| is 0 when the face normal is parallel to gravity.
            w = np.cross(gravity, normal)
            w_norm = np.linalg.norm(w)
            if w_norm < ground_plane_error:
                ground_plane_error = w_norm
                ground_plane_id = i
        face = FACES[ground_plane_id, :]
        center = get_face_center(face)
        normal = get_face_normal(face, center)
        # For each face, we also have a parallel face that it's normal is also
        # aligned with gravity vector. We pick the face with lower height (y-value).
        # The parallel to face 0 is 1, face 2 is 3, and face 4 is 5.
        parallel_face_id = ground_plane_id + 1
        parallel_face = FACES[parallel_face_id]
        parallel_face_center = get_face_center(parallel_face)
        parallel_face_normal = get_face_normal(parallel_face, parallel_face_center)
        if parallel_face_center[gravity_axis] < center[gravity_axis]:
            center = parallel_face_center
            normal = parallel_face_normal
        return center, normal

View File

@@ -0,0 +1,100 @@
from typing import List
from pxr import PhysxSchema, Sdf, Usd, UsdGeom
def filter_collisions(
    stage,
    physicsscene_path: str,
    collision_root_path: str,
    prim_paths: List[str],
    global_paths: List[str] | None = None,
):
    """Filter collisions between clones.

    Clones will not collide with each other, but can collide with objects specified in global_paths.

    Args:
        stage: USD stage holding the physics scene and the prims to group.
        physicsscene_path (str): Path to PhysicsScene object in stage.
        collision_root_path (str): Path to place collision groups under.
        prim_paths (List[str]): Paths of objects to filter out collision.
        global_paths (List[str] | None): Paths of objects to generate collision
            (e.g. ground plane). Defaults to no global colliders.
    """
    # BUGFIX: the documented default global_paths=None previously crashed on
    # len(global_paths) below; normalize it to an empty list.
    if global_paths is None:
        global_paths = []
    physx_scene = PhysxSchema.PhysxSceneAPI(stage.GetPrimAtPath(physicsscene_path))
    # We invert the collision group filters for more efficient collision filtering across environments
    physx_scene.CreateInvertCollisionGroupFilterAttr().Set(True)
    UsdGeom.Scope.Define(stage, collision_root_path)
    # Batch all Sdf layer edits for performance.
    with Sdf.ChangeBlock():
        if len(global_paths) > 0:
            global_collision_group_path = collision_root_path + "/global_group"
            # add collision group prim
            global_collision_group = Sdf.PrimSpec(
                stage.GetRootLayer().GetPrimAtPath(collision_root_path),
                "global_group",
                Sdf.SpecifierDef,
                "PhysicsCollisionGroup",
            )
            # prepend collision API schema
            global_collision_group.SetInfo(Usd.Tokens.apiSchemas, Sdf.TokenListOp.Create({"CollectionAPI:colliders"}))
            # expansion rule
            expansion_rule = Sdf.AttributeSpec(
                global_collision_group,
                "collection:colliders:expansionRule",
                Sdf.ValueTypeNames.Token,
                Sdf.VariabilityUniform,
            )
            expansion_rule.default = "expandPrims"
            # includes rel
            global_includes_rel = Sdf.RelationshipSpec(global_collision_group, "collection:colliders:includes", False)
            for global_path in global_paths:
                global_includes_rel.targetPathList.Append(global_path)
            # filteredGroups rel
            global_filtered_groups = Sdf.RelationshipSpec(global_collision_group, "physics:filteredGroups", False)
            # We are using inverted collision group filtering, which means objects by default don't collide across
            # groups. We need to add this group as a filtered group, so that objects within this group collide with
            # each other.
            global_filtered_groups.targetPathList.Append(global_collision_group_path)
        # set collision groups and filters
        for i, prim_path in enumerate(prim_paths):
            collision_group_path = collision_root_path + f"/group{i}"
            # add collision group prim
            collision_group = Sdf.PrimSpec(
                stage.GetRootLayer().GetPrimAtPath(collision_root_path),
                f"group{i}",
                Sdf.SpecifierDef,
                "PhysicsCollisionGroup",
            )
            # prepend collision API schema
            collision_group.SetInfo(Usd.Tokens.apiSchemas, Sdf.TokenListOp.Create({"CollectionAPI:colliders"}))
            # expansion rule
            expansion_rule = Sdf.AttributeSpec(
                collision_group,
                "collection:colliders:expansionRule",
                Sdf.ValueTypeNames.Token,
                Sdf.VariabilityUniform,
            )
            expansion_rule.default = "expandPrims"
            # includes rel
            includes_rel = Sdf.RelationshipSpec(collision_group, "collection:colliders:includes", False)
            includes_rel.targetPathList.Append(prim_path)
            # filteredGroups rel
            filtered_groups = Sdf.RelationshipSpec(collision_group, "physics:filteredGroups", False)
            # We are using inverted collision group filtering, which means objects by default don't collide across
            # groups. We need to add this group as a filtered group, so that objects within this group collide with
            # each other.
            filtered_groups.targetPathList.Append(collision_group_path)
            # Cross-link each clone group with the global group so clones still
            # collide with the shared colliders (e.g. the ground plane).
            if len(global_paths) > 0:
                filtered_groups.targetPathList.Append(global_collision_group_path)
                global_filtered_groups.targetPathList.Append(collision_group_path)

View File

@@ -0,0 +1,331 @@
"""
Global constants used across simbox workflows.
- CATEGORIES: per-category preferred Euler angles for data randomization
(see `dr.get_category_euler`). Keys are dataset/category names
(e.g. \"omniobject3d-banana\"), values are allowed rotations on axes x/y/z.
- HEARTH_KITCHENS: pre-defined kitchen room scenes (geometry USDs) with
associated transforms, used to load different kitchen layouts.
- CATEGORIES_SCALE_SUGGESTED: default scaling for container / place targets
by category name (e.g. basket, bowl, dish) in place-style tasks.
- OBJECT_SCALE_SUGGESTED: per-asset scaling overrides for specific USD assets
(e.g. particular bowls or boxes that need non-uniform scale).
- HANDOVER_RANGE: list of category names that are considered suitable for
handover-style skills (thin / graspable objects).
- CUROBO_BATCH_SIZE: default batch size used in cuRobo planners.
These constants are imported by utilities in `dr.py`, place/pick skills, and
scene setup code to keep configuration in a single shared location.
"""
CATEGORIES = {
# large-scale pick objects
"google_scan-book": {"y": [0, 90, 180, 270]},
"google_scan-bottle": {"x": [0, 90]},
"google_scan-clock": {"x": [0, 90, 180, 270]},
"google_scan-dinosaur": {"x": [90]},
"google_scan-garage_kit": {"x": [90]},
"google_scan-handbag": {"x": [90, 270]},
"google_scan-hat": {"x": [90]},
# "google_scan-medicine_bottle": {"x": [0, 90, 180]},
"google_scan-medicine_bottle": {"x": [0, 180]},
"google_scan-mug": {"x": [0, 90]},
"google_scan-shoe": {"x": [0, 90, 180, 270]},
"google_scan-toy_animals": {"x": [90]},
"google_scan-toy_boat": {"x": [90]},
"google_scan-toy_bus": {"x": [90]},
"google_scan-toy_car": {"x": [90]},
"google_scan-toy_plane": {"x": [90]},
"google_scan-toy_train": {"x": [90]},
"google_scan-toy_truck": {"x": [90]},
"omniobject3d-banana": {"x": [0, 180]},
"omniobject3d-belt": {"x": [90, 270]},
"omniobject3d-book": {"y": [0, 90, 180, 270]},
# "omniobject3d-bottle": {"x": [0, 90, 180]},
"omniobject3d-bottle": {"x": [0, 180]},
"omniobject3d-bowl": {"x": [90]},
"omniobject3d-boxed_beverage": {"x": [0, 90, 180, 270]},
"omniobject3d-bread": {"x": [90, 270]},
"omniobject3d-brush": {"y": [0, 90, 180, 270]},
"omniobject3d-calculator": {"x": [0]},
"omniobject3d-can": {"x": [0, 90]},
"omniobject3d-carrot": {"x": [0, 180]},
"omniobject3d-chess": {"x": [90]},
"omniobject3d-chili": {"x": [0, 180]},
"omniobject3d-chinese_chess": {"x": [90]},
"omniobject3d-clock": {"x": [90]},
"omniobject3d-corn": {"x": [0, 180]},
"omniobject3d-cucumber": {"x": [0, 180]},
"omniobject3d-cup": {"x": [90]},
"omniobject3d-dinosaur": {"x": [90]},
"omniobject3d-dish": {"x": [90]},
"omniobject3d-egg_tart": {"x": [90]},
"omniobject3d-flash_light": {"x": [0, 180]},
"omniobject3d-garage_kit": {"x": [90]},
"omniobject3d-glasses": {"x": [90]},
"omniobject3d-glasses_case": {"x": [90, 270]},
"omniobject3d-hair_dryer": {"x": [0, 180]},
"omniobject3d-hamburger": {"x": [90]},
"omniobject3d-hammer": {"x": [0, 180]},
"omniobject3d-hot_dog": {"x": [90]},
"omniobject3d-knife": {"y": [90, 270]},
"omniobject3d-medicine_bottle": {"x": [0, 90, 180]},
"omniobject3d-mooncake": {"x": [90]},
"omniobject3d-mouse": {"x": [90]},
"omniobject3d-mug": {"x": [90]},
"omniobject3d-pen": {"x": [0, 180]},
"omniobject3d-pineapple": {"x": [90]},
"omniobject3d-power_strip": {"x": [90]},
"omniobject3d-razor": {"y": [0, 90, 180, 270]},
"omniobject3d-red_wine_glass": {"x": [90]},
"omniobject3d-remote_control": {"y": [270]},
"omniobject3d-sausage": {"x": [0, 180]},
"omniobject3d-scissor": {"y": [90, 270]},
"omniobject3d-shampoo": {"x": [0, 90, 180]},
"omniobject3d-shoe": {"x": [90]},
"omniobject3d-soap": {"x": [0, 90, 180, 270]},
"omniobject3d-spanner": {"x": [90, 270]},
"omniobject3d-starfish": {"x": [90]},
"omniobject3d-sweet_potato": {"x": [0, 180]},
"omniobject3d-table_tennis_bat": {"x": [90, 270]},
"omniobject3d-teapot": {"x": [90]},
"omniobject3d-timer": {"y": [270]},
"omniobject3d-tissue": {"x": [0, 90, 180, 270]},
"omniobject3d-tooth_brush": {"y": [0, 90, 180, 270]},
"omniobject3d-tooth_paste": {"y": [0, 90, 180, 270]},
"omniobject3d-toy_animals": {"x": [90]},
"omniobject3d-toy_boat": {"x": [90]},
"omniobject3d-toy_bus": {"x": [90]},
"omniobject3d-toy_car": {"x": [90]},
"omniobject3d-toy_plane": {"x": [90]},
"omniobject3d-toy_plant": {"x": [90]},
"omniobject3d-toy_train": {"x": [90]},
"omniobject3d-toy_truck": {"x": [90]},
"omniobject3d-umbrella": {"x": [0, 180]},
"omniobject3d-watch": {"x": [0, 180]},
"phocal-bottle": {"x": [0, 90, 180]},
"phocal-cup": {"x": [90]},
"phocal-mug": {"x": [90]},
"phocal-red_wine_glass": {"x": [90]},
"phocal-remote_control": {"y": [270]},
"phocal-shampoo": {"x": [0, 90, 180]},
"phocal-spoon": {"y": [0, 90, 180, 270]},
"phocal-teapot": {"x": [90]},
"real-camera": {"x": [0, 90, 180, 270]},
# Supp
"omniobject3d-asparagus": {"x": [0]},
"omniobject3d-bamboo_shoots": {"x": [0]},
"omniobject3d-battery": {"x": [0, 90]},
"omniobject3d-cake": {"x": [0, 90]},
"omniobject3d-candle": {"x": [0, 90]},
"omniobject3d-chicken_leg": {"x": [0]},
"omniobject3d-conch": {"x": [0]},
"omniobject3d-donut": {"x": [90]},
"omniobject3d-dumpling": {"x": [90]},
"omniobject3d-egg": {"x": [0]},
"omniobject3d-eraser": {"x": [90]},
"omniobject3d-facial_cream": {"x": [90]},
"omniobject3d-garlic": {"x": [90]},
"omniobject3d-hand_cream": {"x": [0]},
# "discarded": {"x": [0]},
# "google_scan-box": {"x": [0]},
# "omniobject3d-ball": {"x": [0]},
# "omniobject3d-box": {"x": [0]},
"omniobject3d-lemon": {"x": [0]},
"omniobject3d-lipstick": {"x": [0]},
"omniobject3d-lotus_root": {"x": [0]},
"omniobject3d-mango": {"x": [0]},
"omniobject3d-mangosteen": {"x": [90]},
"omniobject3d-onion": {"x": [90]},
"omniobject3d-orange": {"x": [90]},
"omniobject3d-pastry": {"x": [90]},
"omniobject3d-peach": {"x": [0]},
"omniobject3d-pear": {"x": [0]},
"omniobject3d-pitaya": {"x": [90]},
"omniobject3d-pomegranate": {"x": [90]},
"omniobject3d-rubik_cube": {"x": [0]},
"omniobject3d-rubik_cube-old": {"x": [0]},
"omniobject3d-shrimp": {"x": [0]},
"omniobject3d-tape_measure": {"x": [0]},
"omniobject3d-tomato": {"x": [90]},
"omniobject3d-toy_motorcycle": {"x": [0]},
"omniobject3d-waffle": {"x": [90]},
# "omniobject3d-whistle": {"x": [0]},
"phocal-can": {"x": [0, 90]},
# Basic
"book": {"y": [0, 90, 180, 270]},
"plate": {"x": [90], "y": [0], "z": [0]},
"basket": {"x": [90]},
"pnp_basket": {"x": [90], "y": [0], "z": [90]},
"dirt": {"x": [90]},
# Place
# "basket": {"x": [90]},
"bowl": {"x": [90]},
"dish": {"x": [90]},
"flower_pot": {"x": [90]},
"pan": {"x": [90]},
"box": {"x": [90]},
"microwaveoven": {"x": [90]},
"drawing": {"x": [90]},
"projector": {"x": [90]},
"pad": {"x": [90]},
# Long horizon
"bolt": {"x": [90]},
}
HEARTH_KITCHENS = {
"001": {
"name": "scene",
"path": "home_scenes/rooms/kitchen/MVUHLWYKTKJ5EAABAAAAAAY8_usd_kitchen_0.usd",
"target_class": "GeometryObject",
"scale": [0.015, 0.015, 0.015],
"translation": [-9.8, -3.9, -0.3],
"euler": [0, 0, 180],
},
"002": {
"name": "scene",
"path": "home_scenes/rooms/kitchen/MVUHLWYKTKJ5EAABAAAAABA8_usd_kitchen_0.usd",
"target_class": "GeometryObject",
"scale": [0.013, 0.013, 0.013],
"translation": [-8, 1.55, -0.163],
"euler": [0, 0, -90],
},
"003": {
"name": "scene",
"path": "home_scenes/rooms/kitchen/MVUHLWYKTKJ5EAABAAAAABI8_usd_kitchen_0.usd",
"target_class": "GeometryObject",
"scale": [0.013, 0.013, 0.013],
"translation": [5.5, -6.2, -0.133],
"euler": [0, 0, 90],
},
"004": {
"name": "scene",
"path": "home_scenes/rooms/kitchen/MVUHLWYKTKJ5EAABAAAAABQ8_usd_kitchen_0.usd",
"target_class": "GeometryObject",
"scale": [0.013, 0.013, 0.013],
"translation": [-7.15, -7.2, -0.133],
"euler": [0, 0, 180],
},
"005": {
"name": "scene",
"path": "home_scenes/rooms/kitchen/MVUHLWYKTKJ5EAABAAAAABY8_usd_kitchen_0.usd",
"target_class": "GeometryObject",
"scale": [0.013, 0.013, 0.013],
"translation": [7.2, -2.6, -0.1],
"euler": [0, 0, 90],
},
"006": {
"name": "scene",
"path": "home_scenes/rooms/kitchen/MVUHLWYKTKJ5EAABAAAAACA8_usd_kitchen_0.usd",
"target_class": "GeometryObject",
"scale": [0.015, 0.015, 0.015],
"translation": [6.27, -8.4, -0.33],
"euler": [0.0, 0.0, 90.0],
},
"007": {
"name": "scene",
"path": "home_scenes/rooms/kitchen/MVUHLWYKTKJ5EAABAAAAACI8_usd_kitchen_0.usd",
"target_class": "GeometryObject",
"scale": [0.015, 0.015, 0.015],
"translation": [7.25, -5.1, -0.3],
"euler": [0, 0, 180],
},
"008": {
"name": "scene",
"path": "home_scenes/rooms/kitchen/MVUHLWYKTKJ5EAABAAAAACQ8_usd_kitchen_0.usd",
"target_class": "GeometryObject",
"scale": [0.015, 0.015, 0.015],
"translation": [5.84, 0.67, -0.3],
"euler": [0, 0, 90],
},
"009": {
"name": "scene",
"path": "home_scenes/rooms/kitchen/MVUHLWYKTKJ5EAABAAAAACY8_usd_kitchen_0.usd",
"target_class": "GeometryObject",
"scale": [0.015, 0.015, 0.015],
"translation": [-7.95, -3, -0.37],
"euler": [0, 0, 0],
},
"010": {
"name": "scene",
"path": "home_scenes/rooms/kitchen/MVUHLWYKTKJ5EAABAAAAADA8_usd_kitchen_0.usd",
"target_class": "GeometryObject",
"scale": [0.015, 0.015, 0.015],
"translation": [-5.89, -3.58, -0.3],
"euler": [0, 0, 180],
},
}
CATEGORIES_SCALE_SUGGESTED = {
# Default per-category scale for place tasks (containers / targets)
"basket": [0.001, 0.001, 0.001],
"bowl": [0.002, 0.002, 0.002],
"dish": [0.001, 0.001, 0.001],
"flower_pot": [0.001, 0.001, 0.001],
"pan": [0.001, 0.001, 0.001],
# "box": [0.0007, 0.0007, 0.0007],
"box": [0.001, 0.001, 0.001],
"microwaveoven": [0.0005, 0.0005, 0.0005],
"drawing": [0.001, 0.001, 0.001],
"projector": [0.001, 0.001, 0.001],
"pad": [0.0005, 0.0005, 0.0005],
}
OBJECT_SCALE_SUGGESTED = {
# Per-asset scale overrides for place tasks (special cases)
"google_scan-picnic_basket_0908": [0.0012, 0.0012, 0.0012],
"omniobject3d-picnic_basket_006": [0.0012, 0.0012, 0.0012],
"google_scan-bowl_0070": [0.001, 0.001, 0.001],
"google_scan-bowl_0071": [0.0008, 0.0008, 0.0008],
"google_scan-bowl_0495": [0.0008, 0.0008, 0.0008],
"google_scan-bowl_0498": [0.0015, 0.0015, 0.0015],
"google_scan-bowl_0539": [0.0015, 0.0015, 0.0015],
"google_scan-bowl_0746": [0.001, 0.001, 0.001],
"google_scan-bowl_0752": [0.0008, 0.0008, 0.0008],
"google_scan-bowl_0839": [0.0015, 0.0015, 0.0015],
"google_scan-bowl_0891": [0.0015, 0.0015, 0.0015],
"google_scan-dish_0085": [0.0008, 0.0008, 0.0008],
"google_scan-dish_0096": [0.002, 0.002, 0.002],
"google_scan-dish_0099": [0.002, 0.002, 0.002],
"google_scan-dish_0510": [0.0015, 0.0015, 0.0015],
"omniobject3d-flower_pot_001": [0.001, 0.001, 0.001],
"omniobject3d-flower_pot_002": [0.001, 0.001, 0.001],
"omniobject3d-flower_pot_003": [0.002, 0.002, 0.002],
"omniobject3d-flower_pot_007": [0.002, 0.002, 0.002],
"omniobject3d-pan_002": [0.0015, 0.0015, 0.0015],
"omniobject3d-pan_004": [0.002, 0.002, 0.002],
"omniobject3d-picnic_basket_003": [0.0009, 0.0009, 0.0009],
"omniobject3d-picnic_basket_004": [0.0009, 0.0009, 0.0009],
"omniobject3d-picnic_basket_011": [0.0009, 0.0009, 0.0009],
"omniobject3d-box_003": [0.0011, 0.0011, 0.0011],
"omniobject3d-box_022": [0.0006, 0.0006, 0.0006],
"omniobject3d-box_024": [0.0021, 0.0021, 0.0021],
"omniobject3d-box_040": [0.0011, 0.0011, 0.0011],
"omniobject3d-box_043": [0.0016, 0.0016, 0.0016],
"omniobject3d-box_044": [0.0021, 0.0021, 0.0021],
"omniobject3d-drawing_001": [0.0003, 0.0003, 0.0003],
"omniobject3d-drawing_017": [0.0004, 0.0004, 0.0004],
"omniobject3d-projector_008": [0.0005, 0.0005, 0.0005],
"omniobject3d-pad_022": [0.0003, 0.0003, 0.0003],
"omniobject3d-pad_010": [0.0007, 0.0007, 0.0007],
}
HANDOVER_RANGE = [
"omniobject3d-bamboo_shoots",
"omniobject3d-banana",
"omniobject3d-bottle",
"omniobject3d-carrot",
"omniobject3d-chili",
"omniobject3d-corn",
"omniobject3d-cucumber",
"omniobject3d-pen",
"omniobject3d-shampoo",
"omniobject3d-scissor",
"omniobject3d-sweet_potato",
"omniobject3d-tooth_brush",
"omniobject3d-tooth_paste",
"phocal-bottle",
"phocal-spoon",
]
CUROBO_BATCH_SIZE = 20

View File

@@ -0,0 +1,947 @@
################################### dinning_room ####################################
001
-
name: table
path: home_scenes/tables/dinning_room/model_8617414f380062817ad173f2ad2abc28_0.usd
target_class: GeometryObject
translation: [0, 0, 0.455]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAAAY8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.449, 1.016, 0]
002
-
name: table
path: home_scenes/tables/dinning_room/model_f10d8bcb55ea4f49b58ccee293583011_0.usd
target_class: GeometryObject
translation: [0, 0, 0.457]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAABA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [0.669, -1.355, 0]
euler: [0.0, 0.0, 90.0]
003
-
name: table
path: home_scenes/tables/dinning_room/model_80a10232595c91f5a40b75bff0dd7041_0.usd
target_class: GeometryObject
translation: [0, 0, 0.393]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAABI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-3.469, 1.398, 0]
euler: [0.0, 0.0, -90.0]
004
-
name: table
path: home_scenes/tables/dinning_room/model_cceaa217b9b3ab0ae78391293b54191b_0.usd
target_class: GeometryObject
translation: [0, 0, 0.375]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAABQ8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [0.962, -1.946, 0]
euler: [0.0, 0.0, -90.0]
005
-
name: table
path: home_scenes/tables/dinning_room/model_060a9f60fe3805fd5e7e9a610785ef14_0.usd
target_class: GeometryObject
translation: [0, 0, 0.372]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAACA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [3.499, -1.83, 0]
euler: [0.0, 0.0, 90.0]
006
-
name: table
path: home_scenes/tables/dinning_room/model_bddb0c92eb4e444a286662291d60b49a_0.usd
target_class: GeometryObject
translation: [0, 0, 0.25]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAACQ8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [1.2, 1.67, 0]
euler: [0.0, 0.0, 90.0]
007
-
name: table
path: home_scenes/tables/dinning_room/model_7d24e5374cb6463fbbe80f437e3f0332_0.usd
target_class: GeometryObject
translation: [0, 0, 0.38]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAACY8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-2.1, -1.21, 0]
euler: [0.0, 0.0, 0.0]
008
-
name: table
path: home_scenes/tables/dinning_room/model_364f0f97b6b10ad12a0b47175a3c0778_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAADA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [0.87, -0.59, 0]
euler: [0.0, 0.0, -90.0]
009
-
name: table
path: home_scenes/tables/dinning_room/model_39974d1093bba0a9a8977cca1520fba5_0.usd
target_class: GeometryObject
translation: [0, 0, 0.61]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAADI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [0.89, 1.94, 0]
euler: [0.0, 0.0, -90.0]
010
-
name: table
path: home_scenes/tables/dinning_room/model_3d1b722b5ed484506cfdc126de353335_0.usd
target_class: GeometryObject
translation: [0, 0, 0.34]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAADY8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-3.39, 3.22, 0]
euler: [0.0, 0.0, 0.0]
011
-
name: table
path: home_scenes/tables/dinning_room/model_f4f6d2f01d34c0bfc215edad3042a8f7_0.usd
target_class: GeometryObject
translation: [0, 0, 0.42]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAAEI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.58, -0.74, 0]
euler: [0.0, 0.0, 0.0]
012
-
name: table
path: home_scenes/tables/dinning_room/model_99f950712b1df66c51fed5ac7e4b64a2_0.usd
target_class: GeometryObject
translation: [0, 0, 0.41]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWAX5JYKTKJZ2AABAAAAAAA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-0.55, 0.25, 0]
euler: [0.0, 0.0, 0.0]
013
-
name: table
path: home_scenes/tables/dinning_room/model_d3cf4f83205a9684207c16bde08e246a_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWAX5JYKTKJZ2AABAAAAAAI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-1.92, -0.53, 0]
euler: [0.0, 0.0, 90.0]
014
-
name: table
path: home_scenes/tables/dinning_room/model_11944338718095ef0c1bd920b6a9050f_0.usd
target_class: GeometryObject
translation: [0, 0, 0.5]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWAX5JYKTKJZ2AABAAAAAAQ8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-0.52, 0.98, 0]
euler: [0.0, 0.0, 90.0]
015
-
name: table
path: home_scenes/tables/dinning_room/model_42ed4ca298f9848982d79fdc953b8990_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWAX5JYKTKJZ2AABAAAAACA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-2.16, -1.64, 0]
euler: [0.0, 0.0, 0.0]
016
-
name: table
path: home_scenes/tables/dinning_room/model_589c4877ac9ce500141d57e8fc7aa1e6_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWAX5JYKTKJZ2AABAAAAADA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-3.19, 0.21, 0]
euler: [0.0, 0.0, -90.0]
017
-
name: table
path: home_scenes/tables/dinning_room/model_0d99430f60b6cd3a611e74c332f54843_0.usd
target_class: GeometryObject
translation: [0, 0, 0.38]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWAX5JYKTKJZ2AABAAAAADI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-2.47, 0.3, 0]
euler: [0.0, 0.0, -90.0]
018
-
name: table
path: home_scenes/tables/dinning_room/model_7a61ca57c9510d3a33aea02584d5eaf4_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWAX5JYKTKJZ2AABAAAAADQ8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [1.69, -2.2, 0]
euler: [0.0, 0.0, -90.0]
019
-
name: table
path: home_scenes/tables/dinning_room/model_dca1a7c49e9cac6b823407d7af89f79e_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWAX5JYKTKJZ2AABAAAAADY8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-1.8, 1.76, 0]
euler: [0.0, 0.0, 90.0]
020
-
name: table
path: home_scenes/tables/dinning_room/model_c7c013caf09f90903b0760cd23239129_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWAX5JYKTKJZ2AABAAAAAEA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-0.11, -2.21, 0]
euler: [0.0, 0.0, -90.0]
021
-
name: table
path: home_scenes/tables/dinning_room/model_35565d61cac99576e47beb2a926bbf93_0.usd
target_class: GeometryObject
translation: [0, 0, 0.35]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWAX5JYKTKJZ2AABAAAAAEI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.27, -1.14, 0]
euler: [0.0, 0.0, 0.0]
022
-
name: table
path: home_scenes/tables/dinning_room/model_e6da468adb8d2af4a1a3f1c828f49eb7_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWBGLKQKTKJZ2AABAAAAAAA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-0.07, 0.61, 0]
euler: [0.0, 0.0, -90.0]
023
-
name: table
path: home_scenes/tables/dinning_room/model_7e22df4193305463eeacdd2a451fdeb9_0.usd
target_class: GeometryObject
translation: [0, 0, 0.4]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWBGLKQKTKJZ2AABAAAAAAI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.18, 0.35, 0]
euler: [0.0, 0.0, 0.0]
024
-
name: table
path: home_scenes/tables/dinning_room/model_fd0aa0be1645383ca0ff95134a426abe_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWBGLKQKTKJZ2AABAAAAAAQ8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.7, -1.67, 0]
euler: [0.0, 0.0, 180.0]
025
-
name: table
path: home_scenes/tables/dinning_room/model_2b9d5aa45476e4be343ac4a855c46f01_0.usd
target_class: GeometryObject
translation: [0, 0, 0.38]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWBGLKQKTKJZ2AABAAAAAAY8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-3.42, -4.41, 0]
euler: [0.0, 0.0, 180.0]
026
-
name: table
path: home_scenes/tables/dinning_room/model_0e084f30c652d581b7c6c6b025cd3681_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWBGLKQKTKJZ2AABAAAAABA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-0.3, -0.83, 0]
euler: [0.0, 0.0, 0.0]
027
-
name: table
path: home_scenes/tables/dinning_room/model_43605fe0f62607d280497e58fa06473f_0.usd
target_class: GeometryObject
translation: [0, 0, 0.4]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWBGLKQKTKJZ2AABAAAAABI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [1.3, 4.77, 0]
euler: [0.0, 0.0, 90.0]
028
-
name: table
path: home_scenes/tables/dinning_room/model_37eeee73b74849b8ed7110a4b4827125_0.usd
target_class: GeometryObject
translation: [0, 0, 0.38]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWBGLKQKTKJZ2AABAAAAACA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [0.42, 0.6, 0]
euler: [0.0, 0.0, 90.0]
029
-
name: table
path: home_scenes/tables/dinning_room/model_2f0a9a5b320d0af09c7fcfcb134b5611_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAABY8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-1.21, -2.52, 0]
euler: [0.0, 0.0, 0.0]
030
-
name: table
path: home_scenes/tables/dinning_room/model_29fb41f0382ef93cf8bd06cef8008b61_0.usd
target_class: GeometryObject
translation: [0, 0, 0.36]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAACI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-0.14, -1.85, 0]
euler: [0.0, 0.0, 90.0]
031
-
name: table
path: home_scenes/tables/dinning_room/model_fc6300caba068b4661a34781a4808ef5_0.usd
target_class: GeometryObject
translation: [0, 0, 0.38]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAAEA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [5.69, -6.42, 0]
euler: [0.0, 0.0, -90.0]
032
-
name: table
path: home_scenes/tables/dinning_room/model_2f0a9a5b320d0af09c7fcfcb134b5611_0.usd
# path: home_scenes/tables/dinning_room/model_687a7df09373b937b8c2a2bf9f69dec6_0.usd
target_class: GeometryObject
translation: [0, 0, 0.43]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWAX5JYKTKJZ2AABAAAAABQ8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-2.14, -2.59, 0]
euler: [0.0, 0.0, 180.0]
033
-
name: table
path: home_scenes/tables/dinning_room/model_291da4b830e03f8fa8aedc83c80fadc0_0.usd
target_class: GeometryObject
translation: [0, 0, 0.35]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWAX5JYKTKJZ2AABAAAAABY8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-3.96, -0.51, 0]
euler: [0.0, 0.0, 180.0]
034
-
name: table
path: home_scenes/tables/dinning_room/model_7201dd4d045f851c7c4f86346ae32a17_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWBGLKQKTKJZ2AABAAAAABQ8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [1.39, -0.66, 0]
euler: [0.0, 0.0, 0.0]
035
-
name: table
path: home_scenes/tables/dinning_room/model_9d98e55f6d2026aa7675d6e35a33084c_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MWBGLKQKTKJZ2AABAAAAABY8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-3.17, -1.13, 0]
euler: [0.0, 0.0, 90.0]
036
-
name: table
path: home_scenes/tables/dinning_room/model_6b2604d2a95463155020b2770700aa9e_0.usd
target_class: GeometryObject
translation: [0, 0, 0.38]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MV7J6NIKTKJZ2AABAAAAADA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-2.94, -6.33, 0]
euler: [0.0, 0.0, 90.0]
037
-
name: table
path: home_scenes/tables/dinning_room/model_2989da46b0a817ce48103cc8f2260f2f_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MV7J6NIKTKJZ2AABAAAAADI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [0.18, -11.98, 0]
euler: [0.0, 0.0, 90.0]
038
-
name: table
path: home_scenes/tables/dinning_room/model_b58d5b89f730400a1fd1da905f1b166e_0.usd
target_class: GeometryObject
translation: [0, 0, 0.35]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MV7J6NIKTKJZ2AABAAAAADQ8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-2.46, -1, 0]
euler: [0.0, 0.0, 90.0]
039
-
name: table
path: home_scenes/tables/dinning_room/model_52273498e78b14f0f6d6d9de22baff8c_0.usd
target_class: GeometryObject
translation: [0, 0, 0.39]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MV7J6NIKTKJZ2AABAAAAADY8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [9.66, -1.75, 0]
euler: [0.0, 0.0, 0.0]
040
-
name: table
path: home_scenes/tables/dinning_room/model_6c6e0ad86bfda72cf00031de7fca9b42_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MV7J6NIKTKJZ2AABAAAAAEA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.67, 1.79, 0]
euler: [0.0, 0.0, 90.0]
041
-
name: table
path: home_scenes/tables/dinning_room/model_eef587b5c4bcc52eb1400975c3c71e90_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MV7J6NIKTKJZ2AABAAAAAEI8_usd_dinning_room_1.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [1.89, -4.8, 0]
euler: [0.0, 0.0, 90.0]
042
-
name: table
path: home_scenes/tables/dinning_room/model_3ce6e78553b30fdc01453dbd2ab0883a_0.usd
target_class: GeometryObject
translation: [0, 0, 0.38]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAAAA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.75, -4.2, 0]
euler: [0.0, 0.0, 90.0]
043
-
name: table
path: home_scenes/tables/dinning_room/model_7edbf82df1c2cd8f2f2599f8d13ede29_0.usd
target_class: GeometryObject
translation: [0, 0, 0.42]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAAAI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-2.1, 2.66, 0]
euler: [0.0, 0.0, 90.0]
044
-
name: table
path: home_scenes/tables/dinning_room/model_ac91d35e26a97ca3cdf0451ccd7a79fe_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAAAQ8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-2.98, -0.56, 0]
euler: [0.0, 0.0, -90.0]
045
-
name: table
path: home_scenes/tables/dinning_room/model_1253acdb578d499efe7f48cc82a6ee90_0.usd
target_class: GeometryObject
translation: [0, 0, 0.36]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAAAY8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.17, -0.9, 0]
euler: [0.0, 0.0, 90.0]
046
-
name: table
path: home_scenes/tables/dinning_room/model_f97c61131b034e8743c5281d09f792e5_0.usd
target_class: GeometryObject
translation: [0, 0, 0.34]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAABA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.19, 1.82, 0]
euler: [0.0, 0.0, -90.0]
047
-
name: table
path: home_scenes/tables/dinning_room/model_a3b65a4d5cbe5c575f2d5963098a4fbe_0.usd
target_class: GeometryObject
translation: [0, 0, 0.35]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAABQ8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.85, 1.2, 0]
euler: [0.0, 0.0, 90.0]
048
-
name: table
path: home_scenes/tables/dinning_room/model_aed34bf72ac5d0043d97650c61e30afc_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAABY8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-2.26, -1.31, 0]
euler: [0.0, 0.0, 180.0]
049
-
name: table
path: home_scenes/tables/dinning_room/model_f6c7b5049ba2987f47169b68e446cf96_0.usd
target_class: GeometryObject
translation: [0, 0, 0.53]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAACA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-1.48, -0.19, 0]
euler: [0.0, 0.0, 0.0]
050
-
name: table
path: home_scenes/tables/dinning_room/model_6b7d82fa97a2d2424805ded6812f6575_0.usd
target_class: GeometryObject
translation: [0, 0, 0.44]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAACI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [1.1, -1.36, 0]
euler: [0.0, 0.0, -90.0]
051
-
name: table
path: home_scenes/tables/dinning_room/model_668540f0e14465ed75b85cebab4b9474_0.usd
target_class: GeometryObject
translation: [0, 0, 0.41]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAACQ8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [0.51, -1.88, 0]
euler: [0.0, 0.0, 0.0]
052
-
name: table
path: home_scenes/tables/dinning_room/model_5ecb9ba218f8e0a6a772a405a7ac0e6b_0.usd
target_class: GeometryObject
translation: [0, 0, 0.38]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAACY8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-3.28, 3.34, 0]
euler: [0.0, 0.0, -90.0]
053
-
name: table
path: home_scenes/tables/dinning_room/model_5ac32b5baaa93b760c97d641f8d5c5d3_0.usd
target_class: GeometryObject
translation: [0, 0, 0.26]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAADA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [0.97, 5.43, 0]
euler: [0.0, 0.0, 0.0]
054
-
name: table
path: home_scenes/tables/dinning_room/model_071e714dd80dacd002fe5f647ddbb1cf_0.usd
target_class: GeometryObject
translation: [0, 0, 0.34]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAADI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-4.92, -3.55, 0]
euler: [0.0, 0.0, 90.0]
055
-
name: table
path: home_scenes/tables/dinning_room/model_e52473830620b452614edc4b5e3fea96_0.usd
target_class: GeometryObject
translation: [0, 0, 0.47]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAADY8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.58, 0.18, 0]
euler: [0.0, 0.0, 0.0]
056
-
name: table
path: home_scenes/tables/dinning_room/model_74515ef948287f0f700e9c45a6fdc47c_0.usd
target_class: GeometryObject
translation: [0, 0, 0.38]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAAEA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-3.06, -1.74, 0]
euler: [0.0, 0.0, -90.0]
057
-
name: table
path: home_scenes/tables/dinning_room/model_ff05aec86093d627c5b847eea0ff2167_0.usd
target_class: GeometryObject
translation: [0, 0, 0.34]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUCSQAKTKJ5EAABAAAAAEI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [1.82, 0.86, 0]
euler: [0.0, 0.0, 180.0]
058
-
name: table
path: home_scenes/tables/dinning_room/model_a5b5a6c7c01650448e60f61b16313301_0.usd
target_class: GeometryObject
translation: [0, 0, 0.35]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHL5YKTKJ5EAABAAAAAAA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-0.11, -1.95, 0]
euler: [0.0, 0.0, 90.0]
059
-
name: table
path: home_scenes/tables/dinning_room/model_f46d57d2d2fa0b7dee34ab1201e2cc14_0.usd
target_class: GeometryObject
translation: [0, 0, 0.36]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHL5YKTKJ5EAABAAAAAAI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-0.99, -4.08, 0]
euler: [0.0, 0.0, 0.0]
060
-
name: table
path: home_scenes/tables/dinning_room/model_abeaf02073e55b519c59edbf2ffa7b5b_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHL5YKTKJ5EAABAAAAAAQ8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [1.44, -2.33, 0]
euler: [0.0, 0.0, -90.0]
061
-
name: table
path: home_scenes/tables/dinning_room/model_5a8d318d44e6a55cf7686a8246313731_0.usd
target_class: GeometryObject
translation: [0, 0, 0.38]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAAAA8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [0.25, 0.58, 0]
euler: [0.0, 0.0, 0.0]
062
-
name: table
path: home_scenes/tables/dinning_room/model_42bfd814921da5caa11f98e41e207019_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAAAI8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [0.8, -3.22, 0]
euler: [0.0, 0.0, 0.0]
063
-
name: table
path: home_scenes/tables/dinning_room/model_0f92fb8b3f68510e0efe9350beceef77_0.usd
target_class: GeometryObject
translation: [0, 0, 0.44]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/dinning_room/MVUHLWYKTKJ5EAABAAAAAAQ8_usd_dinning_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-1.89, 3.76, 0]
euler: [0.0, 0.0, -90.0]
################################### dinning_room ####################################

View File

@@ -0,0 +1,314 @@
"""
Domain randomization utilities for simbox workflows.
This module centralizes logic for:
- Updating table/scene pairs from precomputed JSON files (`update_scene_pair`).
- Randomizing kitchen scenes from `HEARTH_KITCHENS` (`update_hearths`/`update_scenes`).
- Randomizing articulated objects and pick objects from asset libraries
(`update_articulated_objs`, `update_rigid_objs`, `update_conveyor_objects`),
including:
- Selecting random USD assets within a category or scope.
- Reading per-object info (e.g. scale, gap) from YAML/JSON.
- Setting orientation based on:
- `orientation_mode == "suggested"` → uses `CATEGORIES` via `get_category_euler`.
- `orientation_mode == "random"` → uniform random Euler in [-180, 180]^3.
- `orientation_mode == "keep"` → keep the existing `euler` in config.
- Setting scale based on:
- `scale_mode == "keep"` → keep config scale.
- `scale_mode == "suggested"` → use `CATEGORIES_SCALE_SUGGESTED` and
override with `OBJECT_SCALE_SUGGESTED` when available.
- Mapping raw category names (e.g. "omniobject3d-banana") to human-readable
strings for language instructions.
- Looking up preferred category rotations with `get_category_euler`, which falls
back to [0, 0, 0] and prints a warning if a category is unknown.
"""
import glob
import json
import os
import random
import time
import numpy as np
from core.utils.constants import (
CATEGORIES,
CATEGORIES_SCALE_SUGGESTED,
HEARTH_KITCHENS,
OBJECT_SCALE_SUGGESTED,
)
from omegaconf import OmegaConf
def update_scene_pair(cfg):
    """Replace the table/scene fixture pair in ``cfg`` with a random precomputed pair.

    Expects ``cfg["arena"]["fixtures"]`` to hold exactly two fixtures (a table
    followed by a scene), both flagged with ``apply_randomization``. Candidate
    pairs are loaded from the JSON files named in
    ``cfg["arena"]["involved_scenes"]`` (comma-separated, without the ``.json``
    suffix) and one pair is chosen at random.

    Args:
        cfg: Workflow configuration (dict-like); mutated in place.

    Returns:
        The same ``cfg`` object, with both fixtures' path/transform fields
        updated when the randomization preconditions are met, otherwise
        unchanged.
    """
    fixtures = cfg["arena"]["fixtures"]
    if not (
        len(fixtures) == 2
        and fixtures[0].get("apply_randomization", False)
        and fixtures[1].get("apply_randomization", False)
    ):
        return cfg
    table_scene_pairs = []
    for involved_scene in cfg["arena"]["involved_scenes"].split(","):
        with open(f"{involved_scene}.json", "r", encoding="utf-8") as file:
            table_scene_pairs += json.load(file)
    # Reseed per call so parallel workers (same start time, different PIDs)
    # draw different pairs. NOTE: this reseeds the process-global RNG.
    random.seed(os.getpid() + int(time.time() * 1000))
    table_scene_dict = random.choice(table_scene_pairs)
    table_scene_info = list(table_scene_dict.values())[0]
    # The two fixtures are updated identically; only the lookup key differs.
    for fixture, key in zip(fixtures, ("table", "scene")):
        assert key in fixture["name"]
        info = table_scene_info[key]
        fixture["path"] = info["path"]
        fixture["target_class"] = info["target_class"]
        fixture["scale"] = info.get("scale", [1.0, 1.0, 1.0])
        fixture["translation"] = info.get("translation", [0.0, 0.0, 0.0])
        fixture["euler"] = info.get("euler", [0.0, 0.0, 0.0])
    return cfg
def update_hearths(cfg):
    """Swap each randomizable fixture named ``scene`` for a random hearth kitchen.

    Args:
        cfg: Workflow configuration; mutated in place.

    Returns:
        The same ``cfg`` object.
    """
    for fix_cfg in cfg["arena"]["fixtures"]:
        if fix_cfg.get("name", None) != "scene":
            continue
        if not fix_cfg.get("apply_randomization", False):
            continue
        chosen = random.choice(list(HEARTH_KITCHENS.values()))
        # Copy every field that defines the scene asset and its placement.
        for key in ("path", "target_class", "scale", "translation", "euler"):
            fix_cfg[key] = chosen[key]
    return cfg
def update_scenes(cfg):
    """Dispatch scene randomization based on the configured objects.

    If any object name contains ``"hearth"``, the hearth-kitchen randomizer is
    used; otherwise the precomputed table/scene pair randomizer.

    Args:
        cfg: Workflow configuration; mutated in place by the chosen updater.

    Returns:
        The updated ``cfg`` object.
    """
    # any() stops at the first hearth object instead of scanning the full list.
    has_hearth = any("hearth" in obj_cfg["name"] for obj_cfg in cfg["objects"])
    return update_hearths(cfg) if has_hearth else update_scene_pair(cfg)
def update_articulated_objs(cfg):
    """Randomize articulated objects in-place by picking a sibling USD asset.

    For every object flagged with ``apply_randomization`` whose
    ``target_class`` is ``ArticulatedObject``, a random asset directory next
    to the configured one is selected, its per-object info JSON is read for
    the scale, and a per-asset orientation override is applied for known
    microwave / furniture models.

    Args:
        cfg: Workflow configuration; mutated in place.

    Returns:
        The same ``cfg`` object.
    """
    asset_root = cfg["asset_root"]
    for obj_cfg in cfg["objects"]:
        if not obj_cfg.get("apply_randomization", False):
            continue
        if obj_cfg["target_class"] != "ArticulatedObject":
            continue
        candidate_dir = os.path.join(asset_root, os.path.dirname(obj_cfg["path"]))
        candidates = sorted(glob.glob(os.path.join(candidate_dir, "*")))
        path = random.choice(candidates)
        # left hearth 0.5: [1, 2, 5, 6, 13, ] ;
        # left hearth 0.785 [3, 4, 7, 8, 9, 11, 12, 14, 15, 16, 17]
        # left hearth no planning [0, 10, 18, 19]
        # right hearth 0.5: [0, 1, 4, 10, 11]
        # right hearth 0.785: [2, 3, 5, 6, 7, 8, 9, ]
        info_name = obj_cfg["info_name"]
        info_path = f"{path}/Kps/{info_name}/info.json"
        with open(info_path, "r", encoding="utf-8") as file:
            scale = json.load(file)["object_scale"][:3]
        obj_cfg["path"] = path.replace(f"{asset_root}/", "", 1)
        obj_cfg["category"] = path.split("/")[-2]
        obj_cfg["obj_info_path"] = info_path.replace(f"{asset_root}/", "", 1)
        obj_cfg["scale"] = scale
        name = obj_cfg["path"].split("/")[-1]
        # Per-family yaw overrides for known microwave models.
        microwave_yaw_groups = (
            (270.0, (
                "microwave0", "microwave1", "microwave2", "microwave4",
                "microwave6", "microwave7", "microwave9",
                "microwave36754355", "microwave52640732", "microwave72789794",
                "microwave93878040", "microwave122930336", "microwave160239099",
                "microwave184070552", "microwave192951465", "microwave198542292",
                "microwave202437483", "microwave208204033", "microwave231691637",
                "microwave279963897", "microwave305778636", "microwave353130638",
                "microwave461303737", "microwave482895779",
            )),
            (0.0, (
                "microwave_0001", "microwave_0002", "microwave_0003",
                "microwave_0013", "microwave_0044", "microwave_0045",
            )),
            (90.0, (
                "microwave7119", "microwave7128", "microwave7167",
                "microwave7236", "microwave7263", "microwave7265",
                "microwave7296", "microwave7304", "microwave7310",
                "microwave7320",
            )),
        )
        for yaw, names in microwave_yaw_groups:
            if name in names:
                obj_cfg["euler"] = [0.0, 0.0, yaw]
                break
        # Furniture overrides (asset names do not overlap the microwave lists).
        if "nightstand" in name:
            obj_cfg["euler"] = [0.0, 0.0, 0.0]
        elif "StorageFurniture" in name or "laptop" in name:
            obj_cfg["euler"] = [0.0, 0.0, 90.0]
    return cfg
def update_rigid_objs(cfg):
    """Randomize rigid objects in-place from the asset library.

    For every object flagged with ``apply_randomization`` whose
    ``target_class`` is ``RigidObject``:

    - A random ``.usd`` asset is chosen according to ``randomization_scope``:
      ``"category"`` (same category as the current path), ``"full"`` (all
      categories), or a list of category names.
    - An optional per-asset ``gap.yaml`` next to the USD provides
      ``obj_cfg["gap"]``.
    - Orientation follows ``orientation_mode``: ``"suggested"`` (category
      lookup), ``"random"`` (uniform Euler in [-180, 180]^3), or ``"keep"``.
    - Scale follows ``scale_mode``: ``"keep"``, or ``"suggested"`` (category
      table, overridden by the per-object table when available).
    - ``obj_cfg["category"]`` is rewritten to a human-readable string for
      language instructions.

    Args:
        cfg: Workflow configuration; mutated in place.

    Returns:
        The same ``cfg`` object.

    Raises:
        ValueError: If ``randomization_scope`` is an unrecognized string or is
            neither a string nor a list. (Previously this fell through and
            crashed later with a NameError on ``usds``.)
        NotImplementedError: If ``orientation_mode`` is unrecognized.
    """
    asset_root = cfg["asset_root"]
    for obj_cfg in cfg["objects"]:
        if not obj_cfg.get("apply_randomization", False):
            continue
        if obj_cfg["target_class"] != "RigidObject":
            continue
        scope = obj_cfg.get("randomization_scope", "category")
        # <asset_root>/<...>/<category>/<object>/<file>.usd — the triple
        # dirname strips file, object and category components.
        all_categories_root = os.path.dirname(os.path.dirname(os.path.dirname(obj_cfg["path"])))
        if isinstance(scope, str):
            if scope == "category":
                # Randomize within the same category as the current object.
                dirs = os.path.join(asset_root, os.path.dirname(os.path.dirname(obj_cfg["path"])))
                usds = glob.glob(os.path.join(dirs, "*", "*.usd"))
            elif scope == "full":
                # Randomize across all categories.
                dirs = os.path.join(asset_root, all_categories_root)
                usds = glob.glob(os.path.join(dirs, "*", "*", "*.usd"))
            else:
                raise ValueError(f"Unknown randomization_scope: {scope!r}")
        elif isinstance(scope, list):
            # Randomize only from the specified list of categories.
            usds = []
            for category in scope:
                category_dir = os.path.join(asset_root, all_categories_root, category)
                usds.extend(glob.glob(os.path.join(category_dir, "*", "*.usd")))
        else:
            raise ValueError(
                f"randomization_scope must be str or list, got {type(scope).__name__}"
            )
        if not usds:
            continue
        this_usd_path = random.choice(usds).replace(f"{asset_root}/", "", 1)
        obj_cfg["path"] = this_usd_path
        tmp_category = this_usd_path.split("/")[-3]
        object_name = this_usd_path.split("/")[-2]
        gap_yaml_path = asset_root + "/" + os.path.join(os.path.dirname(this_usd_path), "gap.yaml")
        if os.path.exists(gap_yaml_path):
            with open(gap_yaml_path, "r", encoding="utf-8") as file:
                gap_data = OmegaConf.load(file)
            obj_cfg["gap"] = gap_data.get("gap", None)
        # Update orientation.
        orientation_mode = obj_cfg.get("orientation_mode", "keep")
        if orientation_mode == "suggested":
            obj_cfg["euler"] = get_category_euler(tmp_category)
        elif orientation_mode == "random":
            obj_cfg["euler"] = np.random.uniform(-180, 180, size=3).tolist()
        elif orientation_mode == "keep":
            assert "euler" in obj_cfg, "euler not found in obj_cfg for keep mode"
        else:
            raise NotImplementedError
        # Update scale (unknown scale_mode values are deliberately ignored,
        # preserving the original silent behavior).
        scale_mode = obj_cfg.get("scale_mode", "keep")
        if scale_mode == "keep":
            assert "scale" in obj_cfg, f"scale not found in obj_cfg for keep mode, category: {tmp_category}"
        elif scale_mode == "suggested":
            if tmp_category in CATEGORIES_SCALE_SUGGESTED:
                scale = CATEGORIES_SCALE_SUGGESTED[tmp_category]
                if object_name in OBJECT_SCALE_SUGGESTED:
                    scale = OBJECT_SCALE_SUGGESTED[object_name]
                obj_cfg["scale"] = scale
        # Map raw category names (e.g. "omniobject3d-banana") to readable text
        # for language instructions; replace() is a no-op when absent.
        for prefix in ("google_scan-", "omniobject3d-", "phocal-", "real-"):
            tmp_category = tmp_category.replace(prefix, "")
        obj_cfg["category"] = tmp_category.replace("_", " ")
    return cfg
def update_conveyor_objects(cfg):
    """Randomize conveyor objects in-place within each object's category.

    Args:
        cfg: Workflow configuration; mutated in place.

    Returns:
        The same ``cfg`` object.
    """
    asset_root = cfg["asset_root"]
    for obj_cfg in cfg["objects"]:
        if not obj_cfg.get("apply_randomization", False):
            continue
        search_dir = os.path.join(asset_root, os.path.dirname(os.path.dirname(obj_cfg["path"])))
        candidates = glob.glob(os.path.join(search_dir, "*", "*.usd"))
        if not candidates:
            continue
        this_usd_path = random.choice(candidates).replace(f"{asset_root}/", "", 1)
        # Optional per-asset gap metadata lives next to the USD file.
        gap_yaml_path = asset_root + "/" + os.path.join(os.path.dirname(this_usd_path), "gap.yaml")
        if os.path.exists(gap_yaml_path):
            with open(gap_yaml_path, "r", encoding="utf-8") as file:
                gap_data = OmegaConf.load(file)
            obj_cfg["gap"] = gap_data.get("gap", None)
        obj_cfg["path"] = this_usd_path
        # Strip dataset prefixes and underscores for language instructions;
        # replace() is a no-op when the prefix is absent.
        tmp_category = this_usd_path.split("/")[-3]
        for prefix in ("google_scan-", "omniobject3d-", "phocal-", "real-"):
            tmp_category = tmp_category.replace(prefix, "")
        obj_cfg["category"] = tmp_category.replace("_", " ")
    return cfg
def get_category_euler(category):
    """Return a suggested [x, y, z] Euler rotation (degrees) for *category*.

    Each axis is drawn independently from the per-category candidate lists in
    ``CATEGORIES``; axes without candidates stay 0. Unknown categories fall
    back to ``[0.0, 0.0, 0.0]`` after printing a warning.
    """
    if category not in CATEGORIES:
        print(
            f"[get_category_euler] Category '{category}' not found in CATEGORIES. "
            f"Available categories: {list(CATEGORIES.keys())}. Using [0, 0, 0] as default."
        )
        return [0.0, 0.0, 0.0]
    spec = CATEGORIES[category]
    euler = [0.0, 0.0, 0.0]
    for axis_idx, axis in enumerate("xyz"):
        if axis in spec:
            # float() mirrors the coercion the original np.zeros array applied.
            euler[axis_idx] = float(random.choice(spec[axis]))
    return euler

View File

@@ -0,0 +1,53 @@
"""
Interpolation utilities for simbox motion skills.
This module provides small helpers for:
- linear_interpolation: joint-space linear interpolation between a current
joint state and a target joint state over a fixed number of steps. Used in
skills such as joint_ctrl / approach_rotate / rotate_obj to generate smooth
joint trajectories.
- pose_interpolation: Cartesian pose interpolation where translation is
linearly interpolated and orientation uses spherical linear interpolation
(SLERP) between two quaternions. Used in rotate_random and similar pose-based
skills.
- cal_midpoint: simple midpoint computation between two 3D points, useful for
computing intermediate waypoints or grasp targets.
"""
import numpy as np
from scipy.spatial.transform import Rotation as R
from scipy.spatial.transform import Slerp
def linear_interpolation(curr_js, target_js, num_steps=10):
    """Linearly interpolate between two joint states.

    Args:
        curr_js: Start joint state (scalar or array-like).
        target_js: Target joint state, same shape as ``curr_js``.
        num_steps: Number of interpolation segments; the returned trajectory
            has ``num_steps + 1`` waypoints including both endpoints.

    Returns:
        np.ndarray of shape ``(num_steps + 1, *joint_shape)``.
    """
    # Vectorized equivalent of accumulating curr + alpha * (target - curr) for
    # alpha in {0, 1/num_steps, ..., 1}. Also well-defined for num_steps == 0
    # (returns just the start state), where the manual alpha = i / num_steps
    # loop divided by zero.
    return np.linspace(np.asarray(curr_js), np.asarray(target_js), num_steps + 1, axis=0)
def pose_interpolation(curr_trans, curr_ori, target_trans, target_ori, interp_num, normalize_quaternions=True):
    """Interpolate between two Cartesian poses.

    Translation is interpolated linearly; orientation uses spherical linear
    interpolation (SLERP) between the two quaternions.

    Args:
        curr_trans: Start translation, shape (3,).
        curr_ori: Start orientation quaternion in [w, x, y, z] order.
        target_trans: Target translation, shape (3,).
        target_ori: Target orientation quaternion in [w, x, y, z] order.
        interp_num: Number of interpolated poses, including both endpoints.
        normalize_quaternions: If True, normalize both quaternions first
            (SLERP assumes unit quaternions).

    Returns:
        Tuple ``(interp_trans, interp_ori)`` with shapes ``(interp_num, 3)``
        and ``(interp_num, 4)``; quaternions come back in [w, x, y, z] order.
    """
    # Translation interpolation (linear)
    interp_trans = np.linspace(curr_trans, target_trans, interp_num, axis=0)
    # Rotation interpolation (spherical linear interpolation - SLERP)
    if normalize_quaternions:
        curr_ori = curr_ori / np.linalg.norm(curr_ori)
        target_ori = target_ori / np.linalg.norm(target_ori)
    # scalar_first=True means quaternions are passed/returned as [w, x, y, z];
    # SciPy's default from_quat/as_quat order is [x, y, z, w].
    # NOTE(review): the scalar_first keyword needs a recent SciPy — confirm
    # the pinned version supports it.
    rotations = R.from_quat([curr_ori, target_ori], scalar_first=True)
    slerp = Slerp([0, 1], rotations)
    times = np.linspace(0, 1, interp_num)
    interp_ori = slerp(times).as_quat(scalar_first=True)
    return interp_trans, interp_ori
def cal_midpoint(start_point, end_point):
    """Return the point halfway between ``start_point`` and ``end_point``."""
    # Multiplying by 0.5 is exactly equivalent to dividing by 2 for floats.
    return 0.5 * (start_point + end_point)

View File

@@ -0,0 +1,209 @@
"""The Intersection Over Union (IoU) for 3D oriented bounding boxes.
Origin: https://github.com/OasisYang/Wild6D/blob/main/lib/iou.py
"""
import numpy as np
import scipy.spatial as sp
from core.utils.box import FACES, NUM_KEYPOINTS
# Tolerance for treating a point as lying exactly on a clipping plane.
_PLANE_THICKNESS_EPSILON = 0.000001
# Classification results for a point relative to a plane
# (used by the polygon clipping code below).
_POINT_IN_FRONT_OF_PLANE = 1
_POINT_ON_PLANE = 0
_POINT_BEHIND_PLANE = -1
class IoU(object):
"""General Intersection Over Union cost for Oriented 3D bounding boxes."""
def __init__(self, box1, box2):
    """Store the two boxes and reset the intersection-point cache.

    Args:
        box1: First oriented 3D bounding box.
        box2: Second oriented 3D bounding box.
    """
    self._box1 = box1
    self._box2 = box2
    # Filled by iou() via _compute_intersection_points().
    self._intersection_points = []
def iou(self):
    """Computes the exact IoU using Sutherland-Hodgman algorithm."""
    self._intersection_points = []
    # Clip each box's faces against the other; both directions are needed
    # to collect the full set of intersection vertices.
    self._compute_intersection_points(self._box1, self._box2)
    self._compute_intersection_points(self._box2, self._box1)
    if not self._intersection_points:
        return 0.0
    intersection_volume = sp.ConvexHull(self._intersection_points).volume
    union_volume = self._box1.volume + self._box2.volume - intersection_volume
    return intersection_volume / union_volume
def iou_sampling(self, num_samples=10000):
"""Computes intersection over union by sampling points.
Generate n samples inside each box and check if those samples are inside
the other box. Each box has a different volume, therefore the number o
samples in box1 is estimating a different volume than box2. To address
this issue, we normalize the iou estimation based on the ratio of the
volume of the two boxes.
Args:
num_samples: Number of generated samples in each box
Returns:
IoU Estimate (float)
"""
p1 = [self._box1.sample() for _ in range(num_samples)]
p2 = [self._box2.sample() for _ in range(num_samples)]
box1_volume = self._box1.volume
box2_volume = self._box2.volume
box1_intersection_estimate = 0
box2_intersection_estimate = 0
for point in p1:
if self._box2.inside(point):
box1_intersection_estimate += 1
for point in p2:
if self._box1.inside(point):
box2_intersection_estimate += 1
# We are counting the volume of intersection twice.
intersection_volume_estimate = (
box1_volume * box1_intersection_estimate + box2_volume * box2_intersection_estimate
) / 2.0
union_volume_estimate = (box1_volume * num_samples + box2_volume * num_samples) - intersection_volume_estimate
iou_estimate = intersection_volume_estimate / union_volume_estimate
return iou_estimate
def _compute_intersection_points(self, box_src, box_template):
"""Computes the intersection of two boxes."""
# Transform the source box to be axis-aligned
inv_transform = np.linalg.inv(box_src.transformation)
box_src_axis_aligned = box_src.apply_transformation(inv_transform)
template_in_src_coord = box_template.apply_transformation(inv_transform)
for face in range(len(FACES)):
indices = FACES[face, :]
poly = [template_in_src_coord.vertices[indices[i], :] for i in range(4)]
clip = self.intersect_box_poly(box_src_axis_aligned, poly)
for point in clip:
# Transform the intersection point back to the world coordinate
point_w = np.matmul(box_src.rotation, point) + box_src.translation
self._intersection_points.append(point_w)
for point_id in range(NUM_KEYPOINTS):
v = template_in_src_coord.vertices[point_id, :]
if box_src_axis_aligned.inside(v):
point_w = np.matmul(box_src.rotation, v) + box_src.translation
self._intersection_points.append(point_w)
def intersect_box_poly(self, box, poly):
"""Clips the polygon against the faces of the axis-aligned box."""
for axis in range(3):
poly = self._clip_poly(poly, box.vertices[1, :], 1.0, axis)
poly = self._clip_poly(poly, box.vertices[8, :], -1.0, axis)
return poly
def _clip_poly(self, poly, plane, normal, axis):
"""Clips the polygon with the plane using the Sutherland-Hodgman algorithm.
See en.wikipedia.org/wiki/Sutherland-Hodgman_algorithm for the overview of
the Sutherland-Hodgman algorithm. Here we adopted a robust implementation
from "Real-Time Collision Detection", by Christer Ericson, page 370.
Args:
poly: List of 3D vertices defining the polygon.
plane: The 3D vertices of the (2D) axis-aligned plane.
normal: normal
axis: A tuple defining a 2D axis.
Returns:
List of 3D vertices of the clipped polygon.
"""
# The vertices of the clipped polygon are stored in the result list.
result = []
if len(poly) <= 1:
return result
# polygon is fully located on clipping plane
poly_in_plane = True
# Test all the edges in the polygon against the clipping plane.
for i, current_poly_point in enumerate(poly):
prev_poly_point = poly[(i + len(poly) - 1) % len(poly)]
d1 = self._classify_point_to_plane(prev_poly_point, plane, normal, axis)
d2 = self._classify_point_to_plane(current_poly_point, plane, normal, axis)
if d2 == _POINT_BEHIND_PLANE:
poly_in_plane = False
if d1 == _POINT_IN_FRONT_OF_PLANE:
intersection = self._intersect(plane, prev_poly_point, current_poly_point, axis)
result.append(intersection)
elif d1 == _POINT_ON_PLANE:
if not result or (not np.array_equal(result[-1], prev_poly_point)):
result.append(prev_poly_point)
elif d2 == _POINT_IN_FRONT_OF_PLANE:
poly_in_plane = False
if d1 == _POINT_BEHIND_PLANE:
intersection = self._intersect(plane, prev_poly_point, current_poly_point, axis)
result.append(intersection)
elif d1 == _POINT_ON_PLANE:
if not result or (not np.array_equal(result[-1], prev_poly_point)):
result.append(prev_poly_point)
result.append(current_poly_point)
else:
if d1 != _POINT_ON_PLANE:
result.append(current_poly_point)
if poly_in_plane:
return poly
else:
return result
def _intersect(self, plane, prev_point, current_point, axis):
"""Computes the intersection of a line with an axis-aligned plane.
Args:
plane: Formulated as two 3D points on the plane.
prev_point: The point on the edge of the line.
current_point: The other end of the line.
axis: A tuple defining a 2D axis.
Returns:
A 3D point intersection of the poly edge with the plane.
"""
alpha = (current_point[axis] - plane[axis]) / (current_point[axis] - prev_point[axis])
# Compute the intersecting points using linear interpolation (lerp)
intersection_point = alpha * prev_point + (1.0 - alpha) * current_point
return intersection_point
def _inside(self, plane, point, axis):
"""Check whether a given point is on a 2D plane."""
# Cross products to determine the side of the plane the point lie.
x, y = axis
u = plane[0] - point
v = plane[1] - point
a = u[x] * v[y]
b = u[y] * v[x]
return a >= b
def _classify_point_to_plane(self, point, plane, normal, axis):
"""Classify position of a point w.r.t the given plane.
See Real-Time Collision Detection, by Christer Ericson, page 364.
Args:
point: 3x1 vector indicating the point
plane: 3x1 vector indicating a point on the plane
normal: scalar (+1, or -1) indicating the normal to the vector
axis: scalar (0, 1, or 2) indicating the xyz axis
Returns:
Side: which side of the plane the point is located.
"""
signed_distance = normal * (point[axis] - plane[axis])
if signed_distance > _PLANE_THICKNESS_EPSILON:
return _POINT_IN_FRONT_OF_PLANE
elif signed_distance < -_PLANE_THICKNESS_EPSILON:
return _POINT_BEHIND_PLANE
else:
return _POINT_ON_PLANE
@property
def intersection_points(self):
return self._intersection_points

View File

@@ -0,0 +1,44 @@
"""
Language instruction utilities for simbox workflows.
This module provides a small helper to post-process natural language
instructions based on the current randomized objects in a task config.
- update_language:
- Reads `language_instruction` and `detailed_language_instruction` templates
from `cfg["data"]` (semicolon-separated list).
- For each object with `apply_randomization=True`, replaces occurrences of
the object `name` in the templates with its (possibly normalized) `category`
field, so that instructions stay consistent with the randomized assets.
- Returns two lists: updated high-level and detailed language instructions.
"""
def update_language(cfg):
    """Substitute randomized object names with their categories in instructions.

    Reads semicolon-separated instruction templates from ``cfg["data"]`` and,
    for every object flagged with ``apply_randomization``, replaces that
    object's ``name`` with its ``category`` in both the short and the detailed
    instruction templates.

    Returns:
        Tuple of two lists: (language_instructions, detailed_language_instructions).
    """
    data_cfg = cfg["data"]
    short_templates = data_cfg.get(
        "language_instruction", "Pick up the ${objects.0.name} with left arm"
    ).split(";")
    detailed_templates = data_cfg.get(
        "detailed_language_instruction", "Grasp and lift the ${objects.0.name} with the left robotic arm"
    ).split(";")
    updated_short = []
    updated_detailed = []
    for short_text, detailed_text in zip(short_templates, detailed_templates):
        for obj_cfg in cfg["objects"]:
            if obj_cfg.get("apply_randomization", False):
                name = obj_cfg["name"]
                category = obj_cfg["category"]
                short_text = short_text.replace(name, category)
                detailed_text = detailed_text.replace(name, category)
        updated_short.append(short_text)
        updated_detailed.append(detailed_text)
    return updated_short, updated_detailed

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,190 @@
"""
Planning utilities for simbox motion generation.
This code is adapted from work by the Genie Sim Team; we thank them for
sharing their ideas and implementation.
This module provides helper functions that operate on cuRobo planning results
(`MotionGen` / `IKSolver` outputs) to:
- sort_by_difference_js: rank joint-space paths by how \"smooth\" they are,
using cumulative joint differences (optionally weighted per joint). Used to
pick the best trajectory among multiple candidates.
- filter_paths_by_position_error / filter_paths_by_rotation_error: apply
simple statistical filters (threshold at mean error) on end-effector pose
errors to reject outlier paths.
- get_prioritized_indices: combine the above filters to produce a prioritized
list of candidate indices from a `result` object (e.g. `MotionGenResult`).
- select_index_by_priority_single: pick the single best candidate index from a
single planning result, with safe fallback to index 0.
- select_index_by_priority_dual: pick a best index that is jointly successful
in both a pre-planning result and a final result (dual-stage planning),
with sensible fallbacks when intersections are empty.
"""
import random
import torch
def sort_by_difference_js(paths, weights=None):
    """
    Rank joint-space paths by cumulative joint motion.

    For each path, the absolute differences between consecutive waypoints are
    summed over time (optionally weighted per joint dimension); paths are then
    ordered from least to most cumulative motion.

    Args:
        paths (List(JointState): A list of JointState, each path contains a position of shape (T, D) where
            T is the number of waypoints per path, and D is the dimensionality of each waypoint.
        weights (torch.tensor, optional): A tensor of shape (D,) representing weights for each dimension.
            If None, all dimensions are weighted equally.

    Returns:
        torch.tensor: Indices that would sort the paths based on the cumulative difference.
    """
    assert len(paths) > 0, "The paths list should not be empty."
    num_dims = paths[0].position.shape[-1]
    assert (
        weights is None or weights.shape[0] == num_dims
    ), "Weights must be of shape (D,) where D is the dimensionality of each waypoint."
    device = paths[0].position.device
    weights = torch.ones(num_dims, device=device) if weights is None else weights.to(device)
    # Total absolute motion per joint for every path.
    per_path_motion = []
    for path in paths:
        pos = path.position
        step_deltas = torch.abs(pos[1:, :] - pos[:-1, :])  # (T-1, D)
        per_path_motion.append(step_deltas.sum(dim=0))  # summed over waypoints -> (D,)
    per_path_motion = torch.stack(per_path_motion)  # (N, D)
    # Weight each joint dimension, then collapse to one scalar score per path.
    cumulative_diffs = (per_path_motion * weights).sum(dim=1)  # (N,)
    return torch.argsort(cumulative_diffs)
def filter_paths_by_position_error(paths, position_errors):
    """
    Filters out paths whose position error exceeds the mean-error threshold.

    Args:
        paths (List(JointState): A list of JointState, each path contains a position of shape (T, D).
        position_errors (torch.tensor): A tensor of shape (N,) representing the position error for each path.

    Returns:
        List(bool): A filtered list of bool where each path's position error is below the threshold.
    """
    assert len(paths) == position_errors.shape[0], "The number of paths must match the number of position errors."
    # Threshold at the mean error. A one-sigma threshold (mean + std) was
    # previously considered; the dead `torch.std(...)` call has been removed.
    threshold = torch.mean(position_errors)
    # Vectorized comparison; .tolist() yields plain Python bools as documented.
    return (position_errors <= threshold).tolist()
def filter_paths_by_rotation_error(paths, rotation_errors):
    """
    Filters out paths whose rotation error exceeds the mean-error threshold.

    Args:
        paths (List(JointState): A list of JointState, each path contains a position of shape (T, D).
        rotation_errors (torch.tensor): A tensor of shape (N,) representing the rotation error for each path.

    Returns:
        List(bool): A filtered list of bool where each path's rotation error is below the threshold.
    """
    assert len(paths) == rotation_errors.shape[0], "The number of paths must match the number of rotation errors."
    # Threshold at the mean error. A sigma-based threshold (mean + std) was
    # previously considered; the dead `torch.std(...)` call has been removed.
    threshold = torch.mean(rotation_errors)
    # Vectorized comparison; .tolist() yields plain Python bools as documented.
    return (rotation_errors <= threshold).tolist()
def get_prioritized_indices(result):
    """
    Extracts successful indices and returns them as a list ordered by priority:
    1. Filter by position and rotation errors.
    2. Sort by joint space (JS) difference.
    """
    if not torch.any(result.success):
        print("result failure")
        return []
    # Absolute indices of the successful samples.
    success_indices = torch.nonzero(result.success, as_tuple=True)[0]
    print("success_indices :", success_indices)
    candidate_paths = [result.get_paths()[idx] for idx in success_indices]
    # Keep only candidates that pass both error filters.
    pos_mask = filter_paths_by_position_error(candidate_paths, result.position_error[result.success])
    rot_mask = filter_paths_by_rotation_error(candidate_paths, result.rotation_error[result.success])
    kept_local = [i for i, flags in enumerate(zip(pos_mask, rot_mask)) if all(flags)]
    if kept_local:
        target_paths = [candidate_paths[i] for i in kept_local]
        target_abs_indices = success_indices[kept_local]
    else:
        # Filtering removed everything: fall back to all successful paths.
        target_paths = candidate_paths
        target_abs_indices = success_indices
    # Order the surviving candidates by joint-space smoothness.
    order = sort_by_difference_js(target_paths)
    return [target_abs_indices[i].item() for i in order]
def select_index_by_priority_single(result):
    """Select the best index from a single result, defaulting to 0 when none qualify."""
    candidates = get_prioritized_indices(result)
    if candidates:
        return candidates[0]
    return 0
def select_index_by_priority_dual(pre_result, result):
    """Select the best index considering both pre_result and result.

    Preference order:
    1. The highest-priority candidate (per the final result) that also
       succeeded in the pre-planning result.
    2. A random index among the common successes — reachable because the
       prioritized candidates can be a filtered subset of the successful
       indices and thus miss every common-success index.
    3. A random candidate that succeeded only in the final result.
    4. Index 0 when the final result has no successful candidates at all.
    """
    prioritized_indices = get_prioritized_indices(result)
    if not prioritized_indices:
        return 0
    # Indices where both the pre-planning and the final planning succeeded.
    both_success_mask = pre_result.success & result.success
    both_success_indices = torch.nonzero(both_success_mask, as_tuple=True)[0]
    both_success_set = set(both_success_indices.cpu().tolist())
    if both_success_set:
        # Return the highest priority index that is successful in both results.
        for idx in prioritized_indices:
            if idx in both_success_set:
                print("Pre and final both success, selected highest priority candidate.")
                return idx
        # Reachable: error filtering may have dropped every common-success
        # index from the prioritized list (the old comment claiming the loop
        # always matches was wrong).
        print("Pre and final both success, falling back to random choice.")
        return random.choice(list(both_success_set))
    # No common successes. prioritized_indices is guaranteed non-empty here
    # (checked above), so the old re-check and trailing `return 0` were dead
    # code and have been removed.
    print("Only final success.")
    # Optionally return prioritized_indices[0] instead of random for better results
    return random.choice(prioritized_indices)

View File

@@ -0,0 +1,147 @@
import math
from copy import deepcopy
import numpy as np
from core.utils.usd_geom_utils import compute_bbox
from scipy.spatial.transform import Rotation as R
class RandomRegionSampler:
    """Samplers that produce randomized placement poses for an object relative
    to one or two target objects, using USD bounding boxes.

    All methods return a (translation, orientation) pair; orientations are
    quaternions in [w, x, y, z] order (scalar_first). compute_bbox is assumed
    to return an axis-aligned range with .min/.max 3-vectors — TODO confirm.
    """

    @staticmethod
    def A_in_B_region_sampler(obj, tgt, x_bias=0, y_bias=0, z_bias=0):
        # Place obj on top of tgt's bounding box at tgt's XY location, with
        # optional per-axis offsets. Orientation is left unchanged.
        bbox_tgt = compute_bbox(tgt.prim)
        tgt_z_max = bbox_tgt.max[2]
        bbox_obj = compute_bbox(obj.prim)
        obj_z_min = bbox_obj.min[2]
        tgt_trans = tgt.get_local_pose()[0]
        obj_trans = deepcopy(tgt_trans)
        obj_trans[0] += x_bias
        obj_trans[1] += y_bias
        # Lift obj so its bbox bottom sits at tgt's bbox top; the -0.005
        # offset sinks it slightly (e.g. to settle "in" the target).
        obj_trans[2] = tgt_z_max + (obj.get_local_pose()[0][2] - obj_z_min) - 0.005 + z_bias
        obj_ori = obj.get_local_pose()[1]
        return obj_trans, obj_ori

    @staticmethod
    def A_on_B_region_sampler(obj, tgt, pos_range, yaw_rotation):
        # Translation
        # pos_range is unpacked into (low, high) for np.random.uniform;
        # presumably per-axis 3-vectors — TODO confirm against callers.
        shift = np.random.uniform(*pos_range)
        bbox_obj = compute_bbox(obj.prim)
        obj_z_min = bbox_obj.min[2]
        bbox_tgt = compute_bbox(tgt.prim)
        tgt_center = (np.asarray(bbox_tgt.min) + np.asarray(bbox_tgt.max)) / 2
        tgt_z_max = bbox_tgt.max[2]
        place_pos = np.zeros(3)
        place_pos[0] = tgt_center[0]
        place_pos[1] = tgt_center[1]
        place_pos[2] = (
            tgt_z_max + (obj.get_local_pose()[0][2] - obj_z_min) + 0.001
        )  # add a small value to avoid penetration
        place_pos += shift
        # Orientation: random yaw applied on top of obj's current rotation.
        yaw = np.random.uniform(*yaw_rotation)
        dr = R.from_euler("xyz", [0.0, 0.0, yaw], degrees=True)
        r = R.from_quat(obj.get_local_pose()[1], scalar_first=True)
        orientation = (dr * r).as_quat(scalar_first=True)
        return place_pos, orientation

    @staticmethod
    def A_by_B_circle_sampler(obj, tgt, r_range, theta_range, yaw_rotation):
        # Translation: place obj on a circle around tgt's bbox center, at a
        # random radius r and polar angle theta (degrees).
        bbox_tgt = compute_bbox(tgt.prim)
        bbox_obj = compute_bbox(obj.prim)
        # NOTE: these are half-extents (half the bbox size), not full heights.
        tgt_height = (np.asarray(bbox_tgt.max) - np.asarray(bbox_tgt.min)) / 2
        tgt_center = (np.asarray(bbox_tgt.min) + np.asarray(bbox_tgt.max)) / 2
        obj_height = (np.asarray(bbox_obj.max) - np.asarray(bbox_obj.min)) / 2
        r = np.random.uniform(*r_range)
        theta = np.random.uniform(*theta_range)
        delta_x = r * math.cos(theta / 180 * math.pi)
        delta_y = r * math.sin(theta / 180 * math.pi)
        # Align bbox bottoms: raise/lower obj by the difference in half-heights.
        delta_z = obj_height[2] - tgt_height[2]
        place_pos = np.zeros(3)
        place_pos[0] = tgt_center[0] + delta_x
        place_pos[1] = tgt_center[1] + delta_y
        place_pos[2] = tgt_center[2] + delta_z
        # Orientation: random yaw on top of obj's current rotation.
        # NOTE: `r` is reused here as a Rotation, shadowing the radius above.
        yaw = np.random.uniform(*yaw_rotation)
        dr = R.from_euler("xyz", [0.0, 0.0, yaw], degrees=True)
        r = R.from_quat(obj.get_local_pose()[1], scalar_first=True)
        orientation = (dr * r).as_quat(scalar_first=True)
        return place_pos, orientation

    @staticmethod
    def A_by_B_region_sampler(obj, tgt, pos_range, yaw_rotation):
        # Translation: random XY shift next to tgt; Z aligned via half-heights.
        shift = np.random.uniform(*pos_range)
        bbox_tgt = compute_bbox(tgt.prim)
        bbox_obj = compute_bbox(obj.prim)
        tgt_height = (np.asarray(bbox_tgt.max) - np.asarray(bbox_tgt.min)) / 2
        tgt_center = (np.asarray(bbox_tgt.min) + np.asarray(bbox_tgt.max)) / 2
        obj_height = (np.asarray(bbox_obj.max) - np.asarray(bbox_obj.min)) / 2
        delta_x = shift[0]
        delta_y = shift[1]
        delta_z = obj_height[2] - tgt_height[2]
        place_pos = np.zeros(3)
        place_pos[0] = tgt_center[0] + delta_x
        place_pos[1] = tgt_center[1] + delta_y
        place_pos[2] = tgt_center[2] + delta_z
        # Orientation: random yaw on top of obj's current rotation.
        yaw = np.random.uniform(*yaw_rotation)
        dr = R.from_euler("xyz", [0.0, 0.0, yaw], degrees=True)
        r = R.from_quat(obj.get_local_pose()[1], scalar_first=True)
        orientation = (dr * r).as_quat(scalar_first=True)
        return place_pos, orientation

    @staticmethod
    def A_face_B_circle_sampler(obj, tgt, r_range, yaw_rotation):
        # Translation: place obj at a random radius in the direction tgt is
        # facing (tgt's world yaw fixes the angle on the circle).
        bbox_tgt = compute_bbox(tgt.prim)
        bbox_obj = compute_bbox(obj.prim)
        tgt_height = (np.asarray(bbox_tgt.max) - np.asarray(bbox_tgt.min)) / 2
        tgt_center = (np.asarray(bbox_tgt.min) + np.asarray(bbox_tgt.max)) / 2
        obj_height = (np.asarray(bbox_obj.max) - np.asarray(bbox_obj.min)) / 2
        r = np.random.uniform(*r_range)
        # tgt orientation is [w, x, y, z]; reorder manually to SciPy's xyzw.
        orientation = tgt.get_world_pose()[1]
        rot = R.from_quat([orientation[1], orientation[2], orientation[3], orientation[0]])
        euler_angles = rot.as_euler("xyz", degrees=True)
        theta = euler_angles[2]
        delta_x = r * math.cos(theta / 180 * math.pi)
        delta_y = r * math.sin(theta / 180 * math.pi)
        delta_z = obj_height[2] - tgt_height[2]
        place_pos = np.zeros(3)
        place_pos[0] = tgt_center[0] + delta_x
        place_pos[1] = tgt_center[1] + delta_y
        place_pos[2] = tgt_center[2] + delta_z
        # Orientation: random yaw on top of obj's current rotation.
        yaw = np.random.uniform(*yaw_rotation)
        dr = R.from_euler("xyz", [0.0, 0.0, yaw], degrees=True)
        r = R.from_quat(obj.get_local_pose()[1], scalar_first=True)
        orientation = (dr * r).as_quat(scalar_first=True)
        return place_pos, orientation

    @staticmethod
    def A_along_B_C_circle_sampler(obj, tgt, tgt2, r_range, yaw_rotation):
        # Translation: like A_face_B, but the radius is extended by the
        # distance between tgt and tgt2, placing obj along their line.
        r_tgt_tgt2 = np.linalg.norm(tgt.get_world_pose()[0] - tgt2.get_world_pose()[0])
        bbox_tgt = compute_bbox(tgt.prim)
        bbox_obj = compute_bbox(obj.prim)
        tgt_height = (np.asarray(bbox_tgt.max) - np.asarray(bbox_tgt.min)) / 2
        tgt_center = (np.asarray(bbox_tgt.min) + np.asarray(bbox_tgt.max)) / 2
        obj_height = (np.asarray(bbox_obj.max) - np.asarray(bbox_obj.min)) / 2
        r = np.random.uniform(*r_range) + r_tgt_tgt2
        # tgt orientation is [w, x, y, z]; reorder manually to SciPy's xyzw.
        orientation = tgt.get_world_pose()[1]
        rot = R.from_quat([orientation[1], orientation[2], orientation[3], orientation[0]])
        euler_angles = rot.as_euler("xyz", degrees=True)
        theta = euler_angles[2]
        delta_x = r * math.cos(theta / 180 * math.pi)
        delta_y = r * math.sin(theta / 180 * math.pi)
        delta_z = obj_height[2] - tgt_height[2]
        place_pos = np.zeros(3)
        place_pos[0] = tgt_center[0] + delta_x
        place_pos[1] = tgt_center[1] + delta_y
        place_pos[2] = tgt_center[2] + delta_z
        # Orientation: random yaw on top of obj's current rotation.
        yaw = np.random.uniform(*yaw_rotation)
        dr = R.from_euler("xyz", [0.0, 0.0, yaw], degrees=True)
        r = R.from_quat(obj.get_local_pose()[1], scalar_first=True)
        orientation = (dr * r).as_quat(scalar_first=True)
        return place_pos, orientation

View File

@@ -0,0 +1,18 @@
import random
def deactivate_selected_prims(prim, selected_names, random_names):
for child_prim in prim.GetAllChildren():
for name in selected_names:
if name in child_prim.GetName().lower():
child_prim.SetActive(False)
print(f"Deactivating: {child_prim.GetPath()}")
for name in random_names:
if name in child_prim.GetName().lower():
flag = random.random() > 0.5
child_prim.SetActive(flag)
if not flag:
print(f"Deactivating: {child_prim.GetPath()}")
deactivate_selected_prims(child_prim, selected_names, random_names)

View File

@@ -0,0 +1,454 @@
################################### study_room ####################################
201
-
name: table
path: home_scenes/tables/study_room/model_d3745f1d28f11c7019c1b6b92a7a5b92_0.usd
target_class: GeometryObject
translation: [0, 0, 0.31]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MV7J6NIKTKJZ2AABAAAAADI8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [0.37, -8.66, 0]
euler: [0.0, 0.0, 90.0]
202
-
name: table
path: home_scenes/tables/study_room/model_cb26c81e8aaa0335075cba7681ac00ed_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MV7J6NIKTKJZ2AABAAAAADQ8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-0.34, -6.69, 0]
euler: [0.0, 0.0, 90.0]
203
-
name: table
path: home_scenes/tables/study_room/model_cb26c81e8aaa0335075cba7681ac00ed_0.usd
target_class: GeometryObject
translation: [0, 0, 0.39]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MV7J6NIKTKJZ2AABAAAAAEI8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [7.02, -2.5, 0]
euler: [0.0, 0.0, 180.0]
204
-
name: table
path: home_scenes/tables/study_room/model_28a1e08a6c502685948ac37d77c2a8e0_0.usd
target_class: GeometryObject
translation: [0, 0, 0.36]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUCSQAKTKJ5EAABAAAAAAA8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-2.87, -3.17, 0]
euler: [0.0, 0.0, -90.0]
205
-
name: table
path: home_scenes/tables/study_room/model_58aa3bd455a23b9de212727b903b95ed_0.usd
target_class: GeometryObject
translation: [0, 0, 0.36]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUCSQAKTKJ5EAABAAAAABY8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.79, -0.01, 0]
euler: [0.0, 0.0, -90.0]
206
-
name: table
path: home_scenes/tables/study_room/model_fba695820e560714556be369be7d25dd_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUCSQAKTKJ5EAABAAAAACQ8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-4.65, -3.99, 0]
euler: [0.0, 0.0, -90.0]
207
-
name: table
path: home_scenes/tables/study_room/model_cf397d37c39fe6ae5774e649d1c92c8f_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUCSQAKTKJ5EAABAAAAACY8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-4.08, -4.31, 0]
euler: [0.0, 0.0, -90.0]
208
-
name: table
path: home_scenes/tables/study_room/model_6b1b2b8a6593de28611d0c7b294a5691_0.usd
target_class: GeometryObject
translation: [0, 0, 0.38]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUCSQAKTKJ5EAABAAAAADI8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [0.5, 7.43, 0]
euler: [0.0, 0.0, -90.0]
209
-
name: table
path: home_scenes/tables/study_room/model_50c17a2649d0afa0b4cfe516947e05d3_0.usd
target_class: GeometryObject
translation: [0, 0, 0.46]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUCSQAKTKJ5EAABAAAAAEI8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.94, 0.21, 0]
euler: [0.0, 0.0, 0.0]
210
-
name: table
path: home_scenes/tables/study_room/model_856a914fb4f34268d85458eea8f6c5f4_0.usd
target_class: GeometryObject
translation: [0, 0, 0.38]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUHL5YKTKJ5EAABAAAAAAQ8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.06, 0.07, 0]
euler: [0.0, 0.0, -90.0]
211
-
name: table
path: home_scenes/tables/study_room/model_376dd16b9844b5af20d3f7693b06caa4_0.usd
target_class: GeometryObject
translation: [0, 0, 0.61]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUHLWYKTKJ5EAABAAAAAAI8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-3.17, -5.03, 0]
euler: [0.0, 0.0, 0.0]
212
-
name: table
path: home_scenes/tables/study_room/model_360fbe3245e433487c79eeb55bfe983c_0.usd
target_class: GeometryObject
translation: [0, 0, 0.41]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUHLWYKTKJ5EAABAAAAABA8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [6.42, 2.97, 0]
euler: [0.0, 0.0, 90.0]
213
-
name: table
path: home_scenes/tables/study_room/model_360fbe3245e433487c79eeb55bfe983c_0.usd
target_class: GeometryObject
translation: [0, 0, 0.41]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUHLWYKTKJ5EAABAAAAABI8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-1.78, -6.34, 0]
euler: [0.0, 0.0, -90.0]
214
-
name: table
path: home_scenes/tables/study_room/model_25f615fb59269f93ab247362cba81baa_0.usd
target_class: GeometryObject
translation: [0, 0, 0.34]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUHLWYKTKJ5EAABAAAAABQ8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [4.42, 0.15, 0]
euler: [0.0, 0.0, -90.0]
215
-
name: table
path: home_scenes/tables/study_room/model_9708eeaa90a98bfea7c12767e2be3427_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUHLWYKTKJ5EAABAAAAADI8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-6.4, -3.17, 0]
euler: [0.0, 0.0, 0.0]
216
-
name: table
path: home_scenes/tables/study_room/model_5e55f06f3732989070508a1615154c55_0.usd
target_class: GeometryObject
translation: [0, 0, 0.5]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUHLWYKTKJ5EAABAAAAADQ8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-0.92, 1.05, 0]
euler: [0.0, 0.0, 90.0]
217
-
name: table
path: home_scenes/tables/study_room/model_d63f525c78a190e4f0c499fc8604cdbd_0.usd
target_class: GeometryObject
translation: [0, 0, 0.27]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MVUHLWYKTKJ5EAABAAAAAEA8_usd_study_room_2.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-7.52, 4.33, 0]
euler: [0.0, 0.0, 180.0]
218
-
name: table
path: home_scenes/tables/study_room/model_1ae7e32234c1a697a3957286ee71c7b9_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MWAX5JYKTKJZ2AABAAAAAAI8_usd_study_room_1.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [4.61, -0.13, 0]
euler: [0.0, 0.0, 0.0]
219
-
name: table
path: home_scenes/tables/study_room/model_dd5947ca1f8944c0e07b4ec982c118b9_0.usd
target_class: GeometryObject
translation: [0, 0, 0.5]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MWAX5JYKTKJZ2AABAAAAAAQ8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-5.19, -3, 0]
euler: [0.0, 0.0, 180.0]
220
-
name: table
path: home_scenes/tables/study_room/model_360fbe3245e433487c79eeb55bfe983c_0.usd
target_class: GeometryObject
translation: [0, 0, 0.41]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MWAX5JYKTKJZ2AABAAAAABY8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-2.65, 5.49, 0]
euler: [0.0, 0.0, 90.0]
221
-
name: table
path: home_scenes/tables/study_room/model_a0d8d1735ea122d71ccffb0cc27a1330_0.usd
target_class: GeometryObject
translation: [0, 0, 0.39]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MWAX5JYKTKJZ2AABAAAAADA8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.04, 1.95, 0]
euler: [0.0, 0.0, -90.0]
222
-
name: table
path: home_scenes/tables/study_room/model_a0d8d1735ea122d71ccffb0cc27a1330_0.usd
target_class: GeometryObject
translation: [0, 0, 0.36]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MWAX5JYKTKJZ2AABAAAAADI8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [2.8, -4.8, 0]
euler: [0.0, 0.0, 180.0]
223
-
name: table
path: home_scenes/tables/study_room/model_97bb6e3be64e4b1656de2c83cbcc687d_0.usd
target_class: GeometryObject
translation: [0, 0, 0.41]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MWAX5JYKTKJZ2AABAAAAADQ8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-5.12, -1.19, 0]
euler: [0.0, 0.0, 180.0]
224
-
name: table
path: home_scenes/tables/study_room/model_360fbe3245e433487c79eeb55bfe983c_0.usd
target_class: GeometryObject
translation: [0, 0, 0.37]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MWAX5JYKTKJZ2AABAAAAADY8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-3.81, 2.15, 0]
euler: [0.0, 0.0, -90.0]
225
-
name: table
path: home_scenes/tables/study_room/model_a0d8d1735ea122d71ccffb0cc27a1330_0.usd
target_class: GeometryObject
translation: [0, 0, 0.39]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MWBGLKQKTKJZ2AABAAAAAAA8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-0.51, 3.03, 0]
euler: [0.0, 0.0, 90.0]
226
-
name: table
path: home_scenes/tables/study_room/model_a0d8d1735ea122d71ccffb0cc27a1330_0.usd
target_class: GeometryObject
translation: [0, 0, 0.39]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MWBGLKQKTKJZ2AABAAAAABA8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [5.69, -1.33, 0]
euler: [0.0, 0.0, 0.0]
227
-
name: table
path: home_scenes/tables/study_room/model_a0d8d1735ea122d71ccffb0cc27a1330_0.usd
target_class: GeometryObject
translation: [0, 0, 0.39]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MWBGLKQKTKJZ2AABAAAAABA8_usd_study_room_1.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-3.14, -5.98, 0]
euler: [0.0, 0.0, -90.0]
228
-
name: table
path: home_scenes/tables/study_room/model_a0d8d1735ea122d71ccffb0cc27a1330_0.usd
target_class: GeometryObject
translation: [0, 0, 0.39]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MWBGLKQKTKJZ2AABAAAAABA8_usd_study_room_2.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [4.78, 1.42, 0]
euler: [0.0, 0.0, -90.0]
229
-
name: table
path: home_scenes/tables/study_room/model_a0d8d1735ea122d71ccffb0cc27a1330_0.usd
target_class: GeometryObject
translation: [0, 0, 0.39]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MWBGLKQKTKJZ2AABAAAAABQ8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [1.27, 5.86, 0]
euler: [0.0, 0.0, 90.0]
230
-
name: table
path: home_scenes/tables/study_room/model_a0d8d1735ea122d71ccffb0cc27a1330_0.usd
target_class: GeometryObject
translation: [0, 0, 0.39]
scale: [0.01, 0.01, 0.01]
euler: [0.0, 0.0, 0.0]
-
name: scene
path: home_scenes/rooms/study_room/MWBGLKQKTKJZ2AABAAAAACA8_usd_study_room_0.usd
target_class: GeometryObject
scale: [0.01, 0.01, 0.01]
translation: [-2.18, 1.99, 0]
euler: [0.0, 0.0, -90.0]
################################### study_room ####################################

View File

@@ -0,0 +1,140 @@
import numpy as np
from omni.isaac.core.utils.transformations import pose_from_tf_matrix
from scipy.spatial.transform import Rotation as R
def get_orientation(euler, quaternion, degrees: bool = True):
    """Return an orientation quaternion in [w, x, y, z] order.

    Args:
        euler: Optional xyz euler angles; takes precedence over ``quaternion``
            when provided and non-empty.
        quaternion: Optional quaternion [w, x, y, z], used when ``euler`` is
            not given.
        degrees: If True, ``euler`` is interpreted in degrees.

    Returns:
        Quaternion [w, x, y, z]; identity when neither input is provided.
    """
    # Explicit None/emptiness checks: the previous `if euler:` raised
    # "truth value is ambiguous" for numpy-array inputs.
    if euler is not None and len(euler) > 0:
        angles = np.radians(euler) if degrees else np.asarray(euler, dtype=float)
        quat_xyzw = R.from_euler("xyz", angles).as_quat()
        # Reorder scipy's [x, y, z, w] to [w, x, y, z]; avoids requiring
        # scipy >= 1.14 for `as_quat(scalar_first=True)`.
        orientation = np.roll(quat_xyzw, 1)
    elif quaternion is not None and len(quaternion) > 0:
        orientation = quaternion
    else:
        orientation = [1.0, 0.0, 0.0, 0.0]
    return orientation
def perturb_position(translation, max_noise_m=0.05, num_samples=1):
    """Add uniform noise in [-max_noise_m, max_noise_m] to a translation.

    Returns a single perturbed (3,) vector when ``num_samples`` is 1,
    otherwise a (num_samples, 3) array of independently perturbed copies.
    """
    shape = 3 if num_samples == 1 else (num_samples, 3)
    offsets = np.random.uniform(low=-max_noise_m, high=max_noise_m, size=shape)
    # (3,) + (num_samples, 3) broadcasts row-wise in the batched case.
    return translation + offsets
def perturb_orientation(orientation, max_noise_deg=5, num_samples=1):
    """Apply a small random rotation (at most ``max_noise_deg``) to a quaternion.

    Quaternions are in [w, x, y, z] order. Returns a (4,) quaternion when
    ``num_samples`` is 1, otherwise a (num_samples, 4) batch.
    """

    def _qmul(a, b):
        # Hamilton product, broadcasting over any leading batch dimensions.
        aw, ax, ay, az = a[..., 0], a[..., 1], a[..., 2], a[..., 3]
        bw, bx, by, bz = b[..., 0], b[..., 1], b[..., 2], b[..., 3]
        return np.stack(
            [
                aw * bw - ax * bx - ay * by - az * bz,
                aw * bx + ax * bw + ay * bz - az * by,
                aw * by - ax * bz + ay * bw + az * bx,
                aw * bz + ax * by - ay * bx + az * bw,
            ],
            axis=-1,
        )

    limit = np.deg2rad(max_noise_deg)
    if num_samples == 1:
        axis = np.random.randn(3)
        axis = axis / np.linalg.norm(axis)
        angle = np.random.uniform(low=-limit, high=limit)
        noise = np.array([np.cos(angle / 2), *(np.sin(angle / 2) * axis)])
        return _qmul(orientation, noise)
    axes = np.random.randn(num_samples, 3)
    axes = axes / np.linalg.norm(axes, axis=1, keepdims=True)
    angles = np.random.uniform(low=-limit, high=limit, size=num_samples)
    half = angles / 2.0
    noise = np.concatenate([np.cos(half)[:, None], np.sin(half)[:, None] * axes], axis=1)
    base = np.broadcast_to(orientation, (num_samples, 4))
    return _qmul(base, noise)
def poses_from_tf_matrices(tf_matrices):
    """Split a batch of 4x4 transforms into translation and orientation batches.

    Args:
        tf_matrices: Array of shape (N, 4, 4).

    Returns:
        Tuple ``(translations, orientations)``, each stacked over the batch as
        produced per-matrix by ``pose_from_tf_matrix``.
    """
    pairs = [pose_from_tf_matrix(mat) for mat in tf_matrices]
    translations = np.stack([trans for trans, _ in pairs])
    orientations = np.stack([ori for _, ori in pairs])
    return (translations, orientations)
def create_pose_matrices(pos_batch, rot_batch):
    """Assemble (N, 4, 4) homogeneous transforms from rotation and position batches."""
    batch_size = len(pos_batch)
    mats = np.repeat(np.eye(4)[None, :, :], batch_size, axis=0)
    mats[:, :3, :3] = rot_batch
    mats[:, :3, 3] = pos_batch
    return mats
def pose_to_6d(pose, degrees: bool = False):
    """Flatten a 4x4 pose matrix into [x, y, z, roll, pitch, yaw] (xyz euler)."""
    translation = pose[:3, 3]
    euler = R.from_matrix(pose[:3, :3]).as_euler("xyz", degrees=degrees)
    return np.concatenate([translation, euler])
def _6d_to_pose(pose6d, degrees: bool = False):
"""Convert 6D representation [x, y, z, roll, pitch, yaw] back to 4x4 pose matrix."""
pose = np.eye(4)
pose[:3, 3] = pose6d[:3]
pose[:3, :3] = R.from_euler("xyz", pose6d[3:6], degrees=degrees).as_matrix()
return pose
def get_tf_mat(i, dh):
    """Modified Denavit-Hartenberg link transform for row ``i`` of table ``dh``.

    Each row of ``dh`` is ``(a, d, alpha, theta)``.
    """
    a, d, alpha, theta = dh[i][0], dh[i][1], dh[i][2], dh[i][3]
    ct, st = np.cos(theta), np.sin(theta)
    ca, sa = np.cos(alpha), np.sin(alpha)
    return np.array(
        [
            [ct, -st, 0, a],
            [st * ca, ct * ca, -sa, -sa * d],
            [st * sa, ct * sa, ca, ca * d],
            [0, 0, 0, 1],
        ]
    )
def get_fk_solution(joint_angles, target_index: int = 8):
    """Forward kinematics for a Franka Panda from hard-coded modified DH rows.

    Multiplies the first ``target_index`` link transforms; the trailing table
    rows model the fixed flange/hand offsets.
    """
    dh_params = [
        [0, 0.333, 0, joint_angles[0]],
        [0, 0, -np.pi / 2, joint_angles[1]],
        [0, 0.316, np.pi / 2, joint_angles[2]],
        [0.0825, 0, np.pi / 2, joint_angles[3]],
        [-0.0825, 0.384, -np.pi / 2, joint_angles[4]],
        [0, 0, np.pi / 2, joint_angles[5]],
        [0.088, 0, np.pi / 2, joint_angles[6]],
        [0, 0.107, 0, 0],
        [0, 0, 0, -np.pi / 4],
        [0.0, 0.1034, 0, 0],
    ]
    pose = np.eye(4)
    # Inlined modified-DH step (same math as get_tf_mat) accumulated in order.
    for a, d, alpha, theta in dh_params[:target_index]:
        ct, st = np.cos(theta), np.sin(theta)
        ca, sa = np.cos(alpha), np.sin(alpha)
        step = np.array(
            [
                [ct, -st, 0, a],
                [st * ca, ct * ca, -sa, -sa * d],
                [st * sa, ct * sa, ca, ca * d],
                [0, 0, 0, 1],
            ]
        )
        pose = pose @ step
    return pose

View File

@@ -0,0 +1,117 @@
# pylint: skip-file
# flake8: noqa
import os
import sys
from typing import List, Sequence, Tuple
import numpy as np
import open3d as o3d
from pxr import Gf, Usd, UsdGeom
def to_list(data: Sequence):
    """Return ``data`` as a list; ``None`` maps to an empty list."""
    return [] if data is None else list(data)
def recursive_parse_new(prim: Usd.Prim) -> Tuple[list, list, list]:
    """Recursively gather mesh geometry from a prim subtree in world space.

    Walks ``prim`` and all descendants; every ``UsdGeom.Mesh`` contributes its
    points (transformed by the mesh's local-to-world matrix) plus face vertex
    counts and indices, with indices re-based onto the accumulated point list.

    Returns:
        Tuple ``(points, faceVertexCounts, faceVertexIndices)`` where
        ``points`` is a list of [x, y, z] world coordinates.
    """
    points_total: List = []
    faceVertexCounts_total: List[int] = []
    faceVertexIndices_total: List[int] = []
    if prim.IsA(UsdGeom.Mesh):
        prim_imageable = UsdGeom.Imageable(prim)
        xform_world_transform = np.array(
            prim_imageable.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
        )
        points = to_list(prim.GetAttribute("points").Get())
        faceVertexCounts = to_list(prim.GetAttribute("faceVertexCounts").Get())
        faceVertexIndices = to_list(prim.GetAttribute("faceVertexIndices").Get())
        # Guard: an empty/missing "points" attribute previously crashed the
        # np.hstack below (shape (0,) vs (0, 1)); skip geometry-less meshes.
        if points:
            points = np.array(points)  # Nx3
            ones = np.ones((points.shape[0], 1))  # Nx1
            points_h = np.hstack([points, ones])  # Nx4 homogeneous
            # USD matrices use the row-vector convention: p' = p @ M.
            points_transformed_h = np.dot(points_h, xform_world_transform)
            points = (
                points_transformed_h[:, :3] / points_transformed_h[:, 3][:, np.newaxis]
            ).tolist()
            base_num = len(points_total)
            faceVertexIndices = np.array(faceVertexIndices)
            faceVertexIndices_total.extend((base_num + faceVertexIndices).tolist())
            faceVertexCounts_total.extend(faceVertexCounts)
            points_total.extend(points)
    for child in prim.GetChildren():
        child_points, child_counts, child_indices = recursive_parse_new(child)
        # Re-base child indices onto the points accumulated so far.
        base_num = len(points_total)
        child_indices = np.array(child_indices)
        faceVertexIndices_total.extend((base_num + child_indices).tolist())
        faceVertexCounts_total.extend(child_counts)
        points_total.extend(child_points)
    return (
        points_total,
        faceVertexCounts_total,
        faceVertexIndices_total,
    )
def get_mesh_from_points_and_faces(points, faceVertexCounts, faceVertexIndices):
    """Build an Open3D triangle mesh from USD-style point/face arrays.

    Faces with more than three vertices are fan-triangulated instead of being
    dropped (the previous behavior silently skipped quads and n-gons, which
    are common in USD assets); faces with fewer than three vertices are
    ignored.
    """
    mesh = o3d.geometry.TriangleMesh()
    mesh.vertices = o3d.utility.Vector3dVector(points)
    triangles = []
    idx = 0
    for count in faceVertexCounts:
        face = faceVertexIndices[idx : idx + count]
        # Fan triangulation: (v0, v1, v2), (v0, v2, v3), ... — correct for the
        # convex polygons USD meshes typically contain; a triangle (count == 3)
        # passes through unchanged.
        for k in range(1, count - 1):
            triangles.append([face[0], face[k], face[k + 1]])
        idx += count
    mesh.triangles = o3d.utility.Vector3iVector(triangles)
    mesh.compute_vertex_normals()
    return mesh
def sample_points_from_mesh(mesh, num_points: int = 1000):
    """Uniformly sample ``num_points`` surface points from an Open3D mesh."""
    return mesh.sample_points_uniformly(number_of_points=num_points)
def sample_points_from_prim(prim: Usd.Prim, num_points: int = 1000) -> np.ndarray:
    """Sample a world-space point cloud from every mesh under ``prim``."""
    parsed = recursive_parse_new(prim)
    mesh = get_mesh_from_points_and_faces(*parsed)
    sampled = sample_points_from_mesh(mesh, num_points)
    return np.asarray(sampled.points)
def compute_bbox(prim: Usd.Prim) -> Gf.Range3d:
    """Compute an axis-aligned world-space bounding box for a prim subtree."""
    imageable: UsdGeom.Imageable = UsdGeom.Imageable(prim)
    world_bound = imageable.ComputeWorldBound(Usd.TimeCode.Default(), UsdGeom.Tokens.default_)
    return world_bound.ComputeAlignedBox()
if __name__ == "__main__":
    # Simple manual test/debug hook kept from original peizhou_prim.py
    dir_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    models_folder = os.path.join(dir_path, "minidump_usd/0001/dest_usd/models_z/models/")
    # Each subfolder of models_folder is expected to hold one instance.usd.
    subfolders = [f.name for f in os.scandir(models_folder) if f.is_dir()]
    for subfolder in subfolders:
        print(subfolder)
        usd_path = os.path.join(models_folder, subfolder, "instance.usd")
        stage = Usd.Stage.Open(usd_path)
        prim = stage.GetPrimAtPath("/Root/Instance")
        # Parse the prim subtree into a mesh, display it, then display a
        # 10k-point uniform surface sample of the same geometry.
        points, faceVertexCounts, faceVertexIndices = recursive_parse_new(prim)
        mesh = get_mesh_from_points_and_faces(points, faceVertexCounts, faceVertexIndices)
        o3d.visualization.draw_geometries([mesh])
        pcd = sample_points_from_mesh(mesh, num_points=10000)
        o3d.visualization.draw_geometries([pcd])

View File

@@ -0,0 +1,87 @@
import logging
import os
import random
import numpy as np
import torch
try:
from pxr.Usd import Prim
except Exception: # pylint: disable=broad-except
print("No pxr found")
Prim = None
# Make sure localhost bypasses any configured HTTP proxy. NO_PROXY entries are
# comma-separated; the previous one-liner appended r"\," and thus inserted a
# literal backslash into the list, breaking the separator.
if "NO_PROXY" in os.environ:
    os.environ["NO_PROXY"] = os.environ["NO_PROXY"] + ",localhost"
else:
    os.environ["NO_PROXY"] = "localhost"

# Quiet noisy HTTP client libraries.
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
def set_semantic_label(prim: Prim, label):
    """Tag every Mesh prim in the subtree rooted at ``prim`` with ``label``."""
    from omni.isaac.core.utils.semantics import add_update_semantics

    if prim.GetTypeName() == "Mesh":
        add_update_semantics(prim, semantic_label=label, type_label="class")
    for child in prim.GetAllChildren():
        set_semantic_label(child, label)
def set_plane_semantic_label(prim: Prim, label):
    """Tag every Plane prim in the subtree rooted at ``prim`` with ``label``."""
    from omni.isaac.core.utils.semantics import add_update_semantics

    if prim.GetTypeName() == "Plane":
        add_update_semantics(prim, semantic_label=label, type_label="class")
    for child in prim.GetAllChildren():
        set_plane_semantic_label(child, label)
def set_robot_semantic_label(robot: Prim, parent_name: str):
    """Recursively label robot meshes per link for semantic segmentation.

    Mesh prims whose path (with ``parent_name`` stripped) contains a Franka
    link, the mount, or a Robotiq gripper part get a class label derived from
    the path. Meshes matching none of the patterns are left untagged —
    previously they raised ``NameError`` because ``class_label`` was unbound.
    """
    from omni.isaac.core.utils.semantics import add_update_semantics

    if robot.GetTypeName() == "Mesh":
        prim_path = str(robot.GetPrimPath()).replace(parent_name, "")
        class_label = None
        if "panda_link" in prim_path:
            class_label = prim_path.split("/")[1]
        elif "mount" in prim_path:
            class_label = "mount"
        elif "Robotiq_2F_85" in prim_path:
            class_label = f"Robotiq_2F_85_{prim_path.split('/')[2]}"
        if class_label is not None:
            add_update_semantics(robot, semantic_label=class_label, type_label="class")
    for child in robot.GetAllChildren():
        set_robot_semantic_label(child, parent_name)
def set_random_seed(seed):
    """Seed python, numpy, and torch RNGs for reproducible runs.

    Also forces deterministic cuDNN kernels, trading speed for repeatability.
    """
    assert isinstance(seed, int), f'Expected "seed" to be an integer, but it is "{type(seed)}".'
    print(f"set seed:{seed}")
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def check_plane_vector_angle(x1, y1, z1, threshold_deg):
    """Check whether ``z1`` nearly lies in the plane spanned by ``x1`` and ``y1``.

    The angle between ``z1`` and the plane is 90 degrees minus the angle
    between ``z1`` and the plane normal; returns True when that plane angle
    is below ``threshold_deg``.
    """
    normal = np.cross(x1, y1)
    # |cos| folds the normal's sign away so the angle lands in [0, 90].
    cos_to_normal = abs(np.dot(normal, z1)) / (np.linalg.norm(normal) * np.linalg.norm(z1))
    theta_to_normal = np.arccos(np.clip(cos_to_normal, -1.0, 1.0))
    plane_angle_deg = 90 - np.degrees(theta_to_normal)
    return plane_angle_deg < threshold_deg

View File

@@ -0,0 +1,285 @@
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
import open3d as o3d
import shapely
from concave_hull import concave_hull
from core.utils.dr import get_category_euler
from core.utils.usd_geom_utils import compute_bbox, recursive_parse_new
from scipy.spatial import ConvexHull
from scipy.spatial.transform import Rotation as R
from shapely.geometry import Polygon
def visualize_polygons(polygons: list[Polygon]):
    """Plot polygon outlines within [-1, 1]^2 and save the figure to polygons.png."""
    fig, axis = plt.subplots()
    for poly in polygons:
        xs, ys = poly.exterior.xy
        axis.plot(xs, ys)
    axis.set_xlim(-1, 1)
    axis.set_ylim(-1, 1)
    plt.savefig("polygons.png")
    plt.close(fig)
def sort_points_clockwise(points: np.ndarray) -> np.ndarray:
    """Sort 2D points in clockwise order around their centroid.

    Angles are measured with ``arctan2``; sorting them in *descending* order
    yields a clockwise traversal. (The previous ascending sort actually
    produced a counter-clockwise ring, contradicting the function's name;
    downstream ``Polygon`` construction accepts either orientation, so only
    the ring direction changes.)
    """
    centroid = np.mean(points, axis=0)
    angles = np.arctan2(points[:, 1] - centroid[1], points[:, 0] - centroid[0])
    return points[np.argsort(-angles)]
def get_xy_contour(points, contour_type="convex_hull"):
    """Project points to the XY plane and return their 2D contour polygon.

    Args:
        points: (N, 2) or (N, 3) array, or an Open3D point cloud.
        contour_type: "convex_hull" (scipy ConvexHull) or "concave_hull".

    Returns:
        ``shapely.geometry.Polygon`` outlining the points in XY.

    Raises:
        ValueError: For an unknown ``contour_type`` (previously this returned
            ``None`` silently, deferring the failure to the caller).
    """
    if isinstance(points, o3d.geometry.PointCloud):
        points = np.asarray(points.points)
    if points.shape[1] == 3:
        points = points[:, :2]  # drop Z: the contour is computed in the XY plane
    if contour_type == "convex_hull":
        hull = ConvexHull(points)
        hull_points = points[hull.vertices]
        return Polygon(sort_points_clockwise(hull_points))
    if contour_type == "concave_hull":
        return Polygon(concave_hull(points))
    raise ValueError(f"Unknown contour_type: {contour_type!r}")
def rotate_object(obj, category):
    """Re-orient ``obj`` to its category's canonical euler plus a random yaw."""
    base_rotation = R.from_euler("xyz", get_category_euler(category), degrees=True)
    random_yaw = np.random.uniform(-180, 180)
    yaw_rotation = R.from_euler("xyz", [0.0, 0.0, random_yaw], degrees=True)
    obj.set_local_pose(orientation=(yaw_rotation * base_rotation).as_quat(scalar_first=True))
def get_pcd_from_mesh(mesh, num_points=1000):
    """Uniformly sample a ``num_points`` point cloud from ``mesh``."""
    return mesh.sample_points_uniformly(number_of_points=num_points)
def transform_pointcloud(
    pcd,
    translation,
    orientation,
) -> o3d.geometry.PointCloud:
    """Return a copy of ``pcd`` rigidly transformed by (translation, orientation).

    ``orientation`` is a [w, x, y, z] quaternion. The input cloud is left
    untouched: points, colors, and normals are deep-copied before the
    transform is applied.
    """
    copied = o3d.geometry.PointCloud()
    copied.points = o3d.utility.Vector3dVector(np.asarray(pcd.points).copy())
    if pcd.has_colors():
        copied.colors = o3d.utility.Vector3dVector(np.asarray(pcd.colors).copy())
    if pcd.has_normals():
        copied.normals = o3d.utility.Vector3dVector(np.asarray(pcd.normals).copy())
    pose = np.eye(4)
    pose[:3, :3] = R.from_quat(orientation, scalar_first=True).as_matrix()
    pose[:3, 3] = translation
    copied.transform(pose)
    return copied
def get_platform_available_polygon(platform_pc, pc_list, visualize=False, buffer_size=0.0):
    """Free area on a platform: its XY contour minus each occupied contour.

    Each point cloud in ``pc_list`` is subtracted (padded by ``buffer_size``)
    from the platform's concave-hull polygon.
    """
    free_area = get_xy_contour(platform_pc, contour_type="concave_hull")
    if visualize:
        occupied = [get_xy_contour(pc, contour_type="concave_hull") for pc in pc_list]
        visualize_polygons(occupied + [free_area])
    for pc in pc_list:
        blocker = get_xy_contour(pc, contour_type="concave_hull").buffer(buffer_size)
        free_area = free_area.difference(blocker)
    return free_area
def compute_pcd_bbox(pcd):
    """Axis-aligned bounding box of an Open3D point cloud."""
    return pcd.get_axis_aligned_bounding_box()
def bbox_to_polygon(x_min, y_min, x_max, y_max):
    """Axis-aligned 2D box as a shapely Polygon (corners in ring order)."""
    corners = [(x_min, y_min), (x_max, y_min), (x_max, y_max), (x_min, y_max)]
    return Polygon(corners)
def find_polygon_placement(large_polygon, small_polygon, buffer_thresh=0.03, max_attempts=1000):
    """Randomly search for a translation placing ``small_polygon`` inside ``large_polygon``.

    The large polygon is shrunk by ``buffer_thresh`` to keep a safety margin.
    Random target centers are sampled inside the shrunk region's bounds; the
    first translation whose translated copy is fully contained wins.

    Returns:
        List with at most one ``(xy_offset, 0)`` tuple; empty when either
        polygon is empty, the shrunk region vanishes, or no attempt succeeds.
        The offset moves the small polygon's vertex centroid onto the sampled
        point.
    """
    if large_polygon.is_empty or small_polygon.is_empty:
        return []
    safe_region = large_polygon.buffer(-buffer_thresh)
    if safe_region.is_empty:
        return []
    minx, miny, maxx, maxy = safe_region.bounds
    # Loop-invariant hoist: the small polygon's vertex centroid never changes,
    # so compute it once instead of on every random attempt.
    coords = np.array(small_polygon.exterior.coords)
    small_centroid = np.mean(coords, axis=0)
    valid_placements = []
    for _ in range(max_attempts):
        tx = np.random.uniform(minx, maxx)
        ty = np.random.uniform(miny, maxy)
        translation = np.array([tx, ty])
        transformed_polygon = shapely.affinity.translate(
            small_polygon,
            xoff=translation[0] - small_centroid[0],
            yoff=translation[1] - small_centroid[1],
        )
        if safe_region.contains(transformed_polygon):
            valid_placements.append((translation - small_centroid, 0))
            break
    return valid_placements
def randomly_place_object_on_object(
    object1_pcd,
    object2_pcd,
    object1,
    object2,
    available_polygon,  #: Polygon = Polygon([(-10, -10), (10, -10), (10, 10), (-10, 10)]),
    restrict_polygon=None,  #: Polygon = None,
):
    """Place ``object1`` at a random free XY spot on top of ``object2``.

    Computes XY concave-hull footprints of both point clouds, intersects the
    support surface with ``available_polygon`` (and ``restrict_polygon`` when
    given), then searches for a translation that fits object1's footprint
    inside the remaining region. On success object1's local pose is updated
    so its bounding box rests on object2's top surface.

    Returns:
        1 when a placement was applied, 0 when no valid placement was found.
    """
    object1_polygon = get_xy_contour(object1_pcd, contour_type="concave_hull")
    object2_polygon = get_xy_contour(object2_pcd, contour_type="concave_hull")
    # Only the part of object2's surface that is marked available (and inside
    # the optional restriction window) may receive the object.
    object2_polygon = object2_polygon.intersection(available_polygon)
    if restrict_polygon is not None:
        object2_polygon = object2_polygon.intersection(restrict_polygon)
    valid_placements = find_polygon_placement(object2_polygon, object1_polygon, max_attempts=10000)
    if not valid_placements:
        print("No valid placements found.")
        return 0
    else:
        rel_translation, _ = valid_placements[-1]
        translation, _ = object1.get_local_pose()
        translation[:2] += rel_translation
        # Stack vertically: rest object1's bbox bottom on object2's bbox top.
        bbox_obj = compute_bbox(object1.prim)
        obj_z_min = bbox_obj.min[2]
        bbox_tgt = compute_bbox(object2.prim)
        tgt_z_max = bbox_tgt.max[2]
        translation[2] = tgt_z_max + (translation[2] - obj_z_min) + 0.001  # add a small value to avoid penetration
        object1.set_local_pose(translation=translation)
        return 1
def set_distractors(
    objects,  # main task objects that must keep clearance from distractors
    distractors,  # distractor prims that need to be placed
    target,  # target object to place distractors on, default table
    distractor_cfg,
    cfgs,
):
    """Randomly scatter distractor objects on ``target`` without overlaps.

    For each distractor: randomly re-orient it, compute the free area on the
    target (excluding main objects and already placed distractors, each padded
    by a configurable buffer), and try to drop it there. Distractors that
    cannot be placed within ``max_attempts`` are moved to ``fallback_z`` so
    they stay out of view and out of later placement checks.
    """
    # Get meshes: triangulate every prim subtree into an Open3D mesh.
    objects_meshes = [recursive_parse_new(prim.prim) for prim in objects.values()]
    objects_meshes = [
        o3d.geometry.TriangleMesh(
            vertices=o3d.utility.Vector3dVector(mesh[0]),
            triangles=o3d.utility.Vector3iVector(np.array(mesh[2]).reshape(-1, 3)),
        )
        for mesh in objects_meshes
    ]
    distractors_meshes = [recursive_parse_new(prim.prim) for prim in distractors.values()]
    distractors_meshes = [
        o3d.geometry.TriangleMesh(
            vertices=o3d.utility.Vector3dVector(mesh[0]),
            triangles=o3d.utility.Vector3iVector(np.array(mesh[2]).reshape(-1, 3)),
        )
        for mesh in distractors_meshes
    ]
    target_mesh = recursive_parse_new(target.prim)
    target_mesh = o3d.geometry.TriangleMesh(
        vertices=o3d.utility.Vector3dVector(target_mesh[0]),
        triangles=o3d.utility.Vector3iVector(np.array(target_mesh[2]).reshape(-1, 3)),
    )
    # Get pcs: sampled point clouds feed the 2D contour computations below.
    num_points = 10000
    objects_pcds = [get_pcd_from_mesh(mesh, num_points) for mesh in objects_meshes]
    distractors_pcds = [get_pcd_from_mesh(mesh, num_points) for mesh in distractors_meshes]
    new_distractors_pcds = []
    target_pcd = get_pcd_from_mesh(target_mesh, num_points)
    # Control minimum distance between distractors and main objects in XY plane (meters)
    # min_object_distance: minimum distance between objects and distractors
    # distractor_buffer: minimum distance between already placed distractors (can be 0 if not needed)
    min_object_distance = distractor_cfg.get("min_object_distance", 0.03)
    distractor_buffer = distractor_cfg.get("distractor_buffer", 0.03)
    max_attempts = distractor_cfg.get("max_attempts", 10)
    fallback_z = distractor_cfg.get("fallback_z", -5.0)  # If no valid placement, move distractor out of view
    for idx, distractor in enumerate(distractors.values()):
        placed = False
        for _ in range(max_attempts):
            # Re-roll the orientation each attempt; the XY footprint changes with yaw.
            rotate_object(distractor, cfgs[idx].category)
            tmp_distractor_pcd = deepcopy(distractors_pcds[idx])
            tmp_distractor_pcd = transform_pointcloud(tmp_distractor_pcd, *distractor.get_local_pose())
            # First compute available region based on already placed distractors
            available_polygon = get_platform_available_polygon(
                target_pcd,
                new_distractors_pcds,
                visualize=False,
                buffer_size=distractor_buffer,
            )
            # Then apply buffer around main objects to ensure at least min_object_distance from distractors
            if min_object_distance > 0.0:
                for obj_pcd in objects_pcds:
                    obj_polygon = get_xy_contour(obj_pcd, contour_type="concave_hull").buffer(min_object_distance)
                    available_polygon = available_polygon.difference(obj_polygon)
            # Optional XY placement window, specified relative to the target's center.
            pos_range = distractor_cfg.get("pos_range", None)
            if pos_range is not None:
                x_min, y_min = pos_range[0]
                x_max, y_max = pos_range[1]
                center_x, center_y = target.get_local_pose()[0][:2]
                x_min += center_x
                x_max += center_x
                y_min += center_y
                y_max += center_y
                restrict_polygon = bbox_to_polygon(x_min, y_min, x_max, y_max)
            else:
                restrict_polygon = None
            res = randomly_place_object_on_object(
                tmp_distractor_pcd,
                target_pcd,
                distractor,
                target,
                available_polygon,
                restrict_polygon,
            )
            if res == 1:
                # Record the placed footprint so later distractors avoid it.
                tmp_distractor_pcd = deepcopy(distractors_pcds[idx])
                tmp_distractor_pcd = transform_pointcloud(tmp_distractor_pcd, *distractor.get_local_pose())
                new_distractors_pcds.append(tmp_distractor_pcd)
                placed = True
                break
        # If no valid placement is found within max_attempts,
        # move this distractor to a safe location (e.g. [0, 0, fallback_z])
        # and do not add it to new_distractors_pcds so it does not affect other placements.
        if not placed:
            trans, ori = distractor.get_local_pose()
            trans = np.array(trans)
            trans[0] = 0.0
            trans[1] = 0.0
            trans[2] = fallback_z
            distractor.set_local_pose(translation=trans, orientation=ori)