diff --git a/docs/source/hilserl.mdx b/docs/source/hilserl.mdx index bc38408e..ad1c74f9 100644 --- a/docs/source/hilserl.mdx +++ b/docs/source/hilserl.mdx @@ -95,7 +95,6 @@ class HILSerlProcessorConfig: class ObservationConfig: add_joint_velocity_to_observation: bool = False # Add joint velocities to state add_current_to_observation: bool = False # Add motor currents to state - add_ee_pose_to_observation: bool = False # Add end-effector pose to state display_cameras: bool = False # Display camera feeds during execution class ImagePreprocessingConfig: @@ -105,7 +104,6 @@ class ImagePreprocessingConfig: class GripperConfig: use_gripper: bool = True # Enable gripper control gripper_penalty: float = 0.0 # Penalty for inappropriate gripper usage - gripper_penalty_in_reward: bool = False # Include gripper penalty in reward class ResetConfig: fixed_reset_joint_positions: Any | None = None # Joint positions for reset @@ -288,7 +286,6 @@ You can enable multiple observation processing features simultaneously: "observation": { "add_joint_velocity_to_observation": true, "add_current_to_observation": true, - "add_ee_pose_to_observation": false, "display_cameras": false } } diff --git a/docs/source/phone_teleop.mdx b/docs/source/phone_teleop.mdx index bab0ac28..22159193 100644 --- a/docs/source/phone_teleop.mdx +++ b/docs/source/phone_teleop.mdx @@ -136,13 +136,12 @@ Additionally you can customize mapping or safety limits by editing the processor ), ``` -- The `EEBoundsAndSafety` step clamps EE motion to a workspace and checks for large ee step jumps to ensure safety. The `end_effector_bounds` are the bounds for the EE pose and can be modified to change the workspace. The `max_ee_step_m` and `max_ee_twist_step_rad` are the step limits for the EE pose and can be modified to change the safety limits. +- The `EEBoundsAndSafety` step clamps EE motion to a workspace and checks for large ee step jumps to ensure safety. 
The `end_effector_bounds` are the bounds for the EE pose and can be modified to change the workspace. The `max_ee_step_m` is the step limit for the EE pose and can be modified to change the safety limit. ```examples/phone_to_so100/teleoperate.py EEBoundsAndSafety( end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]}, max_ee_step_m=0.10, - max_ee_twist_step_rad=0.50, ) ``` diff --git a/docs/source/processors_robots_teleop.mdx b/docs/source/processors_robots_teleop.mdx index c4fcbe03..3d8dcb40 100644 --- a/docs/source/processors_robots_teleop.mdx +++ b/docs/source/processors_robots_teleop.mdx @@ -38,7 +38,7 @@ phone_to_robot_ee_pose_processor = RobotProcessorPipeline[RobotAction, RobotActi kinematics=kinematics_solver, end_effector_step_sizes={"x": 0.5, "y": 0.5, "z": 0.5}, motor_names=list(robot.bus.motors.keys()), ), EEBoundsAndSafety( - end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]}, max_ee_step_m=0.20, max_ee_twist_step_rad=0.50, + end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]}, max_ee_step_m=0.20, ), GripperVelocityToJoint(), ], diff --git a/examples/phone_to_so100/record.py b/examples/phone_to_so100/record.py index bb2e2f5f..d3ef293a 100644 --- a/examples/phone_to_so100/record.py +++ b/examples/phone_to_so100/record.py @@ -84,7 +84,6 @@ phone_to_robot_ee_pose_processor = RobotProcessorPipeline[tuple[RobotAction, Rob EEBoundsAndSafety( end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]}, max_ee_step_m=0.20, - max_ee_twist_step_rad=0.50, ), GripperVelocityToJoint(speed_factor=20.0), ], diff --git a/examples/phone_to_so100/teleoperate.py b/examples/phone_to_so100/teleoperate.py index 6c49a845..783dce24 100644 --- a/examples/phone_to_so100/teleoperate.py +++ b/examples/phone_to_so100/teleoperate.py @@ -67,7 +67,6 @@ phone_to_robot_joints_processor = RobotProcessorPipeline[tuple[RobotAction, Robo EEBoundsAndSafety( end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 
1.0, 1.0]}, max_ee_step_m=0.10, - max_ee_twist_step_rad=0.50, ), GripperVelocityToJoint( speed_factor=20.0, diff --git a/examples/so100_to_so100_EE/record.py b/examples/so100_to_so100_EE/record.py index 6c38553e..9ed6e51a 100644 --- a/examples/so100_to_so100_EE/record.py +++ b/examples/so100_to_so100_EE/record.py @@ -101,7 +101,6 @@ ee_to_follower_joints = RobotProcessorPipeline[tuple[RobotAction, RobotObservati EEBoundsAndSafety( end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]}, max_ee_step_m=0.10, - max_ee_twist_step_rad=0.50, ), InverseKinematicsEEToJoints( kinematics=follower_kinematics_solver, diff --git a/examples/so100_to_so100_EE/teleoperate.py b/examples/so100_to_so100_EE/teleoperate.py index aa975578..b1a8c8c2 100644 --- a/examples/so100_to_so100_EE/teleoperate.py +++ b/examples/so100_to_so100_EE/teleoperate.py @@ -78,7 +78,6 @@ ee_to_follower_joints = RobotProcessorPipeline[tuple[RobotAction, RobotObservati EEBoundsAndSafety( end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]}, max_ee_step_m=0.10, - max_ee_twist_step_rad=0.50, ), InverseKinematicsEEToJoints( kinematics=follower_kinematics_solver, diff --git a/src/lerobot/async_inference/helpers.py b/src/lerobot/async_inference/helpers.py index 75d81a0f..a336d3a6 100644 --- a/src/lerobot/async_inference/helpers.py +++ b/src/lerobot/async_inference/helpers.py @@ -31,7 +31,6 @@ from lerobot.utils.constants import OBS_IMAGES, OBS_STATE, OBS_STR from lerobot.utils.utils import init_logging Action = torch.Tensor -ActionChunk = torch.Tensor # observation as received from the robot RawObservation = dict[str, torch.Tensor] @@ -46,7 +45,7 @@ Observation = dict[str, torch.Tensor] def visualize_action_queue_size(action_queue_size: list[int]) -> None: import matplotlib.pyplot as plt - fig, ax = plt.subplots() + _, ax = plt.subplots() ax.set_title("Action Queue Size Over Time") ax.set_xlabel("Environment steps") ax.set_ylabel("Action Queue Size") diff --git 
a/src/lerobot/cameras/utils.py b/src/lerobot/cameras/utils.py index dfac33e1..4a23843b 100644 --- a/src/lerobot/cameras/utils.py +++ b/src/lerobot/cameras/utils.py @@ -15,14 +15,10 @@ # limitations under the License. import platform -from pathlib import Path -from typing import TypeAlias from .camera import Camera from .configs import CameraConfig, Cv2Rotation -IndexOrPath: TypeAlias = int | Path - def make_cameras_from_configs(camera_configs: dict[str, CameraConfig]) -> dict[str, Camera]: cameras = {} diff --git a/src/lerobot/configs/default.py b/src/lerobot/configs/default.py index 1bc2b8d1..afd644e1 100644 --- a/src/lerobot/configs/default.py +++ b/src/lerobot/configs/default.py @@ -16,9 +16,6 @@ from dataclasses import dataclass, field -from lerobot import ( - policies, # noqa: F401 -) from lerobot.datasets.transforms import ImageTransformsConfig from lerobot.datasets.video_utils import get_safe_default_codec diff --git a/src/lerobot/configs/types.py b/src/lerobot/configs/types.py index e0252784..754aca1a 100644 --- a/src/lerobot/configs/types.py +++ b/src/lerobot/configs/types.py @@ -15,7 +15,6 @@ # https://stackoverflow.com/questions/24481852/serialising-an-enum-member-to-json from dataclasses import dataclass from enum import Enum -from typing import Any, Protocol class FeatureType(str, Enum): @@ -38,10 +37,6 @@ class NormalizationMode(str, Enum): IDENTITY = "IDENTITY" -class DictLike(Protocol): - def __getitem__(self, key: Any) -> Any: ... 
- - @dataclass class PolicyFeature: type: FeatureType diff --git a/src/lerobot/datasets/lerobot_dataset.py b/src/lerobot/datasets/lerobot_dataset.py index 9eebcea4..b8aa880d 100644 --- a/src/lerobot/datasets/lerobot_dataset.py +++ b/src/lerobot/datasets/lerobot_dataset.py @@ -848,11 +848,6 @@ class LeRobotDataset(torch.utils.data.Dataset): return item - def _add_padding_keys(self, item: dict, padding: dict[str, list[bool]]) -> dict: - for key, val in padding.items(): - item[key] = torch.BoolTensor(val) - return item - def __len__(self): return self.num_frames @@ -1396,11 +1391,6 @@ class MultiLeRobotDataset(torch.utils.data.Dataset): """ return {repo_id: i for i, repo_id in enumerate(self.repo_ids)} - @property - def repo_index_to_id(self): - """Return the inverse mapping if repo_id_to_index.""" - return {v: k for k, v in self.repo_id_to_index} - @property def fps(self) -> int: """Frames per second used during data collection. diff --git a/src/lerobot/datasets/push_dataset_to_hub/utils.py b/src/lerobot/datasets/push_dataset_to_hub/utils.py index 5f6363a7..48214e1b 100644 --- a/src/lerobot/datasets/push_dataset_to_hub/utils.py +++ b/src/lerobot/datasets/push_dataset_to_hub/utils.py @@ -13,67 +13,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import inspect -from concurrent.futures import ThreadPoolExecutor -from pathlib import Path import datasets -import numpy -import PIL import torch -from lerobot.datasets.video_utils import encode_video_frames - - -def concatenate_episodes(ep_dicts): - data_dict = {} - - keys = ep_dicts[0].keys() - for key in keys: - if torch.is_tensor(ep_dicts[0][key][0]): - data_dict[key] = torch.cat([ep_dict[key] for ep_dict in ep_dicts]) - else: - if key not in data_dict: - data_dict[key] = [] - for ep_dict in ep_dicts: - for x in ep_dict[key]: - data_dict[key].append(x) - - total_frames = data_dict["frame_index"].shape[0] - data_dict["index"] = torch.arange(0, total_frames, 1) - return data_dict - - -def save_images_concurrently(imgs_array: numpy.array, out_dir: Path, max_workers: int = 4): - out_dir = Path(out_dir) - out_dir.mkdir(parents=True, exist_ok=True) - - def save_image(img_array, i, out_dir): - img = PIL.Image.fromarray(img_array) - img.save(str(out_dir / f"frame_{i:06d}.png"), quality=100) - - num_images = len(imgs_array) - with ThreadPoolExecutor(max_workers=max_workers) as executor: - [executor.submit(save_image, imgs_array[i], i, out_dir) for i in range(num_images)] - - -def get_default_encoding() -> dict: - """Returns the default ffmpeg encoding parameters used by `encode_video_frames`.""" - signature = inspect.signature(encode_video_frames) - return { - k: v.default - for k, v in signature.parameters.items() - if v.default is not inspect.Parameter.empty and k in ["vcodec", "pix_fmt", "g", "crf"] - } - - -def check_repo_id(repo_id: str) -> None: - if len(repo_id.split("/")) != 2: - raise ValueError( - f"""`repo_id` is expected to contain a community or user id `/` the name of the dataset - (e.g. 
'lerobot/pusht'), but contains '{repo_id}'.""" - ) - # TODO(aliberts): remove def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> dict[str, torch.Tensor]: diff --git a/src/lerobot/datasets/streaming_dataset.py b/src/lerobot/datasets/streaming_dataset.py index c3c48d90..454389d4 100644 --- a/src/lerobot/datasets/streaming_dataset.py +++ b/src/lerobot/datasets/streaming_dataset.py @@ -298,9 +298,7 @@ class StreamingLeRobotDataset(torch.utils.data.IterableDataset): return padding_mask - def make_frame( - self, dataset_iterator: Backtrackable, previous_dataset_iterator: Backtrackable | None = None - ) -> Generator: + def make_frame(self, dataset_iterator: Backtrackable) -> Generator: """Makes a frame starting from a dataset iterator""" item = next(dataset_iterator) item = item_to_torch(item) diff --git a/src/lerobot/datasets/utils.py b/src/lerobot/datasets/utils.py index 35313bde..81b361ab 100644 --- a/src/lerobot/datasets/utils.py +++ b/src/lerobot/datasets/utils.py @@ -67,18 +67,6 @@ DEFAULT_IMAGE_PATH = "images/{image_key}/episode-{episode_index:06d}/frame-{fram LEGACY_EPISODES_PATH = "meta/episodes.jsonl" LEGACY_EPISODES_STATS_PATH = "meta/episodes_stats.jsonl" LEGACY_TASKS_PATH = "meta/tasks.jsonl" -LEGACY_DEFAULT_VIDEO_PATH = "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4" -LEGACY_DEFAULT_PARQUET_PATH = "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet" - -DATASET_CARD_TEMPLATE = """ ---- -# Metadata will go there ---- -This dataset was created using [LeRobot](https://github.com/huggingface/lerobot). 
- -## {} - -""" DEFAULT_FEATURES = { "timestamp": {"dtype": "float32", "shape": (1,), "names": None}, @@ -383,12 +371,6 @@ def load_episodes(local_dir: Path) -> datasets.Dataset: return episodes -def backward_compatible_episodes_stats( - stats: dict[str, dict[str, np.ndarray]], episodes: list[int] -) -> dict[int, dict[str, dict[str, np.ndarray]]]: - return dict.fromkeys(episodes, stats) - - def load_image_as_numpy( fpath: str | Path, dtype: np.dtype = np.float32, channel_first: bool = True ) -> np.ndarray: @@ -1346,12 +1328,6 @@ class Backtrackable(Generic[T]): # When cursor<0, slice so the order remains chronological return list(self._back_buf)[: self._cursor or None] - def lookahead_buffer(self) -> list[T]: - """ - Return a copy of the current lookahead buffer. - """ - return list(self._ahead_buf) - def can_peek_back(self, steps: int = 1) -> bool: """ Check if we can go back `steps` items without raising an IndexError. @@ -1377,31 +1353,6 @@ class Backtrackable(Generic[T]): except StopIteration: return False - def reset_cursor(self) -> None: - """ - Reset cursor to the most recent position (equivalent to calling next() - until you're back to the latest item). - """ - self._cursor = 0 - - def clear_ahead_buffer(self) -> None: - """ - Clear the ahead buffer, discarding any pre-fetched items. - """ - self._ahead_buf.clear() - - def switch_source_iterable(self, new_source: Iterable[T]) -> None: - """ - Switch the source of the backtrackable to a new iterable, keeping the history. - - This is useful when iterating over a sequence of datasets. The history from the - previous source is kept, but the lookahead buffer is cleared. The cursor is reset - to the present. 
- """ - self._source = iter(new_source) - self.clear_ahead_buffer() - self.reset_cursor() - def safe_shard(dataset: datasets.IterableDataset, index: int, num_shards: int) -> datasets.Dataset: """ diff --git a/src/lerobot/datasets/video_utils.py b/src/lerobot/datasets/video_utils.py index b4c036e6..5f8b207e 100644 --- a/src/lerobot/datasets/video_utils.py +++ b/src/lerobot/datasets/video_utils.py @@ -585,19 +585,6 @@ def get_video_pixel_channels(pix_fmt: str) -> int: raise ValueError("Unknown format") -def get_image_pixel_channels(image: Image): - if image.mode == "L": - return 1 # Grayscale - elif image.mode == "LA": - return 2 # Grayscale + Alpha - elif image.mode == "RGB": - return 3 # RGB - elif image.mode == "RGBA": - return 4 # RGBA - else: - raise ValueError("Unknown format") - - def get_video_duration_in_s(video_path: Path | str) -> float: """ Get the duration of a video file in seconds using PyAV. diff --git a/src/lerobot/envs/configs.py b/src/lerobot/envs/configs.py index 8cbc597d..8c0c8b3a 100644 --- a/src/lerobot/envs/configs.py +++ b/src/lerobot/envs/configs.py @@ -193,7 +193,6 @@ class ObservationConfig: add_joint_velocity_to_observation: bool = False add_current_to_observation: bool = False - add_ee_pose_to_observation: bool = False display_cameras: bool = False @@ -203,7 +202,6 @@ class GripperConfig: use_gripper: bool = True gripper_penalty: float = 0.0 - gripper_penalty_in_reward: bool = False @dataclass diff --git a/src/lerobot/motors/motors_bus.py b/src/lerobot/motors/motors_bus.py index dca7650e..8603d81a 100644 --- a/src/lerobot/motors/motors_bus.py +++ b/src/lerobot/motors/motors_bus.py @@ -99,12 +99,6 @@ class Motor: norm_mode: MotorNormMode -class JointOutOfRangeError(Exception): - def __init__(self, message="Joint is out of range"): - self.message = message - super().__init__(self.message) - - class PortHandler(Protocol): def __init__(self, port_name): self.is_open: bool diff --git a/src/lerobot/policies/sac/configuration_sac.py 
b/src/lerobot/policies/sac/configuration_sac.py index 6b5ad5b5..ada12330 100644 --- a/src/lerobot/policies/sac/configuration_sac.py +++ b/src/lerobot/policies/sac/configuration_sac.py @@ -139,8 +139,6 @@ class SACConfig(PreTrainedConfig): # Training parameter # Number of steps for online training online_steps: int = 1000000 - # Seed for the online environment - online_env_seed: int = 10000 # Capacity of the online replay buffer online_buffer_capacity: int = 100000 # Capacity of the offline replay buffer diff --git a/src/lerobot/policies/sac/modeling_sac.py b/src/lerobot/policies/sac/modeling_sac.py index c6604440..c7c6798e 100644 --- a/src/lerobot/policies/sac/modeling_sac.py +++ b/src/lerobot/policies/sac/modeling_sac.py @@ -1061,15 +1061,3 @@ class TanhMultivariateNormalDiag(TransformedDistribution): x = transform(x) return x - - -def _convert_normalization_params_to_tensor(normalization_params: dict) -> dict: - converted_params = {} - for outer_key, inner_dict in normalization_params.items(): - converted_params[outer_key] = {} - for key, value in inner_dict.items(): - converted_params[outer_key][key] = torch.tensor(value) - if "image" in outer_key: - converted_params[outer_key][key] = converted_params[outer_key][key].view(3, 1, 1) - - return converted_params diff --git a/src/lerobot/policies/vqbet/configuration_vqbet.py b/src/lerobot/policies/vqbet/configuration_vqbet.py index d7a79f18..44ada9f1 100644 --- a/src/lerobot/policies/vqbet/configuration_vqbet.py +++ b/src/lerobot/policies/vqbet/configuration_vqbet.py @@ -82,7 +82,6 @@ class VQBeTConfig(PreTrainedConfig): gpt_n_head: Number of headers of GPT gpt_hidden_dim: Size of hidden dimensions of GPT dropout: Dropout rate for GPT - mlp_hidden_dim: Size of hidden dimensions of offset header / bin prediction headers parts of VQ-BeT offset_loss_weight: A constant that is multiplied to the offset loss primary_code_loss_weight: A constant that is multiplied to the primary code prediction loss 
secondary_code_loss_weight: A constant that is multiplied to the secondary code prediction loss @@ -125,7 +124,6 @@ class VQBeTConfig(PreTrainedConfig): gpt_n_head: int = 8 gpt_hidden_dim: int = 512 dropout: float = 0.1 - mlp_hidden_dim: int = 1024 offset_loss_weight: float = 10000.0 primary_code_loss_weight: float = 5.0 secondary_code_loss_weight: float = 0.5 diff --git a/src/lerobot/policies/vqbet/vqbet_utils.py b/src/lerobot/policies/vqbet/vqbet_utils.py index e0afe558..44b7d5f0 100644 --- a/src/lerobot/policies/vqbet/vqbet_utils.py +++ b/src/lerobot/policies/vqbet/vqbet_utils.py @@ -231,16 +231,6 @@ class GPT(nn.Module): torch.nn.init.zeros_(module.bias) torch.nn.init.ones_(module.weight) - def crop_block_size(self, gpt_block_size): - # model surgery to decrease the block size if necessary - # e.g. we may load the GPT2 pretrained model checkpoint (block size 1024) - # but want to use a smaller block size for some smaller, simpler model - assert gpt_block_size <= self.config.gpt_block_size - self.config.gpt_block_size = gpt_block_size - self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:gpt_block_size]) - for block in self.transformer.h: - block.attn.bias = block.attn.bias[:, :, :gpt_block_size, :gpt_block_size] - def configure_parameters(self): """ This long function is unfortunately doing something very simple and is being very defensive: diff --git a/src/lerobot/processor/delta_action_processor.py b/src/lerobot/processor/delta_action_processor.py index 949ae78d..a8395637 100644 --- a/src/lerobot/processor/delta_action_processor.py +++ b/src/lerobot/processor/delta_action_processor.py @@ -83,14 +83,12 @@ class MapDeltaActionToRobotActionStep(RobotActionProcessorStep): Attributes: position_scale: A factor to scale the delta position inputs. - rotation_scale: A factor to scale the delta rotation inputs (currently unused). noise_threshold: The magnitude below which delta inputs are considered noise and do not trigger an "enabled" state. 
""" # Scale factors for delta movements position_scale: float = 1.0 - rotation_scale: float = 0.0 # No rotation deltas for gamepad/keyboard noise_threshold: float = 1e-3 # 1 mm threshold to filter out noise def action(self, action: RobotAction) -> RobotAction: diff --git a/src/lerobot/rl/actor.py b/src/lerobot/rl/actor.py index 3c025a05..54d0fba6 100644 --- a/src/lerobot/rl/actor.py +++ b/src/lerobot/rl/actor.py @@ -97,8 +97,6 @@ from .gym_manipulator import ( step_env_and_process_transition, ) -ACTOR_SHUTDOWN_TIMEOUT = 30 - # Main entry point diff --git a/src/lerobot/rl/learner.py b/src/lerobot/rl/learner.py index b7cfdb30..d9758d3a 100644 --- a/src/lerobot/rl/learner.py +++ b/src/lerobot/rl/learner.py @@ -102,8 +102,6 @@ from lerobot.utils.utils import ( from .learner_service import MAX_WORKERS, SHUTDOWN_TIMEOUT, LearnerService -LOG_PREFIX = "[LEARNER]" - @parser.wrap() def train_cli(cfg: TrainRLServerPipelineConfig): diff --git a/src/lerobot/robots/hope_jr/hope_jr_arm.py b/src/lerobot/robots/hope_jr/hope_jr_arm.py index baa36b56..220a29f8 100644 --- a/src/lerobot/robots/hope_jr/hope_jr_arm.py +++ b/src/lerobot/robots/hope_jr/hope_jr_arm.py @@ -105,7 +105,7 @@ class HopeJrArm(Robot): def is_calibrated(self) -> bool: return self.bus.is_calibrated - def calibrate(self, limb_name: str = None) -> None: + def calibrate(self) -> None: groups = { "all": list(self.bus.motors.keys()), "shoulder": ["shoulder_pitch", "shoulder_yaw", "shoulder_roll"], diff --git a/src/lerobot/robots/so100_follower/robot_kinematic_processor.py b/src/lerobot/robots/so100_follower/robot_kinematic_processor.py index 56686d44..87e832db 100644 --- a/src/lerobot/robots/so100_follower/robot_kinematic_processor.py +++ b/src/lerobot/robots/so100_follower/robot_kinematic_processor.py @@ -193,16 +193,12 @@ class EEBoundsAndSafety(RobotActionProcessorStep): Attributes: end_effector_bounds: A dictionary with "min" and "max" keys for position clipping. 
max_ee_step_m: The maximum allowed change in position (in meters) between steps. - max_ee_twist_step_rad: The maximum allowed change in orientation (in radians) between steps. _last_pos: Internal state storing the last commanded position. - _last_twist: Internal state storing the last commanded orientation. """ end_effector_bounds: dict max_ee_step_m: float = 0.05 - max_ee_twist_step_rad: float = 0.20 _last_pos: np.ndarray | None = field(default=None, init=False, repr=False) - _last_twist: np.ndarray | None = field(default=None, init=False, repr=False) def action(self, action: RobotAction) -> RobotAction: x = action["ee.x"] @@ -233,7 +229,6 @@ class EEBoundsAndSafety(RobotActionProcessorStep): raise ValueError(f"EE jump {n:.3f}m > {self.max_ee_step_m}m") self._last_pos = pos - self._last_twist = twist action["ee.x"] = float(pos[0]) action["ee.y"] = float(pos[1]) @@ -246,7 +241,6 @@ class EEBoundsAndSafety(RobotActionProcessorStep): def reset(self): """Resets the last known position and orientation.""" self._last_pos = None - self._last_twist = None def transform_features( self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]] diff --git a/src/lerobot/robots/stretch3/configuration_stretch3.py b/src/lerobot/robots/stretch3/configuration_stretch3.py index d4e217ca..c1226bf9 100644 --- a/src/lerobot/robots/stretch3/configuration_stretch3.py +++ b/src/lerobot/robots/stretch3/configuration_stretch3.py @@ -49,5 +49,3 @@ class Stretch3RobotConfig(RobotConfig): ), } ) - - mock: bool = False diff --git a/src/lerobot/robots/stretch3/robot_stretch3.py b/src/lerobot/robots/stretch3/robot_stretch3.py index 8a0ff5c6..73df360b 100644 --- a/src/lerobot/robots/stretch3/robot_stretch3.py +++ b/src/lerobot/robots/stretch3/robot_stretch3.py @@ -164,10 +164,6 @@ class Stretch3Robot(Robot): # TODO(aliberts): return action_sent when motion is limited return action - def print_logs(self) -> None: - pass - # TODO(aliberts): move robot-specific logs logic here - def 
teleop_safety_stop(self) -> None: if self.teleop is not None: self.teleop._safety_stop(robot=self) diff --git a/src/lerobot/teleoperators/gamepad/gamepad_utils.py b/src/lerobot/teleoperators/gamepad/gamepad_utils.py index d994dadd..9f94b674 100644 --- a/src/lerobot/teleoperators/gamepad/gamepad_utils.py +++ b/src/lerobot/teleoperators/gamepad/gamepad_utils.py @@ -52,10 +52,6 @@ class InputController: """Get the current movement deltas (dx, dy, dz) in meters.""" return 0.0, 0.0, 0.0 - def should_quit(self): - """Return True if the user has requested to quit.""" - return not self.running - def update(self): """Update controller state - call this once per frame.""" pass @@ -198,14 +194,6 @@ class KeyboardController(InputController): return delta_x, delta_y, delta_z - def should_quit(self): - """Return True if ESC was pressed.""" - return self.key_states["quit"] - - def should_save(self): - """Return True if Enter was pressed (save episode).""" - return self.key_states["success"] or self.key_states["failure"] - class GamepadController(InputController): """Generate motion deltas from gamepad input.""" @@ -351,8 +339,6 @@ class GamepadControllerHID(InputController): # Button states self.buttons = {} - self.quit_requested = False - self.save_requested = False def find_device(self): """Look for the gamepad device by vendor and product ID.""" @@ -472,11 +458,3 @@ class GamepadControllerHID(InputController): delta_z = -self.right_y * self.z_step_size # Up/down return delta_x, delta_y, delta_z - - def should_quit(self): - """Return True if quit button was pressed.""" - return self.quit_requested - - def should_save(self): - """Return True if save button was pressed.""" - return self.save_requested diff --git a/src/lerobot/teleoperators/keyboard/configuration_keyboard.py b/src/lerobot/teleoperators/keyboard/configuration_keyboard.py index 5d5ef364..6e070ded 100644 --- a/src/lerobot/teleoperators/keyboard/configuration_keyboard.py +++ 
b/src/lerobot/teleoperators/keyboard/configuration_keyboard.py @@ -22,8 +22,9 @@ from ..config import TeleoperatorConfig @TeleoperatorConfig.register_subclass("keyboard") @dataclass class KeyboardTeleopConfig(TeleoperatorConfig): + """KeyboardTeleopConfig""" + # TODO(Steven): Consider setting in here the keys that we want to capture/listen - mock: bool = False @TeleoperatorConfig.register_subclass("keyboard_ee") diff --git a/src/lerobot/teleoperators/stretch3_gamepad/configuration_stretch3.py b/src/lerobot/teleoperators/stretch3_gamepad/configuration_stretch3.py index 507a2158..3af0b5be 100644 --- a/src/lerobot/teleoperators/stretch3_gamepad/configuration_stretch3.py +++ b/src/lerobot/teleoperators/stretch3_gamepad/configuration_stretch3.py @@ -22,4 +22,4 @@ from ..config import TeleoperatorConfig @TeleoperatorConfig.register_subclass("stretch3") @dataclass class Stretch3GamePadConfig(TeleoperatorConfig): - mock: bool = False + """Stretch3GamePadConfig""" diff --git a/src/lerobot/teleoperators/stretch3_gamepad/stretch3_gamepad.py b/src/lerobot/teleoperators/stretch3_gamepad/stretch3_gamepad.py index 94e1ca7c..09fdfadd 100644 --- a/src/lerobot/teleoperators/stretch3_gamepad/stretch3_gamepad.py +++ b/src/lerobot/teleoperators/stretch3_gamepad/stretch3_gamepad.py @@ -112,10 +112,6 @@ class Stretch3GamePad(Teleoperator): def send_feedback(self, feedback: np.ndarray) -> None: pass - def print_logs(self) -> None: - pass - # TODO(aliberts): move robot-specific logs logic here - def disconnect(self) -> None: self.api.stop() self.is_connected = False diff --git a/src/lerobot/utils/constants.py b/src/lerobot/utils/constants.py index 33781790..824f74b3 100644 --- a/src/lerobot/utils/constants.py +++ b/src/lerobot/utils/constants.py @@ -33,7 +33,6 @@ TRUNCATED = "next.truncated" DONE = "next.done" ROBOTS = "robots" -ROBOT_TYPE = "robot_type" TELEOPERATORS = "teleoperators" # files & directories diff --git a/src/lerobot/utils/errors.py b/src/lerobot/utils/errors.py index 
c02d568d..31b73eac 100644 --- a/src/lerobot/utils/errors.py +++ b/src/lerobot/utils/errors.py @@ -30,14 +30,3 @@ class DeviceAlreadyConnectedError(ConnectionError): ): self.message = message super().__init__(self.message) - - -class InvalidActionError(ValueError): - """Exception raised when an action is already invalid.""" - - def __init__( - self, - message="The action is invalid. Check the value follows what it is expected from the action space.", - ): - self.message = message - super().__init__(self.message) diff --git a/src/lerobot/utils/utils.py b/src/lerobot/utils/utils.py index 523a5e4d..8777d5a9 100644 --- a/src/lerobot/utils/utils.py +++ b/src/lerobot/utils/utils.py @@ -330,10 +330,6 @@ class TimerManager: def history(self) -> list[float]: return deepcopy(self._history) - @property - def fps_history(self) -> list[float]: - return [1.0 / t for t in self._history] - @property def fps_last(self) -> float: return 0.0 if self.last == 0 else 1.0 / self.last diff --git a/tests/policies/test_sac_config.py b/tests/policies/test_sac_config.py index be6a8d26..724c331f 100644 --- a/tests/policies/test_sac_config.py +++ b/tests/policies/test_sac_config.py @@ -69,7 +69,6 @@ def test_sac_config_default_initialization(): # Training parameters assert config.online_steps == 1000000 - assert config.online_env_seed == 10000 assert config.online_buffer_capacity == 100000 assert config.offline_buffer_capacity == 100000 assert config.async_prefetch is False