diff --git a/lerobot/common/teleoperators/gamepad/teleop_gamepad.py b/lerobot/common/teleoperators/gamepad/teleop_gamepad.py
index 0fcd4167..61c2b4b9 100644
--- a/lerobot/common/teleoperators/gamepad/teleop_gamepad.py
+++ b/lerobot/common/teleoperators/gamepad/teleop_gamepad.py
@@ -89,9 +89,9 @@ class GamepadTeleop(Teleoperator):
         # use HidApi for macos
         if sys.platform == "darwin":
             # NOTE: On macOS, pygame doesn’t reliably detect input from some controllers so we fall back to hidapi
-            from lerobot.scripts.server.end_effector_control_utils import GamepadControllerHID as Gamepad
+            from lerobot.common.utils.end_effector_control import GamepadControllerHID as Gamepad
         else:
-            from lerobot.scripts.server.end_effector_control_utils import GamepadController as Gamepad
+            from lerobot.common.utils.end_effector_control import GamepadController as Gamepad

         self.gamepad = Gamepad(x_step_size=1.0, y_step_size=1.0, z_step_size=1.0)
         self.gamepad.start()
diff --git a/lerobot/common/utils/end_effector_control.py b/lerobot/common/utils/end_effector_control.py
index 74003809..3bb9385a 100644
--- a/lerobot/common/utils/end_effector_control.py
+++ b/lerobot/common/utils/end_effector_control.py
@@ -24,7 +24,7 @@ import torch

 from lerobot.common.utils.robot_utils import busy_wait
 from lerobot.common.utils.utils import init_logging
-from lerobot.scripts.server.kinematics import RobotKinematics
+from lerobot.common.model.kinematics import RobotKinematics


 class InputController:
diff --git a/lerobot/scripts/rl/gym_manipulator.py b/lerobot/scripts/rl/gym_manipulator.py
index b7b9780b..d41a18fd 100644
--- a/lerobot/scripts/rl/gym_manipulator.py
+++ b/lerobot/scripts/rl/gym_manipulator.py
@@ -2225,7 +2225,7 @@ def main(cfg: EnvConfig):
     while num_episode < 10:
         start_loop_s = time.perf_counter()
         # Sample a new random action from the robot's action space.
-        new_random_action = env.action_space.sample()
+        new_random_action = env.action_space.sample() * 0
         # Update the smoothed action using an exponential moving average.
         smoothed_action = alpha * new_random_action + (1 - alpha) * smoothed_action
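
For context, the gym_manipulator.py hunk feeds a zeroed random sample into an exponential moving average, so the smoothed action decays toward zero instead of tracking noise. Below is a minimal, self-contained sketch of that smoothing loop; the environment ("Pendulum-v1"), the alpha value, and the loop length are placeholders chosen for illustration and do not come from the patch itself:

    import gymnasium as gym
    import numpy as np

    env = gym.make("Pendulum-v1")  # stand-in for the robot manipulation env (assumption)
    alpha = 0.4                    # assumed smoothing factor; the real value is defined in gym_manipulator.py
    smoothed_action = np.zeros(env.action_space.shape, dtype=np.float32)

    for _ in range(100):
        # With the patched line, the freshly sampled action is scaled to zero.
        new_random_action = env.action_space.sample() * 0
        # Exponential moving average: the previous smoothed action decays by (1 - alpha) each step,
        # so with a zero input it converges geometrically toward zero.
        smoothed_action = alpha * new_random_action + (1 - alpha) * smoothed_action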