chore(async): move async related code to its directory at top level (#2003)

* chore(async): move async related code to its directory at top level

* chore(style): apply pre-commit to renamed headers

* test(async): fix async imports

* docs(async): update async headers doc
This commit is contained in:
Steven Palma
2025-09-24 14:49:37 +02:00
committed by GitHub
parent 7359e18eb6
commit 1cba47da20
10 changed files with 44 additions and 41 deletions

View File

@@ -31,7 +31,7 @@ Then, spin up a policy server (in one terminal, or in a separate machine) specif
You can spin up a policy server by running:
```shell
python src/lerobot/scripts/server/policy_server.py \
python src/lerobot/async_inference/policy_server.py \
--host=127.0.0.1 \
--port=8080 \
```
@@ -39,7 +39,7 @@ python src/lerobot/scripts/server/policy_server.py \
This will start a policy server listening on `127.0.0.1:8080` (`localhost`, port 8080). At this stage, the policy server is empty, as all information related to which policy to run and with which parameters is specified during the first handshake with the client. Spin up a client with:
```shell
python src/lerobot/scripts/server/robot_client.py \
python src/lerobot/async_inference/robot_client.py \
--server_address=127.0.0.1:8080 \ # SERVER: the host address and port of the policy server
--robot.type=so100_follower \ # ROBOT: your robot type
--robot.port=/dev/tty.usbmodem585A0076841 \ # ROBOT: your robot port
@@ -122,8 +122,8 @@ python -m lerobot.scripts.server.policy_server \
<!-- prettier-ignore-start -->
```python
from lerobot.scripts.server.configs import PolicyServerConfig
from lerobot.scripts.server.policy_server import serve
from lerobot.async_inference.configs import PolicyServerConfig
from lerobot.async_inference.policy_server import serve
config = PolicyServerConfig(
host="localhost",
@@ -148,7 +148,7 @@ The `RobotClient` streams observations to the `PolicyServer`, and receives actio
<hfoptions id="start_robot_client">
<hfoption id="Command">
```bash
python src/lerobot/scripts/server/robot_client.py \
python src/lerobot/async_inference/robot_client.py \
--server_address=127.0.0.1:8080 \ # SERVER: the host address and port of the policy server
--robot.type=so100_follower \ # ROBOT: your robot type
--robot.port=/dev/tty.usbmodem585A0076841 \ # ROBOT: your robot port
@@ -171,9 +171,9 @@ python src/lerobot/scripts/server/robot_client.py \
import threading
from lerobot.robots.so100_follower import SO100FollowerConfig
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.scripts.server.configs import RobotClientConfig
from lerobot.scripts.server.robot_client import RobotClient
from lerobot.scripts.server.helpers import visualize_action_queue_size
from lerobot.async_inference.configs import RobotClientConfig
from lerobot.async_inference.robot_client import RobotClient
from lerobot.async_inference.helpers import visualize_action_queue_size
# 1. Create the robot instance
"""Check out the cameras available in your setup by running `python lerobot/find_cameras.py`"""

View File

@@ -18,7 +18,8 @@ from dataclasses import dataclass, field
import torch
from lerobot.robots.config import RobotConfig
from lerobot.scripts.server.constants import (
from .constants import (
DEFAULT_FPS,
DEFAULT_INFERENCE_LATENCY,
DEFAULT_OBS_QUEUE_TIMEOUT,

View File

@@ -15,7 +15,7 @@
"""
Example:
```shell
python src/lerobot/scripts/server/policy_server.py \
python src/lerobot/async_inference/policy_server.py \
--host=127.0.0.1 \
--port=8080 \
--fps=30 \
@@ -38,9 +38,15 @@ import grpc
import torch
from lerobot.policies.factory import get_policy_class
from lerobot.scripts.server.configs import PolicyServerConfig
from lerobot.scripts.server.constants import SUPPORTED_POLICIES
from lerobot.scripts.server.helpers import (
from lerobot.transport import (
services_pb2, # type: ignore
services_pb2_grpc, # type: ignore
)
from lerobot.transport.utils import receive_bytes_in_chunks
from .configs import PolicyServerConfig
from .constants import SUPPORTED_POLICIES
from .helpers import (
FPSTracker,
Observation,
RemotePolicyConfig,
@@ -50,11 +56,6 @@ from lerobot.scripts.server.helpers import (
observations_similar,
raw_observation_to_observation,
)
from lerobot.transport import (
services_pb2, # type: ignore
services_pb2_grpc, # type: ignore
)
from lerobot.transport.utils import receive_bytes_in_chunks
class PolicyServer(services_pb2_grpc.AsyncInferenceServicer):

View File

@@ -15,7 +15,7 @@
"""
Example command:
```shell
python src/lerobot/scripts/server/robot_client.py \
python src/lerobot/async_inference/robot_client.py \
--robot.type=so100_follower \
--robot.port=/dev/tty.usbmodem58760431541 \
--robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \
@@ -57,9 +57,15 @@ from lerobot.robots import ( # noqa: F401
so100_follower,
so101_follower,
)
from lerobot.scripts.server.configs import RobotClientConfig
from lerobot.scripts.server.constants import SUPPORTED_ROBOTS
from lerobot.scripts.server.helpers import (
from lerobot.transport import (
services_pb2, # type: ignore
services_pb2_grpc, # type: ignore
)
from lerobot.transport.utils import grpc_channel_options, send_bytes_in_chunks
from .configs import RobotClientConfig
from .constants import SUPPORTED_ROBOTS
from .helpers import (
Action,
FPSTracker,
Observation,
@@ -72,11 +78,6 @@ from lerobot.scripts.server.helpers import (
validate_robot_cameras_for_policy,
visualize_action_queue_size,
)
from lerobot.transport import (
services_pb2, # type: ignore
services_pb2_grpc, # type: ignore
)
from lerobot.transport.utils import grpc_channel_options, send_bytes_in_chunks
class RobotClient:

View File

@@ -48,11 +48,11 @@ def test_async_inference_e2e(monkeypatch):
# Import grpc-dependent modules inside the test function
import grpc
from lerobot.async_inference.configs import PolicyServerConfig, RobotClientConfig
from lerobot.async_inference.helpers import map_robot_keys_to_lerobot_features
from lerobot.async_inference.policy_server import PolicyServer
from lerobot.async_inference.robot_client import RobotClient
from lerobot.robots.utils import make_robot_from_config
from lerobot.scripts.server.configs import PolicyServerConfig, RobotClientConfig
from lerobot.scripts.server.helpers import map_robot_keys_to_lerobot_features
from lerobot.scripts.server.policy_server import PolicyServer
from lerobot.scripts.server.robot_client import RobotClient
from lerobot.transport import (
services_pb2, # type: ignore
services_pb2_grpc, # type: ignore

View File

@@ -19,8 +19,7 @@ import time
import numpy as np
import torch
from lerobot.configs.types import FeatureType, PolicyFeature
from lerobot.scripts.server.helpers import (
from lerobot.async_inference.helpers import (
FPSTracker,
TimedAction,
TimedObservation,
@@ -30,6 +29,7 @@ from lerobot.scripts.server.helpers import (
raw_observation_to_observation,
resize_robot_observation_image,
)
from lerobot.configs.types import FeatureType, PolicyFeature
# ---------------------------------------------------------------------
# FPSTracker

View File

@@ -65,8 +65,8 @@ class MockPolicy:
def policy_server():
"""Fresh `PolicyServer` instance with a stubbed-out policy model."""
# Import only when the test actually runs (after decorator check)
from lerobot.scripts.server.configs import PolicyServerConfig
from lerobot.scripts.server.policy_server import PolicyServer
from lerobot.async_inference.configs import PolicyServerConfig
from lerobot.async_inference.policy_server import PolicyServer
test_config = PolicyServerConfig(host="localhost", port=9999)
server = PolicyServer(test_config)
@@ -95,7 +95,7 @@ def policy_server():
def _make_obs(state: torch.Tensor, timestep: int = 0, must_go: bool = False):
"""Create a TimedObservation with a given state vector."""
# Import only when needed
from lerobot.scripts.server.helpers import TimedObservation
from lerobot.async_inference.helpers import TimedObservation
return TimedObservation(
observation={
@@ -191,7 +191,7 @@ def test_obs_sanity_checks(policy_server):
def test_predict_action_chunk(monkeypatch, policy_server):
"""End-to-end test of `_predict_action_chunk` with a stubbed _get_action_chunk."""
# Import only when needed
from lerobot.scripts.server.policy_server import PolicyServer
from lerobot.async_inference.policy_server import PolicyServer
# Force server to act-style policy; patch method to return deterministic tensor
policy_server.policy_type = "act"

View File

@@ -38,8 +38,8 @@ def robot_client():
"""Fresh `RobotClient` instance for each test case (no threads started).
Uses DummyRobot."""
# Import only when the test actually runs (after decorator check)
from lerobot.scripts.server.configs import RobotClientConfig
from lerobot.scripts.server.robot_client import RobotClient
from lerobot.async_inference.configs import RobotClientConfig
from lerobot.async_inference.robot_client import RobotClient
from tests.mocks.mock_robot import MockRobotConfig
test_config = MockRobotConfig()
@@ -73,7 +73,7 @@ def robot_client():
def _make_actions(start_ts: float, start_t: int, count: int):
"""Generate `count` consecutive TimedAction objects starting at timestep `start_t`."""
from lerobot.scripts.server.helpers import TimedAction
from lerobot.async_inference.helpers import TimedAction
fps = 30 # emulates most common frame-rate
actions = []
@@ -124,7 +124,7 @@ def test_aggregate_action_queues_combines_actions_in_overlap(
):
"""`_aggregate_action_queues` must combine actions on overlapping timesteps according
to the provided aggregate_fn, here tested with multiple coefficients."""
from lerobot.scripts.server.helpers import TimedAction
from lerobot.async_inference.helpers import TimedAction
robot_client.chunks_received = 0