From 4a10482dfbdc1d580c7eed4c9c444d675f679254 Mon Sep 17 00:00:00 2001
From: Karl Pertsch
Date: Thu, 27 Feb 2025 23:04:10 +0000
Subject: [PATCH] more documentation for libero examples

---
 README.md                            |  3 +
 docs/remote_inference.md             | 35 ++++++++++-
 src/openpi/policies/libero_policy.py | 49 +++++++++++++--
 src/openpi/training/config.py        | 92 +++++++++++++++++++++++++---
 4 files changed, 160 insertions(+), 19 deletions(-)

diff --git a/README.md b/README.md
index d9e5c2a..2df5fb1 100644
--- a/README.md
+++ b/README.md
@@ -158,6 +158,9 @@ uv run scripts/serve_policy.py policy:checkpoint --policy.config=pi0_fast_libero
 
 This will spin up a server that listens on port 8000 and waits for observations to be sent to it. We can then run the Libero evaluation script to query the server. For instructions how to install Libero and run the evaluation script, see the [Libero README](examples/libero/README.md).
 
+If you want to embed a policy server call in your own robot runtime, we have a minimal example of how to do so in the [remote inference docs](docs/remote_inference.md).
+
+
 ### More Examples
 
diff --git a/docs/remote_inference.md b/docs/remote_inference.md
index de5c620..504eea8 100644
--- a/docs/remote_inference.md
+++ b/docs/remote_inference.md
@@ -33,10 +33,39 @@ pip install -e .
 
 Then, you can use the client to query the remote policy server from your robot code. Here's an example of how to do this:
 
 ```python
+from openpi_client import image_tools
 from openpi_client import websocket_client_policy
 
-policy_client = websocket_client_policy.WebsocketClientPolicy(host="10.32.255.0", port=8000)
-action_chunk = policy_client.infer(example)["actions"]
+# Outside of the episode loop, initialize the policy client.
+# Point to the host and port of the policy server (localhost and 8000 are the defaults).
+client = websocket_client_policy.WebsocketClientPolicy(host="localhost", port=8000)
+
+for step in range(num_steps):
+    # Inside the episode loop, construct the observation.
+    # Resize images on the client side to minimize bandwidth / latency. Always return images in uint8 format.
+    # We provide utilities for resizing images + uint8 conversion so your inputs match the preprocessing used during training.
+    # The typical resize_size for pre-trained pi0 models is 224.
+    # Note that the proprioceptive `state` can be passed unnormalized; normalization will be handled on the server side.
+    observation = {
+        "observation/image": image_tools.convert_to_uint8(
+            image_tools.resize_with_pad(img, 224, 224)
+        ),
+        "observation/wrist_image": image_tools.convert_to_uint8(
+            image_tools.resize_with_pad(wrist_img, 224, 224)
+        ),
+        "observation/state": state,
+        "prompt": task_instruction,
+    }
+
+    # Call the policy server with the current observation.
+    # This returns an action chunk of shape (action_horizon, action_dim).
+    # Note that you typically only need to call the policy every N steps and execute the remaining
+    # steps of the predicted action chunk open-loop before querying it again.
+    action_chunk = client.infer(observation)["actions"]
+
+    # Execute the actions in the environment.
+    ...
+
 ```
 
-Here, the `host` and `port` arguments specify the IP address and port of the remote policy server. You can also specify these as command-line arguments to your robot code, or hard-code them in your robot codebase. The `example` is a dictionary of observations and the prompt, following the specification of the policy inputs for the policy you are serving. We have concrete examples of how to construct this dictionary for different environments in the [simple client example](examples/simple_client/main.py).
+Here, the `host` and `port` arguments specify the IP address and port of the remote policy server. You can also specify these as command-line arguments to your robot code, or hard-code them in your robot codebase. The `observation` is a dictionary of observations and the prompt, following the specification of the policy inputs for the policy you are serving. We have concrete examples of how to construct this dictionary for different environments in the [simple client example](examples/simple_client/main.py).
diff --git a/src/openpi/policies/libero_policy.py b/src/openpi/policies/libero_policy.py
index 9e16de7..5b69983 100644
--- a/src/openpi/policies/libero_policy.py
+++ b/src/openpi/policies/libero_policy.py
@@ -28,45 +28,72 @@ def _parse_image(image) -> np.ndarray:
 
 @dataclasses.dataclass(frozen=True)
 class LiberoInputs(transforms.DataTransformFn):
+    """
+    This class is used to convert inputs into the format expected by the model. It is used for both training and inference.
+
+    For your own dataset, you can copy this class and modify the keys based on the comments below to pipe
+    the correct elements of your dataset into the model.
+    """
+
+    # The action dimension of the model. Will be used to pad state and actions for the pi0 model (not pi0-FAST).
+    # Do not change this for your own dataset.
     action_dim: int
 
     # Determines which model will be used.
+    # Do not change this for your own dataset.
     model_type: _model.ModelType = _model.ModelType.PI0
 
     def __call__(self, data: dict) -> dict:
-        mask_padding = self.model_type == _model.ModelType.PI0  # We don't mask for pi0-FAST.
+        # We only mask padding for the pi0 model, not pi0-FAST. Do not change this for your own dataset.
+        mask_padding = self.model_type == _model.ModelType.PI0
 
-        # Get the state. We are padding from 8 to the model action dim.
-        # For pi0-FAST, we don't pad the state (action_dim = 7, which is < 8, so pad is skipped).
+        # We pad the proprioceptive input to the action dimension of the model.
+        # For pi0-FAST, we don't pad the state. For Libero, we don't need to differentiate
+        # since the pi0-FAST action_dim = 7, which is < state_dim = 8, so the pad is skipped.
+        # Keep this for your own dataset, but if your dataset stores the proprioceptive input
+        # under a different key than "observation/state", you should change it below.
         state = transforms.pad_to_dim(data["observation/state"], self.action_dim)
 
         # Possibly need to parse images to uint8 (H,W,C) since LeRobot automatically
-        # stores as float32 (C,H,W), gets skipped for policy inference
+        # stores as float32 (C,H,W); this gets skipped for policy inference.
+        # Keep this for your own dataset, but if your dataset stores the images
+        # under different keys than "observation/image" or "observation/wrist_image",
+        # you should change them below.
+        # Pi0 models support three image inputs at the moment: one third-person view,
+        # and two wrist views (left and right). If your dataset does not have a particular type
+        # of image, e.g. wrist images, you can comment it out here and replace it with zeros like we do for the
+        # right wrist image below.
         base_image = _parse_image(data["observation/image"])
         wrist_image = _parse_image(data["observation/wrist_image"])
 
+        # Create inputs dict. Do not change the keys in the dict below.
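+        # As a purely hypothetical example: if your robot had no wrist camera, you could pass
+        # np.zeros_like(base_image) for "left_wrist_0_rgb" below and set its "image_mask" entry to
+        # np.False_ (when ``mask_padding`` is True), mirroring how the missing right wrist image is handled.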
         inputs = {
             "state": state,
             "image": {
                 "base_0_rgb": base_image,
                 "left_wrist_0_rgb": wrist_image,
+                # Pad any non-existent images with zero-arrays of the appropriate shape.
                 "right_wrist_0_rgb": np.zeros_like(base_image),
             },
             "image_mask": {
                 "base_0_rgb": np.True_,
                 "left_wrist_0_rgb": np.True_,
+                # Mask any non-existent images with False (if ``mask_padding`` is True).
                 "right_wrist_0_rgb": np.False_ if mask_padding else np.True_,
             },
         }
 
+        # Pad actions to the model action dimension. Keep this for your own dataset.
         # Actions are only available during training.
         if "actions" in data:
-            # We are padding from 7 to the model action dim.
+            # We are padding to the model action dim.
             # For pi0-FAST, this is a no-op (since action_dim = 7).
             actions = transforms.pad_to_dim(data["actions"], self.action_dim)
             inputs["actions"] = actions
 
+        # Pass the prompt (aka language instruction) to the model.
+        # Keep this for your own dataset (but modify the key if the instruction is not
+        # stored in "prompt"; the output dict always needs to have the key "prompt").
         if "prompt" in data:
             inputs["prompt"] = data["prompt"]
 
@@ -75,6 +102,16 @@ class LiberoInputs(transforms.DataTransformFn):
 
 @dataclasses.dataclass(frozen=True)
 class LiberoOutputs(transforms.DataTransformFn):
+    """
+    This class is used to convert outputs from the model back to the dataset-specific format. It is
+    used for inference only.
+
+    For your own dataset, you can copy this class and modify the action dimension based on the comments below.
+    """
+
     def __call__(self, data: dict) -> dict:
-        # Only return the first 7 dims.
+        # Only return the first N actions -- since we padded the actions above to fit the model action
+        # dimension, we now need to parse out the correct number of actions in the return dict.
+        # For Libero, we only return the first 7 actions (since the rest is padding).
+        # For your own dataset, replace `7` with the action dimension of your dataset.
         return {"actions": np.asarray(data["actions"][:, :7])}
diff --git a/src/openpi/training/config.py b/src/openpi/training/config.py
index 5b93cee..3b9dfb9 100644
--- a/src/openpi/training/config.py
+++ b/src/openpi/training/config.py
@@ -251,9 +251,22 @@ class LeRobotAlohaDataConfig(DataConfigFactory):
 
 @dataclasses.dataclass(frozen=True)
 class LeRobotLiberoDataConfig(DataConfigFactory):
+    """
+    This config is used to configure transforms that are applied at various parts of the data pipeline.
+    For your own dataset, you can copy this class and modify the transforms to match your dataset based on the
+    comments below.
+    """
+
     @override
     def create(self, assets_dirs: pathlib.Path, model_config: _model.BaseModelConfig) -> DataConfig:
-        # Make inputs look like they come from the Libero environment
+        # The repack transform is *only* applied to the data coming from the dataset,
+        # and *not* during inference. We can use it to make inputs from the dataset look
+        # as close as possible to those coming from the inference environment (e.g. match the keys).
+        # Below, we match the keys in the dataset (which we defined in the data conversion script) to
+        # the keys we use in our inference pipeline (defined in the inference script for Libero).
+        # For your own dataset, first figure out what keys your environment passes to the policy server
+        # and then modify the mappings below so your dataset's keys get matched to those target keys.
+        # The repack transform simply remaps key names here.
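+        # As a hypothetical example: if your policy server receives the main camera under
+        # "observation/image" but your dataset stores that camera under a different key, you would
+        # point the "observation/image" entry in the mapping below at your dataset's key instead.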
         repack_transform = _transforms.Group(
             inputs=[
                 _transforms.RepackTransform(
                     {
@@ -268,22 +281,38 @@ class LeRobotLiberoDataConfig(DataConfigFactory):
             ]
         )
 
-        # Prepare data for policy training
-        # Convert images to uint8 numpy arrays, add masks
+        # The data transforms are applied to the data coming from the dataset *and* during inference.
+        # Below, we define the transforms for data going into the model (``inputs``) and the transforms
+        # for data coming out of the model (``outputs``) (the latter is only used during inference).
+        # We defined these transforms in `libero_policy.py`. You can check the detailed comments there for
+        # how to modify the transforms to match your dataset. Once you have created your own transforms, you can
+        # replace the transforms below with your own.
         data_transforms = _transforms.Group(
             inputs=[libero_policy.LiberoInputs(action_dim=model_config.action_dim, model_type=model_config.model_type)],
             outputs=[libero_policy.LiberoOutputs()],
         )
 
-        # Use delta actions (not for gripper)
-        delta_action_mask = _transforms.make_bool_mask(6, -1)
-        data_transforms = data_transforms.push(
-            inputs=[_transforms.DeltaActions(delta_action_mask)],
-            outputs=[_transforms.AbsoluteActions(delta_action_mask)],
-        )
+
+        # One additional data transform: pi0 models are trained on delta actions (relative to the first
+        # state in each action chunk). If your data has ``absolute`` actions (e.g. target joint angles),
+        # you can uncomment the following lines to convert the actions to delta actions. The only exception
+        # is the gripper actions, which are always absolute.
+        # In the example below, we would apply the delta conversion to the first 6 actions (joints) and
+        # leave the 7th action (gripper) unchanged, i.e. absolute.
+        # In Libero, the raw actions in the dataset are already delta actions, so we *do not* need to
+        # apply a separate delta conversion (that's why it's commented out). Choose whether to apply this
+        # transform based on whether your dataset uses ``absolute`` or ``delta`` actions out of the box.
+
+        # delta_action_mask = _transforms.make_bool_mask(6, -1)
+        # data_transforms = data_transforms.push(
+        #     inputs=[_transforms.DeltaActions(delta_action_mask)],
+        #     outputs=[_transforms.AbsoluteActions(delta_action_mask)],
+        # )
 
         # Model transforms include things like tokenizing the prompt and action targets
+        # You do not need to change anything here for your own dataset.
         model_transforms = ModelTransformFactory()(model_config)
 
+        # We return all data transforms for training and inference. No need to change anything here.
         return dataclasses.replace(
             self.create_base_config(assets_dirs),
             repack_transforms=repack_transform,
@@ -442,21 +471,41 @@ _CONFIGS = [
     #
     # Fine-tuning Libero configs.
     #
+    # These train configs define the hyperparameters for fine-tuning the base model on your own dataset.
+    # They are used to define key elements like the dataset you are training on, the base checkpoint you
+    # are using, and other hyperparameters like how many training steps to run or what learning rate to use.
+    # For your own dataset, you can copy this config and modify the dataset name and data transforms based on
+    # the comments below.
     TrainConfig(
+        # Change the name to reflect your model and dataset.
        name="pi0_libero",
+        # Here you define the model config -- in this example we use pi0 as the model
+        # architecture and perform *full* finetuning. In the examples below we show how to modify
+        # this to perform *low-memory* (LoRA) finetuning and use pi0-FAST as an alternative architecture.
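+        # (See the `pi0_libero_low_mem_finetune` and `pi0_fast_libero` configs below for those variants.)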
         model=pi0.Pi0Config(),
+        # Here you define the dataset you are training on. In this example we use the Libero
+        # dataset. For your own dataset, you can change the repo_id to point to your dataset.
+        # Also modify the DataConfig to use the new config you made for your dataset above.
         data=LeRobotLiberoDataConfig(
             repo_id="physical-intelligence/libero",
             base_config=DataConfig(
                 local_files_only=False,  # Set to True for local-only datasets.
+                # This flag determines whether we load the prompt (i.e. the task instruction) from the
+                # ``task`` field in the LeRobot dataset. If set to True, the prompt will show up in
+                # a field called ``prompt`` in the input dict. The recommended setting is True.
                 prompt_from_task=True,
             ),
         ),
+        # Here you define which pre-trained checkpoint you want to load to initialize the model.
+        # This should match the model config you chose above -- i.e. in this case we use the pi0 base model.
         weight_loader=weight_loaders.CheckpointWeightLoader("s3://openpi-assets/checkpoints/pi0_base/params"),
+        # Below you can define other hyperparameters like the learning rate, number of training steps, etc.
+        # Check the base TrainConfig class for a full list of available hyperparameters.
         num_train_steps=30_000,
     ),
     TrainConfig(
         name="pi0_libero_low_mem_finetune",
+        # Here is an example of loading a pi0 model for LoRA fine-tuning.
         model=pi0.Pi0Config(paligemma_variant="gemma_2b_lora", action_expert_variant="gemma_300m_lora"),
         data=LeRobotLiberoDataConfig(
             repo_id="physical-intelligence/libero",
@@ -467,13 +516,28 @@ _CONFIGS = [
         ),
         weight_loader=weight_loaders.CheckpointWeightLoader("s3://openpi-assets/checkpoints/pi0_base/params"),
         num_train_steps=30_000,
+        # The freeze filter defines which parameters should be frozen during training.
+        # We have a convenience function in the model config that returns the default freeze filter
+        # for the given model config for LoRA finetuning. Just make sure it matches the model config
+        # you chose above.
         freeze_filter=pi0.Pi0Config(
             paligemma_variant="gemma_2b_lora", action_expert_variant="gemma_300m_lora"
         ).get_freeze_filter(),
+        # Turn off EMA for LoRA finetuning.
         ema_decay=None,
     ),
     TrainConfig(
         name="pi0_fast_libero",
+        # Here is an example of loading a pi0-FAST model for full finetuning.
+        # Modify action_dim and action_horizon to match your dataset (action horizon is equal to
+        # the desired action chunk length).
+        # The max_token_len is the maximum number of (non-image) tokens the model can handle.
+        # This includes the tokenized prompt, proprioceptive state, and (FAST-tokenized) action tokens.
+        # Choosing this value too small may chop off tokens at the end of your sequence (the code will throw
+        # a warning), while choosing it too large will waste memory (since we pad each batch element to the
+        # max_token_len). A good rule of thumb is to use approx 180 for single-arm robots, and approx 250 for
+        # two-arm robots. Generally, err on the lower side here first, and potentially increase the value if
+        # you see many warnings being thrown during training.
         model=pi0_fast.Pi0FASTConfig(action_dim=7, action_horizon=10, max_token_len=180),
         data=LeRobotLiberoDataConfig(
             repo_id="physical-intelligence/libero",
@@ -482,12 +546,17 @@
             base_config=DataConfig(
                 local_files_only=False,  # Set to True for local-only datasets.
                 prompt_from_task=True,
             ),
         ),
+        # Note that we load the pi0-FAST base model checkpoint here.
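+        # (i.e. `pi0_fast_base` rather than the `pi0_base` checkpoint used by the pi0 configs above).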
         weight_loader=weight_loaders.CheckpointWeightLoader("s3://openpi-assets/checkpoints/pi0_fast_base/params"),
         num_train_steps=30_000,
     ),
     TrainConfig(
         name="pi0_fast_libero_low_mem_finetune",
-        model=pi0_fast.Pi0FASTConfig(paligemma_variant="gemma_2b_lora"),
+        # Here is an example of loading a pi0-FAST model for LoRA finetuning.
+        # For setting action_dim, action_horizon, and max_token_len, see the comments above.
+        model=pi0_fast.Pi0FASTConfig(
+            action_dim=7, action_horizon=10, max_token_len=180, paligemma_variant="gemma_2b_lora"
+        ),
         data=LeRobotLiberoDataConfig(
             repo_id="physical-intelligence/libero",
             base_config=DataConfig(
@@ -497,9 +566,12 @@
             ),
         ),
         weight_loader=weight_loaders.CheckpointWeightLoader("s3://openpi-assets/checkpoints/pi0_fast_base/params"),
         num_train_steps=30_000,
+        # Again, make sure to match the model config above when extracting the freeze filter
+        # that specifies which parameters should be frozen during LoRA finetuning.
         freeze_filter=pi0_fast.Pi0FASTConfig(
             action_dim=7, action_horizon=10, max_token_len=180, paligemma_variant="gemma_2b_lora"
         ).get_freeze_filter(),
+        # Turn off EMA for LoRA finetuning.
         ema_decay=None,
     ),
     #