diff --git a/policy/openpi-InternData-A1 b/policy/openpi-InternData-A1 deleted file mode 160000 index 10b4b8f..0000000 --- a/policy/openpi-InternData-A1 +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 10b4b8fd1354e2673ec4f657ae5aff025426d862 diff --git a/policy/openpi-InternData-A1/.dockerignore b/policy/openpi-InternData-A1/.dockerignore new file mode 100644 index 0000000..ec1aa77 --- /dev/null +++ b/policy/openpi-InternData-A1/.dockerignore @@ -0,0 +1,3 @@ +.venv +checkpoints +data diff --git a/policy/openpi-InternData-A1/.gitignore b/policy/openpi-InternData-A1/.gitignore new file mode 100644 index 0000000..9c83358 --- /dev/null +++ b/policy/openpi-InternData-A1/.gitignore @@ -0,0 +1,169 @@ +# Data directories. +assets/ +checkpoints/ +data/ +wandb/ +third_party/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ diff --git a/policy/openpi-InternData-A1/.pre-commit-config.yaml b/policy/openpi-InternData-A1/.pre-commit-config.yaml new file mode 100644 index 0000000..f98be5a --- /dev/null +++ b/policy/openpi-InternData-A1/.pre-commit-config.yaml @@ -0,0 +1,16 @@ +exclude: third_party/ + +repos: + - repo: https://github.com/astral-sh/uv-pre-commit + # uv version. + rev: 0.5.14 + hooks: + - id: uv-lock + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.8.6 + hooks: + # Run the linter. 
+ - id: ruff + args: [--fix] + - id: ruff-format \ No newline at end of file diff --git a/policy/openpi-InternData-A1/.python-version b/policy/openpi-InternData-A1/.python-version new file mode 100644 index 0000000..902b2c9 --- /dev/null +++ b/policy/openpi-InternData-A1/.python-version @@ -0,0 +1 @@ +3.11 \ No newline at end of file diff --git a/policy/openpi-InternData-A1/CONTRIBUTING.md b/policy/openpi-InternData-A1/CONTRIBUTING.md new file mode 100644 index 0000000..a8f3e80 --- /dev/null +++ b/policy/openpi-InternData-A1/CONTRIBUTING.md @@ -0,0 +1,33 @@ +# Contributing to openpi + +We welcome contributions, improvements, and modifications. Everyone is welcome to use openpi in accordance to the [license](LICENSE). Contributors are also welcome to submit bug reports, feature requests, and pull requests. We can't promise to approve every pull request, and we are a small team with limited bandwidth to review all requests, but we'll give it our best effort. Specifics are described below. + +## Issues and feature requests + +You are welcome to use the Github [discussion](https://github.com/Physical-Intelligence/openpi/discussions) feature if you would like to discuss something that is not directly reporting an issue or making a feature request. This is suitable for questions about how to use some aspect of openpi, or other topics. + +If you found a bug or other issue, please first check that the issue was not already reported (use the search bar on Github under Issues). 
If the issue has not yet been reported, please include this information when filing a Github issue: + +- Your OS type and version and the version of Python you are using +- Code that allows us to reproduce your bug, including all dependencies +- Traceback of any exception +- Any other information that would help us, such as a screenshot + +In order for us to address any issue, we must be able to reproduce it, so if you encountered the issue after making modifications to openpi, please reproduce the issue without any other modifications and provide a code snippet that allows us to quickly reproduce the problem on `main`. + +If you would like to submit a feature request, please check that the feature request does not already exist, and please provide the following information: + +- The motivation for the feature +- A description of the problem you are trying to solve or your use case +- Enough information for us to understand the nature of the request +- Some information for how you intend to use it (this might help us in understanding the motivation!) + +We can't promise to support every feature request, but it is helpful to us to know the use cases that you are interested in! + +## Submitting a pull request + +If you implemented support for a new robot or environment, or some other new feature, we welcome pull requests (PRs) to openpi. We encourage you to create a [feature request](https://github.com/Physical-Intelligence/openpi/issues) or make a post on the [discussion](https://github.com/Physical-Intelligence/openpi/discussions) board before starting to work on your PR, if you would like to get a sense for whether we are likely to approve your PR if it is submitted. 
Since we are a small team with limited ability to provide maintenance and support, we may not accept all PRs (e.g., if we believe it would make the code harder to maintain, or if reviewing the PR is out of scope for us), so contacting us in advance is a good way to get a sense for whether your PR is likely to get approved for merging into openpi directly. But even if it isn't, you are of course more than welcome to maintain your own fork with whatever modifications you would like. When creating PRs, we recommend every contribution to consider the following: + +- Make sure that your PR has a clear title and description +- Run `pre-commit` (install using `pre-commit install` first), and run `ruff check .` and `ruff format .` +- Make sure your PR passes all tests diff --git a/policy/openpi-InternData-A1/LICENSE b/policy/openpi-InternData-A1/LICENSE new file mode 100644 index 0000000..f49a4e1 --- /dev/null +++ b/policy/openpi-InternData-A1/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/policy/openpi-InternData-A1/README.md b/policy/openpi-InternData-A1/README.md new file mode 100644 index 0000000..a97a567 --- /dev/null +++ b/policy/openpi-InternData-A1/README.md @@ -0,0 +1,9 @@ +# openpi-InternData-A1 + +## Training + +For detailed instructions on pretraining with InterData-A1, finetuning on real-world tasks and sim2real transfer experiments, please refer to [`docs/training.md`](docs/training.md). + + +## Pretrained Checkpoints +We pretrained Pi0 model in on InternData-A1 for 680k iterations, initialized from PaliGemma checkpoint. The resulting pretrained ckpt is available [here](https://huggingface.co/yuyinyang3y/interndata-a1). diff --git a/policy/openpi-InternData-A1/docs/docker.md b/policy/openpi-InternData-A1/docs/docker.md new file mode 100644 index 0000000..6449278 --- /dev/null +++ b/policy/openpi-InternData-A1/docs/docker.md @@ -0,0 +1,25 @@ +### Docker Setup + +All of the examples in this repo provide instructions for being run normally, and also using Docker. Although not required, the Docker option is recommended as this will simplify software installation, produce a more stable environment, and also allow you to avoid installing ROS and cluttering your machine, for examples which depend on ROS. + +- Basic Docker installation instructions are [here](https://docs.docker.com/engine/install/). 
+- Docker must be installed in [rootless mode](https://docs.docker.com/engine/security/rootless/). +- To use your GPU you must also install the [NVIDIA container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html). +- The version of docker installed with `snap` is incompatible with the NVIDIA container toolkit, preventing it from accessing `libnvidia-ml.so` ([issue](https://github.com/NVIDIA/nvidia-container-toolkit/issues/154)). The snap version can be uninstalled with `sudo snap remove docker`. +- Docker Desktop is also incompatible with the NVIDIA runtime ([issue](https://github.com/NVIDIA/nvidia-container-toolkit/issues/229)). Docker Desktop can be uninstalled with `sudo apt remove docker-desktop`. + + +If starting from scratch and your host machine is Ubuntu 22.04, you can use accomplish all of the above with the convenience scripts `scripts/docker/install_docker_ubuntu22.sh` and `scripts/docker/install_nvidia_container_toolkit.sh`. + +Build the Docker image and start the container with the following command: +```bash +docker compose -f scripts/docker/compose.yml up --build +``` + +To build and run the Docker image for a specific example, use the following command: +```bash +docker compose -f examples//compose.yml up --build +``` +where `` is the name of the example you want to run. + +During the first run of any example, Docker will build the images. Go grab a coffee while this happens. Subsequent runs will be faster since the images are cached. \ No newline at end of file diff --git a/policy/openpi-InternData-A1/docs/norm_stats.md b/policy/openpi-InternData-A1/docs/norm_stats.md new file mode 100644 index 0000000..ca50b61 --- /dev/null +++ b/policy/openpi-InternData-A1/docs/norm_stats.md @@ -0,0 +1,179 @@ +# Normalization Statistics + +Here we provide instructions for computing **normalization statistics** for both **real-world**, **simulation (InternData-A1)** and **sim2real** tasks. 
The computed statistics are saved in JSON format and are intended to be reused during training and evaluation in the OpenPI pipeline. + +Normalization is computed over: +- `state` +- `actions` + +and follows the exact data preprocessing and repacking logic used during training. + +--- + +## 1. Simulation Tasks (InternData-A1) +This script `scripts/compute_norm_stats_sim.py` computes normalization statistics for simulation tasks in the InternData-A1 benchmark. + +### Supported Robots +- `split_aloha` +- `lift2` +- `genie1` +- `franka` + +### Dataset Structure +Download the InternData-A1 datasets from [here](https://huggingface.co/datasets/InternRobotics/InternData-A1). +The structure of the dataset is as follows: + +``` +InternData-A1/sim/ +└── / + └── / + └── / # no subtask + ├── data/ + ├── meta/ + └── videos/ +``` + +Some tasks may have subtasks / collections: + +``` +InternData-A1/sim/ +└── / + └── / + └── / + └── / + ├── data/ + ├── meta/ + └── videos/ +``` + +### Usage +``` +python scripts/compute_norm_stats_sim.py \ + --root_data_dir InternData-A1/sim \ + --task_category pick_and_place_tasks \ + --save_path stats/sim \ + --start_ratio 0.0 \ + --end_ratio 1.0 +``` + +Arguments +- `root_data_dir`: Root directory of simulation datasets. +- `task_category`: Task category to process (e.g. pick_and_place_tasks). +- `save_path`: Root directory where normalization statistics will be saved. +- `start_ratio`, `end_ratio`: Fraction of tasks to process (useful for sharding large datasets). + +### Output Structure +``` +/ +└── / + └── / + └── / + └── / # empty if no subtask + └── norm_stats.json +``` +During pretraining, set the `stats_dir` argument in `DataConfig` to the `save_path` here. + +## 2. Real-World Tasks +This script `scripts/compute_norm_stats_real.py` computes normalization statistics for real-world tasks. 
+ +### Supported Robots +- `lift2` +- `split_aloha` +- `acone` +- `genie1` + +### Dataset Structure +Real-world datasets are expected to follow the LeRobot repository structure: +``` +InternData-A1/real/ + └── / + └── / + └── / # empty if no subtask + ├── data/ + ├── meta/ + └── videos/ +``` + +Example task path: +``` +InternData-A1/real/genie1/ +└── Pick_a_bag_of_bread_with_the_left_arm__then_handover/set_0 +``` + +### Usage +``` +python scripts/compute_norm_stats_real.py \ + --task_path InternData-A1/real/genie1/Pick_a_bag_of_bread_with_the_left_arm__then_handover/* \ + --robot_name genie1 \ + --save_path stats/real +``` + +Arguments +- `task_path`: Path (or glob pattern) to a real-world task dataset(e.g. `InternData-A1/real/genie1/Pick_a_bag_of_bread_with_the_left_arm__then_handover/*`) +- `robot_name`: Robot platform name (must be supported). +- `save_path`: Root directory where normalization statistics will be saved. + +### Output Structure +``` +/ +└── / + └── / + └── norm_stats.json +``` +During finetuning, set the `fixed_stats_dir` argument in `DataConfig` to `//` here. + +## 3. Sim2Real Experiments +This script `scripts/compute_norm_stats_sim2real.py` computes normalization statistics for sim2real experiments. + +### Supported Robots +- `lift2` + +### Dataset Structure +Dataset from InternData-A1 are expected to follow the LeRobot repository structure: +``` +InternData-A1/sim/ + └── / + └── / + └── / + └── / + ├── data/ + ├── meta/ + └── videos/ +``` + +Example task path: +``` +InternData-A1/sim/long_horizon_tasks/lift2/ +└── sort_the_rubbish + └── Sort_rubbish_1l2r + └── Sort_rubbish_2l1r + └── Sort_rubbish_2l2r +``` + +### Usage +``` +python scripts/compute_norm_stats_sim2real.py \ + --task_path InternData-A1/sim/long_horizon_tasks/lift2/sort_the_rubbish/* \ + --robot_name lift2 \ + --save_path stats/sim2real +``` + +Arguments +- `task_path`: Path (or glob pattern) to a task dataset(e.g. 
`InternData-A1/sim/long_horizon_tasks/lift2/sort_the_rubbish/*` means training on all the collections in the task) +- `robot_name`: Robot platform name (we only support `lift2` for now, but you can try other robots). +- `save_path`: Root directory where normalization statistics will be saved. + +### Output Structure +``` +/ +└── / + └── / + └── norm_stats.json +``` +During finetuning, set the `fixed_stats_dir` argument in `DataConfig` to `//` here. + +## Implementation Notes + +For simulation tasks and sim2real experiments, computation may stop early (e.g. after 10k steps) to limit runtime. + +For sim2real transfer, we set the gripper dimension in the state vector to zero because the state of the gripper in the real world during inference is not aligned with the state in the simulation. See `src/openpi/policies/sim2real_split_aloha_policy.py` for more details. \ No newline at end of file diff --git a/policy/openpi-InternData-A1/docs/remote_inference.md b/policy/openpi-InternData-A1/docs/remote_inference.md new file mode 100644 index 0000000..c5e4848 --- /dev/null +++ b/policy/openpi-InternData-A1/docs/remote_inference.md @@ -0,0 +1,71 @@ + +# Running openpi models remotely + +We provide utilities for running openpi models remotely. This is useful for running inference on more powerful GPUs off-robot, and also helps keep the robot and policy environments separate (and e.g. avoid dependency hell with robot software). + +## Starting a remote policy server + +To start a remote policy server, you can simply run the following command: + +```bash +uv run scripts/serve_policy.py --env=[DROID | ALOHA | LIBERO] +``` + +The `env` argument specifies which $\pi_0$ checkpoint should be loaded. Under the hood, this script will execute a command like the following, which you can use to start a policy server, e.g. 
for checkpoints you trained yourself (here an example for the DROID environment): + +```bash +uv run scripts/serve_policy.py policy:checkpoint --policy.config=pi0_fast_droid --policy.dir=gs://openpi-assets/checkpoints/pi0_fast_droid +``` + +This will start a policy server that will serve the policy specified by the `config` and `dir` arguments. The policy will be served on the specified port (default: 8000). + +## Querying the remote policy server from your robot code + +We provide a client utility with minimal dependencies that you can easily embed into any robot codebase. + +First, install the `openpi-client` package in your robot environment: + +```bash +cd $OPENPI_ROOT/packages/openpi-client +pip install -e . +``` + +Then, you can use the client to query the remote policy server from your robot code. Here's an example of how to do this: + +```python +from openpi_client import image_tools +from openpi_client import websocket_client_policy + +# Outside of episode loop, initialize the policy client. +# Point to the host and port of the policy server (localhost and 8000 are the defaults). +client = websocket_client_policy.WebsocketClientPolicy(host="localhost", port=8000) + +for step in range(num_steps): + # Inside the episode loop, construct the observation. + # Resize images on the client side to minimize bandwidth / latency. Always return images in uint8 format. + # We provide utilities for resizing images + uint8 conversion so you match the training routines. + # The typical resize_size for pre-trained pi0 models is 224. + # Note that the proprioceptive `state` can be passed unnormalized, normalization will be handled on the server side. 
+ observation = { + "observation/image": image_tools.convert_to_uint8( + image_tools.resize_with_pad(img, 224, 224) + ), + "observation/wrist_image": image_tools.convert_to_uint8( + image_tools.resize_with_pad(wrist_img, 224, 224) + ), + "observation/state": state, + "prompt": task_instruction, + } + + # Call the policy server with the current observation. + # This returns an action chunk of shape (action_horizon, action_dim). + # Note that you typically only need to call the policy every N steps and execute steps + # from the predicted action chunk open-loop in the remaining steps. + action_chunk = client.infer(observation)["actions"] + + # Execute the actions in the environment. + ... + +``` + +Here, the `host` and `port` arguments specify the IP address and port of the remote policy server. You can also specify these as command-line arguments to your robot code, or hard-code them in your robot codebase. The `observation` is a dictionary of observations and the prompt, following the specification of the policy inputs for the policy you are serving. We have concrete examples of how to construct this dictionary for different environments in the [simple client example](examples/simple_client/main.py). diff --git a/policy/openpi-InternData-A1/docs/training.md b/policy/openpi-InternData-A1/docs/training.md new file mode 100644 index 0000000..e49f650 --- /dev/null +++ b/policy/openpi-InternData-A1/docs/training.md @@ -0,0 +1,102 @@ +# Training Instructions + +Here we provide instructions for pretraining on InternData-A1, finetuning on real-world tasks and finetuning on InternData-A1 tasks for sim2real transfer. + +Before training, you need to compute the normalization statistics for the tasks you want to train on. Please refer to [norm_stats.md](norm_stats.md) for more details. + +--- + +## 1. Pretraining on InternData-A1 + + +### Write a training config +We provide a `TrainConfig` example named `pretrain-interndata-a1` in `src/openpi/training/config.py`. 
+InternData-A1 contains four robot embodiments: +- `split_aloha` +- `lift2` +- `genie1` +- `franka` + +Accordingly, we define three `MultiDataConfigFactory` classes: +- `MultiSimSplitAlohaDataConfig` for `split_aloha` and `lift2` +- `MultiSimGenieDataConfig` for `genie1` +- `MultiSimFrankaDataConfig` for `franka` + +Please either: +- create a soft link from the InternData-A1 dataset to `data/InternData-A1`, or +- modify the `repo_dir` field in all relevant `MultiDataConfig` entries to point to your local InternData-A1 path. + +Set `stats_dir` to your local normalization statistics directory. If you use the default setting, ensure that the normalization statistics for simulation tasks are saved under `stats/sim`. + +We initialize the model from PaliGemma-3B using: +``` +weight_loader=weight_loaders.PaliGemmaWeightLoader("checkpoints/jax/paligemma/pt_224.npz") +``` +Please download the PaliGemma-3b checkpoint by running +``` +python scripts/download_paligemma.py +``` + +You may adjust other training parameters based on your available GPUs and training budget: +- `num_train_steps`: Total number of training steps +- `num_workers`: Number of data loading workers +- `fsdp_devices`: Number of GPUs per node +- `batch_size`: Batch size per GPU +- `save_interval`: Checkpoint saving interval (in steps) + +### Run training +For multi node training, run +``` +bash scripts/training_scripts/multi_node.sh +``` + +For single node multi-GPU training, run +``` +config_name=pretrain-interndata-a1 +bash scripts/training_scripts/single_node_multi_gpu.sh ${config_name} +``` + +The ckpts will be saved to `checkpoints/${config_name}`. + +## 2. Finetuning on Real-World Tasks +### Write a training config +We provide a `TrainConfig` example named `finetune-a2d-pen` in `src/openpi/training/config.py`. 
+
+Key arguments you may need to modify include:
+- `MultiDataConfigFactory` class:
+    - `MultiLeRobotReala2dDataConfig` for `genie1`
+    - `MultiLeRobotRealArxLift2DataConfig` for `lift2` and `acone`
+- `repo_dir`: Path to the real-world task dataset.
+- `robot_name`: the robot name in `repo_dir`, e.g. "genie1".
+- `fixed_stats_dir`: Path to the normalization statistics for the real-world task. When this is set, statistics from `stats_dir` will not be used.
+- `weight_loader`: Pretrained checkpoint used for initialization.
+You may download our pretrained checkpoints from [here](). <!-- TODO(review): checkpoint download link is missing -->
+
+### Run training
+For single-node multi-GPU training, run
+```
+config_name=finetune-a2d-pen
+bash scripts/training_scripts/single_node_multi_gpu.sh ${config_name}
+```
+
+The checkpoints will be saved under `checkpoints/${config_name}`.
+
+## 3. Finetuning on InternData-A1 Tasks for Sim2Real Transfer
+### Write a training config
+We provide a `TrainConfig` example named `finetune-sim2real-lift2-sort-rubbish` in `src/openpi/training/config.py`.
+
+Key arguments you may need to modify include:
+- `MultiDataConfigFactory` class: Currently, sim-to-real transfer is evaluated only on `lift2` tasks:
+    - `MultiSim2RealSplitAlohaDataConfig` for `lift2`
+- `repo_dir`: Path to the corresponding InternData-A1 task.
+- `fixed_stats_dir`: Path to the normalization statistics for the sim-to-real task. When specified, statistics from `stats_dir` will not be used.
+- `weight_loader`: Pretrained checkpoint used for initialization.
+ +### Run training +For training, run +For single node multi-GPU training, run +``` +config_name=finetune-sim2real-lift2-sort-rubbish +bash scripts/training_scripts/single_node_multi_gpu.sh ${config_name} +``` \ No newline at end of file diff --git a/policy/openpi-InternData-A1/examples/aloha_real/Dockerfile b/policy/openpi-InternData-A1/examples/aloha_real/Dockerfile new file mode 100644 index 0000000..0b6c65b --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_real/Dockerfile @@ -0,0 +1,70 @@ +# Dockerfile for the Aloha real environment. + +# Build the container: +# docker build . -t aloha_real -f examples/aloha_real/Dockerfile + +# Run the container: +# docker run --rm -it --network=host -v /dev:/dev -v .:/app --privileged aloha_real /bin/bash + +FROM ros:noetic-robot@sha256:7cf0b9f6546abeba308ea42cb7ad3453f3e520e1af57cdf179fe915c939674bc +SHELL ["/bin/bash", "-c"] + +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + cmake \ + curl \ + libffi-dev \ + python3-rosdep \ + python3-rosinstall \ + python3-rosinstall-generator \ + whiptail \ + git \ + wget \ + openssh-client \ + ros-noetic-cv-bridge \ + ros-noetic-usb-cam \ + ros-noetic-realsense2-camera \ + keyboard-configuration + +WORKDIR /root +RUN curl 'https://raw.githubusercontent.com/Interbotix/interbotix_ros_manipulators/main/interbotix_ros_xsarms/install/amd64/xsarm_amd64_install.sh' > xsarm_amd64_install.sh +RUN chmod +x xsarm_amd64_install.sh +RUN export TZ='America/Los_Angeles' && ./xsarm_amd64_install.sh -d noetic -n + +COPY ./third_party/aloha /root/interbotix_ws/src/aloha +RUN cd /root/interbotix_ws && source /opt/ros/noetic/setup.sh && source /root/interbotix_ws/devel/setup.sh && catkin_make + +# Install python 3.10 because this ROS image comes with 3.8 +RUN mkdir /python && \ + cd /python && \ + wget https://www.python.org/ftp/python/3.10.14/Python-3.10.14.tgz && \ + tar -zxvf Python-3.10.14.tgz && \ + cd Python-3.10.14 && \ + ls 
-lhR && \ + ./configure --enable-optimizations && \ + make install && \ + echo 'alias python3="/usr/local/bin/python3.10"' >> ~/.bashrc && \ + echo 'alias python="/usr/local/bin/python3.10"' >> ~/.bashrc && \ + cd ~ && rm -rf /python && \ + rm -rf /var/lib/apt/lists/* + +COPY --from=ghcr.io/astral-sh/uv:0.5.6 /uv /bin/uv +ENV UV_HTTP_TIMEOUT=120 +ENV UV_LINK_MODE=copy +COPY ./examples/aloha_real/requirements.txt /tmp/requirements.txt +COPY ./packages/openpi-client/pyproject.toml /tmp/openpi-client/pyproject.toml +RUN uv pip sync --python 3.10 --system /tmp/requirements.txt /tmp/openpi-client/pyproject.toml + +ENV PYTHONPATH=/app:/app/src:/app/packages/openpi-client/src:/root/interbotix_ws/src/aloha/aloha_scripts:/root/interbotix_ws/src/aloha +WORKDIR /app + +# Create an entrypoint script to run the setup commands, followed by the command passed in. +RUN cat <<'EOF' > /usr/local/bin/entrypoint.sh +#!/bin/bash +source /opt/ros/noetic/setup.sh && source /root/interbotix_ws/devel/setup.sh && "$@" +EOF +RUN chmod +x /usr/local/bin/entrypoint.sh + +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] +CMD ["python3", "/app/examples/aloha_real/main.py"] diff --git a/policy/openpi-InternData-A1/examples/aloha_real/README.md b/policy/openpi-InternData-A1/examples/aloha_real/README.md new file mode 100644 index 0000000..b4da86d --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_real/README.md @@ -0,0 +1,126 @@ +# Run Aloha (Real Robot) + +This example demonstrates how to run with a real robot using an [ALOHA setup](https://github.com/tonyzhaozh/aloha). See [here](../../docs/remote_inference.md) for instructions on how to load checkpoints and run inference. We list the relevant checkpoint paths for each provided fine-tuned model below. + +## Prerequisites + +This repo uses a fork of the ALOHA repo, with very minor modifications to use Realsense cameras. + +1. 
Follow the [hardware installation instructions](https://github.com/tonyzhaozh/aloha?tab=readme-ov-file#hardware-installation) in the ALOHA repo. +1. Modify the `third_party/aloha/aloha_scripts/realsense_publisher.py` file to use serial numbers for your cameras. + +## With Docker + +```bash +export SERVER_ARGS="--env ALOHA --default_prompt='take the toast out of the toaster'" +docker compose -f examples/aloha_real/compose.yml up --build +``` + +## Without Docker + +Terminal window 1: + +```bash +# Create virtual environment +uv venv --python 3.10 examples/aloha_real/.venv +source examples/aloha_real/.venv/bin/activate +uv pip sync examples/aloha_real/requirements.txt +uv pip install -e packages/openpi-client + +# Run the robot +python -m examples.aloha_real.main +``` + +Terminal window 2: + +```bash +roslaunch aloha ros_nodes.launch +``` + +Terminal window 3: + +```bash +uv run scripts/serve_policy.py --env ALOHA --default_prompt='take the toast out of the toaster' +``` + +## **ALOHA Checkpoint Guide** + + +The `pi0_base` model can be used in zero shot for a simple task on the ALOHA platform, and we additionally provide two example fine-tuned checkpoints, “fold the towel” and “open the tupperware and put the food on the plate,” which can perform more advanced tasks on the ALOHA. + +While we’ve found the policies to work in unseen conditions across multiple ALOHA stations, we provide some pointers here on how best to set up scenes to maximize the chance of policy success. We cover the prompts to use for the policies, objects we’ve seen it work well on, and well-represented initial state distributions. Running these policies in zero shot is still a very experimental feature, and there is no guarantee that they will work on your robot. The recommended way to use `pi0_base` is by finetuning with data from the target robot. + + +--- + +### **Toast Task** + +This task involves the robot taking two pieces of toast out of a toaster and placing them on a plate. 
+ +- **Checkpoint path**: `gs://openpi-assets/checkpoints/pi0_base` +- **Prompt**: "take the toast out of the toaster" +- **Objects needed**: Two pieces of toast, a plate, and a standard toaster. +- **Object Distribution**: + - Works on both real toast and rubber fake toast + - Compatible with standard 2-slice toasters + - Works with plates of varying colors + +### **Scene Setup Guidelines** +Screenshot 2025-01-31 at 10 06 02 PM + +- The toaster should be positioned in the top-left quadrant of the workspace. +- Both pieces of toast should start inside the toaster, with at least 1 cm of bread sticking out from the top. +- The plate should be placed roughly in the lower-center of the workspace. +- Works with both natural and synthetic lighting, but avoid making the scene too dark (e.g., don't place the setup inside an enclosed space or under a curtain). + + +### **Towel Task** + +This task involves folding a small towel (e.g., roughly the size of a hand towel) into eighths. + +- **Checkpoint path**: `gs://openpi-assets/checkpoints/pi0_aloha_towel` +- **Prompt**: "fold the towel" +- **Object Distribution**: + - Works on towels of varying solid colors + - Performance is worse on heavily textured or striped towels + +### **Scene Setup Guidelines** +Screenshot 2025-01-31 at 10 01 15 PM + +- The towel should be flattened and roughly centered on the table. +- Choose a towel that does not blend in with the table surface. + + +### **Tupperware Task** + +This task involves opening a tupperware filled with food and pouring the contents onto a plate. + +- **Checkpoint path**: `gs://openpi-assets/checkpoints/pi0_aloha_tupperware` +- **Prompt**: "open the tupperware and put the food on the plate" +- **Objects needed**: Tupperware, food (or food-like items), and a plate. +- **Object Distribution**: + - Works on various types of fake food (e.g., fake chicken nuggets, fries, and fried chicken). 
+ - Compatible with tupperware of different lid colors and shapes, with best performance on square tupperware with a corner flap (see images below). + - The policy has seen plates of varying solid colors. + +### **Scene Setup Guidelines** +Screenshot 2025-01-31 at 10 02 27 PM + +- Best performance observed when both the tupperware and plate are roughly centered in the workspace. +- Positioning: + - Tupperware should be on the left. + - Plate should be on the right or bottom. + - The tupperware flap should point toward the plate. + +## Training on your own Aloha dataset + +1. Convert the dataset to the LeRobot dataset v2.0 format. + + We provide a script [convert_aloha_data_to_lerobot.py](./convert_aloha_data_to_lerobot.py) that converts the dataset to the LeRobot dataset v2.0 format. As an example we have converted the `aloha_pen_uncap_diverse_raw` dataset from the [BiPlay repo](https://huggingface.co/datasets/oier-mees/BiPlay/tree/main/aloha_pen_uncap_diverse_raw) and uploaded it to the HuggingFace Hub as [physical-intelligence/aloha_pen_uncap_diverse](https://huggingface.co/datasets/physical-intelligence/aloha_pen_uncap_diverse). + + +2. Define a training config that uses the custom dataset. + + We provide the [pi0_aloha_pen_uncap config](../../src/openpi/training/config.py) as an example. You should refer to the root [README](../../README.md) for how to run training with the new config. + +IMPORTANT: Our base checkpoint includes normalization stats from various common robot configurations. When fine-tuning a base checkpoint with a custom dataset from one of these configurations, we recommend using the corresponding normalization stats provided in the base checkpoint. In the example, this is done by specifying the trossen asset_id and a path to the pretrained checkpoint’s asset directory within the AssetsConfig. 
diff --git a/policy/openpi-InternData-A1/examples/aloha_real/compose.yml b/policy/openpi-InternData-A1/examples/aloha_real/compose.yml new file mode 100644 index 0000000..4e1e4ba --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_real/compose.yml @@ -0,0 +1,66 @@ +# Run with: +# docker compose -f examples/aloha_real/compose.yml up --build +services: + runtime: + image: aloha_real + depends_on: + - aloha_ros_nodes + - ros_master + - openpi_server + build: + context: ../.. + dockerfile: examples/aloha_real/Dockerfile + init: true + tty: true + network_mode: host + privileged: true + volumes: + - $PWD:/app + - ../../data:/data + + aloha_ros_nodes: + image: aloha_real + depends_on: + - ros_master + build: + context: ../.. + dockerfile: examples/aloha_real/Dockerfile + init: true + tty: true + network_mode: host + privileged: true + volumes: + - /dev:/dev + command: roslaunch --wait aloha ros_nodes.launch + + ros_master: + image: ros:noetic-robot + network_mode: host + privileged: true + command: + - roscore + + openpi_server: + image: openpi_server + build: + context: ../.. + dockerfile: scripts/docker/serve_policy.Dockerfile + init: true + tty: true + network_mode: host + volumes: + - $PWD:/app + - ${OPENPI_DATA_HOME:-~/.cache/openpi}:/openpi_assets + environment: + - SERVER_ARGS + - OPENPI_DATA_HOME=/openpi_assets + - IS_DOCKER=true + + # Comment out this block if not running on a machine with GPUs. + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: 1 + capabilities: [gpu] diff --git a/policy/openpi-InternData-A1/examples/aloha_real/constants.py b/policy/openpi-InternData-A1/examples/aloha_real/constants.py new file mode 100644 index 0000000..a2ea59f --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_real/constants.py @@ -0,0 +1,71 @@ +# Ignore lint errors because this file is mostly copied from ACT (https://github.com/tonyzhaozh/act). 
+# ruff: noqa + +### Task parameters + +### ALOHA fixed constants +DT = 0.001 +JOINT_NAMES = ["waist", "shoulder", "elbow", "forearm_roll", "wrist_angle", "wrist_rotate"] +START_ARM_POSE = [0, -0.96, 1.16, 0, -0.3, 0, 0.02239, -0.02239, 0, -0.96, 1.16, 0, -0.3, 0, 0.02239, -0.02239] + +# Left finger position limits (qpos[7]), right_finger = -1 * left_finger +MASTER_GRIPPER_POSITION_OPEN = 0.02417 +MASTER_GRIPPER_POSITION_CLOSE = 0.01244 +PUPPET_GRIPPER_POSITION_OPEN = 0.05800 +PUPPET_GRIPPER_POSITION_CLOSE = 0.01844 + +# Gripper joint limits (qpos[6]) +MASTER_GRIPPER_JOINT_OPEN = 0.3083 +MASTER_GRIPPER_JOINT_CLOSE = -0.6842 +PUPPET_GRIPPER_JOINT_OPEN = 1.4910 +PUPPET_GRIPPER_JOINT_CLOSE = -0.6213 + +############################ Helper functions ############################ + +MASTER_GRIPPER_POSITION_NORMALIZE_FN = lambda x: (x - MASTER_GRIPPER_POSITION_CLOSE) / ( + MASTER_GRIPPER_POSITION_OPEN - MASTER_GRIPPER_POSITION_CLOSE +) +PUPPET_GRIPPER_POSITION_NORMALIZE_FN = lambda x: (x - PUPPET_GRIPPER_POSITION_CLOSE) / ( + PUPPET_GRIPPER_POSITION_OPEN - PUPPET_GRIPPER_POSITION_CLOSE +) +MASTER_GRIPPER_POSITION_UNNORMALIZE_FN = ( + lambda x: x * (MASTER_GRIPPER_POSITION_OPEN - MASTER_GRIPPER_POSITION_CLOSE) + MASTER_GRIPPER_POSITION_CLOSE +) +PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN = ( + lambda x: x * (PUPPET_GRIPPER_POSITION_OPEN - PUPPET_GRIPPER_POSITION_CLOSE) + PUPPET_GRIPPER_POSITION_CLOSE +) +MASTER2PUPPET_POSITION_FN = lambda x: PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(MASTER_GRIPPER_POSITION_NORMALIZE_FN(x)) + +MASTER_GRIPPER_JOINT_NORMALIZE_FN = lambda x: (x - MASTER_GRIPPER_JOINT_CLOSE) / ( + MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE +) +PUPPET_GRIPPER_JOINT_NORMALIZE_FN = lambda x: (x - PUPPET_GRIPPER_JOINT_CLOSE) / ( + PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE +) +MASTER_GRIPPER_JOINT_UNNORMALIZE_FN = ( + lambda x: x * (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE) + MASTER_GRIPPER_JOINT_CLOSE +) 
+PUPPET_GRIPPER_JOINT_UNNORMALIZE_FN = ( + lambda x: x * (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE) + PUPPET_GRIPPER_JOINT_CLOSE +) +MASTER2PUPPET_JOINT_FN = lambda x: PUPPET_GRIPPER_JOINT_UNNORMALIZE_FN(MASTER_GRIPPER_JOINT_NORMALIZE_FN(x)) + +MASTER_GRIPPER_VELOCITY_NORMALIZE_FN = lambda x: x / (MASTER_GRIPPER_POSITION_OPEN - MASTER_GRIPPER_POSITION_CLOSE) +PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN = lambda x: x / (PUPPET_GRIPPER_POSITION_OPEN - PUPPET_GRIPPER_POSITION_CLOSE) + +MASTER_POS2JOINT = ( + lambda x: MASTER_GRIPPER_POSITION_NORMALIZE_FN(x) * (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE) + + MASTER_GRIPPER_JOINT_CLOSE +) +MASTER_JOINT2POS = lambda x: MASTER_GRIPPER_POSITION_UNNORMALIZE_FN( + (x - MASTER_GRIPPER_JOINT_CLOSE) / (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE) +) +PUPPET_POS2JOINT = ( + lambda x: PUPPET_GRIPPER_POSITION_NORMALIZE_FN(x) * (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE) + + PUPPET_GRIPPER_JOINT_CLOSE +) +PUPPET_JOINT2POS = lambda x: PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN( + (x - PUPPET_GRIPPER_JOINT_CLOSE) / (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE) +) + +MASTER_GRIPPER_JOINT_MID = (MASTER_GRIPPER_JOINT_OPEN + MASTER_GRIPPER_JOINT_CLOSE) / 2 diff --git a/policy/openpi-InternData-A1/examples/aloha_real/convert_aloha_data_to_lerobot.py b/policy/openpi-InternData-A1/examples/aloha_real/convert_aloha_data_to_lerobot.py new file mode 100644 index 0000000..a3a8ddc --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_real/convert_aloha_data_to_lerobot.py @@ -0,0 +1,272 @@ +""" +Script to convert Aloha hdf5 data to the LeRobot dataset v2.0 format. 
+ +Example usage: uv run examples/aloha_real/convert_aloha_data_to_lerobot.py --raw-dir /path/to/raw/data --repo-id / +""" + +import dataclasses +from pathlib import Path +import shutil +from typing import Literal + +import h5py +from lerobot.common.datasets.lerobot_dataset import LEROBOT_HOME +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +from lerobot.common.datasets.push_dataset_to_hub._download_raw import download_raw +import numpy as np +import torch +import tqdm +import tyro + + +@dataclasses.dataclass(frozen=True) +class DatasetConfig: + use_videos: bool = True + tolerance_s: float = 0.0001 + image_writer_processes: int = 10 + image_writer_threads: int = 5 + video_backend: str | None = None + + +DEFAULT_DATASET_CONFIG = DatasetConfig() + + +def create_empty_dataset( + repo_id: str, + robot_type: str, + mode: Literal["video", "image"] = "video", + *, + has_velocity: bool = False, + has_effort: bool = False, + dataset_config: DatasetConfig = DEFAULT_DATASET_CONFIG, +) -> LeRobotDataset: + motors = [ + "right_waist", + "right_shoulder", + "right_elbow", + "right_forearm_roll", + "right_wrist_angle", + "right_wrist_rotate", + "right_gripper", + "left_waist", + "left_shoulder", + "left_elbow", + "left_forearm_roll", + "left_wrist_angle", + "left_wrist_rotate", + "left_gripper", + ] + cameras = [ + "cam_high", + "cam_low", + "cam_left_wrist", + "cam_right_wrist", + ] + + features = { + "observation.state": { + "dtype": "float32", + "shape": (len(motors),), + "names": [ + motors, + ], + }, + "action": { + "dtype": "float32", + "shape": (len(motors),), + "names": [ + motors, + ], + }, + } + + if has_velocity: + features["observation.velocity"] = { + "dtype": "float32", + "shape": (len(motors),), + "names": [ + motors, + ], + } + + if has_effort: + features["observation.effort"] = { + "dtype": "float32", + "shape": (len(motors),), + "names": [ + motors, + ], + } + + for cam in cameras: + features[f"observation.images.{cam}"] = { + "dtype": mode, 
+ "shape": (3, 480, 640), + "names": [ + "channels", + "height", + "width", + ], + } + + if Path(LEROBOT_HOME / repo_id).exists(): + shutil.rmtree(LEROBOT_HOME / repo_id) + + return LeRobotDataset.create( + repo_id=repo_id, + fps=50, + robot_type=robot_type, + features=features, + use_videos=dataset_config.use_videos, + tolerance_s=dataset_config.tolerance_s, + image_writer_processes=dataset_config.image_writer_processes, + image_writer_threads=dataset_config.image_writer_threads, + video_backend=dataset_config.video_backend, + ) + + +def get_cameras(hdf5_files: list[Path]) -> list[str]: + with h5py.File(hdf5_files[0], "r") as ep: + # ignore depth channel, not currently handled + return [key for key in ep["/observations/images"].keys() if "depth" not in key] # noqa: SIM118 + + +def has_velocity(hdf5_files: list[Path]) -> bool: + with h5py.File(hdf5_files[0], "r") as ep: + return "/observations/qvel" in ep + + +def has_effort(hdf5_files: list[Path]) -> bool: + with h5py.File(hdf5_files[0], "r") as ep: + return "/observations/effort" in ep + + +def load_raw_images_per_camera(ep: h5py.File, cameras: list[str]) -> dict[str, np.ndarray]: + imgs_per_cam = {} + for camera in cameras: + uncompressed = ep[f"/observations/images/{camera}"].ndim == 4 + + if uncompressed: + # load all images in RAM + imgs_array = ep[f"/observations/images/{camera}"][:] + else: + import cv2 + + # load one compressed image after the other in RAM and uncompress + imgs_array = [] + for data in ep[f"/observations/images/{camera}"]: + imgs_array.append(cv2.cvtColor(cv2.imdecode(data, 1), cv2.COLOR_BGR2RGB)) + imgs_array = np.array(imgs_array) + + imgs_per_cam[camera] = imgs_array + return imgs_per_cam + + +def load_raw_episode_data( + ep_path: Path, +) -> tuple[dict[str, np.ndarray], torch.Tensor, torch.Tensor, torch.Tensor | None, torch.Tensor | None]: + with h5py.File(ep_path, "r") as ep: + state = torch.from_numpy(ep["/observations/qpos"][:]) + action = torch.from_numpy(ep["/action"][:]) + + 
velocity = None + if "/observations/qvel" in ep: + velocity = torch.from_numpy(ep["/observations/qvel"][:]) + + effort = None + if "/observations/effort" in ep: + effort = torch.from_numpy(ep["/observations/effort"][:]) + + imgs_per_cam = load_raw_images_per_camera( + ep, + [ + "cam_high", + "cam_low", + "cam_left_wrist", + "cam_right_wrist", + ], + ) + + return imgs_per_cam, state, action, velocity, effort + + +def populate_dataset( + dataset: LeRobotDataset, + hdf5_files: list[Path], + task: str, + episodes: list[int] | None = None, +) -> LeRobotDataset: + if episodes is None: + episodes = range(len(hdf5_files)) + + for ep_idx in tqdm.tqdm(episodes): + ep_path = hdf5_files[ep_idx] + + imgs_per_cam, state, action, velocity, effort = load_raw_episode_data(ep_path) + num_frames = state.shape[0] + + for i in range(num_frames): + frame = { + "observation.state": state[i], + "action": action[i], + } + + for camera, img_array in imgs_per_cam.items(): + frame[f"observation.images.{camera}"] = img_array[i] + + if velocity is not None: + frame["observation.velocity"] = velocity[i] + if effort is not None: + frame["observation.effort"] = effort[i] + + dataset.add_frame(frame) + + dataset.save_episode(task=task) + + return dataset + + +def port_aloha( + raw_dir: Path, + repo_id: str, + raw_repo_id: str | None = None, + task: str = "DEBUG", + *, + episodes: list[int] | None = None, + push_to_hub: bool = True, + is_mobile: bool = False, + mode: Literal["video", "image"] = "image", + dataset_config: DatasetConfig = DEFAULT_DATASET_CONFIG, +): + if (LEROBOT_HOME / repo_id).exists(): + shutil.rmtree(LEROBOT_HOME / repo_id) + + if not raw_dir.exists(): + if raw_repo_id is None: + raise ValueError("raw_repo_id must be provided if raw_dir does not exist") + download_raw(raw_dir, repo_id=raw_repo_id) + + hdf5_files = sorted(raw_dir.glob("episode_*.hdf5")) + + dataset = create_empty_dataset( + repo_id, + robot_type="mobile_aloha" if is_mobile else "aloha", + mode=mode, + 
has_effort=has_effort(hdf5_files), + has_velocity=has_velocity(hdf5_files), + dataset_config=dataset_config, + ) + dataset = populate_dataset( + dataset, + hdf5_files, + task=task, + episodes=episodes, + ) + dataset.consolidate() + + if push_to_hub: + dataset.push_to_hub() + + +if __name__ == "__main__": + tyro.cli(port_aloha) diff --git a/policy/openpi-InternData-A1/examples/aloha_real/env.py b/policy/openpi-InternData-A1/examples/aloha_real/env.py new file mode 100644 index 0000000..399092f --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_real/env.py @@ -0,0 +1,57 @@ +from typing import List, Optional # noqa: UP035 + +import einops +from openpi_client import image_tools +from openpi_client.runtime import environment as _environment +from typing_extensions import override + +from examples.aloha_real import real_env as _real_env + + +class AlohaRealEnvironment(_environment.Environment): + """An environment for an Aloha robot on real hardware.""" + + def __init__( + self, + reset_position: Optional[List[float]] = None, # noqa: UP006,UP007 + render_height: int = 224, + render_width: int = 224, + ) -> None: + self._env = _real_env.make_real_env(init_node=True, reset_position=reset_position) + self._render_height = render_height + self._render_width = render_width + + self._ts = None + + @override + def reset(self) -> None: + self._ts = self._env.reset() + + @override + def is_episode_complete(self) -> bool: + return False + + @override + def get_observation(self) -> dict: + if self._ts is None: + raise RuntimeError("Timestep is not set. 
Call reset() first.") + + obs = self._ts.observation + for k in list(obs["images"].keys()): + if "_depth" in k: + del obs["images"][k] + + for cam_name in obs["images"]: + img = image_tools.convert_to_uint8( + image_tools.resize_with_pad(obs["images"][cam_name], self._render_height, self._render_width) + ) + obs["images"][cam_name] = einops.rearrange(img, "h w c -> c h w") + + return { + "state": obs["qpos"], + "images": obs["images"], + } + + @override + def apply_action(self, action: dict) -> None: + self._ts = self._env.step(action["actions"]) diff --git a/policy/openpi-InternData-A1/examples/aloha_real/main.py b/policy/openpi-InternData-A1/examples/aloha_real/main.py new file mode 100644 index 0000000..25a0631 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_real/main.py @@ -0,0 +1,51 @@ +import dataclasses +import logging + +from openpi_client import action_chunk_broker +from openpi_client import websocket_client_policy as _websocket_client_policy +from openpi_client.runtime import runtime as _runtime +from openpi_client.runtime.agents import policy_agent as _policy_agent +import tyro + +from examples.aloha_real import env as _env + + +@dataclasses.dataclass +class Args: + host: str = "0.0.0.0" + port: int = 8000 + + action_horizon: int = 25 + + num_episodes: int = 1 + max_episode_steps: int = 1000 + + +def main(args: Args) -> None: + ws_client_policy = _websocket_client_policy.WebsocketClientPolicy( + host=args.host, + port=args.port, + ) + logging.info(f"Server metadata: {ws_client_policy.get_server_metadata()}") + + metadata = ws_client_policy.get_server_metadata() + runtime = _runtime.Runtime( + environment=_env.AlohaRealEnvironment(reset_position=metadata.get("reset_pose")), + agent=_policy_agent.PolicyAgent( + policy=action_chunk_broker.ActionChunkBroker( + policy=ws_client_policy, + action_horizon=args.action_horizon, + ) + ), + subscribers=[], + max_hz=50, + num_episodes=args.num_episodes, + max_episode_steps=args.max_episode_steps, + ) + 
+ runtime.run() + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO, force=True) + tyro.cli(main) diff --git a/policy/openpi-InternData-A1/examples/aloha_real/real_env.py b/policy/openpi-InternData-A1/examples/aloha_real/real_env.py new file mode 100644 index 0000000..2073d83 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_real/real_env.py @@ -0,0 +1,176 @@ +# Ignore lint errors because this file is mostly copied from ACT (https://github.com/tonyzhaozh/act). +# ruff: noqa +import collections +import time +from typing import Optional, List +import dm_env +from interbotix_xs_modules.arm import InterbotixManipulatorXS +from interbotix_xs_msgs.msg import JointSingleCommand +import numpy as np + +from examples.aloha_real import constants +from examples.aloha_real import robot_utils + +# This is the reset position that is used by the standard Aloha runtime. +DEFAULT_RESET_POSITION = [0, -0.96, 1.16, 0, -0.3, 0] + + +class RealEnv: + """ + Environment for real robot bi-manual manipulation + Action space: [left_arm_qpos (6), # absolute joint position + left_gripper_positions (1), # normalized gripper position (0: close, 1: open) + right_arm_qpos (6), # absolute joint position + right_gripper_positions (1),] # normalized gripper position (0: close, 1: open) + + Observation space: {"qpos": Concat[ left_arm_qpos (6), # absolute joint position + left_gripper_position (1), # normalized gripper position (0: close, 1: open) + right_arm_qpos (6), # absolute joint position + right_gripper_qpos (1)] # normalized gripper position (0: close, 1: open) + "qvel": Concat[ left_arm_qvel (6), # absolute joint velocity (rad) + left_gripper_velocity (1), # normalized gripper velocity (pos: opening, neg: closing) + right_arm_qvel (6), # absolute joint velocity (rad) + right_gripper_qvel (1)] # normalized gripper velocity (pos: opening, neg: closing) + "images": {"cam_high": (480x640x3), # h, w, c, dtype='uint8' + "cam_low": (480x640x3), # h, w, c, 
dtype='uint8' + "cam_left_wrist": (480x640x3), # h, w, c, dtype='uint8' + "cam_right_wrist": (480x640x3)} # h, w, c, dtype='uint8' + """ + + def __init__(self, init_node, *, reset_position: Optional[List[float]] = None, setup_robots: bool = True): + # reset_position = START_ARM_POSE[:6] + self._reset_position = reset_position[:6] if reset_position else DEFAULT_RESET_POSITION + + self.puppet_bot_left = InterbotixManipulatorXS( + robot_model="vx300s", + group_name="arm", + gripper_name="gripper", + robot_name="puppet_left", + init_node=init_node, + ) + self.puppet_bot_right = InterbotixManipulatorXS( + robot_model="vx300s", group_name="arm", gripper_name="gripper", robot_name="puppet_right", init_node=False + ) + if setup_robots: + self.setup_robots() + + self.recorder_left = robot_utils.Recorder("left", init_node=False) + self.recorder_right = robot_utils.Recorder("right", init_node=False) + self.image_recorder = robot_utils.ImageRecorder(init_node=False) + self.gripper_command = JointSingleCommand(name="gripper") + + def setup_robots(self): + robot_utils.setup_puppet_bot(self.puppet_bot_left) + robot_utils.setup_puppet_bot(self.puppet_bot_right) + + def get_qpos(self): + left_qpos_raw = self.recorder_left.qpos + right_qpos_raw = self.recorder_right.qpos + left_arm_qpos = left_qpos_raw[:6] + right_arm_qpos = right_qpos_raw[:6] + left_gripper_qpos = [ + constants.PUPPET_GRIPPER_POSITION_NORMALIZE_FN(left_qpos_raw[7]) + ] # this is position not joint + right_gripper_qpos = [ + constants.PUPPET_GRIPPER_POSITION_NORMALIZE_FN(right_qpos_raw[7]) + ] # this is position not joint + return np.concatenate([left_arm_qpos, left_gripper_qpos, right_arm_qpos, right_gripper_qpos]) + + def get_qvel(self): + left_qvel_raw = self.recorder_left.qvel + right_qvel_raw = self.recorder_right.qvel + left_arm_qvel = left_qvel_raw[:6] + right_arm_qvel = right_qvel_raw[:6] + left_gripper_qvel = [constants.PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN(left_qvel_raw[7])] + right_gripper_qvel = 
[constants.PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN(right_qvel_raw[7])] + return np.concatenate([left_arm_qvel, left_gripper_qvel, right_arm_qvel, right_gripper_qvel]) + + def get_effort(self): + left_effort_raw = self.recorder_left.effort + right_effort_raw = self.recorder_right.effort + left_robot_effort = left_effort_raw[:7] + right_robot_effort = right_effort_raw[:7] + return np.concatenate([left_robot_effort, right_robot_effort]) + + def get_images(self): + return self.image_recorder.get_images() + + def set_gripper_pose(self, left_gripper_desired_pos_normalized, right_gripper_desired_pos_normalized): + left_gripper_desired_joint = constants.PUPPET_GRIPPER_JOINT_UNNORMALIZE_FN(left_gripper_desired_pos_normalized) + self.gripper_command.cmd = left_gripper_desired_joint + self.puppet_bot_left.gripper.core.pub_single.publish(self.gripper_command) + + right_gripper_desired_joint = constants.PUPPET_GRIPPER_JOINT_UNNORMALIZE_FN( + right_gripper_desired_pos_normalized + ) + self.gripper_command.cmd = right_gripper_desired_joint + self.puppet_bot_right.gripper.core.pub_single.publish(self.gripper_command) + + def _reset_joints(self): + robot_utils.move_arms( + [self.puppet_bot_left, self.puppet_bot_right], [self._reset_position, self._reset_position], move_time=1 + ) + + def _reset_gripper(self): + """Set to position mode and do position resets: first close then open. Then change back to PWM mode + + NOTE: This diverges from the original Aloha code which first opens then closes the gripper. Pi internal aloha data + was collected with the gripper starting in the open position. Leaving the grippers fully closed was also found to + increase the frequency of motor faults. 
+ """ + robot_utils.move_grippers( + [self.puppet_bot_left, self.puppet_bot_right], [constants.PUPPET_GRIPPER_JOINT_CLOSE] * 2, move_time=1 + ) + robot_utils.move_grippers( + [self.puppet_bot_left, self.puppet_bot_right], [constants.PUPPET_GRIPPER_JOINT_OPEN] * 2, move_time=0.5 + ) + + def get_observation(self): + obs = collections.OrderedDict() + obs["qpos"] = self.get_qpos() + obs["qvel"] = self.get_qvel() + obs["effort"] = self.get_effort() + obs["images"] = self.get_images() + return obs + + def get_reward(self): + return 0 + + def reset(self, *, fake=False): + if not fake: + # Reboot puppet robot gripper motors + self.puppet_bot_left.dxl.robot_reboot_motors("single", "gripper", True) + self.puppet_bot_right.dxl.robot_reboot_motors("single", "gripper", True) + self._reset_joints() + self._reset_gripper() + return dm_env.TimeStep( + step_type=dm_env.StepType.FIRST, reward=self.get_reward(), discount=None, observation=self.get_observation() + ) + + def step(self, action): + state_len = int(len(action) / 2) + left_action = action[:state_len] + right_action = action[state_len:] + self.puppet_bot_left.arm.set_joint_positions(left_action[:6], blocking=False) + self.puppet_bot_right.arm.set_joint_positions(right_action[:6], blocking=False) + self.set_gripper_pose(left_action[-1], right_action[-1]) + time.sleep(constants.DT) + return dm_env.TimeStep( + step_type=dm_env.StepType.MID, reward=self.get_reward(), discount=None, observation=self.get_observation() + ) + + +def get_action(master_bot_left, master_bot_right): + action = np.zeros(14) # 6 joint + 1 gripper, for two arms + # Arm actions + action[:6] = master_bot_left.dxl.joint_states.position[:6] + action[7 : 7 + 6] = master_bot_right.dxl.joint_states.position[:6] + # Gripper actions + action[6] = constants.MASTER_GRIPPER_JOINT_NORMALIZE_FN(master_bot_left.dxl.joint_states.position[6]) + action[7 + 6] = constants.MASTER_GRIPPER_JOINT_NORMALIZE_FN(master_bot_right.dxl.joint_states.position[6]) + + return action + + 
+def make_real_env(init_node, *, reset_position: Optional[List[float]] = None, setup_robots: bool = True) -> RealEnv: + return RealEnv(init_node, reset_position=reset_position, setup_robots=setup_robots) diff --git a/policy/openpi-InternData-A1/examples/aloha_real/requirements.in b/policy/openpi-InternData-A1/examples/aloha_real/requirements.in new file mode 100644 index 0000000..4a6182a --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_real/requirements.in @@ -0,0 +1,18 @@ +Pillow +dm_control +einops +h5py +matplotlib +modern_robotics +msgpack +numpy>=1.22.4,<2.0.0 +opencv-python +packaging +pexpect +pyquaternion +pyrealsense2 +pyyaml +requests +rospkg +tyro +websockets diff --git a/policy/openpi-InternData-A1/examples/aloha_real/requirements.txt b/policy/openpi-InternData-A1/examples/aloha_real/requirements.txt new file mode 100644 index 0000000..7ab2973 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_real/requirements.txt @@ -0,0 +1,156 @@ +# This file was autogenerated by uv via the following command: +# uv pip compile examples/aloha_real/requirements.in -o examples/aloha_real/requirements.txt --python-version 3.10 +absl-py==2.1.0 + # via + # dm-control + # dm-env + # labmaze + # mujoco +catkin-pkg==1.0.0 + # via rospkg +certifi==2024.8.30 + # via requests +charset-normalizer==3.4.0 + # via requests +contourpy==1.1.1 + # via matplotlib +cycler==0.12.1 + # via matplotlib +distro==1.9.0 + # via rospkg +dm-control==1.0.23 + # via -r examples/aloha_real/requirements.in +dm-env==1.6 + # via dm-control +dm-tree==0.1.8 + # via + # dm-control + # dm-env +docstring-parser==0.16 + # via tyro +docutils==0.20.1 + # via catkin-pkg +einops==0.8.0 + # via -r examples/aloha_real/requirements.in +etils==1.3.0 + # via mujoco +fonttools==4.55.2 + # via matplotlib +glfw==2.8.0 + # via + # dm-control + # mujoco +h5py==3.11.0 + # via -r examples/aloha_real/requirements.in +idna==3.10 + # via requests +importlib-resources==6.4.5 + # via etils 
+kiwisolver==1.4.7 + # via matplotlib +labmaze==1.0.6 + # via dm-control +lxml==5.3.0 + # via dm-control +markdown-it-py==3.0.0 + # via rich +matplotlib==3.7.5 + # via -r examples/aloha_real/requirements.in +mdurl==0.1.2 + # via markdown-it-py +modern-robotics==1.1.1 + # via -r examples/aloha_real/requirements.in +msgpack==1.1.0 + # via -r examples/aloha_real/requirements.in +mujoco==3.2.3 + # via dm-control +numpy==1.24.4 + # via + # -r examples/aloha_real/requirements.in + # contourpy + # dm-control + # dm-env + # h5py + # labmaze + # matplotlib + # modern-robotics + # mujoco + # opencv-python + # pyquaternion + # scipy +opencv-python==4.10.0.84 + # via -r examples/aloha_real/requirements.in +packaging==24.2 + # via + # -r examples/aloha_real/requirements.in + # matplotlib +pexpect==4.9.0 + # via -r examples/aloha_real/requirements.in +pillow==10.4.0 + # via + # -r examples/aloha_real/requirements.in + # matplotlib +protobuf==5.29.1 + # via dm-control +ptyprocess==0.7.0 + # via pexpect +pygments==2.18.0 + # via rich +pyopengl==3.1.7 + # via + # dm-control + # mujoco +pyparsing==3.1.4 + # via + # catkin-pkg + # dm-control + # matplotlib +pyquaternion==0.9.9 + # via -r examples/aloha_real/requirements.in +pyrealsense2==2.55.1.6486 + # via -r examples/aloha_real/requirements.in +python-dateutil==2.9.0.post0 + # via + # catkin-pkg + # matplotlib +pyyaml==6.0.2 + # via + # -r examples/aloha_real/requirements.in + # rospkg +requests==2.32.3 + # via + # -r examples/aloha_real/requirements.in + # dm-control +rich==13.9.4 + # via tyro +rospkg==1.5.1 + # via -r examples/aloha_real/requirements.in +scipy==1.10.1 + # via dm-control +setuptools==75.3.0 + # via + # catkin-pkg + # dm-control + # labmaze +shtab==1.7.1 + # via tyro +six==1.17.0 + # via python-dateutil +tqdm==4.67.1 + # via dm-control +typeguard==4.4.0 + # via tyro +typing-extensions==4.12.2 + # via + # etils + # rich + # typeguard + # tyro +tyro==0.9.2 + # via -r examples/aloha_real/requirements.in 
+urllib3==2.2.3 + # via requests +websockets==14.1 + # via -r examples/aloha_real/requirements.in +zipp==3.20.2 + # via etils diff --git a/policy/openpi-InternData-A1/examples/aloha_real/robot_utils.py b/policy/openpi-InternData-A1/examples/aloha_real/robot_utils.py new file mode 100644 index 0000000..965a11e --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_real/robot_utils.py @@ -0,0 +1,275 @@ +# Ignore lint errors because this file is mostly copied from ACT (https://github.com/tonyzhaozh/act). +# ruff: noqa +from collections import deque +import datetime +import json +import time + +from aloha.msg import RGBGrayscaleImage +from cv_bridge import CvBridge +from interbotix_xs_msgs.msg import JointGroupCommand +from interbotix_xs_msgs.msg import JointSingleCommand +import numpy as np +import rospy +from sensor_msgs.msg import JointState + +from examples.aloha_real import constants + + +class ImageRecorder: + def __init__(self, init_node=True, is_debug=False): + self.is_debug = is_debug + self.bridge = CvBridge() + self.camera_names = ["cam_high", "cam_low", "cam_left_wrist", "cam_right_wrist"] + + if init_node: + rospy.init_node("image_recorder", anonymous=True) + for cam_name in self.camera_names: + setattr(self, f"{cam_name}_rgb_image", None) + setattr(self, f"{cam_name}_depth_image", None) + setattr(self, f"{cam_name}_timestamp", 0.0) + if cam_name == "cam_high": + callback_func = self.image_cb_cam_high + elif cam_name == "cam_low": + callback_func = self.image_cb_cam_low + elif cam_name == "cam_left_wrist": + callback_func = self.image_cb_cam_left_wrist + elif cam_name == "cam_right_wrist": + callback_func = self.image_cb_cam_right_wrist + else: + raise NotImplementedError + rospy.Subscriber(f"/{cam_name}", RGBGrayscaleImage, callback_func) + if self.is_debug: + setattr(self, f"{cam_name}_timestamps", deque(maxlen=50)) + + self.cam_last_timestamps = {cam_name: 0.0 for cam_name in self.camera_names} + time.sleep(0.5) + + def image_cb(self, cam_name, 
data): + setattr( + self, + f"{cam_name}_rgb_image", + self.bridge.imgmsg_to_cv2(data.images[0], desired_encoding="bgr8"), + ) + # setattr( + # self, + # f"{cam_name}_depth_image", + # self.bridge.imgmsg_to_cv2(data.images[1], desired_encoding="mono16"), + # ) + setattr( + self, + f"{cam_name}_timestamp", + data.header.stamp.secs + data.header.stamp.nsecs * 1e-9, + ) + # setattr(self, f'{cam_name}_secs', data.images[0].header.stamp.secs) + # setattr(self, f'{cam_name}_nsecs', data.images[0].header.stamp.nsecs) + # cv2.imwrite('/home/lucyshi/Desktop/sample.jpg', cv_image) + if self.is_debug: + getattr(self, f"{cam_name}_timestamps").append( + data.images[0].header.stamp.secs + data.images[0].header.stamp.nsecs * 1e-9 + ) + + def image_cb_cam_high(self, data): + cam_name = "cam_high" + return self.image_cb(cam_name, data) + + def image_cb_cam_low(self, data): + cam_name = "cam_low" + return self.image_cb(cam_name, data) + + def image_cb_cam_left_wrist(self, data): + cam_name = "cam_left_wrist" + return self.image_cb(cam_name, data) + + def image_cb_cam_right_wrist(self, data): + cam_name = "cam_right_wrist" + return self.image_cb(cam_name, data) + + def get_images(self): + image_dict = {} + for cam_name in self.camera_names: + while getattr(self, f"{cam_name}_timestamp") <= self.cam_last_timestamps[cam_name]: + time.sleep(0.00001) + rgb_image = getattr(self, f"{cam_name}_rgb_image") + depth_image = getattr(self, f"{cam_name}_depth_image") + self.cam_last_timestamps[cam_name] = getattr(self, f"{cam_name}_timestamp") + image_dict[cam_name] = rgb_image + image_dict[f"{cam_name}_depth"] = depth_image + return image_dict + + def print_diagnostics(self): + def dt_helper(l): + l = np.array(l) + diff = l[1:] - l[:-1] + return np.mean(diff) + + for cam_name in self.camera_names: + image_freq = 1 / dt_helper(getattr(self, f"{cam_name}_timestamps")) + print(f"{cam_name} {image_freq=:.2f}") + print() + + +class Recorder: + def __init__(self, side, init_node=True, 
is_debug=False): + self.secs = None + self.nsecs = None + self.qpos = None + self.effort = None + self.arm_command = None + self.gripper_command = None + self.is_debug = is_debug + + if init_node: + rospy.init_node("recorder", anonymous=True) + rospy.Subscriber(f"/puppet_{side}/joint_states", JointState, self.puppet_state_cb) + rospy.Subscriber( + f"/puppet_{side}/commands/joint_group", + JointGroupCommand, + self.puppet_arm_commands_cb, + ) + rospy.Subscriber( + f"/puppet_{side}/commands/joint_single", + JointSingleCommand, + self.puppet_gripper_commands_cb, + ) + if self.is_debug: + self.joint_timestamps = deque(maxlen=50) + self.arm_command_timestamps = deque(maxlen=50) + self.gripper_command_timestamps = deque(maxlen=50) + time.sleep(0.1) + + def puppet_state_cb(self, data): + self.qpos = data.position + self.qvel = data.velocity + self.effort = data.effort + self.data = data + if self.is_debug: + self.joint_timestamps.append(time.time()) + + def puppet_arm_commands_cb(self, data): + self.arm_command = data.cmd + if self.is_debug: + self.arm_command_timestamps.append(time.time()) + + def puppet_gripper_commands_cb(self, data): + self.gripper_command = data.cmd + if self.is_debug: + self.gripper_command_timestamps.append(time.time()) + + def print_diagnostics(self): + def dt_helper(l): + l = np.array(l) + diff = l[1:] - l[:-1] + return np.mean(diff) + + joint_freq = 1 / dt_helper(self.joint_timestamps) + arm_command_freq = 1 / dt_helper(self.arm_command_timestamps) + gripper_command_freq = 1 / dt_helper(self.gripper_command_timestamps) + + print(f"{joint_freq=:.2f}\n{arm_command_freq=:.2f}\n{gripper_command_freq=:.2f}\n") + + +def get_arm_joint_positions(bot): + return bot.arm.core.joint_states.position[:6] + + +def get_arm_gripper_positions(bot): + return bot.gripper.core.joint_states.position[6] + + +def move_arms(bot_list, target_pose_list, move_time=1): + num_steps = int(move_time / constants.DT) + curr_pose_list = [get_arm_joint_positions(bot) for bot in 
bot_list] + traj_list = [ + np.linspace(curr_pose, target_pose, num_steps) + for curr_pose, target_pose in zip(curr_pose_list, target_pose_list) + ] + for t in range(num_steps): + for bot_id, bot in enumerate(bot_list): + bot.arm.set_joint_positions(traj_list[bot_id][t], blocking=False) + time.sleep(constants.DT) + + +def move_grippers(bot_list, target_pose_list, move_time): + print(f"Moving grippers to {target_pose_list=}") + gripper_command = JointSingleCommand(name="gripper") + num_steps = int(move_time / constants.DT) + curr_pose_list = [get_arm_gripper_positions(bot) for bot in bot_list] + traj_list = [ + np.linspace(curr_pose, target_pose, num_steps) + for curr_pose, target_pose in zip(curr_pose_list, target_pose_list) + ] + + with open(f"/data/gripper_traj_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.jsonl", "a") as f: + for t in range(num_steps): + d = {} + for bot_id, bot in enumerate(bot_list): + gripper_command.cmd = traj_list[bot_id][t] + bot.gripper.core.pub_single.publish(gripper_command) + d[bot_id] = {"obs": get_arm_gripper_positions(bot), "act": traj_list[bot_id][t]} + f.write(json.dumps(d) + "\n") + time.sleep(constants.DT) + + +def setup_puppet_bot(bot): + bot.dxl.robot_reboot_motors("single", "gripper", True) + bot.dxl.robot_set_operating_modes("group", "arm", "position") + bot.dxl.robot_set_operating_modes("single", "gripper", "current_based_position") + torque_on(bot) + + +def setup_master_bot(bot): + bot.dxl.robot_set_operating_modes("group", "arm", "pwm") + bot.dxl.robot_set_operating_modes("single", "gripper", "current_based_position") + torque_off(bot) + + +def set_standard_pid_gains(bot): + bot.dxl.robot_set_motor_registers("group", "arm", "Position_P_Gain", 800) + bot.dxl.robot_set_motor_registers("group", "arm", "Position_I_Gain", 0) + + +def set_low_pid_gains(bot): + bot.dxl.robot_set_motor_registers("group", "arm", "Position_P_Gain", 100) + bot.dxl.robot_set_motor_registers("group", "arm", "Position_I_Gain", 0) + + +def 
torque_off(bot): + bot.dxl.robot_torque_enable("group", "arm", False) + bot.dxl.robot_torque_enable("single", "gripper", False) + + +def torque_on(bot): + bot.dxl.robot_torque_enable("group", "arm", True) + bot.dxl.robot_torque_enable("single", "gripper", True) + + +# for DAgger +def sync_puppet_to_master(master_bot_left, master_bot_right, puppet_bot_left, puppet_bot_right): + print("\nSyncing!") + + # activate master arms + torque_on(master_bot_left) + torque_on(master_bot_right) + + # get puppet arm positions + puppet_left_qpos = get_arm_joint_positions(puppet_bot_left) + puppet_right_qpos = get_arm_joint_positions(puppet_bot_right) + + # get puppet gripper positions + puppet_left_gripper = get_arm_gripper_positions(puppet_bot_left) + puppet_right_gripper = get_arm_gripper_positions(puppet_bot_right) + + # move master arms to puppet positions + move_arms( + [master_bot_left, master_bot_right], + [puppet_left_qpos, puppet_right_qpos], + move_time=1, + ) + + # move master grippers to puppet positions + move_grippers( + [master_bot_left, master_bot_right], + [puppet_left_gripper, puppet_right_gripper], + move_time=1, + ) diff --git a/policy/openpi-InternData-A1/examples/aloha_real/video_display.py b/policy/openpi-InternData-A1/examples/aloha_real/video_display.py new file mode 100644 index 0000000..9ad79dd --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_real/video_display.py @@ -0,0 +1,36 @@ +import matplotlib.pyplot as plt +import numpy as np +from openpi_client.runtime import subscriber as _subscriber +from typing_extensions import override + + +class VideoDisplay(_subscriber.Subscriber): + """Displays video frames.""" + + def __init__(self) -> None: + self._ax: plt.Axes | None = None + self._plt_img: plt.Image | None = None + + @override + def on_episode_start(self) -> None: + plt.ion() + self._ax = plt.subplot() + self._plt_img = None + + @override + def on_step(self, observation: dict, action: dict) -> None: + assert self._ax is not None + + im = 
observation["image"][0] # [C, H, W] + im = np.transpose(im, (1, 2, 0)) # [H, W, C] + + if self._plt_img is None: + self._plt_img = self._ax.imshow(im) + else: + self._plt_img.set_data(im) + plt.pause(0.001) + + @override + def on_episode_end(self) -> None: + plt.ioff() + plt.close() diff --git a/policy/openpi-InternData-A1/examples/aloha_sim/Dockerfile b/policy/openpi-InternData-A1/examples/aloha_sim/Dockerfile new file mode 100644 index 0000000..1f18790 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_sim/Dockerfile @@ -0,0 +1,41 @@ +# Dockerfile for the Aloha simulation environment. + +# Build the container: +# docker build . -t aloha_sim -f examples/aloha_sim/Dockerfile + +# Run the container: +# docker run --rm -it --network=host -v .:/app aloha_sim /bin/bash + +FROM python:3.11-slim@sha256:370c586a6ffc8c619e6d652f81c094b34b14b8f2fb9251f092de23f16e299b78 +COPY --from=ghcr.io/astral-sh/uv:0.5.1 /uv /uvx /bin/ + +RUN apt-get update && \ + apt-get install -y \ + libosmesa6-dev \ + libgl1-mesa-glx \ + libglew-dev \ + libglfw3-dev \ + libgles2-mesa-dev +ENV MUJOCO_GL=egl + +WORKDIR /app + +# Copy from the cache instead of linking since it's a mounted volume +ENV UV_LINK_MODE=copy + +# Write the virtual environment outside of the project directory so it doesn't +# leak out of the container when we mount the application code. +ENV UV_PROJECT_ENVIRONMENT=/.venv + +# Copy the requirements files so we can install dependencies. +# The rest of the project is mounted as a volume, so we don't need to rebuild on changes. +# This strategy is best for development-style usage. +COPY ./examples/aloha_sim/requirements.txt /tmp/requirements.txt +COPY ./packages/openpi-client/pyproject.toml /tmp/openpi-client/pyproject.toml + +# Install python dependencies. 
+RUN uv venv --python 3.11.9 $UV_PROJECT_ENVIRONMENT +RUN uv pip sync /tmp/requirements.txt /tmp/openpi-client/pyproject.toml +ENV PYTHONPATH=/app:/app/src:/app/packages/openpi-client/src + +CMD ["/bin/bash", "-c", "source /.venv/bin/activate && python examples/aloha_sim/main.py"] \ No newline at end of file diff --git a/policy/openpi-InternData-A1/examples/aloha_sim/README.md b/policy/openpi-InternData-A1/examples/aloha_sim/README.md new file mode 100644 index 0000000..0c6d4c5 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_sim/README.md @@ -0,0 +1,36 @@ +# Run Aloha Sim + +## With Docker + +```bash +export SERVER_ARGS="--env ALOHA_SIM" +docker compose -f examples/aloha_sim/compose.yml up --build +``` + +## Without Docker + +Terminal window 1: + +```bash +# Create virtual environment +uv venv --python 3.10 examples/aloha_sim/.venv +source examples/aloha_sim/.venv/bin/activate +uv pip sync examples/aloha_sim/requirements.txt +uv pip install -e packages/openpi-client + +# Run the simulation +MUJOCO_GL=egl python examples/aloha_sim/main.py +``` + +Note: If you are seeing EGL errors, you may need to install the following dependencies: + +```bash +sudo apt-get install -y libegl1-mesa-dev libgles2-mesa-dev +``` + +Terminal window 2: + +```bash +# Run the server +uv run scripts/serve_policy.py --env ALOHA_SIM +``` diff --git a/policy/openpi-InternData-A1/examples/aloha_sim/compose.yml b/policy/openpi-InternData-A1/examples/aloha_sim/compose.yml new file mode 100644 index 0000000..c56e4de --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_sim/compose.yml @@ -0,0 +1,42 @@ +# Run with: +# docker compose -f examples/aloha_sim/compose.yml up --build +services: + runtime: + image: aloha_sim + depends_on: + - openpi_server + build: + context: ../.. 
+ dockerfile: examples/aloha_sim/Dockerfile + init: true + tty: true + network_mode: host + privileged: true + volumes: + - $PWD:/app + - ../../data:/data + + openpi_server: + image: openpi_server + build: + context: ../.. + dockerfile: scripts/docker/serve_policy.Dockerfile + init: true + tty: true + network_mode: host + volumes: + - $PWD:/app + - ${OPENPI_DATA_HOME:-~/.cache/openpi}:/openpi_assets + environment: + - SERVER_ARGS + - OPENPI_DATA_HOME=/openpi_assets + - IS_DOCKER=true + + # Comment out this block if not running on a machine with GPUs. + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: 1 + capabilities: [gpu] diff --git a/policy/openpi-InternData-A1/examples/aloha_sim/env.py b/policy/openpi-InternData-A1/examples/aloha_sim/env.py new file mode 100644 index 0000000..af2d5b6 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_sim/env.py @@ -0,0 +1,56 @@ +import gym_aloha # noqa: F401 +import gymnasium +import numpy as np +from openpi_client import image_tools +from openpi_client.runtime import environment as _environment +from typing_extensions import override + + +class AlohaSimEnvironment(_environment.Environment): + """An environment for an Aloha robot in simulation.""" + + def __init__(self, task: str, obs_type: str = "pixels_agent_pos", seed: int = 0) -> None: + np.random.seed(seed) + self._rng = np.random.default_rng(seed) + + self._gym = gymnasium.make(task, obs_type=obs_type) + + self._last_obs = None + self._done = True + self._episode_reward = 0.0 + + @override + def reset(self) -> None: + gym_obs, _ = self._gym.reset(seed=int(self._rng.integers(2**32 - 1))) + self._last_obs = self._convert_observation(gym_obs) # type: ignore + self._done = False + self._episode_reward = 0.0 + + @override + def is_episode_complete(self) -> bool: + return self._done + + @override + def get_observation(self) -> dict: + if self._last_obs is None: + raise RuntimeError("Observation is not set. 
Call reset() first.") + + return self._last_obs # type: ignore + + @override + def apply_action(self, action: dict) -> None: + gym_obs, reward, terminated, truncated, info = self._gym.step(action["actions"]) + self._last_obs = self._convert_observation(gym_obs) # type: ignore + self._done = terminated or truncated + self._episode_reward = max(self._episode_reward, reward) + + def _convert_observation(self, gym_obs: dict) -> dict: + img = gym_obs["pixels"]["top"] + img = image_tools.convert_to_uint8(image_tools.resize_with_pad(img, 224, 224)) + # Convert axis order from [H, W, C] --> [C, H, W] + img = np.transpose(img, (2, 0, 1)) + + return { + "state": gym_obs["agent_pos"], + "images": {"cam_high": img}, + } diff --git a/policy/openpi-InternData-A1/examples/aloha_sim/main.py b/policy/openpi-InternData-A1/examples/aloha_sim/main.py new file mode 100644 index 0000000..d76122a --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_sim/main.py @@ -0,0 +1,55 @@ +import dataclasses +import logging +import pathlib + +import env as _env +from openpi_client import action_chunk_broker +from openpi_client import websocket_client_policy as _websocket_client_policy +from openpi_client.runtime import runtime as _runtime +from openpi_client.runtime.agents import policy_agent as _policy_agent +import saver as _saver +import tyro + + +@dataclasses.dataclass +class Args: + out_dir: pathlib.Path = pathlib.Path("data/aloha_sim/videos") + + task: str = "gym_aloha/AlohaTransferCube-v0" + seed: int = 0 + + action_horizon: int = 10 + + host: str = "0.0.0.0" + port: int = 8000 + + display: bool = False + + +def main(args: Args) -> None: + runtime = _runtime.Runtime( + environment=_env.AlohaSimEnvironment( + task=args.task, + seed=args.seed, + ), + agent=_policy_agent.PolicyAgent( + policy=action_chunk_broker.ActionChunkBroker( + policy=_websocket_client_policy.WebsocketClientPolicy( + host=args.host, + port=args.port, + ), + action_horizon=args.action_horizon, + ) + ), + 
subscribers=[ + _saver.VideoSaver(args.out_dir), + ], + max_hz=50, + ) + + runtime.run() + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO, force=True) + tyro.cli(main) diff --git a/policy/openpi-InternData-A1/examples/aloha_sim/requirements.in b/policy/openpi-InternData-A1/examples/aloha_sim/requirements.in new file mode 100644 index 0000000..d84d356 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_sim/requirements.in @@ -0,0 +1,8 @@ +gym-aloha +imageio +matplotlib +msgpack +numpy>=1.22.4,<2.0.0 +typing-extensions +tyro +websockets \ No newline at end of file diff --git a/policy/openpi-InternData-A1/examples/aloha_sim/requirements.txt b/policy/openpi-InternData-A1/examples/aloha_sim/requirements.txt new file mode 100644 index 0000000..a1087f1 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_sim/requirements.txt @@ -0,0 +1,132 @@ +# This file was autogenerated by uv via the following command: +# uv pip compile examples/aloha_sim/requirements.in -o examples/aloha_sim/requirements.txt --python-version 3.10 +absl-py==2.1.0 + # via + # dm-control + # dm-env + # labmaze + # mujoco +certifi==2024.8.30 + # via requests +charset-normalizer==3.4.0 + # via requests +cloudpickle==3.1.0 + # via gymnasium +contourpy==1.3.1 + # via matplotlib +cycler==0.12.1 + # via matplotlib +dm-control==1.0.14 + # via gym-aloha +dm-env==1.6 + # via dm-control +dm-tree==0.1.8 + # via + # dm-control + # dm-env +docstring-parser==0.16 + # via tyro +farama-notifications==0.0.4 + # via gymnasium +fonttools==4.55.2 + # via matplotlib +glfw==2.8.0 + # via + # dm-control + # mujoco +gym-aloha==0.1.1 + # via -r examples/aloha_sim/requirements.in +gymnasium==1.0.0 + # via gym-aloha +idna==3.10 + # via requests +imageio==2.36.1 + # via + # -r examples/aloha_sim/requirements.in + # gym-aloha +imageio-ffmpeg==0.5.1 + # via imageio +kiwisolver==1.4.7 + # via matplotlib +labmaze==1.0.6 + # via dm-control +lxml==5.3.0 + # via dm-control 
+markdown-it-py==3.0.0 + # via rich +matplotlib==3.9.3 + # via -r examples/aloha_sim/requirements.in +mdurl==0.1.2 + # via markdown-it-py +msgpack==1.1.0 + # via -r examples/aloha_sim/requirements.in +mujoco==2.3.7 + # via + # dm-control + # gym-aloha +numpy==1.26.4 + # via + # -r examples/aloha_sim/requirements.in + # contourpy + # dm-control + # dm-env + # gymnasium + # imageio + # labmaze + # matplotlib + # mujoco + # scipy +packaging==24.2 + # via matplotlib +pillow==11.0.0 + # via + # imageio + # matplotlib +protobuf==5.29.1 + # via dm-control +psutil==6.1.0 + # via imageio +pygments==2.18.0 + # via rich +pyopengl==3.1.7 + # via + # dm-control + # mujoco +pyparsing==3.2.0 + # via + # dm-control + # matplotlib +python-dateutil==2.9.0.post0 + # via matplotlib +requests==2.32.3 + # via dm-control +rich==13.9.4 + # via tyro +scipy==1.14.1 + # via dm-control +setuptools==75.6.0 + # via + # dm-control + # imageio-ffmpeg + # labmaze +shtab==1.7.1 + # via tyro +six==1.17.0 + # via python-dateutil +tqdm==4.67.1 + # via dm-control +typeguard==4.4.1 + # via tyro +typing-extensions==4.12.2 + # via + # -r examples/aloha_sim/requirements.in + # gymnasium + # rich + # typeguard + # tyro +tyro==0.9.2 + # via -r examples/aloha_sim/requirements.in +urllib3==2.2.3 + # via requests +websockets==14.1 + # via -r examples/aloha_sim/requirements.in diff --git a/policy/openpi-InternData-A1/examples/aloha_sim/saver.py b/policy/openpi-InternData-A1/examples/aloha_sim/saver.py new file mode 100644 index 0000000..bd7f2c5 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/aloha_sim/saver.py @@ -0,0 +1,40 @@ +import logging +import pathlib + +import imageio +import numpy as np +from openpi_client.runtime import subscriber as _subscriber +from typing_extensions import override + + +class VideoSaver(_subscriber.Subscriber): + """Saves episode data.""" + + def __init__(self, out_dir: pathlib.Path, subsample: int = 1) -> None: + out_dir.mkdir(parents=True, exist_ok=True) + self._out_dir = 
out_dir + self._images: list[np.ndarray] = [] + self._subsample = subsample + + @override + def on_episode_start(self) -> None: + self._images = [] + + @override + def on_step(self, observation: dict, action: dict) -> None: + im = observation["images"]["cam_high"] # [C, H, W] + im = np.transpose(im, (1, 2, 0)) # [H, W, C] + self._images.append(im) + + @override + def on_episode_end(self) -> None: + existing = list(self._out_dir.glob("out_[0-9]*.mp4")) + next_idx = max([int(p.stem.split("_")[1]) for p in existing], default=-1) + 1 + out_path = self._out_dir / f"out_{next_idx}.mp4" + + logging.info(f"Saving video to {out_path}") + imageio.mimwrite( + out_path, + [np.asarray(x) for x in self._images[:: self._subsample]], + fps=50 // max(1, self._subsample), + ) diff --git a/policy/openpi-InternData-A1/examples/arx/action_stats.py b/policy/openpi-InternData-A1/examples/arx/action_stats.py new file mode 100644 index 0000000..29634d8 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/arx/action_stats.py @@ -0,0 +1,212 @@ +from collections import deque +from typing import List, Dict, Optional, Any, Sequence, Deque, Union +import datasets +import torch + +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset + + +def check_final( + last_states: Union[Deque[Sequence[float]], Sequence[Sequence[float]], torch.Tensor], + *, + # 索引与初始状态 + arm_dofs: int = 6, # 左臂关节数(这里按你给的 6) + gripper_index: int = -1, # 夹爪在向量中的索引(默认最后一维) + mean_initial_arm_state: Optional[Sequence[float]] = (0.0107, 0.0527, 0.0463, -0.0415, 0.0187, 0.0108), + mean_initial_gripper_state: float = 4.8438, # 目前不参与判定,保留以便后续扩展 + + # 判定阈值(角度阈值用“度”直观易调,内部会转换为弧度) + stability_window: int = 5, # 最近多少帧用于判“没有太大变化” + per_joint_range_deg: float = 2.0, # 窗口内每个关节的最大幅度(max-min)阈值(度) + mean_speed_deg: float = 0.5, # 邻帧关节差的平均 L2(每步)阈值(度/步) + min_change_from_initial_deg: float = 15.0, # 末帧相对初始的“至少变化量”(L2,度) + gripper_closed_thresh: float = 0.8, # 夹爪关闭阈值(数值越小说明越闭合) +) -> bool: + """ + 返回 True 表示“到位”:(1) 
最近窗口内姿态变化不大 & (2) 夹爪关闭 & (3) 末帧与初始相差足够大。 + 所有角度的阈值以“度”给出,这里会自动转弧度再比较。 + """ + # --- 数据整理为 (N, D) tensor --- + if isinstance(last_states, torch.Tensor): + states = last_states + else: + states = torch.as_tensor(list(last_states), dtype=torch.float32) + + if states.ndim != 2: + raise ValueError(f"last_states should be 2D, got shape {tuple(states.shape)}") + N, D = states.shape + if D < arm_dofs: + raise ValueError(f"Expected at least {arm_dofs} dims for arm + gripper, got {D}") + if N < 2: + return False # 样本太少,无法判定稳定 + + # 取最近窗口 + w = min(N, stability_window) + window = states[-w:] # (w, D) + arm = window[:, :arm_dofs] # (w, 6) + last_arm = arm[-1] # (6,) + last_gripper = float(window[-1, gripper_index]) + + # --- 1) 最近 w 帧“没有太大变化” --- + # 两个指标:每关节range(max-min)要小、相邻帧的平均“速度”要小 + deg2rad = torch.pi / 180.0 + range_tol = per_joint_range_deg * deg2rad + speed_tol = mean_speed_deg * deg2rad + + ranges = arm.max(dim=0).values - arm.min(dim=0).values # (6,) + max_range = float(ranges.abs().max()) # 标量 + diffs = arm[1:] - arm[:-1] # (w-1, 6) + mean_speed = float(torch.linalg.norm(diffs, dim=1).mean()) # 每步的平均 L2 + + stable = (max_range <= range_tol) and (mean_speed <= speed_tol) + + # --- 2) 夹爪关闭 --- + gripper_closed = (last_gripper < gripper_closed_thresh) + + # --- 3) 末帧与“初始”差距要大 --- + init = torch.as_tensor(mean_initial_arm_state, dtype=last_arm.dtype, device=last_arm.device) + if init.numel() != arm_dofs: + raise ValueError(f"mean_initial_arm_state length {init.numel()} != arm_dofs {arm_dofs}") + dist_from_init = float(torch.linalg.norm(last_arm - init)) + far_from_init = (dist_from_init >= (min_change_from_initial_deg * deg2rad)) + + # 组合判定 + return bool(stable and gripper_closed and far_from_init) + # return bool(gripper_closed and far_from_init) + + +def get_last_frames(ds: LeRobotDataset, include_images: bool = False, keys=None): + """ + Quickly fetch the last frame of each episode in a LeRobotDataset. 
+ - include_images=False: Return only scalar/vector fields from parquet (faster, no video decoding). + - include_images=True : Additionally decode the corresponding image/video frame for the last frame. + - keys: Limit the set of columns to retrieve (default: all non-image/video fields + timestamp, etc.). + Returns: list[dict], where each element contains the last frame info of one episode. + """ + # 1) Compute the global index of the last row for each episode. + # ds.episode_data_index['to'] is the exclusive end index, so last frame = to - 1. + end_idxs = (ds.episode_data_index["to"] - 1).tolist() + + # 2) Determine which columns to load. + # By default, exclude video/image columns to avoid triggering slow video decoding. + if keys is None: + non_media_keys = [k for k, ft in ds.features.items() if ft["dtype"] not in ("image", "video")] + keys = list(set(non_media_keys + ["timestamp", "episode_index", "task_index"])) + + # 3) Select all last-frame rows at once (does not call __getitem__, so no video decoding is triggered). + last_rows = ds.hf_dataset.select(end_idxs) + + # 4) Build a dictionary of tensors for each requested key. + out = [] + col = {k: last_rows[k] for k in keys} + + # Convert lists of tensors into stacked tensors for easier indexing. + for k, v in col.items(): + # datasets.arrow_dataset.Column is the HuggingFace internal type for columns. + if isinstance(v, datasets.arrow_dataset.Column) and len(v) > 0 and hasattr(v[0], "shape"): + col[k] = torch.stack(v[:]) + + # Iterate through each episode’s last frame and build a dict with its values. + for i, ep_end in enumerate(end_idxs): + item = {} + for k in keys: + val = col[k][i] + # Unpack 0-dimensional tensors into Python scalars. + if torch.is_tensor(val) and val.ndim == 0: + val = val.item() + item[k] = val + + # Map task_index back to the human-readable task string. 
+ if "task_index" in item: + item["task"] = ds.meta.tasks[int(item["task_index"])] + out.append(item) + + # 5) Optionally decode the actual image/video frame for each last timestamp. + if include_images and len(ds.meta.video_keys) > 0: + for i, ep_end in enumerate(end_idxs): + ep_idx = int(out[i]["episode_index"]) + ts = float(out[i]["timestamp"]) + # Prepare a query dictionary: one timestamp per camera key. + query_ts = {k: [ts] for k in ds.meta.video_keys} + # Decode video frames at the specified timestamps for this episode. + frames = ds._query_videos(query_ts, ep_idx) + # Attach the decoded frame tensors to the output dictionary. + for k, v in frames.items(): + out[i][k] = v + + return out + + +if __name__ == "__main__": + # Initialize your dataset (replace with your repo ID or local path). + ds = LeRobotDataset(repo_id="arx_lift2/pick_parcel_20250915") + + # Retrieve metadata only (timestamps, states, actions, tasks) without decoding video. + last_infos = get_last_frames(ds, include_images=False) + + # Stack all 'observation.state' vectors into a single tensor for further processing. + states = torch.stack([info['observation.state'] for info in last_infos]) + # Extract the left-arm joint states (first 7 values of each state vector). + left_arm_states = states[:, 0:7] + mean_state = torch.mean(left_arm_states, dim=0) + std_state = torch.std(left_arm_states, dim=0) + + # Print the collected metadata for verification. 
+ print(last_infos) + + # --- Run check_final per episode using the last <=50 states --- + + EP_ARM_DOFS = 6 # number of left-arm joints we use in check_final + GRIPPER_COL_FULL = -1 # gripper is the last element in the full state vector + STABILITY_WINDOW = 120 # must be consistent with check_final's default + + # Determine which episodes to iterate + episode_indices = ds.episodes if ds.episodes is not None else sorted(ds.meta.episodes.keys()) + + episode_flags = {} + num_true, num_false = 0, 0 + + for ep_idx in episode_indices: + # Global index range [from_idx, to_idx) for this episode + from_idx = int(ds.episode_data_index["from"][ep_idx]) + to_idx = int(ds.episode_data_index["to"][ep_idx]) + + if to_idx - from_idx <= 0: + episode_flags[ep_idx] = False + num_false += 1 + continue + + # Take the last <= STABILITY_WINDOW frames from this episode + idxs = list(range(max(from_idx, to_idx - STABILITY_WINDOW), to_idx)) + rows = ds.hf_dataset.select(idxs) + + # Collect full "observation.state" (shape ~ [W, S]) + s_col = rows["observation.state"] + if isinstance(s_col, datasets.arrow_dataset.Column): + S = torch.stack(s_col[:]) # Column -> list[tensor] -> stack + else: + S = torch.stack(s_col) # already a list[tensor] + + # Build the 7D small state per frame: first 6 joints + gripper + # (Assumes the gripper signal is at the last position of the full state vector) + small_states = torch.cat([S[:, :EP_ARM_DOFS], S[:, EP_ARM_DOFS:EP_ARM_DOFS+1]], dim=1) + + # Run your stopping logic + ok = check_final( + small_states, + arm_dofs=EP_ARM_DOFS, + gripper_index=-1, + stability_window=STABILITY_WINDOW, + ) + episode_flags[ep_idx] = bool(ok) + num_true += int(ok) + num_false += int(not ok) + + # Summary + total_eps = len(episode_indices) + print(f"[check_final] passed: {num_true} / {total_eps} ({(num_true/max(total_eps,1)):.1%})") + + # List some failed episodes for quick inspection + failed_eps = [e for e, passed in episode_flags.items() if not passed] + print("Failed episode 
indices (first 20):", failed_eps[:20]) + diff --git a/policy/openpi-InternData-A1/examples/arx/extract_frame.py b/policy/openpi-InternData-A1/examples/arx/extract_frame.py new file mode 100644 index 0000000..af08e31 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/arx/extract_frame.py @@ -0,0 +1,88 @@ +import os +import cv2 +from pathlib import Path +from tqdm import tqdm +def extract_last_frame_from_videos(root_dir, output_dir, xx_last_frame=1): + """ + 遍历目录,找到所有images.rgb.hand_right视频文件,提取最后一帧并保存 + """ + # 查找所有mp4视频文件 + video_files = [] + for root, dirs, files in os.walk(root_dir): + for file in files: + + if file.endswith('.mp4') and 'observation/head' in root: + video_files.append(os.path.join(root, file)) + + print(f"找到 {len(video_files)} 个视频文件") + + # 处理每个视频文件 + for video_path in tqdm(video_files): + try: + # 提取set名称和episode名称 + path_parts = Path(video_path).parts + set_name = None + episode_name = None + for part in path_parts: + if part.startswith('set'): + set_name = part + if part.startswith('000'): + episode_name = part.replace('.mp4', '') + + if not set_name or not episode_name: + print(f"无法从路径中提取set和episode信息: {video_path}") + continue + + # 生成输出文件名 + output_filename = f"{set_name}_{episode_name}.jpg" + output_path = os.path.join(output_dir, output_filename) + + # 打开视频文件 + cap = cv2.VideoCapture(video_path) + + if not cap.isOpened(): + print(f"无法打开视频: {video_path}") + continue + + # 获取总帧数 + total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + if total_frames == 0: + print(f"视频没有帧: {video_path}") + cap.release() + continue + + # 跳转到最后一帧 + cap.set(cv2.CAP_PROP_POS_FRAMES, total_frames - xx_last_frame) + ret, frame = cap.read() + + if ret: + # 保存最后一帧 + cv2.imwrite(output_path, frame) + print(f"已保存:\n {output_path}") + else: + print(f"无法读取最后一帧: {video_path}") + + # 释放资源 + cap.release() + + except Exception as e: + print(f"处理视频时出错 {video_path}: {str(e)}") + +if __name__ == "__main__": + # 指定要遍历的根目录 + root_directory = 
"/home/caijunhao/h-ceph/InternData-A1-raw/arx_lift2/Pick_the_industrial_components_from_the_conveyor" # 当前目录,您可以修改为您的目录路径 + output_path = 'data/Pick_the_industrial_components_from_the_conveyor/' + os.makedirs(output_path, exist_ok=True) + sub_list = os.listdir(root_directory) + exclude_list = [] + # exclude_list = [f"{i}" for i in range(16)] + [f"{i}" for i in range(26, 29)] + xx_last_frame = 1 + # import pdb + # pdb.set_trace() + for sub in tqdm(sub_list): + if sub.split('-')[1].split('_')[0] in exclude_list: + continue + # print("os.path.join([root_directory, sub])\n", os.path.join(root_directory, sub)) + extract_last_frame_from_videos(os.path.join(root_directory, sub), output_path, xx_last_frame=xx_last_frame) + print("处理完成!") \ No newline at end of file diff --git a/policy/openpi-InternData-A1/examples/arx/lmdb2lerobot_arx.py b/policy/openpi-InternData-A1/examples/arx/lmdb2lerobot_arx.py new file mode 100644 index 0000000..e24b5cd --- /dev/null +++ b/policy/openpi-InternData-A1/examples/arx/lmdb2lerobot_arx.py @@ -0,0 +1,670 @@ +# source /fs-computility/efm/liyang/miniconda3/etc/profile.d/conda.sh +# conda activate act + +import argparse +import json +import logging +import os +import gc +import shutil +from concurrent.futures import ALL_COMPLETED, ProcessPoolExecutor, ThreadPoolExecutor, as_completed, wait +from pathlib import Path +from typing import Callable, Dict, List, Optional, Tuple +import torchvision +import cv2 +import h5py +import lmdb +import numpy as np +import pickle +import torch +from PIL import Image +from scipy.spatial.transform import Rotation +from tqdm import tqdm +import logging +import pdb +import os +import imageio # imageio-ffmpeg +from lerobot.common.datasets.compute_stats import auto_downsample_height_width, get_feature_stats, sample_indices +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +from lerobot.common.datasets.utils import check_timestamps_sync, get_episode_data_index, validate_episode_buffer +import time 
+# import ray +# from ray.runtime_env import RuntimeEnv + +""" + Store both camera image and robot state as a combined observation. + Args: + observation: images(camera), states (robot state) + actions: joint, gripper, ee_pose +""" +FEATURES = { + "images.rgb.head": { + "dtype": "video", + "shape": (368, 640, 3), + "names": ["height", "width", "channel"], + }, + "images.rgb.hand_left": { + "dtype": "video", + "shape": (480, 640, 3), + "names": ["height", "width", "channel"], + }, + "images.rgb.hand_right": { + "dtype": "video", + "shape": (480, 640, 3), + "names": ["height", "width", "channel"], + }, + # "states.left_joint.position": { + # "dtype": "float32", + # "shape": (6,), + # "names": ["left_joint_0", "left_joint_1", "left_joint_2", "left_joint_3", "left_joint_4", "left_joint_5",], + # }, + # "states.left_gripper.position": { + # "dtype": "float32", + # "shape": (1,), + # "names": ["left_gripper_0",], + # }, + # "states.right_joint.position": { + # "dtype": "float32", + # "shape": (6,), + # "names": ["right_joint_0", "right_joint_1", "right_joint_2", "right_joint_3", "right_joint_4", "right_joint_5",], + # }, + # "states.right_gripper.position": { + # "dtype": "float32", + # "shape": (1,), + # "names": ["right_gripper_0",], + # }, + "observation.state": { + "dtype": "float32", + "shape": (14,), + "names": ["left_joint_0", "left_joint_1", "left_joint_2", "left_joint_3", "left_joint_4", "left_joint_5", "left_gripper_0", + "right_joint_0", "right_joint_1", "right_joint_2", "right_joint_3", "right_joint_4", "right_joint_5","right_gripper_0"], + }, + "action": { + "dtype": "float32", + "shape": (14,), + "names": ["left_joint_0", "left_joint_1", "left_joint_2", "left_joint_3", "left_joint_4", "left_joint_5", "left_gripper_0", + "right_joint_0", "right_joint_1", "right_joint_2", "right_joint_3", "right_joint_4", "right_joint_5","right_gripper_0"], + }, + # "actions.left_joint.position": { + # "dtype": "float32", + # "shape": (6,), + # "names": ["left_joint_0", 
"left_joint_1", "left_joint_2", "left_joint_3", "left_joint_4", "left_joint_5",], + # }, + # "actions.left_gripper.position": { + # "dtype": "float32", + # "shape": (1,), + # "names": ["left_gripper_0",], + # }, + # "actions.right_joint.position": { + # "dtype": "float32", + # "shape": (6,), + # "names": ["right_joint_0", "right_joint_1", "right_joint_2", "right_joint_3", "right_joint_4", "right_joint_5",], + # }, + # "actions.right_gripper.position": { + # "dtype": "float32", + # "shape": (1,), + # "names": ["right_gripper_0", ], + # }, + +} + + +import numpy as np + +def filter_forbidden_frames(state_dict, position_threshold=0.001, velocity_threshold=0.005): + """ + 过滤禁止的帧,基于位置和速度阈值 + + 参数: + - state_dict: 形状为 (n, 14) 的状态数组 + - position_threshold: 位置变化的阈值 + - velocity_threshold: 速度变化的阈值 + + 返回: + - valid_mask: 布尔数组,True表示有效帧 + """ + # 排除夹爪列(第6和第13列,索引从0开始) + qpos_columns = [i for i in range(14)] + qpos_data = state_dict[:, qpos_columns] + + n_frames = len(state_dict) + valid_mask = np.ones(n_frames, dtype=bool) + # import pdb + # pdb.set_trace() + # 计算帧间差异(速度) + if n_frames > 1: + + diff_sum = np.sum(np.abs(np.diff(qpos_data, axis=0)), axis=1) + # sorted_indices = np.argsort(diff_sum)[::-1] + # sorted_abs_sums = diff_sum[sorted_indices] + + # velocities = np.diff(qpos_data, axis=0) + # 检查速度是否超过阈值 + for i in range(n_frames - 1): + if np.any(np.abs(diff_sum[i]) > position_threshold): + valid_mask[i] = True # 有运动,有效帧 + else: + valid_mask[i] = False # 静止,可能是禁止帧 + valid_mask[i] = True + return valid_mask + +def statistical_filter(state_dict, std_multiplier=2.0): + """ + 使用统计方法检测异常(禁止)帧 + """ + # 排除夹爪列 + qpos_columns = [i for i in range(14) if i not in [6, 13]] + qpos_data = state_dict[:, qpos_columns] + + # 计算每列的均值和标准差 + means = np.mean(qpos_data, axis=0) + stds = np.std(qpos_data, axis=0) + + # 创建有效掩码 + valid_mask = np.ones(len(state_dict), dtype=bool) + + for i in range(len(state_dict)): + # 检查每个关节位置是否在合理范围内 + deviations = np.abs(qpos_data[i] - means) + if 
np.any(deviations > std_multiplier * stds): + valid_mask[i] = False # 异常帧 + + return valid_mask + + +class ARXDataset(LeRobotDataset): + def __init__( + self, + repo_id: str, + root: str | Path | None = None, + episodes: list[int] | None = None, + image_transforms: Callable | None = None, + delta_timestamps: dict[list[float]] | None = None, + tolerance_s: float = 1e-4, + download_videos: bool = True, + local_files_only: bool = False, + video_backend: str | None = None, + ): + super().__init__( + repo_id=repo_id, + root=root, + episodes=episodes, + image_transforms=image_transforms, + delta_timestamps=delta_timestamps, + tolerance_s=tolerance_s, + download_videos=download_videos, + local_files_only=local_files_only, + video_backend=video_backend, + ) + + def save_episode(self, episode_data: dict | None = None, videos: dict | None = None) -> None: + if not episode_data: + episode_buffer = self.episode_buffer + + validate_episode_buffer(episode_buffer, self.meta.total_episodes, self.features) + episode_length = episode_buffer.pop("size") + tasks = episode_buffer.pop("task") + episode_tasks = list(set(tasks)) + episode_index = episode_buffer["episode_index"] + + episode_buffer["index"] = np.arange(self.meta.total_frames, self.meta.total_frames + episode_length) + episode_buffer["episode_index"] = np.full((episode_length,), episode_index) + + for task in episode_tasks: + task_index = self.meta.get_task_index(task) + if task_index is None: + self.meta.add_task(task) + + episode_buffer["task_index"] = np.array([self.meta.get_task_index(task) for task in tasks]) + for key, ft in self.features.items(): + if key in ["index", "episode_index", "task_index"] or ft["dtype"] in ["video"]: + continue + episode_buffer[key] = np.stack(episode_buffer[key]).squeeze() + for key in self.meta.video_keys: + video_path = self.root / self.meta.get_video_file_path(episode_index, key) + episode_buffer[key] = str(video_path) # PosixPath -> str + video_path.parent.mkdir(parents=True, 
exist_ok=True) + shutil.copyfile(videos[key], video_path) + ep_stats = compute_episode_stats(episode_buffer, self.features) + self._save_episode_table(episode_buffer, episode_index) + self.meta.save_episode(episode_index, episode_length, episode_tasks, ep_stats) + ep_data_index = get_episode_data_index(self.meta.episodes, [episode_index]) + ep_data_index_np = {k: t.numpy() for k, t in ep_data_index.items()} + check_timestamps_sync( + episode_buffer["timestamp"], + episode_buffer["episode_index"], + ep_data_index_np, + self.fps, + self.tolerance_s, + ) + if not episode_data: + self.episode_buffer = self.create_episode_buffer() + + + def add_frame(self, frame: dict) -> None: + for name in frame: + if isinstance(frame[name], torch.Tensor): + frame[name] = frame[name].numpy() + features = {key: value for key, value in self.features.items() if key in self.hf_features} + if self.episode_buffer is None: + self.episode_buffer = self.create_episode_buffer() + frame_index = self.episode_buffer["size"] + timestamp = frame.pop("timestamp") if "timestamp" in frame else frame_index / self.fps + self.episode_buffer["frame_index"].append(frame_index) + self.episode_buffer["timestamp"].append(timestamp) + + for key in frame: + if key == "task": + self.episode_buffer["task"].append(frame["task"]) + continue + if key not in self.features: + print("key ", key) + raise ValueError(f"An element of the frame is not in the features. '{key}' not in '{self.features.keys()}'.") + # import pdb + # pdb.set_trace() + self.episode_buffer[key].append(frame[key]) + self.episode_buffer["size"] += 1 + +# def crop_resize_no_padding(image, target_size=(480, 640)): +# """ +# Crop and scale to target size (no padding) +# :param image: input image (NumPy array) +# :param target_size: target size (height, width) +# :return: processed image +# """ +# h, w = image.shape[:2] +# target_h, target_w = target_size +# target_ratio = target_w / target_h # Target aspect ratio (e.g. 
640/480=1.333) + +# # the original image aspect ratio and cropping direction +# if w / h > target_ratio: # Original image is wider → crop width +# crop_w = int(h * target_ratio) # Calculate crop width based on target aspect ratio +# crop_h = h +# start_x = (w - crop_w) // 2 # Horizontal center starting point +# start_y = 0 +# else: # Original image is higher → crop height +# crop_h = int(w / target_ratio) # Calculate clipping height according to target aspect ratio +# crop_w = w +# start_x = 0 +# start_y = (h - crop_h) // 2 # Vertical center starting point + +# # Perform centered cropping (to prevent out-of-bounds) +# start_x, start_y = max(0, start_x), max(0, start_y) +# end_x, end_y = min(w, start_x + crop_w), min(h, start_y + crop_h) +# cropped = image[start_y:end_y, start_x:end_x] + +# # Resize to target size (bilinear interpolation) +# resized = cv2.resize(cropped, (target_w, target_h), interpolation=cv2.INTER_LINEAR) +# return resized + + +def load_lmdb_data(episode_path: Path, sava_path: Path, fps_factor: int, target_fps: int) -> Optional[Dict]: + def load_image(txn, key): + raw = txn.get(key) + data = pickle.loads(raw) + image = cv2.imdecode(data, cv2.IMREAD_COLOR) + # Convert to RGB if necessary + # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + # image = crop_resize_no_padding(image, target_size=(480, 640)) + return image + try: + env = lmdb.open( + str(episode_path / "lmdb"), + readonly=True, + lock=False, + max_readers=128, + readahead=False + ) + with env.begin(write=False) as txn: + keys = [k for k, _ in txn.cursor()] + + image_keys = sorted([k for k in keys if b'head' in k]) + if not image_keys: + return None + + all_qpos = pickle.loads(txn.get(b'/observations/qpos')) + + if np.isscalar(all_qpos): + total_steps = len(image_keys) + all_qpos = [all_qpos] * total_steps + else: + total_steps = len(all_qpos) + all_qpos = np.stack(all_qpos) + state_action_dict = {} + state_action_dict["states.left_joint.position"] = all_qpos[:, :6] + 
state_action_dict["states.left_gripper.position"] = all_qpos[:, 6][:, None] # np.expand_dims(all_qpos[:, 6], axis=1) + state_action_dict["states.right_joint.position"] = all_qpos[:, 7:13] + state_action_dict["states.right_gripper.position"] = all_qpos[:, 13][:, None] # np.expand_dims(all_qpos[:, 13], axis=1) + # state_keys = list(state_action_dict.keys()) + # for k in state_keys: + # state_action_dict[k.replace("states", "actions")] = np.concatenate([state_action_dict[k][1:, :], state_action_dict[k][-1, :][None,:]], axis=0) + + + # action_dict = {} + # action_dict["actions.left_joint.position"] = np.concatenate([state_dict["states.left_joint.position"][1:, :], state_dict["states.left_joint.position"][-1, :][None,:]], axis=0) + # action_dict["actions.left_gripper.position"] = state_dict["states.left_gripper.position"][1:, :] + # action_dict["actions.right_joint.position"] = state_dict["states.right_joint.position"][1:, :] + # action_dict["actions.right_gripper.position"] = state_dict["states.right_gripper.position"][1:, :] + + action_dict = {} + + action_dict["action"] = np.concatenate([all_qpos[1:,], all_qpos[-1,].reshape(-1, 14)], axis=0) + state_dict = {} + state_dict["observation.state"] = all_qpos + mask1 = filter_forbidden_frames(state_dict["observation.state"]) + # state_dict["observation.state"] = state_dict["observation.state"][mask1] + # action_dict["actions.left_gripper.position"] = state_dict["states.left_gripper.position"][1:, :] + # action_dict["actions.right_arm.position"] = np.concatenate([state_action_dict["states.right_joint.position"][1:, :], state_action_dict["states.right_joint.position"][-1, :][None,:]], axis=0) + # action_dict["actions.left_arm.position"] = state_dict["states.right_gripper.position"][1:, :] + + assert total_steps == len(image_keys), "qpos length mismatch" + selected_steps = [step for step in range(total_steps) if step % fps_factor == 0 and mask1[step]] + frames = [] + image_observations = { + "images.rgb.head": [], + 
"images.rgb.hand_left": [], + "images.rgb.hand_right": [] + } + + start_time = time.time() + + for step_index, step in enumerate(selected_steps): + step_str = f"{step:04d}" + head_key = f"observation/head/color_image/{step_str}".encode() + left_key = f"observation/left_wrist/color_image/{step_str}".encode() + right_key = f"observation/right_wrist/color_image/{step_str}".encode() + if not (head_key in keys and left_key in keys and right_key in keys): + continue + # state = all_qpos[step] + # if step_index < len(selected_steps) - 1: + # action = all_qpos[selected_steps[step_index + 1]] + # else: + # action = state + data_dict = {} + # for key, value in state_action_dict.items(): + # data_dict[key] = value[step] + data_dict['action'] = action_dict["action"][step] + data_dict["task"] = " ".join(episode_path.parent.parent.name.split("_")) + data_dict['observation.state'] = state_dict["observation.state"][step] + # frames.append({ + # "observation.states.joint.position": state, + # "actions.joint.position": action, + # "task": task_name, + # }) + frames.append(data_dict) + image_observations["images.rgb.head"].append(load_image(txn, head_key)) + image_observations["images.rgb.hand_left"].append(load_image(txn, left_key)) + image_observations["images.rgb.hand_right"].append(load_image(txn, right_key)) + end_time = time.time() + elapsed_time = end_time - start_time + print(f"load image_observations of {episode_path}") + env.close() + if not frames: + return None + os.makedirs(sava_path, exist_ok=True) + os.makedirs(sava_path/episode_path.name, exist_ok=True) + imageio.mimsave(sava_path/episode_path.name/'head.mp4', image_observations["images.rgb.head"], fps=target_fps) + imageio.mimsave(sava_path/episode_path.name/'hand_left.mp4', image_observations["images.rgb.hand_left"], fps=target_fps) + imageio.mimsave(sava_path/episode_path.name/'hand_right.mp4', image_observations["images.rgb.hand_right"], fps=target_fps) + print(f"imageio.mimsave time taken of {episode_path}") + + 
return { + "frames": frames, + "videos": { + "images.rgb.head": sava_path/episode_path.name/"head.mp4", + "images.rgb.hand_left": sava_path/episode_path.name/"hand_left.mp4", + "images.rgb.hand_right": sava_path/episode_path.name/"hand_right.mp4", + }, + } + + except Exception as e: + logging.error(f"Failed to load LMDB data: {e}") + return None + + +def get_all_tasks(src_path: Path, output_path: Path) -> Tuple[Path, Path]: + src_dirs = sorted(list(src_path.glob("*"))) # "set*-*_collector*_datatime" as the conversion unit + + save_dirs = [output_path/_dir.parent.name/_dir.name for _dir in src_dirs] + tasks_tuples = zip(src_dirs, save_dirs) + for task in tasks_tuples: + yield task + +def compute_episode_stats(episode_data: Dict[str, List[str] | np.ndarray], features: Dict) -> Dict: + ep_stats = {} + for key, data in episode_data.items(): + if features[key]["dtype"] == "string": + continue + elif features[key]["dtype"] in ["image", "video"]: + ep_ft_array = sample_images(data) + axes_to_reduce = (0, 2, 3) # keep channel dim + keepdims = True + else: + ep_ft_array = data # data is already a np.ndarray + axes_to_reduce = 0 # compute stats over the first axis + keepdims = data.ndim == 1 # keep as np.array + + ep_stats[key] = get_feature_stats(ep_ft_array, axis=axes_to_reduce, keepdims=keepdims) + if features[key]["dtype"] in ["image", "video"]: + ep_stats[key] = { + k: v if k == "count" else np.squeeze(v / 255.0, axis=0) for k, v in ep_stats[key].items() + } + return ep_stats + +def sample_images(input): + if type(input) is str: + video_path = input + reader = torchvision.io.VideoReader(video_path, stream="video") + frames = [frame["data"] for frame in reader] + frames_array = torch.stack(frames).numpy() # Shape: [T, C, H, W] + sampled_indices = sample_indices(len(frames_array)) + images = None + for i, idx in enumerate(sampled_indices): + img = frames_array[idx] + img = auto_downsample_height_width(img) + if images is None: + images = np.empty((len(sampled_indices), 
*img.shape), dtype=np.uint8) + images[i] = img + elif type(input) is np.ndarray: + frames_array = input[:, None, :, :] # Shape: [T, C, H, W] + sampled_indices = sample_indices(len(frames_array)) + images = None + for i, idx in enumerate(sampled_indices): + img = frames_array[idx] + img = auto_downsample_height_width(img) + if images is None: + images = np.empty((len(sampled_indices), *img.shape), dtype=np.uint8) + images[i] = img + return images + + +def load_local_dataset(episode_path: str, save_path:str, origin_fps=30, target_fps=30): + fps_factor = origin_fps // target_fps + # print(f"fps downsample factor: {fps_factor}") + # logging.info(f"fps downsample factor: {fps_factor}") + # for format_str in [f"{episode_id:07d}", f"{episode_id:06d}", str(episode_id)]: + # episode_path = Path(src_path) / format_str + # save_path = Path(save_path) / format_str + # if episode_path.exists(): + # break + # else: + # logging.warning(f"Episode directory not found for ID {episode_id}") + # return None, None + episode_path = Path(episode_path) + if not episode_path.exists(): + logging.warning(f"{episode_path} does not exist") + return None, None + + if not (episode_path / "lmdb/data.mdb").exists(): + logging.warning(f"LMDB data not found for episode {episode_path}") + return None, None + + raw_dataset = load_lmdb_data(episode_path, save_path, fps_factor, target_fps) + if raw_dataset is None: + return None, None + frames = raw_dataset["frames"] # states, actions, task + + videos = raw_dataset["videos"] # image paths + ## check the frames + for camera_name, video_path in videos.items(): + if not os.path.exists(video_path): + logging.error(f"Video file {video_path} does not exist.") + print(f"Camera {camera_name} Video file {video_path} does not exist.") + return None, None + return frames, videos + + +def save_as_lerobot_dataset(task: tuple[Path, Path], repo_id, num_threads, debug, origin_fps=30, target_fps=30, robot_type="piper", delete_downsampled_videos=True): + src_path, 
save_path = task + print(f"**Processing collected** {src_path}") + print(f"**saving to** {save_path}") + if save_path.exists(): + # print(f"Output directory {save_path} already exists. Deleting it.") + # logging.warning(f"Output directory {save_path} already exists. Deleting it.") + # shutil.rmtree(save_path) + print(f"Output directory {save_path} already exists.") + return + + dataset = ARXDataset.create( + repo_id=f"{repo_id}", + root=save_path, + fps=target_fps, + robot_type=robot_type, + features=FEATURES, + ) + all_episode_paths = sorted([f.as_posix() for f in src_path.glob(f"*") if f.is_dir()]) + # all_subdir_eids = [int(Path(path).name) for path in all_subdir] + if debug: + for i in range(1): + # pdb.set_trace() + frames, videos = load_local_dataset(episode_path=all_episode_paths[i], save_path=save_path, origin_fps=origin_fps, target_fps=target_fps) + for frame_data in frames: + dataset.add_frame(frame_data) + dataset.save_episode(videos=videos) + if delete_downsampled_videos: + for _, video_path in videos.items(): + parent_dir = os.path.dirname(video_path) + try: + shutil.rmtree(parent_dir) + # os.remove(video_path) + # print(f"Successfully deleted: {parent_dir}") + print(f"Successfully deleted: {video_path}") + except Exception as e: + pass # Handle the case where the directory might not exist or is already deleted + else: + for batch_index in range(len(all_episode_paths)//num_threads+1): + batch_episode_paths = all_episode_paths[batch_index*num_threads:(batch_index+1)*num_threads] + if len(batch_episode_paths) == 0: + continue + with ThreadPoolExecutor(max_workers=num_threads) as executor: + futures = [] + for episode_path in batch_episode_paths: + print("starting to process episode: ", episode_path) + futures.append( + executor.submit(load_local_dataset, episode_path=episode_path, save_path=save_path, origin_fps=origin_fps, target_fps=target_fps) + ) + for raw_dataset in as_completed(futures): + frames, videos = raw_dataset.result() + if frames is None 
or videos is None: + print(f"Skipping episode {episode_path} due to missing data.") + continue + for frame_data in frames: + dataset.add_frame(frame_data) + dataset.save_episode(videos=videos) + gc.collect() + print(f"finishing processed {videos}") + if delete_downsampled_videos: + for _, video_path in videos.items(): + # Get the parent directory of the video + parent_dir = os.path.dirname(video_path) + try: + shutil.rmtree(parent_dir) + print(f"Successfully deleted: {parent_dir}") + except Exception as e: + pass + +def main(src_path, save_path, repo_id, num_threads=60, debug=False, origin_fps=30, target_fps=30): + logging.info("Scanning for episodes...") + tasks = get_all_tasks(src_path, save_path) + # import pdb + # pdb.set_trace() + if debug: + task = next(tasks) + save_as_lerobot_dataset(task, repo_id, num_threads=num_threads, debug=debug, origin_fps=origin_fps, target_fps=target_fps) + else: + for task in tasks: + save_as_lerobot_dataset(task, repo_id, num_threads=num_threads, debug=debug, origin_fps=origin_fps, target_fps=target_fps) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Convert collected data from Piper to Lerobot format.") + parser.add_argument( + "--src_path", + type=str, + # required=False, + default="/fs-computility/efm/shared/datasets/myData-A1/real/raw_data/agilex_split_aloha/", + help="Path to the input file containing collected data in Piper format.", + #help="/fs-computility/efm/shared/datasets/myData-A1/real/raw_data/agilex_split_aloha/Make_a_beef_sandwich", + ) + parser.add_argument( + "--save_path", + type=str, + # required=False, + default="/fs-computility/efm/shared/datasets/myData-A1/real/lerobot_v2_1/agilex_split_aloha/", + help="Path to the output file where the converted Lerobot format will be saved.", + #help="Path to the output file where the converted Lerobot format will be saved.", + ) + parser.add_argument( + "--debug", + action="store_true", + help="Run in debug mode with limited episodes", + ) 
+    parser.add_argument(
+        "--num-threads",
+        type=int,
+        default=50,
+        help="Number of threads per process",
+    )
+    # parser.add_argument(
+    #     "--task_name",
+    #     type=str,
+    #     required=True,
+    #     default="Pick_up_the_marker_and_put_it_into_the_pen_holder",
+    #     help="Name of the task to be processed. Default is 'Pick_up_the_marker_and_put_it_into_the_pen_holder'.",
+    # )
+    parser.add_argument(
+        "--repo_id",
+        type=str,
+        required=True,
+        # default="SplitAloha_20250714",
+        help="Identifier for the dataset repository.",
+    )
+    parser.add_argument(
+        "--origin_fps",
+        type=int,
+        default=30,
+        help="Frames per second for the observation video. Default is 30.",
+    )
+    parser.add_argument(
+        "--target_fps",
+        type=int,
+        default=30,
+        help="Frames per second for the downsampled video. Default is 30.",
+    )
+    args = parser.parse_args()
+    assert int(args.origin_fps) % int(args.target_fps) == 0, "origin_fps must be an integer multiple of target_fps"
+    start_time = time.time()
+    main(
+        src_path=Path(args.src_path),
+        save_path=Path(args.save_path),
+        repo_id=args.repo_id,
+        num_threads=args.num_threads,
+        debug=args.debug,
+        origin_fps=args.origin_fps,
+        target_fps=args.target_fps
+    )
+    end_time = time.time()
+    elapsed_time = end_time - start_time
+    print(f"Total time taken: {elapsed_time:.2f} seconds")
+# --target_fps 10
+# --src_path /fs-computility/efm/shared/datasets/myData-A1/real/raw_data/agilex_split_aloha/Put_the_bananas_in_the_basket
+# --save_path /mnt/shared-storage-user/internvla/Users/liyang/data/processed_data/arx_lift2
\ No newline at end of file
diff --git a/policy/openpi-InternData-A1/examples/arx/merge_lerobot_data.py b/policy/openpi-InternData-A1/examples/arx/merge_lerobot_data.py
new file mode 100644
index 0000000..91b98b3
--- /dev/null
+++ b/policy/openpi-InternData-A1/examples/arx/merge_lerobot_data.py
@@ -0,0 +1,1693 @@
+import argparse
+import contextlib
+import json
+import os
+import shutil
+import traceback
+
+import numpy as np
+import pandas as pd
+from termcolor import colored + + +def load_jsonl(file_path): + """ + 从JSONL文件加载数据 + (Load data from a JSONL file) + + Args: + file_path (str): JSONL文件路径 (Path to the JSONL file) + + Returns: + list: 包含文件中每行JSON对象的列表 (List containing JSON objects from each line) + """ + data = [] + + # Special handling for episodes_stats.jsonl + if "episodes_stats.jsonl" in file_path: + try: + # Try to load the entire file as a JSON array + with open(file_path) as f: + content = f.read() + # Check if the content starts with '[' and ends with ']' + if content.strip().startswith("[") and content.strip().endswith("]"): + return json.loads(content) + else: + # Try to add brackets and parse + try: + return json.loads("[" + content + "]") + except json.JSONDecodeError: + pass + except Exception as e: + print(f"Error loading {file_path} as JSON array: {e}") + + # Fall back to line-by-line parsing + try: + with open(file_path) as f: + for line in f: + if line.strip(): + with contextlib.suppress(json.JSONDecodeError): + data.append(json.loads(line)) + except Exception as e: + print(f"Error loading {file_path} line by line: {e}") + else: + # Standard JSONL parsing for other files + with open(file_path) as f: + for line in f: + if line.strip(): + with contextlib.suppress(json.JSONDecodeError): + data.append(json.loads(line)) + + return data + + +def save_jsonl(data, file_path): + """ + 将数据保存为JSONL格式 + (Save data in JSONL format) + + Args: + data (list): 要保存的JSON对象列表 (List of JSON objects to save) + file_path (str): 输出文件路径 (Path to the output file) + """ + with open(file_path, "w") as f: + for item in data: + f.write(json.dumps(item) + "\n") + + +def merge_stats(stats_list): + """ + 合并多个数据集的统计信息,确保维度一致性 + (Merge statistics from multiple datasets, ensuring dimensional consistency) + + Args: + stats_list (list): 包含每个数据集统计信息的字典列表 + (List of dictionaries containing statistics for each dataset) + + Returns: + dict: 合并后的统计信息 (Merged statistics) + """ + # Initialize merged stats with the structure 
of the first stats + merged_stats = {} + + # Find common features across all stats + common_features = set(stats_list[0].keys()) + for stats in stats_list[1:]: + common_features = common_features.intersection(set(stats.keys())) + + # Process features in the order they appear in the first stats file + for feature in stats_list[0]: + if feature not in common_features: + continue + + merged_stats[feature] = {} + + # Find common stat types for this feature + common_stat_types = [] + for stat_type in ["mean", "std", "max", "min"]: + if all(stat_type in stats[feature] for stats in stats_list): + common_stat_types.append(stat_type) + + # Determine the original shape of each value + original_shapes = [] + for stats in stats_list: + if "mean" in stats[feature]: + shape = np.array(stats[feature]["mean"]).shape + original_shapes.append(shape) + + # Special handling for image features to preserve nested structure + if feature.startswith("observation.images."): + for stat_type in common_stat_types: + try: + # Get all values + values = [stats[feature][stat_type] for stats in stats_list] + + # For image features, we need to preserve the nested structure + # Initialize with the first value's structure + result = [] + + # For RGB channels + for channel_idx in range(len(values[0])): + channel_result = [] + + # For each pixel row + for pixel_idx in range(len(values[0][channel_idx])): + pixel_result = [] + + # For each pixel value + for value_idx in range(len(values[0][channel_idx][pixel_idx])): + # Calculate statistic based on type + if stat_type == "mean": + # Simple average + avg = sum( + values[i][channel_idx][pixel_idx][value_idx] + for i in range(len(values)) + ) / len(values) + pixel_result.append(avg) + elif stat_type == "std": + # Simple average of std + avg = sum( + values[i][channel_idx][pixel_idx][value_idx] + for i in range(len(values)) + ) / len(values) + pixel_result.append(avg) + elif stat_type == "max": + # Maximum + max_val = max( + 
values[i][channel_idx][pixel_idx][value_idx] + for i in range(len(values)) + ) + pixel_result.append(max_val) + elif stat_type == "min": + # Minimum + min_val = min( + values[i][channel_idx][pixel_idx][value_idx] + for i in range(len(values)) + ) + pixel_result.append(min_val) + + channel_result.append(pixel_result) + + result.append(channel_result) + + merged_stats[feature][stat_type] = result + except Exception as e: + print(f"Warning: Error processing image feature {feature}.{stat_type}: {e}") + # Fallback to first value + merged_stats[feature][stat_type] = values[0] + # If all shapes are the same, no need for special handling + elif len({str(shape) for shape in original_shapes}) == 1: + # All shapes are the same, use standard merging + for stat_type in common_stat_types: + values = [stats[feature][stat_type] for stats in stats_list] + + try: + # Calculate the new statistic based on the type + if stat_type == "mean": + if all("count" in stats[feature] for stats in stats_list): + counts = [stats[feature]["count"][0] for stats in stats_list] + total_count = sum(counts) + weighted_values = [ + np.array(val) * count / total_count + for val, count in zip(values, counts, strict=False) + ] + merged_stats[feature][stat_type] = np.sum(weighted_values, axis=0).tolist() + else: + merged_stats[feature][stat_type] = np.mean(np.array(values), axis=0).tolist() + + elif stat_type == "std": + if all("count" in stats[feature] for stats in stats_list): + counts = [stats[feature]["count"][0] for stats in stats_list] + total_count = sum(counts) + variances = [np.array(std) ** 2 for std in values] + weighted_variances = [ + var * count / total_count + for var, count in zip(variances, counts, strict=False) + ] + merged_stats[feature][stat_type] = np.sqrt( + np.sum(weighted_variances, axis=0) + ).tolist() + else: + merged_stats[feature][stat_type] = np.mean(np.array(values), axis=0).tolist() + + elif stat_type == "max": + merged_stats[feature][stat_type] = 
np.maximum.reduce(np.array(values)).tolist() + + elif stat_type == "min": + merged_stats[feature][stat_type] = np.minimum.reduce(np.array(values)).tolist() + except Exception as e: + print(f"Warning: Error processing {feature}.{stat_type}: {e}") + continue + else: + # Shapes are different, need special handling for state vectors + if feature in ["observation.state", "action"]: + # For state vectors, we need to handle different dimensions + max_dim = max(len(np.array(stats[feature]["mean"]).flatten()) for stats in stats_list) + + for stat_type in common_stat_types: + try: + # Get values and their original dimensions + values_with_dims = [] + for stats in stats_list: + val = np.array(stats[feature][stat_type]).flatten() + dim = len(val) + values_with_dims.append((val, dim)) + + # Initialize result array with zeros + result = np.zeros(max_dim) + + # Calculate statistics for each dimension separately + if stat_type == "mean": + if all("count" in stats[feature] for stats in stats_list): + counts = [stats[feature]["count"][0] for stats in stats_list] + total_count = sum(counts) + + # For each dimension, calculate weighted mean of available values + for d in range(max_dim): + dim_values = [] + dim_weights = [] + for (val, dim), count in zip(values_with_dims, counts, strict=False): + if d < dim: # Only use values that have this dimension + dim_values.append(val[d]) + dim_weights.append(count) + + if dim_values: # If we have values for this dimension + weighted_sum = sum( + v * w for v, w in zip(dim_values, dim_weights, strict=False) + ) + result[d] = weighted_sum / sum(dim_weights) + else: + # Simple average for each dimension + for d in range(max_dim): + dim_values = [val[d] for val, dim in values_with_dims if d < dim] + if dim_values: + result[d] = sum(dim_values) / len(dim_values) + + elif stat_type == "std": + if all("count" in stats[feature] for stats in stats_list): + counts = [stats[feature]["count"][0] for stats in stats_list] + total_count = sum(counts) + + # For 
each dimension, calculate weighted variance + for d in range(max_dim): + dim_variances = [] + dim_weights = [] + for (val, dim), count in zip(values_with_dims, counts, strict=False): + if d < dim: # Only use values that have this dimension + dim_variances.append(val[d] ** 2) # Square for variance + dim_weights.append(count) + + if dim_variances: # If we have values for this dimension + weighted_var = sum( + v * w for v, w in zip(dim_variances, dim_weights, strict=False) + ) / sum(dim_weights) + result[d] = np.sqrt(weighted_var) # Take sqrt for std + else: + # Simple average of std for each dimension + for d in range(max_dim): + dim_values = [val[d] for val, dim in values_with_dims if d < dim] + if dim_values: + result[d] = sum(dim_values) / len(dim_values) + + elif stat_type == "max": + # For each dimension, take the maximum of available values + for d in range(max_dim): + dim_values = [val[d] for val, dim in values_with_dims if d < dim] + if dim_values: + result[d] = max(dim_values) + + elif stat_type == "min": + # For each dimension, take the minimum of available values + for d in range(max_dim): + dim_values = [val[d] for val, dim in values_with_dims if d < dim] + if dim_values: + result[d] = min(dim_values) + + # Convert result to list and store + merged_stats[feature][stat_type] = result.tolist() + + except Exception as e: + print( + f"Warning: Error processing {feature}.{stat_type} with different dimensions: {e}" + ) + continue + else: + # For other features with different shapes, use the first shape as template + template_shape = original_shapes[0] + print(f"Using shape {template_shape} as template for {feature}") + + for stat_type in common_stat_types: + try: + # Use the first stats as template + merged_stats[feature][stat_type] = stats_list[0][feature][stat_type] + except Exception as e: + print( + f"Warning: Error processing {feature}.{stat_type} with shape {template_shape}: {e}" + ) + continue + + # Add count if available in all stats + if all("count" in 
stats[feature] for stats in stats_list): + try: + merged_stats[feature]["count"] = [sum(stats[feature]["count"][0] for stats in stats_list)] + except Exception as e: + print(f"Warning: Error processing {feature}.count: {e}") + + return merged_stats + + +def copy_videos(source_folders, output_folder, episode_mapping): + """ + 从源文件夹复制视频文件到输出文件夹,保持正确的索引和结构 + (Copy video files from source folders to output folder, maintaining correct indices and structure) + + Args: + source_folders (list): 源数据集文件夹路径列表 (List of source dataset folder paths) + output_folder (str): 输出文件夹路径 (Output folder path) + episode_mapping (list): 包含(旧文件夹,旧索引,新索引)元组的列表 + (List of tuples containing (old_folder, old_index, new_index)) + """ + # Get info.json to determine video structure + info_path = os.path.join(source_folders[0], "meta", "info.json") + with open(info_path) as f: + info = json.load(f) + + video_path_template = info["video_path"] + + # Identify video keys from the template + # Example: "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4" + video_keys = [] + for feature_name, feature_info in info["features"].items(): + if feature_info.get("dtype") == "video": + # Use the full feature name as the video key + video_keys.append(feature_name) + + print(f"Found video keys: {video_keys}") + + # Copy videos for each episode + for old_folder, old_index, new_index in episode_mapping: + # Determine episode chunk (usually 0 for small datasets) + episode_chunk = old_index // info["chunks_size"] + new_episode_chunk = new_index // info["chunks_size"] + + for video_key in video_keys: + # Try different possible source paths + source_patterns = [ + # Standard path with the episode index from metadata + os.path.join( + old_folder, + video_path_template.format( + episode_chunk=episode_chunk, video_key=video_key, episode_index=old_index + ), + ), + # Try with 0-based indexing + os.path.join( + old_folder, + video_path_template.format(episode_chunk=0, video_key=video_key, 
episode_index=0), + ), + # Try with different formatting + os.path.join( + old_folder, f"videos/chunk-{episode_chunk:03d}/{video_key}/episode_{old_index}.mp4" + ), + os.path.join(old_folder, f"videos/chunk-000/{video_key}/episode_000000.mp4"), + ] + + # Find the first existing source path + source_video_path = None + for pattern in source_patterns: + if os.path.exists(pattern): + source_video_path = pattern + break + + if source_video_path: + # Construct destination path + dest_video_path = os.path.join( + output_folder, + video_path_template.format( + episode_chunk=new_episode_chunk, video_key=video_key, episode_index=new_index + ), + ) + + # Create destination directory if it doesn't exist + os.makedirs(os.path.dirname(dest_video_path), exist_ok=True) + + print(f"Copying video: {source_video_path} -> {dest_video_path}") + shutil.copy2(source_video_path, dest_video_path) + else: + # If no file is found, search the directory recursively + found = False + for root, _, files in os.walk(os.path.join(old_folder, "videos")): + for file in files: + if file.endswith(".mp4") and video_key in root: + source_video_path = os.path.join(root, file) + + # Construct destination path + dest_video_path = os.path.join( + output_folder, + video_path_template.format( + episode_chunk=new_episode_chunk, + video_key=video_key, + episode_index=new_index, + ), + ) + + # Create destination directory if it doesn't exist + os.makedirs(os.path.dirname(dest_video_path), exist_ok=True) + + print( + f"Copying video (found by search): {source_video_path} -> {dest_video_path}" + ) + shutil.copy2(source_video_path, dest_video_path) + found = True + break + if found: + break + + if not found: + print( + f"Warning: Video file not found for {video_key}, episode {old_index} in {old_folder}" + ) + + +def validate_timestamps(source_folders, tolerance_s=1e-4): + """ + 验证源数据集的时间戳结构,识别潜在问题 + (Validate timestamp structure of source datasets, identify potential issues) + + Args: + source_folders (list): 
源数据集文件夹路径列表 (List of source dataset folder paths) + tolerance_s (float): 时间戳不连续性的容差值,以秒为单位 (Tolerance for timestamp discontinuities in seconds) + + Returns: + tuple: (issues, fps_values) - 问题列表和检测到的FPS值列表 + (List of issues and list of detected FPS values) + """ + issues = [] + fps_values = [] + + for folder in source_folders: + try: + # 尝试从 info.json 获取 FPS (Try to get FPS from info.json) + info_path = os.path.join(folder, "meta", "info.json") + if os.path.exists(info_path): + with open(info_path) as f: + info = json.load(f) + if "fps" in info: + fps = info["fps"] + fps_values.append(fps) + print(f"数据集 {folder} FPS={fps} (Dataset {folder} FPS={fps})") + + # 检查是否有parquet文件包含时间戳 (Check if any parquet files contain timestamps) + parquet_path = None + for root, _, files in os.walk(os.path.join(folder, "parquet")): + for file in files: + if file.endswith(".parquet"): + parquet_path = os.path.join(root, file) + break + if parquet_path: + break + + if not parquet_path: + for root, _, files in os.walk(os.path.join(folder, "data")): + for file in files: + if file.endswith(".parquet"): + parquet_path = os.path.join(root, file) + break + if parquet_path: + break + + if parquet_path: + df = pd.read_parquet(parquet_path) + timestamp_cols = [col for col in df.columns if "timestamp" in col or "time" in col] + if timestamp_cols: + print( + f"数据集 {folder} 包含时间戳列: {timestamp_cols} (Dataset {folder} contains timestamp columns: {timestamp_cols})" + ) + else: + issues.append( + f"警告: 数据集 {folder} 没有时间戳列 (Warning: Dataset {folder} has no timestamp columns)" + ) + else: + issues.append( + f"警告: 数据集 {folder} 未找到parquet文件 (Warning: No parquet files found in dataset {folder})" + ) + + except Exception as e: + issues.append( + f"错误: 验证数据集 {folder} 失败: {e} (Error: Failed to validate dataset {folder}: {e})" + ) + print(f"验证错误: {e} (Validation error: {e})") + traceback.print_exc() + + # 检查FPS是否一致 (Check if FPS values are consistent) + if len(set(fps_values)) > 1: + issues.append( + f"警告: 
数据集FPS不一致: {fps_values} (Warning: Inconsistent FPS across datasets: {fps_values})" + ) + + return issues, fps_values + + +def copy_data_files( + source_folders, + output_folder, + episode_mapping, + state_max_dim=14, # 默认状态向量维度为32 + action_max_dim=14, # 默认动作向量维度为32 + fps=None, + episode_to_frame_index=None, + folder_task_mapping=None, + chunks_size=1000, + default_fps=20, +): + """ + 从源文件夹复制数据文件到输出文件夹,同时处理索引映射和维度填充 + (Copy data files from source folders to output folder, handling index mapping and dimension padding) + + Args: + source_folders (list): 源数据集文件夹路径列表 (List of source dataset folder paths) + output_folder (str): 输出文件夹路径 (Output folder path) + episode_mapping (list): 包含(旧文件夹,旧索引,新索引)元组的列表 + (List of tuples containing (old_folder, old_index, new_index)) + state_max_dim (int): 状态向量的最大维度 (Maximum dimension for state vectors) + action_max_dim (int): 动作向量的最大维度 (Maximum dimension for action vectors) + fps (float): 帧率 (frames per second) + episode_to_frame_index (dict): 每个episode对应的起始帧索引 + (Start frame index for each episode) + folder_task_mapping (dict): 文件夹任务映射 (Folder task mapping) + chunks_size (int): 数据块大小 (Chunk size) + default_fps (float): 默认帧率 (Default frame rate) + """ + # 获取第一个数据集的FPS(如果未提供)(Get FPS from first dataset if not provided) + if fps is None: + info_path = os.path.join(source_folders[0], "meta", "info.json") + if os.path.exists(info_path): + with open(info_path) as f: + info = json.load(f) + fps = info.get( + "fps", default_fps + ) # 使用变量替代硬编码的20 (Use variable instead of hardcoded 20) + else: + fps = default_fps # 使用变量替代硬编码的20 (Use variable instead of hardcoded 20) + + print(f"使用FPS={fps}") + + # 为每个episode复制和处理数据文件 (Copy and process data files for each episode) + total_copied = 0 + total_failed = 0 + + # 添加一个列表来记录失败的文件及原因 + # (Add a list to record failed files and reasons) + failed_files = [] + + for i, (old_folder, old_index, new_index) in enumerate(episode_mapping): + # 尝试找到源parquet文件 (Try to find source parquet file) + episode_str = 
f"episode_{old_index:06d}.parquet" + source_paths = [ + os.path.join(old_folder, "parquet", episode_str), + os.path.join(old_folder, "data", episode_str), + ] + + source_path = None + for path in source_paths: + if os.path.exists(path): + source_path = path + break + + if source_path: + try: + # 读取parquet文件 (Read parquet file) + df = pd.read_parquet(source_path) + + # 检查是否需要填充维度 - 为不同特征类型使用不同的最大维度 + # 为状态向量填充 + if "observation.state" in df.columns: + # 检查第一个非空值 (Check first non-null value) + for _idx, value in enumerate(df["observation.state"]): + if value is not None and isinstance(value, (list, np.ndarray)): + current_dim = len(value) + if current_dim < state_max_dim: + print( + f"填充状态向量从 {current_dim} 维到 {state_max_dim} 维" + f" (Padding state vector from {current_dim} to {state_max_dim} dimensions)" + ) + # 使用零填充到目标维度 (Pad with zeros to target dimension) + df["observation.state"] = df["observation.state"].apply( + lambda x: np.pad(x, (0, state_max_dim - len(x)), "constant").tolist() + if x is not None + and isinstance(x, (list, np.ndarray)) + and len(x) < state_max_dim + else x + ) + break + + # 为动作向量填充 + if "action" in df.columns: + # 检查第一个非空值 (Check first non-null value) + for _idx, value in enumerate(df["action"]): + if value is not None and isinstance(value, (list, np.ndarray)): + current_dim = len(value) + if current_dim < action_max_dim: + print( + f"填充动作向量从 {current_dim} 维到 {action_max_dim} 维" + f" (Padding action vector from {current_dim} to {action_max_dim} dimensions)" + ) + # 使用零填充到目标维度 (Pad with zeros to target dimension) + df["action"] = df["action"].apply( + lambda x: np.pad(x, (0, action_max_dim - len(x)), "constant").tolist() + if x is not None + and isinstance(x, (list, np.ndarray)) + and len(x) < action_max_dim + else x + ) + break + + # 更新episode_index列 (Update episode_index column) + if "episode_index" in df.columns: + print( + f"更新episode_index从 {df['episode_index'].iloc[0]} 到 {new_index} (Update episode_index from 
{df['episode_index'].iloc[0]} to {new_index})" + ) + df["episode_index"] = new_index + + # 更新index列 (Update index column) + if "index" in df.columns: + if episode_to_frame_index and new_index in episode_to_frame_index: + # 使用预先计算的帧索引起始值 (Use pre-calculated frame index start value) + first_index = episode_to_frame_index[new_index] + print( + f"更新index列,起始值: {first_index}(使用全局累积帧计数)(Update index column, start value: {first_index} (using global cumulative frame count))" + ) + else: + # 如果没有提供映射,使用当前的计算方式作为回退 + # (If no mapping provided, use current calculation as fallback) + first_index = new_index * len(df) + print( + f"更新index列,起始值: {first_index}(使用episode索引乘以长度)(Update index column, start value: {first_index} (using episode index multiplied by length))" + ) + + # 更新所有帧的索引 (Update indices for all frames) + df["index"] = [first_index + i for i in range(len(df))] + + # 更新task_index列 (Update task_index column) + if "task_index" in df.columns and folder_task_mapping and old_folder in folder_task_mapping: + # 获取当前task_index (Get current task_index) + current_task_index = df["task_index"].iloc[0] + + # 检查是否有对应的新索引 (Check if there's a corresponding new index) + if current_task_index in folder_task_mapping[old_folder]: + new_task_index = folder_task_mapping[old_folder][current_task_index] + print( + f"更新task_index从 {current_task_index} 到 {new_task_index} (Update task_index from {current_task_index} to {new_task_index})" + ) + df["task_index"] = new_task_index + else: + print( + f"警告: 找不到task_index {current_task_index}的映射关系 (Warning: No mapping found for task_index {current_task_index})" + ) + + # 计算chunk编号 (Calculate chunk number) + chunk_index = new_index // chunks_size + + # 创建正确的目标目录 (Create correct target directory) + chunk_dir = os.path.join(output_folder, "data", f"chunk-{chunk_index:03d}") + os.makedirs(chunk_dir, exist_ok=True) + + # 构建正确的目标路径 (Build correct target path) + dest_path = os.path.join(chunk_dir, f"episode_{new_index:06d}.parquet") + + # 保存到正确位置 (Save to 
correct location) + df.to_parquet(dest_path, index=False) + + total_copied += 1 + print(f"已处理并保存: {dest_path} (Processed and saved: {dest_path})") + + except Exception as e: + error_msg = f"处理 {source_path} 失败: {e} (Processing {source_path} failed: {e})" + print(error_msg) + traceback.print_exc() + failed_files.append({"file": source_path, "reason": str(e), "episode": old_index}) + total_failed += 1 + else: + # 文件不在标准位置,尝试递归搜索 + found = False + for root, _, files in os.walk(old_folder): + for file in files: + if file.endswith(".parquet") and f"episode_{old_index:06d}" in file: + try: + source_path = os.path.join(root, file) + + # 读取parquet文件 (Read parquet file) + df = pd.read_parquet(source_path) + + # 检查是否需要填充维度 - 为不同特征类型使用不同的最大维度 + # 为状态向量填充 + if "observation.state" in df.columns: + # 检查第一个非空值 (Check first non-null value) + for _idx, value in enumerate(df["observation.state"]): + if value is not None and isinstance(value, (list, np.ndarray)): + current_dim = len(value) + if current_dim < state_max_dim: + print( + f"填充状态向量从 {current_dim} 维到 {state_max_dim} 维" + f" (Padding state vector from {current_dim} to {state_max_dim} dimensions)" + ) + # 使用零填充到目标维度 (Pad with zeros to target dimension) + df["observation.state"] = df["observation.state"].apply( + lambda x: np.pad(x, (0, state_max_dim - len(x)), "constant").tolist() + if x is not None + and isinstance(x, (list, np.ndarray)) + and len(x) < state_max_dim + else x + ) + break + + # 为动作向量填充 + if "action" in df.columns: + # 检查第一个非空值 (Check first non-null value) + for _idx, value in enumerate(df["action"]): + if value is not None and isinstance(value, (list, np.ndarray)): + current_dim = len(value) + if current_dim < action_max_dim: + print( + f"填充动作向量从 {current_dim} 维到 {action_max_dim} 维" + f" (Padding action vector from {current_dim} to {action_max_dim} dimensions)" + ) + # 使用零填充到目标维度 (Pad with zeros to target dimension) + df["action"] = df["action"].apply( + lambda x: np.pad(x, (0, action_max_dim - len(x)), 
"constant").tolist() + if x is not None + and isinstance(x, (list, np.ndarray)) + and len(x) < action_max_dim + else x + ) + break + + # 更新episode_index列 (Update episode_index column) + if "episode_index" in df.columns: + print( + f"更新episode_index从 {df['episode_index'].iloc[0]} 到 {new_index} (Update episode_index from {df['episode_index'].iloc[0]} to {new_index})" + ) + df["episode_index"] = new_index + + # 更新index列 (Update index column) + if "index" in df.columns: + if episode_to_frame_index and new_index in episode_to_frame_index: + # 使用预先计算的帧索引起始值 (Use pre-calculated frame index start value) + first_index = episode_to_frame_index[new_index] + print( + f"更新index列,起始值: {first_index}(使用全局累积帧计数)(Update index column, start value: {first_index} (using global cumulative frame count))" + ) + else: + # 如果没有提供映射,使用当前的计算方式作为回退 + # (If no mapping provided, use current calculation as fallback) + first_index = new_index * len(df) + print( + f"更新index列,起始值: {first_index}(使用episode索引乘以长度)(Update index column, start value: {first_index} (using episode index multiplied by length))" + ) + + # 更新所有帧的索引 (Update indices for all frames) + df["index"] = [first_index + i for i in range(len(df))] + + # 更新task_index列 (Update task_index column) + if ( + "task_index" in df.columns + and folder_task_mapping + and old_folder in folder_task_mapping + ): + # 获取当前task_index (Get current task_index) + current_task_index = df["task_index"].iloc[0] + + # 检查是否有对应的新索引 (Check if there's a corresponding new index) + if current_task_index in folder_task_mapping[old_folder]: + new_task_index = folder_task_mapping[old_folder][current_task_index] + print( + f"更新task_index从 {current_task_index} 到 {new_task_index} (Update task_index from {current_task_index} to {new_task_index})" + ) + df["task_index"] = new_task_index + else: + print( + f"警告: 找不到task_index {current_task_index}的映射关系 (Warning: No mapping found for task_index {current_task_index})" + ) + + # 计算chunk编号 (Calculate chunk number) + chunk_index = 
new_index // chunks_size + + # 创建正确的目标目录 (Create correct target directory) + chunk_dir = os.path.join(output_folder, "data", f"chunk-{chunk_index:03d}") + os.makedirs(chunk_dir, exist_ok=True) + + # 构建正确的目标路径 (Build correct target path) + dest_path = os.path.join(chunk_dir, f"episode_{new_index:06d}.parquet") + + # 保存到正确位置 (Save to correct location) + df.to_parquet(dest_path, index=False) + + total_copied += 1 + found = True + print(f"已处理并保存: {dest_path} (Processed and saved: {dest_path})") + break + except Exception as e: + error_msg = f"处理 {source_path} 失败: {e} (Processing {source_path} failed: {e})" + print(error_msg) + traceback.print_exc() + failed_files.append({"file": source_path, "reason": str(e), "episode": old_index}) + total_failed += 1 + if found: + break + + if not found: + error_msg = f"找不到episode {old_index}的parquet文件,源文件夹: {old_folder}" + print(error_msg) + failed_files.append( + {"file": f"episode_{old_index:06d}.parquet", "reason": "文件未找到", "folder": old_folder} + ) + total_failed += 1 + + print(f"共复制 {total_copied} 个数据文件,{total_failed} 个失败") + + # 打印所有失败的文件详情 (Print details of all failed files) + if failed_files: + print("\n失败的文件详情 (Details of failed files):") + for i, failed in enumerate(failed_files): + print(f"{i + 1}. 
文件 (File): {failed['file']}") + if "folder" in failed: + print(f" 文件夹 (Folder): {failed['folder']}") + if "episode" in failed: + print(f" Episode索引 (Episode index): {failed['episode']}") + print(f" 原因 (Reason): {failed['reason']}") + print("---") + + return total_copied > 0 + + +def pad_parquet_data(source_path, target_path, original_dim=14, target_dim=18): + """ + 通过零填充将parquet数据从原始维度扩展到目标维度 + (Extend parquet data from original dimension to target dimension by zero-padding) + + Args: + source_path (str): 源parquet文件路径 (Source parquet file path) + target_path (str): 目标parquet文件路径 (Target parquet file path) + original_dim (int): 原始向量维度 (Original vector dimension) + target_dim (int): 目标向量维度 (Target vector dimension) + """ + # 读取parquet文件 + df = pd.read_parquet(source_path) + + # 打印列名以便调试 + print(f"Columns in {source_path}: {df.columns.tolist()}") + + # 创建新的DataFrame来存储填充后的数据 + new_df = df.copy() + + # 检查observation.state和action列是否存在 + if "observation.state" in df.columns: + # 检查第一行数据,确认是否为向量 + first_state = df["observation.state"].iloc[0] + print(f"First observation.state type: {type(first_state)}, value: {first_state}") + + # 如果是向量(列表或numpy数组) + if isinstance(first_state, (list, np.ndarray)): + # 检查维度 + state_dim = len(first_state) + print(f"observation.state dimension: {state_dim}") + + if state_dim < target_dim: + # 填充向量 + print(f"Padding observation.state from {state_dim} to {target_dim} dimensions") + new_df["observation.state"] = df["observation.state"].apply( + lambda x: np.pad(x, (0, target_dim - len(x)), "constant").tolist() + ) + + # 同样处理action列 + if "action" in df.columns: + # 检查第一行数据 + first_action = df["action"].iloc[0] + print(f"First action type: {type(first_action)}, value: {first_action}") + + # 如果是向量 + if isinstance(first_action, (list, np.ndarray)): + # 检查维度 + action_dim = len(first_action) + print(f"action dimension: {action_dim}") + + if action_dim < target_dim: + # 填充向量 + print(f"Padding action from {action_dim} to {target_dim} dimensions") + 
new_df["action"] = df["action"].apply( + lambda x: np.pad(x, (0, target_dim - len(x)), "constant").tolist() + ) + + # 确保目标目录存在 + os.makedirs(os.path.dirname(target_path), exist_ok=True) + + # 保存到新的parquet文件 + new_df.to_parquet(target_path, index=False) + + print(f"已将{source_path}处理并保存到{target_path}") + + return new_df + + +def count_video_frames_torchvision(video_path): + """ + Count the number of frames in a video file using torchvision + + Args: + video_path (str): + + Returns: + Frame count (int): + """ + try: + import torchvision + + # Ensure torchvision version is recent enough for VideoReader and AV1 support + # (This is a general good practice, specific version checks might be needed + # depending on the exact AV1 library used by torchvision's backend) + # print(f"Torchvision version: {torchvision.__version__}") + # print(f"PyTorch version: {torch.__version__}") + + # VideoReader requires the video path as a string + reader = torchvision.io.VideoReader(video_path, "video") + + # Attempt to get frame count from metadata + # Metadata structure can vary; "video" stream usually has "num_frames" + metadata = reader.get_metadata() + frame_count = 0 + + if "video" in metadata and "num_frames" in metadata["video"] and len(metadata["video"]["num_frames"]) > 0: + # num_frames is often a list, take the first element + frame_count = int(metadata["video"]["num_frames"][0]) + if frame_count > 0: + # If metadata provides a positive frame count, we can often trust it. + # For some backends/formats, this might be the most reliable way. + return frame_count + + # If metadata didn't provide a reliable frame count, or to be absolutely sure, + # we can iterate through the frames. + # This is more robust but potentially slower. + count_manually = 0 + for _ in reader: # Iterating through the reader yields frames + count_manually += 1 + + # If manual count is zero but metadata had a count, it might indicate an issue + # or an empty video. Prioritize manual count if it's > 0. 
+ if count_manually > 0: + return count_manually + elif frame_count > 0 : # Fallback to metadata if manual count was 0 but metadata had a value + print(f"Warning: Manual count is 0, but metadata indicates {frame_count} frames. Video might be empty or there was a read issue. Returning metadata count.") + return frame_count + else: + # This case means both metadata (if available) and manual iteration yielded 0. + print(f"Video appears to have no frames: {video_path}") + return 0 + + except ImportError: + print("Warning: torchvision or its dependencies (like ffmpeg) not installed, cannot count video frames") + return 0 + except RuntimeError as e: + # RuntimeError can be raised by VideoReader for various issues (e.g., file not found, corrupt file, unsupported codec by the backend) + if "No video stream found" in str(e): + print(f"Error: No video stream found in video file: {video_path}") + elif "Could not open" in str(e) or "Demuxing video" in str(e): + print(f"Error: Could not open or demux video file (possibly unsupported format or corrupted file): {video_path} - {e}") + else: + print(f"Runtime error counting video frames: {e}") + return 0 + except Exception as e: + print(f"Error counting video frames: {e}") + return 0 + finally: + # VideoReader does not have an explicit close() or release() method. + # It's managed by its destructor when it goes out of scope. + pass + + +def early_validation(source_folders, episode_mapping, default_fps=20, fps=None): + """ + Validate and copy image files from source folders to output folder. + Performs validation first before any copying to ensure dataset consistency. 
+ + Args: + source_folders (list): List of source dataset folder paths + output_folder (str): Output folder path + episode_mapping (list): List of tuples containing (old_folder, old_index, new_index) + default_fps (int): Default frame rate to use if not specified + fps (int): Frame rate to use for video encoding + + Returns: + dict: Validation results containing expected frame count and actual image count for each episode + """ + if fps is None: + info_path = os.path.join(source_folders[0], "meta", "info.json") + if os.path.exists(info_path): + with open(info_path) as f: + info = json.load(f) + fps = info.get("fps", default_fps) + else: + fps = default_fps + + print(f"Using FPS={fps}") + + # Get video path template and video keys + info_path = os.path.join(source_folders[0], "meta", "info.json") + with open(info_path) as f: + info = json.load(f) + + video_path_template = info["video_path"] + image_keys = [] + + for feature_name, feature_info in info["features"].items(): + if feature_info.get("dtype") == "video": + image_keys.append(feature_name) + + print(f"Found video/image keys: {image_keys}") + + # Validate first before copying anything + print("Starting validation of images and videos...") + validation_results = {} + validation_failed = False + + episode_file_mapping = {} + for old_folder, old_index, new_index in episode_mapping: + # Get expected frame count from episodes.jsonl + episode_file = os.path.join(old_folder, "meta", "episodes.jsonl") + expected_frames = 0 + if os.path.exists(episode_file): + if episode_file not in episode_file_mapping: + episodes = load_jsonl(episode_file) + episodes = {ep["episode_index"]: ep for ep in episodes} + episode_file_mapping[episode_file] = episodes + episode_data = episode_file_mapping[episode_file].get(old_index, None) + if episode_data and "length" in episode_data: + expected_frames = episode_data["length"] + + validation_key = f"{old_folder}_{old_index}" + validation_results[validation_key] = { + "expected_frames": 
expected_frames, + "image_counts": {}, + "video_frames": {}, + "old_index": old_index, + "new_index": new_index, + "is_valid": True # Default to valid + } + + # Check each image directory and video + episode_chunk = old_index // info["chunks_size"] + for image_dir in image_keys: + # Find the video file + source_video_path = os.path.join( + old_folder, + video_path_template.format( + episode_chunk=episode_chunk, video_key=image_dir, episode_index=old_index + ), + ) + source_image_dir = os.path.join(old_folder, "images", image_dir, f"episode_{old_index:06d}") + image_dir_exists = os.path.exists(source_image_dir) + video_file_exists = os.path.exists(source_video_path) + if not video_file_exists: + print(f"{colored('WARNING', 'yellow', attrs=['bold'])}: Video file not found for {image_dir}, episode {old_index} in {old_folder}") + if image_dir_exists: + print(" Image directory exists, encoding video from images.") + from lerobot.common.datasets.video_utils import encode_video_frames + encode_video_frames(source_image_dir, source_video_path, fps, overwrite=True) + print(" Encoded video frames successfully.") + else: + print(f"{colored('ERROR', 'red', attrs=['bold'])}: No video or image directory found for {image_dir}, episode {old_index} in {old_folder}") + validation_results[validation_key]["is_valid"] = False + validation_failed = True + continue + + # Count video frames + video_frame_count = count_video_frames_torchvision(source_video_path) + validation_results[validation_key]["video_frames"][image_dir] = video_frame_count + + # Check if image directory exists + + if image_dir_exists: + # Count image files + image_files = sorted([f for f in os.listdir(source_image_dir) if f.endswith('.png')]) + images_count = len(image_files) + validation_results[validation_key]["image_counts"][image_dir] = images_count + + error_msg = f"expected_frames: {expected_frames}, images_count: {images_count}, video_frame_count: {video_frame_count}" + assert expected_frames > 0 and 
expected_frames == images_count, ( + f"{colored('ERROR', 'red', attrs=['bold'])}: Image count should match expected frames for {source_image_dir}.\n {error_msg}" + ) + assert expected_frames >= video_frame_count, ( + f"{colored('ERROR', 'red', attrs=['bold'])}: Video frame count should be less or equal than expected frames for {source_video_path}.\n {error_msg}" + ) + # Validate frame counts + if video_frame_count != expected_frames: + print(f"{colored('WARNING', 'yellow', attrs=['bold'])}: Video frame count mismatch for {source_video_path}") + print(f" Expected: {expected_frames}, Found: {video_frame_count}") + print(f" Re-encoded video frames from {source_image_dir} to {source_video_path}") + + from lerobot.common.datasets.video_utils import encode_video_frames + encode_video_frames(source_image_dir, source_video_path, fps, overwrite=True) + print(" Re-encoded video frames successfully.") + + + else: + print(f"{colored('WARNING', 'yellow', attrs=['bold'])}: No image directory {image_dir} found for episode {old_index} in {old_folder}") + print(" You can ignore this if you are not using images and your video frame count is equal to expected frames.") + # If no images directory, the video frames must match expected frames + if expected_frames > 0 and video_frame_count != expected_frames: + print(f"{colored('ERROR', 'red', attrs=['bold'])}: Video frame count mismatch for {source_video_path}") + print(f" Expected: {expected_frames}, Found: {video_frame_count}") + + validation_results[validation_key]["is_valid"] = False + validation_failed = True + + # Print validation summary + print("\nValidation Results:") + valid_count = sum(1 for result in validation_results.values() if result["is_valid"]) + print(f"{valid_count} of {len(validation_results)} episodes are valid") + + # If validation failed, stop the process + if validation_failed: + print(colored("Validation failed. 
Please fix the issues before continuing.", "red", attrs=["bold"])) + + +def copy_images(source_folders, output_folder, episode_mapping, default_fps=20, fps=None): + """ + Copy image files from source folders to output folder. + This function assumes validation has already been performed with early_validation(). + + Args: + source_folders (list): List of source dataset folder paths + output_folder (str): Output folder path + episode_mapping (list): List of tuples containing (old_folder, old_index, new_index) + default_fps (int): Default frame rate to use if not specified + fps (int): Frame rate to use for video encoding + + Returns: + int: Number of images copied + """ + if fps is None: + info_path = os.path.join(source_folders[0], "meta", "info.json") + if os.path.exists(info_path): + with open(info_path) as f: + info = json.load(f) + fps = info.get("fps", default_fps) + else: + fps = default_fps + + # Get video path template and video keys + info_path = os.path.join(source_folders[0], "meta", "info.json") + with open(info_path) as f: + info = json.load(f) + + video_path_template = info["video_path"] + image_keys = [] + + for feature_name, feature_info in info["features"].items(): + if feature_info.get("dtype") == "video": + image_keys.append(feature_name) + + # Create image directories in output folder + os.makedirs(os.path.join(output_folder, "images"), exist_ok=True) + + print(f"Starting to copy images for {len(image_keys)} video keys...") + total_copied = 0 + skipped_episodes = 0 + + # Copy images for each episode + for old_folder, old_index, new_index in episode_mapping: + episode_chunk = old_index // info["chunks_size"] + new_episode_chunk = new_index // info["chunks_size"] + + episode_copied = False + + for image_dir in image_keys: + # Create target directory for this video key + os.makedirs(os.path.join(output_folder, "images", image_dir), exist_ok=True) + + # Check if source image directory exists + source_image_dir = os.path.join(old_folder, "images", 
image_dir, f"episode_{old_index:06d}") + + if os.path.exists(source_image_dir): + # Create target directory + target_image_dir = os.path.join(output_folder, "images", image_dir, f"episode_{new_index:06d}") + os.makedirs(target_image_dir, exist_ok=True) + + # Copy image files + image_files = sorted([f for f in os.listdir(source_image_dir) if f.endswith('.png')]) + num_images = len(image_files) + + if num_images > 0: + print(f"Copying {num_images} images from {source_image_dir} to {target_image_dir}") + + for image_file in image_files: + try: + # Extract frame number from filename + frame_part = image_file.split('_')[1] if '_' in image_file else image_file + frame_num = int(frame_part.split('.')[0]) + + # Copy the file with consistent naming + dest_file = os.path.join(target_image_dir, f"frame_{frame_num:06d}.png") + shutil.copy2( + os.path.join(source_image_dir, image_file), + dest_file + ) + total_copied += 1 + episode_copied = True + except Exception as e: + print(f"Error copying image {image_file}: {e}") + + if not episode_copied: + skipped_episodes += 1 + + print(f"\nCopied {total_copied} images for {len(episode_mapping) - skipped_episodes} episodes") + if skipped_episodes > 0: + print(f"{colored('WARNING', 'yellow', attrs=['bold'])}: Skipped {skipped_episodes} episodes with no images") + + +def merge_datasets( + source_folders, output_folder, validate_ts=False, tolerance_s=1e-4, + state_max_dim=14, action_max_dim=14, default_fps=20 +): + """ + 将多个数据集文件夹合并为一个,处理索引、维度和元数据 + (Merge multiple dataset folders into one, handling indices, dimensions, and metadata) + + Args: + source_folders (list): 源数据集文件夹路径列表 (List of source dataset folder paths) + output_folder (str): 输出文件夹路径 (Output folder path) + validate_ts (bool): 是否验证时间戳 (Whether to validate timestamps) + tolerance_s (float): 时间戳不连续性的容差值,以秒为单位 (Tolerance for timestamp discontinuities in seconds) + state_max_dim (int): 状态向量的最大维度 (Maximum dimension for state vectors) + action_max_dim (int): 动作向量的最大维度 (Maximum 
dimension for action vectors) + default_fps (float): 默认帧率 (Default frame rate) + """ + # Create output folder if it doesn't exist + os.makedirs(output_folder, exist_ok=True) + os.makedirs(os.path.join(output_folder, "meta"), exist_ok=True) + + fps = default_fps + print(f"使用默认FPS值: {fps}") + + # Load episodes from all source folders + all_episodes = [] + all_episodes_stats = [] + all_tasks = [] + + total_frames = 0 + total_episodes = 0 + + # Keep track of episode mapping (old_folder, old_index, new_index) + episode_mapping = [] + + # Collect all stats for proper merging + all_stats_data = [] + + # Track dimensions for each folder + folder_state_dimensions = {} # 存储每个文件夹的状态向量维度 + folder_action_dimensions = {} # 存储每个文件夹的动作向量维度 + + # 添加一个变量来跟踪累积的帧数 + cumulative_frame_count = 0 + + # 创建一个映射,用于存储每个新的episode索引对应的起始帧索引 + episode_to_frame_index = {} + + # 创建一个映射,用于跟踪旧的任务描述到新任务索引的映射 + task_desc_to_new_index = {} + # 创建一个映射,用于存储每个源文件夹和旧任务索引到新任务索引的映射 + folder_task_mapping = {} + + # 首先收集所有不同的任务描述 + all_unique_tasks = [] + + # 从info.json获取chunks_size + info_path = os.path.join(source_folders[0], "meta", "info.json") + # Check if all source folders have images directory + images_dir_exists = all(os.path.exists(os.path.join(folder, "images")) for folder in source_folders) + chunks_size = 1000 # 默认值 + if os.path.exists(info_path): + with open(info_path) as f: + info = json.load(f) + chunks_size = info.get("chunks_size", 1000) + + # 使用更简单的方法计算视频总数 (Use simpler method to calculate total videos) + total_videos = 0 + + for folder in source_folders: + try: + # 从每个数据集的info.json直接获取total_videos + # (Get total_videos directly from each dataset's info.json) + folder_info_path = os.path.join(folder, "meta", "info.json") + if os.path.exists(folder_info_path): + with open(folder_info_path) as f: + folder_info = json.load(f) + if "total_videos" in folder_info: + folder_videos = folder_info["total_videos"] + total_videos += folder_videos + print( + f"从{folder}的info.json中读取到视频数量: {folder_videos} 
(Read video count from {folder}'s info.json: {folder_videos})" + ) + + # 分别检查状态和动作向量的维度 + folder_state_dim = state_max_dim # 默认使用传入的状态最大维度 + folder_action_dim = action_max_dim # 默认使用传入的动作最大维度 + + # Try to find a parquet file to determine dimensions + for root, _dirs, files in os.walk(folder): + for file in files: + if file.endswith(".parquet"): + try: + df = pd.read_parquet(os.path.join(root, file)) + # 检查状态向量维度 + if "observation.state" in df.columns: + for state_val in df["observation.state"]: + if state_val is not None and isinstance(state_val, (list, np.ndarray)): + folder_state_dim = len(state_val) + print(f"Detected {folder_state_dim} dimensions for state in {folder}") + break + + # 检查动作向量维度 + if "action" in df.columns: + for action_val in df["action"]: + if action_val is not None and isinstance(action_val, (list, np.ndarray)): + folder_action_dim = len(action_val) + print(f"Detected {folder_action_dim} dimensions for action in {folder}") + break + + # 如果两个维度都已检测到,可以停止搜索 + if folder_state_dim != state_max_dim and folder_action_dim != action_max_dim: + break + except Exception as e: + print(f"Error checking dimensions in {folder}: {e}") + break + # 如果两个维度都已检测到,可以停止搜索 + if folder_state_dim != state_max_dim and folder_action_dim != action_max_dim: + break + + folder_state_dimensions[folder] = folder_state_dim + folder_action_dimensions[folder] = folder_action_dim + + # Load episodes + episodes_path = os.path.join(folder, "meta", "episodes.jsonl") + if not os.path.exists(episodes_path): + print(f"Warning: Episodes file not found in {folder}, skipping") + continue + + episodes = load_jsonl(episodes_path) + + # Load episode stats + episodes_stats_path = os.path.join(folder, "meta", "episodes_stats.jsonl") + episodes_stats = [] + if os.path.exists(episodes_stats_path): + episodes_stats = load_jsonl(episodes_stats_path) + + # Create a mapping of episode_index to stats + stats_map = {} + for stat in episodes_stats: + if "episode_index" in stat: + 
stats_map[stat["episode_index"]] = stat + + # Load tasks + tasks_path = os.path.join(folder, "meta", "tasks.jsonl") + folder_tasks = [] + if os.path.exists(tasks_path): + folder_tasks = load_jsonl(tasks_path) + + # 创建此文件夹的任务映射 + folder_task_mapping[folder] = {} + + # 处理每个任务 + for task in folder_tasks: + task_desc = task["task"] + old_index = task["task_index"] + + # 检查任务描述是否已存在 + if task_desc not in task_desc_to_new_index: + # 添加新任务描述,分配新索引 + new_index = len(all_unique_tasks) + task_desc_to_new_index[task_desc] = new_index + all_unique_tasks.append({"task_index": new_index, "task": task_desc}) + + # 保存此文件夹中旧索引到新索引的映射 + folder_task_mapping[folder][old_index] = task_desc_to_new_index[task_desc] + + # Process all episodes from this folder + for episode in episodes: + old_index = episode["episode_index"] + new_index = total_episodes + + # Update episode index + episode["episode_index"] = new_index + all_episodes.append(episode) + + # Update stats if available + if old_index in stats_map: + stats = stats_map[old_index] + stats["episode_index"] = new_index + + # 填充统计数据 + if "stats" in stats: + # 分别填充observation.state的统计数据 + if "observation.state" in stats["stats"] and folder_state_dimensions[folder] < state_max_dim: + for stat_type in ["mean", "std", "max", "min"]: + if stat_type in stats["stats"]["observation.state"]: + values = stats["stats"]["observation.state"][stat_type] + if isinstance(values, list) and len(values) < state_max_dim: + padded = values + [0.0] * (state_max_dim - len(values)) + stats["stats"]["observation.state"][stat_type] = padded + + # 分别处理action的统计数据 + if "action" in stats["stats"] and folder_action_dimensions[folder] < action_max_dim: + for stat_type in ["mean", "std", "max", "min"]: + if stat_type in stats["stats"]["action"]: + values = stats["stats"]["action"][stat_type] + if isinstance(values, list) and len(values) < action_max_dim: + padded = values + [0.0] * (action_max_dim - len(values)) + stats["stats"]["action"][stat_type] = padded + + 
all_episodes_stats.append(stats) + + # Add to all_stats_data for proper merging + if "stats" in stats: + all_stats_data.append(stats["stats"]) + + # Add to mapping + episode_mapping.append((folder, old_index, new_index)) + + # Update counters + total_episodes += 1 + total_frames += episode["length"] + + # 处理每个episode时收集此信息 + episode_to_frame_index[new_index] = cumulative_frame_count + cumulative_frame_count += episode["length"] + + # 使用收集的唯一任务列表替换之前的任务处理逻辑 + all_tasks = all_unique_tasks + + except Exception as e: + print(f"Error processing folder {folder}: {e}") + continue + + print(f"Processed {total_episodes} episodes from {len(source_folders)} folders") + + # Save combined episodes and stats + save_jsonl(all_episodes, os.path.join(output_folder, "meta", "episodes.jsonl")) + save_jsonl(all_episodes_stats, os.path.join(output_folder, "meta", "episodes_stats.jsonl")) + save_jsonl(all_tasks, os.path.join(output_folder, "meta", "tasks.jsonl")) + + # Merge and save stats + stats_list = [] + for folder in source_folders: + stats_path = os.path.join(folder, "meta", "stats.json") + if os.path.exists(stats_path): + with open(stats_path) as f: + stats = json.load(f) + stats_list.append(stats) + + if stats_list: + # Merge global stats + merged_stats = merge_stats(stats_list) + + # Update merged stats with episode-specific stats if available + if all_stats_data: + # For each feature in the stats + for feature in merged_stats: + if feature in all_stats_data[0]: + # Recalculate statistics based on all episodes + values = [stat[feature] for stat in all_stats_data if feature in stat] + + # Find the maximum dimension for this feature + max_dim = max( + len(np.array(val.get("mean", [0])).flatten()) for val in values if "mean" in val + ) + + # Update count + if "count" in merged_stats[feature]: + merged_stats[feature]["count"] = [ + sum(stat.get("count", [0])[0] for stat in values if "count" in stat) + ] + + # Update min/max with padding + if "min" in merged_stats[feature] and 
all("min" in stat for stat in values): + # Pad min values + padded_mins = [] + for val in values: + val_array = np.array(val["min"]) + val_flat = val_array.flatten() + if len(val_flat) < max_dim: + padded = np.zeros(max_dim) + padded[: len(val_flat)] = val_flat + padded_mins.append(padded) + else: + padded_mins.append(val_flat) + merged_stats[feature]["min"] = np.minimum.reduce(padded_mins).tolist() + + if "max" in merged_stats[feature] and all("max" in stat for stat in values): + # Pad max values + padded_maxs = [] + for val in values: + val_array = np.array(val["max"]) + val_flat = val_array.flatten() + if len(val_flat) < max_dim: + padded = np.zeros(max_dim) + padded[: len(val_flat)] = val_flat + padded_maxs.append(padded) + else: + padded_maxs.append(val_flat) + merged_stats[feature]["max"] = np.maximum.reduce(padded_maxs).tolist() + + # Update mean and std (weighted by count if available) + if "mean" in merged_stats[feature] and all("mean" in stat for stat in values): + # Pad mean values + padded_means = [] + for val in values: + val_array = np.array(val["mean"]) + val_flat = val_array.flatten() + if len(val_flat) < max_dim: + padded = np.zeros(max_dim) + padded[: len(val_flat)] = val_flat + padded_means.append(padded) + else: + padded_means.append(val_flat) + + if all("count" in stat for stat in values): + counts = [stat["count"][0] for stat in values] + total_count = sum(counts) + weighted_means = [ + mean * count / total_count + for mean, count in zip(padded_means, counts, strict=False) + ] + merged_stats[feature]["mean"] = np.sum(weighted_means, axis=0).tolist() + else: + merged_stats[feature]["mean"] = np.mean(padded_means, axis=0).tolist() + + if "std" in merged_stats[feature] and all("std" in stat for stat in values): + # Pad std values + padded_stds = [] + for val in values: + val_array = np.array(val["std"]) + val_flat = val_array.flatten() + if len(val_flat) < max_dim: + padded = np.zeros(max_dim) + padded[: len(val_flat)] = val_flat + 
padded_stds.append(padded) + else: + padded_stds.append(val_flat) + + if all("count" in stat for stat in values): + counts = [stat["count"][0] for stat in values] + total_count = sum(counts) + variances = [std**2 for std in padded_stds] + weighted_variances = [ + var * count / total_count + for var, count in zip(variances, counts, strict=False) + ] + merged_stats[feature]["std"] = np.sqrt( + np.sum(weighted_variances, axis=0) + ).tolist() + else: + # Simple average of standard deviations + merged_stats[feature]["std"] = np.mean(padded_stds, axis=0).tolist() + + with open(os.path.join(output_folder, "meta", "stats.json"), "w") as f: + json.dump(merged_stats, f, indent=4) + + # Update and save info.json + info_path = os.path.join(source_folders[0], "meta", "info.json") + with open(info_path) as f: + info = json.load(f) + + # Update info with correct counts + info["total_episodes"] = total_episodes + info["total_frames"] = total_frames + info["total_tasks"] = len(all_tasks) + info["total_chunks"] = (total_episodes + info["chunks_size"] - 1) // info[ + "chunks_size" + ] # Ceiling division + + # Update splits + info["splits"] = {"train": f"0:{total_episodes}"} + + # Update feature dimensions to the maximum dimension + if "features" in info: + # 使用检测到的最大状态和动作维度 + actual_state_max_dim = state_max_dim + actual_action_max_dim = action_max_dim + + for _folder, dim in folder_state_dimensions.items(): + actual_state_max_dim = max(actual_state_max_dim, dim) + + for _folder, dim in folder_action_dimensions.items(): + actual_action_max_dim = max(actual_action_max_dim, dim) + + # 更新状态向量维度 + if "observation.state" in info["features"] and "shape" in info["features"]["observation.state"]: + info["features"]["observation.state"]["shape"] = [actual_state_max_dim] + print(f"Updated observation.state shape to {actual_state_max_dim}") + + # 更新动作向量维度 + if "action" in info["features"] and "shape" in info["features"]["action"]: + info["features"]["action"]["shape"] = [actual_action_max_dim] 
+ print(f"Updated action shape to {actual_action_max_dim}") + + # 更新视频总数 (Update total videos) + info["total_videos"] = total_videos + print(f"更新视频总数为: {total_videos} (Update total videos to: {total_videos})") + + with open(os.path.join(output_folder, "meta", "info.json"), "w") as f: + json.dump(info, f, indent=4) + + # Validate before video copying + if images_dir_exists: + early_validation( + source_folders, + episode_mapping, + ) + + # Copy video and data files + copy_videos(source_folders, output_folder, episode_mapping) + copy_data_files( + source_folders, + output_folder, + episode_mapping, + state_max_dim=state_max_dim, + action_max_dim=action_max_dim, + fps=fps, + episode_to_frame_index=episode_to_frame_index, + folder_task_mapping=folder_task_mapping, + chunks_size=chunks_size, + ) + + # Copy images and check with video frames + if args.copy_images: + print("Starting to copy images and validate video frame counts") + copy_images(source_folders, output_folder, episode_mapping) + + + print(f"Merged {total_episodes} episodes with {total_frames} frames into {output_folder}") + + +if __name__ == "__main__": + # Set up argument parser + parser = argparse.ArgumentParser(description="Merge datasets from multiple sources.") + + # Add arguments + parser.add_argument("--sources", nargs="+", required=True, help="List of source folder paths") + parser.add_argument("--output", required=True, help="Output folder path") + parser.add_argument("--state_max_dim", type=int, default=32, help="Maximum state vector dimension (default: 32)") + parser.add_argument("--action_max_dim", type=int, default=32, help="Maximum action vector dimension (default: 32)") + parser.add_argument("--fps", type=int, default=30, help="Your datasets FPS (default: 20)") + parser.add_argument("--copy_images", action="store_true", help="Whether to copy images (default: False)") + + # Parse arguments + args = parser.parse_args() + + # Use parsed arguments + merge_datasets( + args.sources, + args.output, 
+ state_max_dim=args.state_max_dim, + action_max_dim=args.action_max_dim, + default_fps=args.fps + ) diff --git a/policy/openpi-InternData-A1/examples/arx/merge_lerobot_data_v2.py b/policy/openpi-InternData-A1/examples/arx/merge_lerobot_data_v2.py new file mode 100644 index 0000000..8261ddb --- /dev/null +++ b/policy/openpi-InternData-A1/examples/arx/merge_lerobot_data_v2.py @@ -0,0 +1,1509 @@ +import argparse +import contextlib +import json +import os +import shutil +import traceback + +import numpy as np +import pandas as pd +from termcolor import colored + + +def load_jsonl(file_path): + """ + 从JSONL文件加载数据 + (Load data from a JSONL file) + + Args: + file_path (str): JSONL文件路径 (Path to the JSONL file) + + Returns: + list: 包含文件中每行JSON对象的列表 (List containing JSON objects from each line) + """ + data = [] + + # Special handling for episodes_stats.jsonl + if "episodes_stats.jsonl" in file_path: + try: + # Try to load the entire file as a JSON array + with open(file_path) as f: + content = f.read() + # Check if the content starts with '[' and ends with ']' + if content.strip().startswith("[") and content.strip().endswith("]"): + return json.loads(content) + else: + # Try to add brackets and parse + try: + return json.loads("[" + content + "]") + except json.JSONDecodeError: + pass + except Exception as e: + print(f"Error loading {file_path} as JSON array: {e}") + + # Fall back to line-by-line parsing + try: + with open(file_path) as f: + for line in f: + if line.strip(): + with contextlib.suppress(json.JSONDecodeError): + data.append(json.loads(line)) + except Exception as e: + print(f"Error loading {file_path} line by line: {e}") + else: + # Standard JSONL parsing for other files + with open(file_path) as f: + for line in f: + if line.strip(): + with contextlib.suppress(json.JSONDecodeError): + data.append(json.loads(line)) + + return data + + +def save_jsonl(data, file_path): + """ + 将数据保存为JSONL格式 + (Save data in JSONL format) + + Args: + data (list): 要保存的JSON对象列表 
(List of JSON objects to save) + file_path (str): 输出文件路径 (Path to the output file) + """ + with open(file_path, "w") as f: + for item in data: + f.write(json.dumps(item) + "\n") + + +def merge_stats(stats_list): + """ + 合并多个数据集的统计信息,确保维度一致性 + (Merge statistics from multiple datasets, ensuring dimensional consistency) + + Args: + stats_list (list): 包含每个数据集统计信息的字典列表 + (List of dictionaries containing statistics for each dataset) + + Returns: + dict: 合并后的统计信息 (Merged statistics) + """ + # Initialize merged stats with the structure of the first stats + merged_stats = {} + + # Find common features across all stats + common_features = set(stats_list[0].keys()) + for stats in stats_list[1:]: + common_features = common_features.intersection(set(stats.keys())) + + # Process features in the order they appear in the first stats file + for feature in stats_list[0]: + if feature not in common_features: + continue + + merged_stats[feature] = {} + + # Find common stat types for this feature + common_stat_types = [] + for stat_type in ["mean", "std", "max", "min"]: + if all(stat_type in stats[feature] for stats in stats_list): + common_stat_types.append(stat_type) + + # Determine the original shape of each value + original_shapes = [] + for stats in stats_list: + if "mean" in stats[feature]: + shape = np.array(stats[feature]["mean"]).shape + original_shapes.append(shape) + + # Special handling for image features to preserve nested structure + if feature.startswith("observation.images."): + for stat_type in common_stat_types: + try: + # Get all values + values = [stats[feature][stat_type] for stats in stats_list] + + # For image features, we need to preserve the nested structure + # Initialize with the first value's structure + result = [] + + # For RGB channels + for channel_idx in range(len(values[0])): + channel_result = [] + + # For each pixel row + for pixel_idx in range(len(values[0][channel_idx])): + pixel_result = [] + + # For each pixel value + for value_idx in 
range(len(values[0][channel_idx][pixel_idx])): + # Calculate statistic based on type + if stat_type == "mean": + # Simple average + avg = sum( + values[i][channel_idx][pixel_idx][value_idx] + for i in range(len(values)) + ) / len(values) + pixel_result.append(avg) + elif stat_type == "std": + # Simple average of std + avg = sum( + values[i][channel_idx][pixel_idx][value_idx] + for i in range(len(values)) + ) / len(values) + pixel_result.append(avg) + elif stat_type == "max": + # Maximum + max_val = max( + values[i][channel_idx][pixel_idx][value_idx] + for i in range(len(values)) + ) + pixel_result.append(max_val) + elif stat_type == "min": + # Minimum + min_val = min( + values[i][channel_idx][pixel_idx][value_idx] + for i in range(len(values)) + ) + pixel_result.append(min_val) + + channel_result.append(pixel_result) + + result.append(channel_result) + + merged_stats[feature][stat_type] = result + except Exception as e: + print(f"Warning: Error processing image feature {feature}.{stat_type}: {e}") + # Fallback to first value + merged_stats[feature][stat_type] = values[0] + # If all shapes are the same, no need for special handling + elif len({str(shape) for shape in original_shapes}) == 1: + # All shapes are the same, use standard merging + for stat_type in common_stat_types: + values = [stats[feature][stat_type] for stats in stats_list] + + try: + # Calculate the new statistic based on the type + if stat_type == "mean": + if all("count" in stats[feature] for stats in stats_list): + counts = [stats[feature]["count"][0] for stats in stats_list] + total_count = sum(counts) + weighted_values = [ + np.array(val) * count / total_count + for val, count in zip(values, counts, strict=False) + ] + merged_stats[feature][stat_type] = np.sum(weighted_values, axis=0).tolist() + else: + merged_stats[feature][stat_type] = np.mean(np.array(values), axis=0).tolist() + + elif stat_type == "std": + if all("count" in stats[feature] for stats in stats_list): + counts = 
[stats[feature]["count"][0] for stats in stats_list] + total_count = sum(counts) + variances = [np.array(std) ** 2 for std in values] + weighted_variances = [ + var * count / total_count + for var, count in zip(variances, counts, strict=False) + ] + merged_stats[feature][stat_type] = np.sqrt( + np.sum(weighted_variances, axis=0) + ).tolist() + else: + merged_stats[feature][stat_type] = np.mean(np.array(values), axis=0).tolist() + + elif stat_type == "max": + merged_stats[feature][stat_type] = np.maximum.reduce(np.array(values)).tolist() + + elif stat_type == "min": + merged_stats[feature][stat_type] = np.minimum.reduce(np.array(values)).tolist() + except Exception as e: + print(f"Warning: Error processing {feature}.{stat_type}: {e}") + continue + else: + # Shapes are different, need special handling for state vectors + if feature in ["observation.state", "action"]: + # For state vectors, we need to handle different dimensions + max_dim = max(len(np.array(stats[feature]["mean"]).flatten()) for stats in stats_list) + + for stat_type in common_stat_types: + try: + # Get values and their original dimensions + values_with_dims = [] + for stats in stats_list: + val = np.array(stats[feature][stat_type]).flatten() + dim = len(val) + values_with_dims.append((val, dim)) + + # Initialize result array with zeros + result = np.zeros(max_dim) + + # Calculate statistics for each dimension separately + if stat_type == "mean": + if all("count" in stats[feature] for stats in stats_list): + counts = [stats[feature]["count"][0] for stats in stats_list] + total_count = sum(counts) + + # For each dimension, calculate weighted mean of available values + for d in range(max_dim): + dim_values = [] + dim_weights = [] + for (val, dim), count in zip(values_with_dims, counts, strict=False): + if d < dim: # Only use values that have this dimension + dim_values.append(val[d]) + dim_weights.append(count) + + if dim_values: # If we have values for this dimension + weighted_sum = sum( + v * w for 
v, w in zip(dim_values, dim_weights, strict=False) + ) + result[d] = weighted_sum / sum(dim_weights) + else: + # Simple average for each dimension + for d in range(max_dim): + dim_values = [val[d] for val, dim in values_with_dims if d < dim] + if dim_values: + result[d] = sum(dim_values) / len(dim_values) + + elif stat_type == "std": + if all("count" in stats[feature] for stats in stats_list): + counts = [stats[feature]["count"][0] for stats in stats_list] + total_count = sum(counts) + + # For each dimension, calculate weighted variance + for d in range(max_dim): + dim_variances = [] + dim_weights = [] + for (val, dim), count in zip(values_with_dims, counts, strict=False): + if d < dim: # Only use values that have this dimension + dim_variances.append(val[d] ** 2) # Square for variance + dim_weights.append(count) + + if dim_variances: # If we have values for this dimension + weighted_var = sum( + v * w for v, w in zip(dim_variances, dim_weights, strict=False) + ) / sum(dim_weights) + result[d] = np.sqrt(weighted_var) # Take sqrt for std + else: + # Simple average of std for each dimension + for d in range(max_dim): + dim_values = [val[d] for val, dim in values_with_dims if d < dim] + if dim_values: + result[d] = sum(dim_values) / len(dim_values) + + elif stat_type == "max": + # For each dimension, take the maximum of available values + for d in range(max_dim): + dim_values = [val[d] for val, dim in values_with_dims if d < dim] + if dim_values: + result[d] = max(dim_values) + + elif stat_type == "min": + # For each dimension, take the minimum of available values + for d in range(max_dim): + dim_values = [val[d] for val, dim in values_with_dims if d < dim] + if dim_values: + result[d] = min(dim_values) + + # Convert result to list and store + merged_stats[feature][stat_type] = result.tolist() + + except Exception as e: + print( + f"Warning: Error processing {feature}.{stat_type} with different dimensions: {e}" + ) + continue + else: + # For other features with 
different shapes, use the first shape as template + template_shape = original_shapes[0] + print(f"Using shape {template_shape} as template for {feature}") + + for stat_type in common_stat_types: + try: + # Use the first stats as template + merged_stats[feature][stat_type] = stats_list[0][feature][stat_type] + except Exception as e: + print( + f"Warning: Error processing {feature}.{stat_type} with shape {template_shape}: {e}" + ) + continue + + # Add count if available in all stats + if all("count" in stats[feature] for stats in stats_list): + try: + merged_stats[feature]["count"] = [sum(stats[feature]["count"][0] for stats in stats_list)] + except Exception as e: + print(f"Warning: Error processing {feature}.count: {e}") + + return merged_stats + + +def copy_videos(source_folders, output_folder, episode_mapping): + """ + 从源文件夹复制视频文件到输出文件夹,保持正确的索引和结构 + (Copy video files from source folders to output folder, maintaining correct indices and structure) + + Args: + source_folders (list): 源数据集文件夹路径列表 (List of source dataset folder paths) + output_folder (str): 输出文件夹路径 (Output folder path) + episode_mapping (list): 包含(旧文件夹,旧索引,新索引)元组的列表 + (List of tuples containing (old_folder, old_index, new_index)) + """ + # Get info.json to determine video structure + info_path = os.path.join(source_folders[0], "meta", "info.json") + with open(info_path) as f: + info = json.load(f) + + video_path_template = info["video_path"] + + # Identify video keys from the template + # Example: "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4" + video_keys = [] + for feature_name, feature_info in info["features"].items(): + if feature_info.get("dtype") == "video": + # Use the full feature name as the video key + video_keys.append(feature_name) + + print(f"Found video keys: {video_keys}") + + # Copy videos for each episode + for old_folder, old_index, new_index in episode_mapping: + # Determine episode chunk (usually 0 for small datasets) + episode_chunk = old_index // 
info["chunks_size"] + new_episode_chunk = new_index // info["chunks_size"] + + for video_key in video_keys: + # Try different possible source paths + source_patterns = [ + # Standard path with the episode index from metadata + os.path.join( + old_folder, + video_path_template.format( + episode_chunk=episode_chunk, video_key=video_key, episode_index=old_index + ), + ), + # Try with 0-based indexing + os.path.join( + old_folder, + video_path_template.format(episode_chunk=0, video_key=video_key, episode_index=0), + ), + # Try with different formatting + os.path.join( + old_folder, f"videos/chunk-{episode_chunk:03d}/{video_key}/episode_{old_index}.mp4" + ), + os.path.join(old_folder, f"videos/chunk-000/{video_key}/episode_000000.mp4"), + ] + + # Find the first existing source path + source_video_path = None + for pattern in source_patterns: + if os.path.exists(pattern): + source_video_path = pattern + break + + if source_video_path: + # Construct destination path + dest_video_path = os.path.join( + output_folder, + video_path_template.format( + episode_chunk=new_episode_chunk, video_key=video_key, episode_index=new_index + ), + ) + + # Create destination directory if it doesn't exist + os.makedirs(os.path.dirname(dest_video_path), exist_ok=True) + + print(f"Copying video: {source_video_path} -> {dest_video_path}") + shutil.copy2(source_video_path, dest_video_path) + else: + # If no file is found, search the directory recursively + found = False + for root, _, files in os.walk(os.path.join(old_folder, "videos")): + for file in files: + if file.endswith(".mp4") and video_key in root: + source_video_path = os.path.join(root, file) + + # Construct destination path + dest_video_path = os.path.join( + output_folder, + video_path_template.format( + episode_chunk=new_episode_chunk, + video_key=video_key, + episode_index=new_index, + ), + ) + + # Create destination directory if it doesn't exist + os.makedirs(os.path.dirname(dest_video_path), exist_ok=True) + + print( + 
f"Copying video (found by search): {source_video_path} -> {dest_video_path}" + ) + shutil.copy2(source_video_path, dest_video_path) + found = True + break + if found: + break + + if not found: + print( + f"Warning: Video file not found for {video_key}, episode {old_index} in {old_folder}" + ) + + +def validate_timestamps(source_folders, tolerance_s=1e-4): + """ + 验证源数据集的时间戳结构,识别潜在问题 + (Validate timestamp structure of source datasets, identify potential issues) + + Args: + source_folders (list): 源数据集文件夹路径列表 (List of source dataset folder paths) + tolerance_s (float): 时间戳不连续性的容差值,以秒为单位 (Tolerance for timestamp discontinuities in seconds) + + Returns: + tuple: (issues, fps_values) - 问题列表和检测到的FPS值列表 + (List of issues and list of detected FPS values) + """ + issues = [] + fps_values = [] + + for folder in source_folders: + try: + # 尝试从 info.json 获取 FPS (Try to get FPS from info.json) + info_path = os.path.join(folder, "meta", "info.json") + if os.path.exists(info_path): + with open(info_path) as f: + info = json.load(f) + if "fps" in info: + fps = info["fps"] + fps_values.append(fps) + print(f"数据集 {folder} FPS={fps} (Dataset {folder} FPS={fps})") + + # 检查是否有parquet文件包含时间戳 (Check if any parquet files contain timestamps) + parquet_path = None + for root, _, files in os.walk(os.path.join(folder, "parquet")): + for file in files: + if file.endswith(".parquet"): + parquet_path = os.path.join(root, file) + break + if parquet_path: + break + + if not parquet_path: + for root, _, files in os.walk(os.path.join(folder, "data")): + for file in files: + if file.endswith(".parquet"): + parquet_path = os.path.join(root, file) + break + if parquet_path: + break + + if parquet_path: + df = pd.read_parquet(parquet_path) + timestamp_cols = [col for col in df.columns if "timestamp" in col or "time" in col] + if timestamp_cols: + print( + f"数据集 {folder} 包含时间戳列: {timestamp_cols} (Dataset {folder} contains timestamp columns: {timestamp_cols})" + ) + else: + issues.append( + f"警告: 数据集 
{folder} 没有时间戳列 (Warning: Dataset {folder} has no timestamp columns)" + ) + else: + issues.append( + f"警告: 数据集 {folder} 未找到parquet文件 (Warning: No parquet files found in dataset {folder})" + ) + + except Exception as e: + issues.append( + f"错误: 验证数据集 {folder} 失败: {e} (Error: Failed to validate dataset {folder}: {e})" + ) + print(f"验证错误: {e} (Validation error: {e})") + traceback.print_exc() + + # 检查FPS是否一致 (Check if FPS values are consistent) + if len(set(fps_values)) > 1: + issues.append( + f"警告: 数据集FPS不一致: {fps_values} (Warning: Inconsistent FPS across datasets: {fps_values})" + ) + + return issues, fps_values + + +def copy_data_files( + source_folders, + output_folder, + episode_mapping, + fps=None, + episode_to_frame_index=None, + folder_task_mapping=None, + chunks_size=1000, + default_fps=20, +): + """ + 从源文件夹复制数据文件到输出文件夹,同时处理索引映射和维度填充 + (Copy data files from source folders to output folder, handling index mapping and dimension padding) + + Args: + source_folders (list): 源数据集文件夹路径列表 (List of source dataset folder paths) + output_folder (str): 输出文件夹路径 (Output folder path) + episode_mapping (list): 包含(旧文件夹,旧索引,新索引)元组的列表 + (List of tuples containing (old_folder, old_index, new_index)) + fps (float): 帧率 (frames per second) + episode_to_frame_index (dict): 每个episode对应的起始帧索引 + (Start frame index for each episode) + folder_task_mapping (dict): 文件夹任务映射 (Folder task mapping) + chunks_size (int): 数据块大小 (Chunk size) + default_fps (float): 默认帧率 (Default frame rate) + """ + # 获取第一个数据集的FPS(如果未提供)(Get FPS from first dataset if not provided) + if fps is None: + info_path = os.path.join(source_folders[0], "meta", "info.json") + if os.path.exists(info_path): + with open(info_path) as f: + info = json.load(f) + fps = info.get( + "fps", default_fps + ) # 使用变量替代硬编码的20 (Use variable instead of hardcoded 20) + else: + fps = default_fps # 使用变量替代硬编码的20 (Use variable instead of hardcoded 20) + + print(f"使用FPS={fps}") + + # 为每个episode复制和处理数据文件 (Copy and process data files for each 
episode) + total_copied = 0 + total_failed = 0 + + # 添加一个列表来记录失败的文件及原因 + # (Add a list to record failed files and reasons) + failed_files = [] + + for i, (old_folder, old_index, new_index) in enumerate(episode_mapping): + # 尝试找到源parquet文件 (Try to find source parquet file) + episode_str = f"episode_{old_index:06d}.parquet" + source_paths = [ + os.path.join(old_folder, "parquet", episode_str), + os.path.join(old_folder, "data", episode_str), + ] + + source_path = None + for path in source_paths: + if os.path.exists(path): + source_path = path + break + + if source_path: + try: + # 读取parquet文件 (Read parquet file) + df = pd.read_parquet(source_path) + + # 更新episode_index列 (Update episode_index column) + if "episode_index" in df.columns: + print( + f"更新episode_index从 {df['episode_index'].iloc[0]} 到 {new_index} (Update episode_index from {df['episode_index'].iloc[0]} to {new_index})" + ) + df["episode_index"] = new_index + + # 更新index列 (Update index column) + if "index" in df.columns: + if episode_to_frame_index and new_index in episode_to_frame_index: + # 使用预先计算的帧索引起始值 (Use pre-calculated frame index start value) + first_index = episode_to_frame_index[new_index] + print( + f"更新index列,起始值: {first_index}(使用全局累积帧计数)(Update index column, start value: {first_index} (using global cumulative frame count))" + ) + else: + # 如果没有提供映射,使用当前的计算方式作为回退 + # (If no mapping provided, use current calculation as fallback) + first_index = new_index * len(df) + print( + f"更新index列,起始值: {first_index}(使用episode索引乘以长度)(Update index column, start value: {first_index} (using episode index multiplied by length))" + ) + + # 更新所有帧的索引 (Update indices for all frames) + df["index"] = [first_index + i for i in range(len(df))] + + # 更新task_index列 (Update task_index column) + if "task_index" in df.columns and folder_task_mapping and old_folder in folder_task_mapping: + # 获取当前task_index (Get current task_index) + current_task_index = df["task_index"].iloc[0] + + # 检查是否有对应的新索引 (Check if there's a 
corresponding new index) + if current_task_index in folder_task_mapping[old_folder]: + new_task_index = folder_task_mapping[old_folder][current_task_index] + print( + f"更新task_index从 {current_task_index} 到 {new_task_index} (Update task_index from {current_task_index} to {new_task_index})" + ) + df["task_index"] = new_task_index + else: + print( + f"警告: 找不到task_index {current_task_index}的映射关系 (Warning: No mapping found for task_index {current_task_index})" + ) + + # 计算chunk编号 (Calculate chunk number) + chunk_index = new_index // chunks_size + + # 创建正确的目标目录 (Create correct target directory) + chunk_dir = os.path.join(output_folder, "data", f"chunk-{chunk_index:03d}") + os.makedirs(chunk_dir, exist_ok=True) + + # 构建正确的目标路径 (Build correct target path) + dest_path = os.path.join(chunk_dir, f"episode_{new_index:06d}.parquet") + + # 保存到正确位置 (Save to correct location) + df.to_parquet(dest_path, index=False) + + total_copied += 1 + print(f"已处理并保存: {dest_path} (Processed and saved: {dest_path})") + + except Exception as e: + error_msg = f"处理 {source_path} 失败: {e} (Processing {source_path} failed: {e})" + print(error_msg) + traceback.print_exc() + failed_files.append({"file": source_path, "reason": str(e), "episode": old_index}) + total_failed += 1 + else: + # 文件不在标准位置,尝试递归搜索 + found = False + for root, _, files in os.walk(old_folder): + for file in files: + if file.endswith(".parquet") and f"episode_{old_index:06d}" in file: + try: + source_path = os.path.join(root, file) + + # 读取parquet文件 (Read parquet file) + df = pd.read_parquet(source_path) + + # 更新episode_index列 (Update episode_index column) + if "episode_index" in df.columns: + print( + f"更新episode_index从 {df['episode_index'].iloc[0]} 到 {new_index} (Update episode_index from {df['episode_index'].iloc[0]} to {new_index})" + ) + df["episode_index"] = new_index + + # 更新index列 (Update index column) + if "index" in df.columns: + if episode_to_frame_index and new_index in episode_to_frame_index: + # 使用预先计算的帧索引起始值 (Use 
pre-calculated frame index start value) + first_index = episode_to_frame_index[new_index] + print( + f"更新index列,起始值: {first_index}(使用全局累积帧计数)(Update index column, start value: {first_index} (using global cumulative frame count))" + ) + else: + # 如果没有提供映射,使用当前的计算方式作为回退 + # (If no mapping provided, use current calculation as fallback) + first_index = new_index * len(df) + print( + f"更新index列,起始值: {first_index}(使用episode索引乘以长度)(Update index column, start value: {first_index} (using episode index multiplied by length))" + ) + + # 更新所有帧的索引 (Update indices for all frames) + df["index"] = [first_index + i for i in range(len(df))] + + # 更新task_index列 (Update task_index column) + if ( + "task_index" in df.columns + and folder_task_mapping + and old_folder in folder_task_mapping + ): + # 获取当前task_index (Get current task_index) + current_task_index = df["task_index"].iloc[0] + + # 检查是否有对应的新索引 (Check if there's a corresponding new index) + if current_task_index in folder_task_mapping[old_folder]: + new_task_index = folder_task_mapping[old_folder][current_task_index] + print( + f"更新task_index从 {current_task_index} 到 {new_task_index} (Update task_index from {current_task_index} to {new_task_index})" + ) + df["task_index"] = new_task_index + else: + print( + f"警告: 找不到task_index {current_task_index}的映射关系 (Warning: No mapping found for task_index {current_task_index})" + ) + + # 计算chunk编号 (Calculate chunk number) + chunk_index = new_index // chunks_size + + # 创建正确的目标目录 (Create correct target directory) + chunk_dir = os.path.join(output_folder, "data", f"chunk-{chunk_index:03d}") + os.makedirs(chunk_dir, exist_ok=True) + + # 构建正确的目标路径 (Build correct target path) + dest_path = os.path.join(chunk_dir, f"episode_{new_index:06d}.parquet") + + # 保存到正确位置 (Save to correct location) + df.to_parquet(dest_path, index=False) + + total_copied += 1 + found = True + print(f"已处理并保存: {dest_path} (Processed and saved: {dest_path})") + break + except Exception as e: + error_msg = f"处理 {source_path} 
失败: {e} (Processing {source_path} failed: {e})" + print(error_msg) + traceback.print_exc() + failed_files.append({"file": source_path, "reason": str(e), "episode": old_index}) + total_failed += 1 + if found: + break + + if not found: + error_msg = f"找不到episode {old_index}的parquet文件,源文件夹: {old_folder}" + print(error_msg) + failed_files.append( + {"file": f"episode_{old_index:06d}.parquet", "reason": "文件未找到", "folder": old_folder} + ) + total_failed += 1 + + print(f"共复制 {total_copied} 个数据文件,{total_failed} 个失败") + + # 打印所有失败的文件详情 (Print details of all failed files) + if failed_files: + print("\n失败的文件详情 (Details of failed files):") + for i, failed in enumerate(failed_files): + print(f"{i + 1}. 文件 (File): {failed['file']}") + if "folder" in failed: + print(f" 文件夹 (Folder): {failed['folder']}") + if "episode" in failed: + print(f" Episode索引 (Episode index): {failed['episode']}") + print(f" 原因 (Reason): {failed['reason']}") + print("---") + + return total_copied > 0 + + +def pad_parquet_data(source_path, target_path, original_dim=14, target_dim=18): + """ + 通过零填充将parquet数据从原始维度扩展到目标维度 + (Extend parquet data from original dimension to target dimension by zero-padding) + + Args: + source_path (str): 源parquet文件路径 (Source parquet file path) + target_path (str): 目标parquet文件路径 (Target parquet file path) + original_dim (int): 原始向量维度 (Original vector dimension) + target_dim (int): 目标向量维度 (Target vector dimension) + """ + # 读取parquet文件 + df = pd.read_parquet(source_path) + + # 打印列名以便调试 + print(f"Columns in {source_path}: {df.columns.tolist()}") + + # 创建新的DataFrame来存储填充后的数据 + new_df = df.copy() + + # 检查observation.state和action列是否存在 + if "observation.state" in df.columns: + # 检查第一行数据,确认是否为向量 + first_state = df["observation.state"].iloc[0] + print(f"First observation.state type: {type(first_state)}, value: {first_state}") + + # 如果是向量(列表或numpy数组) + if isinstance(first_state, (list, np.ndarray)): + # 检查维度 + state_dim = len(first_state) + print(f"observation.state dimension: 
{state_dim}") + + if state_dim < target_dim: + # 填充向量 + print(f"Padding observation.state from {state_dim} to {target_dim} dimensions") + new_df["observation.state"] = df["observation.state"].apply( + lambda x: np.pad(x, (0, target_dim - len(x)), "constant").tolist() + ) + + # 同样处理action列 + if "action" in df.columns: + # 检查第一行数据 + first_action = df["action"].iloc[0] + print(f"First action type: {type(first_action)}, value: {first_action}") + + # 如果是向量 + if isinstance(first_action, (list, np.ndarray)): + # 检查维度 + action_dim = len(first_action) + print(f"action dimension: {action_dim}") + + if action_dim < target_dim: + # 填充向量 + print(f"Padding action from {action_dim} to {target_dim} dimensions") + new_df["action"] = df["action"].apply( + lambda x: np.pad(x, (0, target_dim - len(x)), "constant").tolist() + ) + + # 确保目标目录存在 + os.makedirs(os.path.dirname(target_path), exist_ok=True) + + # 保存到新的parquet文件 + new_df.to_parquet(target_path, index=False) + + print(f"已将{source_path}处理并保存到{target_path}") + + return new_df + + +def count_video_frames_torchvision(video_path): + """ + Count the number of frames in a video file using torchvision + + Args: + video_path (str): + + Returns: + Frame count (int): + """ + try: + import torchvision + + # Ensure torchvision version is recent enough for VideoReader and AV1 support + # (This is a general good practice, specific version checks might be needed + # depending on the exact AV1 library used by torchvision's backend) + # print(f"Torchvision version: {torchvision.__version__}") + # print(f"PyTorch version: {torch.__version__}") + + # VideoReader requires the video path as a string + reader = torchvision.io.VideoReader(video_path, "video") + + # Attempt to get frame count from metadata + # Metadata structure can vary; "video" stream usually has "num_frames" + metadata = reader.get_metadata() + frame_count = 0 + + if "video" in metadata and "num_frames" in metadata["video"] and len(metadata["video"]["num_frames"]) > 0: + # 
num_frames is often a list, take the first element + frame_count = int(metadata["video"]["num_frames"][0]) + if frame_count > 0: + # If metadata provides a positive frame count, we can often trust it. + # For some backends/formats, this might be the most reliable way. + return frame_count + + # If metadata didn't provide a reliable frame count, or to be absolutely sure, + # we can iterate through the frames. + # This is more robust but potentially slower. + count_manually = 0 + for _ in reader: # Iterating through the reader yields frames + count_manually += 1 + + # If manual count is zero but metadata had a count, it might indicate an issue + # or an empty video. Prioritize manual count if it's > 0. + if count_manually > 0: + return count_manually + elif frame_count > 0 : # Fallback to metadata if manual count was 0 but metadata had a value + print(f"Warning: Manual count is 0, but metadata indicates {frame_count} frames. Video might be empty or there was a read issue. Returning metadata count.") + return frame_count + else: + # This case means both metadata (if available) and manual iteration yielded 0. 
+ print(f"Video appears to have no frames: {video_path}") + return 0 + + except ImportError: + print("Warning: torchvision or its dependencies (like ffmpeg) not installed, cannot count video frames") + return 0 + except RuntimeError as e: + # RuntimeError can be raised by VideoReader for various issues (e.g., file not found, corrupt file, unsupported codec by the backend) + if "No video stream found" in str(e): + print(f"Error: No video stream found in video file: {video_path}") + elif "Could not open" in str(e) or "Demuxing video" in str(e): + print(f"Error: Could not open or demux video file (possibly unsupported format or corrupted file): {video_path} - {e}") + else: + print(f"Runtime error counting video frames: {e}") + return 0 + except Exception as e: + print(f"Error counting video frames: {e}") + return 0 + finally: + # VideoReader does not have an explicit close() or release() method. + # It's managed by its destructor when it goes out of scope. + pass + + +def early_validation(source_folders, episode_mapping, default_fps=20, fps=None): + """ + Validate and copy image files from source folders to output folder. + Performs validation first before any copying to ensure dataset consistency. 
+ + Args: + source_folders (list): List of source dataset folder paths + output_folder (str): Output folder path + episode_mapping (list): List of tuples containing (old_folder, old_index, new_index) + default_fps (int): Default frame rate to use if not specified + fps (int): Frame rate to use for video encoding + + Returns: + dict: Validation results containing expected frame count and actual image count for each episode + """ + if fps is None: + info_path = os.path.join(source_folders[0], "meta", "info.json") + if os.path.exists(info_path): + with open(info_path) as f: + info = json.load(f) + fps = info.get("fps", default_fps) + else: + fps = default_fps + + print(f"Using FPS={fps}") + + # Get video path template and video keys + info_path = os.path.join(source_folders[0], "meta", "info.json") + with open(info_path) as f: + info = json.load(f) + + video_path_template = info["video_path"] + image_keys = [] + + for feature_name, feature_info in info["features"].items(): + if feature_info.get("dtype") == "video": + image_keys.append(feature_name) + + print(f"Found video/image keys: {image_keys}") + + # Validate first before copying anything + print("Starting validation of images and videos...") + validation_results = {} + validation_failed = False + + episode_file_mapping = {} + for old_folder, old_index, new_index in episode_mapping: + # Get expected frame count from episodes.jsonl + episode_file = os.path.join(old_folder, "meta", "episodes.jsonl") + expected_frames = 0 + if os.path.exists(episode_file): + if episode_file not in episode_file_mapping: + episodes = load_jsonl(episode_file) + episodes = {ep["episode_index"]: ep for ep in episodes} + episode_file_mapping[episode_file] = episodes + episode_data = episode_file_mapping[episode_file].get(old_index, None) + if episode_data and "length" in episode_data: + expected_frames = episode_data["length"] + + validation_key = f"{old_folder}_{old_index}" + validation_results[validation_key] = { + "expected_frames": 
expected_frames, + "image_counts": {}, + "video_frames": {}, + "old_index": old_index, + "new_index": new_index, + "is_valid": True # Default to valid + } + + # Check each image directory and video + episode_chunk = old_index // info["chunks_size"] + for image_dir in image_keys: + # Find the video file + source_video_path = os.path.join( + old_folder, + video_path_template.format( + episode_chunk=episode_chunk, video_key=image_dir, episode_index=old_index + ), + ) + source_image_dir = os.path.join(old_folder, "images", image_dir, f"episode_{old_index:06d}") + image_dir_exists = os.path.exists(source_image_dir) + video_file_exists = os.path.exists(source_video_path) + if not video_file_exists: + print(f"{colored('WARNING', 'yellow', attrs=['bold'])}: Video file not found for {image_dir}, episode {old_index} in {old_folder}") + if image_dir_exists: + print(" Image directory exists, encoding video from images.") + from lerobot.common.datasets.video_utils import encode_video_frames + encode_video_frames(source_image_dir, source_video_path, fps, overwrite=True) + print(" Encoded video frames successfully.") + else: + print(f"{colored('ERROR', 'red', attrs=['bold'])}: No video or image directory found for {image_dir}, episode {old_index} in {old_folder}") + validation_results[validation_key]["is_valid"] = False + validation_failed = True + continue + + # Count video frames + video_frame_count = count_video_frames_torchvision(source_video_path) + validation_results[validation_key]["video_frames"][image_dir] = video_frame_count + + # Check if image directory exists + + if image_dir_exists: + # Count image files + image_files = sorted([f for f in os.listdir(source_image_dir) if f.endswith('.png')]) + images_count = len(image_files) + validation_results[validation_key]["image_counts"][image_dir] = images_count + + error_msg = f"expected_frames: {expected_frames}, images_count: {images_count}, video_frame_count: {video_frame_count}" + assert expected_frames > 0 and 
expected_frames == images_count, ( + f"{colored('ERROR', 'red', attrs=['bold'])}: Image count should match expected frames for {source_image_dir}.\n {error_msg}" + ) + assert expected_frames >= video_frame_count, ( + f"{colored('ERROR', 'red', attrs=['bold'])}: Video frame count should be less or equal than expected frames for {source_video_path}.\n {error_msg}" + ) + # Validate frame counts + if video_frame_count != expected_frames: + print(f"{colored('WARNING', 'yellow', attrs=['bold'])}: Video frame count mismatch for {source_video_path}") + print(f" Expected: {expected_frames}, Found: {video_frame_count}") + print(f" Re-encoded video frames from {source_image_dir} to {source_video_path}") + + from lerobot.common.datasets.video_utils import encode_video_frames + encode_video_frames(source_image_dir, source_video_path, fps, overwrite=True) + print(" Re-encoded video frames successfully.") + + + else: + print(f"{colored('WARNING', 'yellow', attrs=['bold'])}: No image directory {image_dir} found for episode {old_index} in {old_folder}") + print(" You can ignore this if you are not using images and your video frame count is equal to expected frames.") + # If no images directory, the video frames must match expected frames + if expected_frames > 0 and video_frame_count != expected_frames: + print(f"{colored('ERROR', 'red', attrs=['bold'])}: Video frame count mismatch for {source_video_path}") + print(f" Expected: {expected_frames}, Found: {video_frame_count}") + + validation_results[validation_key]["is_valid"] = False + validation_failed = True + + # Print validation summary + print("\nValidation Results:") + valid_count = sum(1 for result in validation_results.values() if result["is_valid"]) + print(f"{valid_count} of {len(validation_results)} episodes are valid") + + # If validation failed, stop the process + if validation_failed: + print(colored("Validation failed. 
Please fix the issues before continuing.", "red", attrs=["bold"])) + + +def copy_images(source_folders, output_folder, episode_mapping, default_fps=20, fps=None): + """ + Copy image files from source folders to output folder. + This function assumes validation has already been performed with early_validation(). + + Args: + source_folders (list): List of source dataset folder paths + output_folder (str): Output folder path + episode_mapping (list): List of tuples containing (old_folder, old_index, new_index) + default_fps (int): Default frame rate to use if not specified + fps (int): Frame rate to use for video encoding + + Returns: + int: Number of images copied + """ + if fps is None: + info_path = os.path.join(source_folders[0], "meta", "info.json") + if os.path.exists(info_path): + with open(info_path) as f: + info = json.load(f) + fps = info.get("fps", default_fps) + else: + fps = default_fps + + # Get video path template and video keys + info_path = os.path.join(source_folders[0], "meta", "info.json") + with open(info_path) as f: + info = json.load(f) + + video_path_template = info["video_path"] + image_keys = [] + + for feature_name, feature_info in info["features"].items(): + if feature_info.get("dtype") == "video": + image_keys.append(feature_name) + + # Create image directories in output folder + os.makedirs(os.path.join(output_folder, "images"), exist_ok=True) + + print(f"Starting to copy images for {len(image_keys)} video keys...") + total_copied = 0 + skipped_episodes = 0 + + # Copy images for each episode + for old_folder, old_index, new_index in episode_mapping: + episode_chunk = old_index // info["chunks_size"] + new_episode_chunk = new_index // info["chunks_size"] + + episode_copied = False + + for image_dir in image_keys: + # Create target directory for this video key + os.makedirs(os.path.join(output_folder, "images", image_dir), exist_ok=True) + + # Check if source image directory exists + source_image_dir = os.path.join(old_folder, "images", 
image_dir, f"episode_{old_index:06d}") + + if os.path.exists(source_image_dir): + # Create target directory + target_image_dir = os.path.join(output_folder, "images", image_dir, f"episode_{new_index:06d}") + os.makedirs(target_image_dir, exist_ok=True) + + # Copy image files + image_files = sorted([f for f in os.listdir(source_image_dir) if f.endswith('.png')]) + num_images = len(image_files) + + if num_images > 0: + print(f"Copying {num_images} images from {source_image_dir} to {target_image_dir}") + + for image_file in image_files: + try: + # Extract frame number from filename + frame_part = image_file.split('_')[1] if '_' in image_file else image_file + frame_num = int(frame_part.split('.')[0]) + + # Copy the file with consistent naming + dest_file = os.path.join(target_image_dir, f"frame_{frame_num:06d}.png") + shutil.copy2( + os.path.join(source_image_dir, image_file), + dest_file + ) + total_copied += 1 + episode_copied = True + except Exception as e: + print(f"Error copying image {image_file}: {e}") + + if not episode_copied: + skipped_episodes += 1 + + print(f"\nCopied {total_copied} images for {len(episode_mapping) - skipped_episodes} episodes") + if skipped_episodes > 0: + print(f"{colored('WARNING', 'yellow', attrs=['bold'])}: Skipped {skipped_episodes} episodes with no images") + + +def merge_datasets( + source_folders, output_folder, validate_ts=False, tolerance_s=1e-4, + default_fps=30 +): + """ + 将多个数据集文件夹合并为一个,处理索引、维度和元数据 + (Merge multiple dataset folders into one, handling indices, dimensions, and metadata) + + Args: + source_folders (list): 源数据集文件夹路径列表 (List of source dataset folder paths) + output_folder (str): 输出文件夹路径 (Output folder path) + validate_ts (bool): 是否验证时间戳 (Whether to validate timestamps) + tolerance_s (float): 时间戳不连续性的容差值,以秒为单位 (Tolerance for timestamp discontinuities in seconds) + default_fps (float): 默认帧率 (Default frame rate) + """ + # Create output folder if it doesn't exist + os.makedirs(output_folder, exist_ok=True) + 
os.makedirs(os.path.join(output_folder, "meta"), exist_ok=True) + + fps = default_fps + print(f"使用默认FPS值: {fps}") + + # Load episodes from all source folders + all_episodes = [] + all_episodes_stats = [] + all_tasks = [] + + total_frames = 0 + total_episodes = 0 + + # Keep track of episode mapping (old_folder, old_index, new_index) + episode_mapping = [] + + # Collect all stats for proper merging + all_stats_data = [] + + # 添加一个变量来跟踪累积的帧数 + cumulative_frame_count = 0 + + # 创建一个映射,用于存储每个新的episode索引对应的起始帧索引 + episode_to_frame_index = {} + + # 创建一个映射,用于跟踪旧的任务描述到新任务索引的映射 + task_desc_to_new_index = {} + # 创建一个映射,用于存储每个源文件夹和旧任务索引到新任务索引的映射 + folder_task_mapping = {} + + # 首先收集所有不同的任务描述 + all_unique_tasks = [] + + # 从info.json获取chunks_size + info_path = os.path.join(source_folders[0], "meta", "info.json") + # Check if all source folders have images directory + images_dir_exists = all(os.path.exists(os.path.join(folder, "images")) for folder in source_folders) + chunks_size = 1000 # 默认值 + if os.path.exists(info_path): + with open(info_path) as f: + info = json.load(f) + chunks_size = info.get("chunks_size", 1000) + + # 使用更简单的方法计算视频总数 (Use simpler method to calculate total videos) + total_videos = 0 + + for folder in source_folders: + try: + # 从每个数据集的info.json直接获取total_videos + # (Get total_videos directly from each dataset's info.json) + folder_info_path = os.path.join(folder, "meta", "info.json") + if os.path.exists(folder_info_path): + with open(folder_info_path) as f: + folder_info = json.load(f) + if "total_videos" in folder_info: + folder_videos = folder_info["total_videos"] + total_videos += folder_videos + print( + f"从{folder}的info.json中读取到视频数量: {folder_videos} (Read video count from {folder}'s info.json: {folder_videos})" + ) + + # Load episodes + episodes_path = os.path.join(folder, "meta", "episodes.jsonl") + if not os.path.exists(episodes_path): + print(f"Warning: Episodes file not found in {folder}, skipping") + continue + + episodes = load_jsonl(episodes_path) 
+ + # Load episode stats + episodes_stats_path = os.path.join(folder, "meta", "episodes_stats.jsonl") + episodes_stats = [] + if os.path.exists(episodes_stats_path): + episodes_stats = load_jsonl(episodes_stats_path) + + # Create a mapping of episode_index to stats + stats_map = {} + for stat in episodes_stats: + if "episode_index" in stat: + stats_map[stat["episode_index"]] = stat + + # Load tasks + tasks_path = os.path.join(folder, "meta", "tasks.jsonl") + folder_tasks = [] + if os.path.exists(tasks_path): + folder_tasks = load_jsonl(tasks_path) + + # 创建此文件夹的任务映射 + folder_task_mapping[folder] = {} + + # 处理每个任务 + for task in folder_tasks: + task_desc = task["task"] + old_index = task["task_index"] + + # 检查任务描述是否已存在 + if task_desc not in task_desc_to_new_index: + # 添加新任务描述,分配新索引 + new_index = len(all_unique_tasks) + task_desc_to_new_index[task_desc] = new_index + all_unique_tasks.append({"task_index": new_index, "task": task_desc}) + + # 保存此文件夹中旧索引到新索引的映射 + folder_task_mapping[folder][old_index] = task_desc_to_new_index[task_desc] + + # Process all episodes from this folder + for episode in episodes: + old_index = episode["episode_index"] + new_index = total_episodes + + # Update episode index + episode["episode_index"] = new_index + all_episodes.append(episode) + + # Update stats if available + if old_index in stats_map: + stats = stats_map[old_index] + stats["episode_index"] = new_index + all_episodes_stats.append(stats) + + # Add to all_stats_data for proper merging + if "stats" in stats: + all_stats_data.append(stats["stats"]) + + # Add to mapping + episode_mapping.append((folder, old_index, new_index)) + + # Update counters + total_episodes += 1 + total_frames += episode["length"] + + # 处理每个episode时收集此信息 + episode_to_frame_index[new_index] = cumulative_frame_count + cumulative_frame_count += episode["length"] + + # 使用收集的唯一任务列表替换之前的任务处理逻辑 + all_tasks = all_unique_tasks + + except Exception as e: + print(f"Error processing folder {folder}: {e}") + continue + + 
print(f"Processed {total_episodes} episodes from {len(source_folders)} folders") + + # Save combined episodes and stats + save_jsonl(all_episodes, os.path.join(output_folder, "meta", "episodes.jsonl")) + save_jsonl(all_episodes_stats, os.path.join(output_folder, "meta", "episodes_stats.jsonl")) + save_jsonl(all_tasks, os.path.join(output_folder, "meta", "tasks.jsonl")) + + # Merge and save stats + stats_list = [] + for folder in source_folders: + stats_path = os.path.join(folder, "meta", "stats.json") + if os.path.exists(stats_path): + with open(stats_path) as f: + stats = json.load(f) + stats_list.append(stats) + + if stats_list: + # Merge global stats + merged_stats = merge_stats(stats_list) + + # Update merged stats with episode-specific stats if available + if all_stats_data: + # For each feature in the stats + for feature in merged_stats: + if feature in all_stats_data[0]: + # Recalculate statistics based on all episodes + values = [stat[feature] for stat in all_stats_data if feature in stat] + + # Find the maximum dimension for this feature + max_dim = max( + len(np.array(val.get("mean", [0])).flatten()) for val in values if "mean" in val + ) + + # Update count + if "count" in merged_stats[feature]: + merged_stats[feature]["count"] = [ + sum(stat.get("count", [0])[0] for stat in values if "count" in stat) + ] + + # Update min/max with padding + if "min" in merged_stats[feature] and all("min" in stat for stat in values): + # Pad min values + padded_mins = [] + for val in values: + val_array = np.array(val["min"]) + val_flat = val_array.flatten() + if len(val_flat) < max_dim: + padded = np.zeros(max_dim) + padded[: len(val_flat)] = val_flat + padded_mins.append(padded) + else: + padded_mins.append(val_flat) + merged_stats[feature]["min"] = np.minimum.reduce(padded_mins).tolist() + + if "max" in merged_stats[feature] and all("max" in stat for stat in values): + # Pad max values + padded_maxs = [] + for val in values: + val_array = np.array(val["max"]) + val_flat 
= val_array.flatten() + if len(val_flat) < max_dim: + padded = np.zeros(max_dim) + padded[: len(val_flat)] = val_flat + padded_maxs.append(padded) + else: + padded_maxs.append(val_flat) + merged_stats[feature]["max"] = np.maximum.reduce(padded_maxs).tolist() + + # Update mean and std (weighted by count if available) + if "mean" in merged_stats[feature] and all("mean" in stat for stat in values): + # Pad mean values + padded_means = [] + for val in values: + val_array = np.array(val["mean"]) + val_flat = val_array.flatten() + if len(val_flat) < max_dim: + padded = np.zeros(max_dim) + padded[: len(val_flat)] = val_flat + padded_means.append(padded) + else: + padded_means.append(val_flat) + + if all("count" in stat for stat in values): + counts = [stat["count"][0] for stat in values] + total_count = sum(counts) + weighted_means = [ + mean * count / total_count + for mean, count in zip(padded_means, counts, strict=False) + ] + merged_stats[feature]["mean"] = np.sum(weighted_means, axis=0).tolist() + else: + merged_stats[feature]["mean"] = np.mean(padded_means, axis=0).tolist() + + if "std" in merged_stats[feature] and all("std" in stat for stat in values): + # Pad std values + padded_stds = [] + for val in values: + val_array = np.array(val["std"]) + val_flat = val_array.flatten() + if len(val_flat) < max_dim: + padded = np.zeros(max_dim) + padded[: len(val_flat)] = val_flat + padded_stds.append(padded) + else: + padded_stds.append(val_flat) + + if all("count" in stat for stat in values): + counts = [stat["count"][0] for stat in values] + total_count = sum(counts) + variances = [std**2 for std in padded_stds] + weighted_variances = [ + var * count / total_count + for var, count in zip(variances, counts, strict=False) + ] + merged_stats[feature]["std"] = np.sqrt( + np.sum(weighted_variances, axis=0) + ).tolist() + else: + # Simple average of standard deviations + merged_stats[feature]["std"] = np.mean(padded_stds, axis=0).tolist() + + with 
open(os.path.join(output_folder, "meta", "stats.json"), "w") as f: + json.dump(merged_stats, f, indent=4) + + # Update and save info.json + info_path = os.path.join(source_folders[0], "meta", "info.json") + with open(info_path) as f: + info = json.load(f) + + # Update info with correct counts + info["total_episodes"] = total_episodes + info["total_frames"] = total_frames + info["total_tasks"] = len(all_tasks) + info["total_chunks"] = (total_episodes + info["chunks_size"] - 1) // info[ + "chunks_size" + ] # Ceiling division + + # Update splits + info["splits"] = {"train": f"0:{total_episodes}"} + + # 更新视频总数 (Update total videos) + info["total_videos"] = total_videos + print(f"更新视频总数为: {total_videos} (Update total videos to: {total_videos})") + + with open(os.path.join(output_folder, "meta", "info.json"), "w") as f: + json.dump(info, f, indent=4) + + # Validate before video copying + if images_dir_exists: + early_validation( + source_folders, + episode_mapping, + ) + + # Copy video and data files + copy_videos(source_folders, output_folder, episode_mapping) + copy_data_files( + source_folders, + output_folder, + episode_mapping, + fps=fps, + episode_to_frame_index=episode_to_frame_index, + folder_task_mapping=folder_task_mapping, + chunks_size=chunks_size, + ) + + # Copy images and check with video frames + if args.copy_images: + print("Starting to copy images and validate video frame counts") + copy_images(source_folders, output_folder, episode_mapping) + + + print(f"Merged {total_episodes} episodes with {total_frames} frames into {output_folder}") + + +if __name__ == "__main__": + # Set up argument parser + parser = argparse.ArgumentParser(description="Merge datasets from multiple sources.") + + # Add arguments + parser.add_argument("--sources", nargs="+", required=True, help="List of source folder paths") + parser.add_argument("--output", required=True, help="Output folder path") + parser.add_argument("--fps", type=int, default=30, help="Your datasets FPS 
(default: 20)") + parser.add_argument("--copy_images", action="store_true", help="Whether to copy images (default: False)") + + # Parse arguments + args = parser.parse_args() + + # Use parsed arguments + merge_datasets( + args.sources, + args.output, + default_fps=args.fps + ) diff --git a/policy/openpi-InternData-A1/examples/convert_jax_model_to_pytorch.py b/policy/openpi-InternData-A1/examples/convert_jax_model_to_pytorch.py new file mode 100644 index 0000000..a605d0d --- /dev/null +++ b/policy/openpi-InternData-A1/examples/convert_jax_model_to_pytorch.py @@ -0,0 +1,587 @@ +#!/usr/bin/env python3 +""" +Load a JAX model and print all parameter keys, with optional conversion to PyTorch. + +This script loads a JAX model checkpoint using orbax and can either: +1. Print out all the parameter keys in a hierarchical structure for inspection +2. Convert the JAX model to PyTorch format using our PI0Pytorch model + +Usage: + # Just inspect keys: + python examples/convert_jax_model_to_pytorch.py --checkpoint_dir /path/to/checkpoint --inspect_only + python examples/convert_jax_model_to_pytorch.py --checkpoint_dir /path/to/checkpoint --inspect_only + + # Convert to PyTorch: + python examples/convert_jax_model_to_pytorch.py --checkpoint_dir /path/to/checkpoint --output_path /path/to/output + python examples/convert_jax_model_to_pytorch.py --checkpoint_dir /path/to/checkpoint --output_path /path/to/output + +Example: + # pi0_droid + python examples/convert_jax_model_to_pytorch.py --checkpoint_dir /home/$USER/.cache/openpi/openpi-assets/checkpoints/pi0_droid/params --output_path /home/$USER/.cache/openpi/openpi-assets/checkpoints/pi0_droid_pytorch + + # pi0_aloha_sim + python examples/convert_jax_model_to_pytorch.py --checkpoint_dir /home/$USER/.cache/openpi/openpi-assets/checkpoints/pi0_aloha_sim/params --output_path /home/$USER/.cache/openpi/openpi-assets/checkpoints/pi0_aloha_sim_pytorch + + # pi05_droid + python examples/convert_jax_model_to_pytorch.py --checkpoint_dir 
/home/$USER/.cache/openpi/openpi-assets/checkpoints/pi05_droid/params --output_path /home/$USER/.cache/openpi/openpi-assets/checkpoints/pi05_droid_pytorch +""" + +import json +import os +import pathlib +import shutil +from typing import Literal + +from flax.nnx import traversals +import numpy as np +import orbax.checkpoint as ocp +import safetensors +import torch +import tyro + +import openpi.models.gemma +import openpi.models.model +import openpi.models.pi0_config +import openpi.models_pytorch.pi0_pytorch +from openpi.training import utils +import openpi.training.config as _config + + +def slice_paligemma_state_dict(state_dict, config): + """Convert PaliGemma JAX parameters to PyTorch format.""" + suffix = "/value" if "img/embedding/kernel/value" in state_dict else "" + + # patch embeddings + jax_key = f"img/embedding/kernel{suffix}" + pytorch_key = "paligemma_with_expert.paligemma.model.vision_tower.vision_model.embeddings.patch_embedding.weight" + state_dict[pytorch_key] = state_dict.pop(jax_key).transpose(3, 2, 0, 1) + + jax_key = f"img/embedding/bias{suffix}" + pytorch_key = "paligemma_with_expert.paligemma.model.vision_tower.vision_model.embeddings.patch_embedding.bias" + state_dict[pytorch_key] = state_dict.pop(jax_key) + + # positional embeddings + jax_key = f"img/pos_embedding{suffix}" + pytorch_key = "paligemma_with_expert.paligemma.model.vision_tower.vision_model.embeddings.position_embedding.weight" + state_dict[pytorch_key] = state_dict.pop(jax_key).reshape(-1, config.vision_config.hidden_size) + + # extract vision layers to be sliced at index 0. There are 27 layers in the base model. 
+ encoderblock_layernorm0_scale = state_dict.pop(f"img/Transformer/encoderblock/LayerNorm_0/scale{suffix}") + encoderblock_layernorm0_bias = state_dict.pop(f"img/Transformer/encoderblock/LayerNorm_0/bias{suffix}") + encoderblock_layernorm1_scale = state_dict.pop(f"img/Transformer/encoderblock/LayerNorm_1/scale{suffix}") + encoderblock_layernorm1_bias = state_dict.pop(f"img/Transformer/encoderblock/LayerNorm_1/bias{suffix}") + + encoderblock_mlp_dense0_kernel = state_dict.pop(f"img/Transformer/encoderblock/MlpBlock_0/Dense_0/kernel{suffix}") + encoderblock_mlp_dense0_bias = state_dict.pop(f"img/Transformer/encoderblock/MlpBlock_0/Dense_0/bias{suffix}") + encoderblock_mlp_dense1_kernel = state_dict.pop(f"img/Transformer/encoderblock/MlpBlock_0/Dense_1/kernel{suffix}") + encoderblock_mlp_dense1_bias = state_dict.pop(f"img/Transformer/encoderblock/MlpBlock_0/Dense_1/bias{suffix}") + + encoderblock_attention_0_key_kernel = state_dict.pop( + f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/key/kernel{suffix}" + ) + encoderblock_attention_0_key_bias = state_dict.pop( + f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/key/bias{suffix}" + ) + encoderblock_attention_0_value_kernel = state_dict.pop( + f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/value/kernel{suffix}" + ) + encoderblock_attention_0_value_bias = state_dict.pop( + f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/value/bias{suffix}" + ) + encoderblock_attention_0_query_kernel = state_dict.pop( + f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/query/kernel{suffix}" + ) + encoderblock_attention_0_query_bias = state_dict.pop( + f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/query/bias{suffix}" + ) + encoderblock_attention_0_out_kernel = state_dict.pop( + f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/out/kernel{suffix}" + ) + encoderblock_attention_0_out_bias = state_dict.pop( + 
f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/out/bias{suffix}" + ) + + for i in range(config.vision_config.num_hidden_layers): + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.layer_norm1.weight" + ] = encoderblock_layernorm0_scale[i].transpose() + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.layer_norm1.bias" + ] = encoderblock_layernorm0_bias[i] + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.layer_norm2.weight" + ] = encoderblock_layernorm1_scale[i].transpose() + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.layer_norm2.bias" + ] = encoderblock_layernorm1_bias[i] + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.mlp.fc1.weight" + ] = encoderblock_mlp_dense0_kernel[i].transpose() + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.mlp.fc1.bias" + ] = encoderblock_mlp_dense0_bias[i] + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.mlp.fc2.weight" + ] = encoderblock_mlp_dense1_kernel[i].transpose() + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.mlp.fc2.bias" + ] = encoderblock_mlp_dense1_bias[i] + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.self_attn.k_proj.weight" + ] = encoderblock_attention_0_key_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.self_attn.k_proj.bias" + ] = encoderblock_attention_0_key_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) + state_dict[ + 
f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.self_attn.v_proj.weight" + ] = encoderblock_attention_0_value_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.self_attn.v_proj.bias" + ] = encoderblock_attention_0_value_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.self_attn.q_proj.weight" + ] = encoderblock_attention_0_query_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.self_attn.q_proj.bias" + ] = encoderblock_attention_0_query_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.self_attn.out_proj.weight" + ] = encoderblock_attention_0_out_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() + state_dict[ + f"paligemma_with_expert.paligemma.model.vision_tower.vision_model.encoder.layers.{i}.self_attn.out_proj.bias" + ] = encoderblock_attention_0_out_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) + + jax_key = f"img/Transformer/encoder_norm/scale{suffix}" + pytorch_key = "paligemma_with_expert.paligemma.model.vision_tower.vision_model.post_layernorm.weight" + state_dict[pytorch_key] = state_dict.pop(jax_key).transpose() + + jax_key = f"img/Transformer/encoder_norm/bias{suffix}" + pytorch_key = "paligemma_with_expert.paligemma.model.vision_tower.vision_model.post_layernorm.bias" + state_dict[pytorch_key] = state_dict.pop(jax_key) + + # multimodal projector + jax_key = f"img/head/kernel{suffix}" + pytorch_key = "paligemma_with_expert.paligemma.model.multi_modal_projector.linear.weight" + state_dict[pytorch_key] = 
state_dict.pop(jax_key).transpose() + + jax_key = f"img/head/bias{suffix}" + pytorch_key = "paligemma_with_expert.paligemma.model.multi_modal_projector.linear.bias" + state_dict[pytorch_key] = state_dict.pop(jax_key) + + # text decoder (gemma) + jax_key = f"llm/embedder/input_embedding{suffix}" + pytorch_key = "paligemma_with_expert.paligemma.model.language_model.embed_tokens.weight" + state_dict[pytorch_key] = state_dict.pop(jax_key) + + # pop the einsum attention + mlp representations + llm_attention_attn_vec_einsum = state_dict.pop(f"llm/layers/attn/attn_vec_einsum/w{suffix}") + llm_attention_kv_einsum = state_dict.pop(f"llm/layers/attn/kv_einsum/w{suffix}") + llm_attention_q_einsum = state_dict.pop(f"llm/layers/attn/q_einsum/w{suffix}") + + llm_mlp_gating_einsum = state_dict.pop(f"llm/layers/mlp/gating_einsum{suffix}") + llm_mlp_linear = state_dict.pop(f"llm/layers/mlp/linear{suffix}") + + llm_input_layernorm = state_dict.pop(f"llm/layers/pre_attention_norm/scale{suffix}") + llm_post_attention_layernorm = state_dict.pop(f"llm/layers/pre_ffw_norm/scale{suffix}") + + for i in range(config.text_config.num_hidden_layers): + q_proj_weight_reshaped = ( + llm_attention_q_einsum[i] + .transpose(0, 2, 1) + .reshape( + config.text_config.num_attention_heads * config.text_config.head_dim, config.text_config.hidden_size + ) + ) + state_dict[f"paligemma_with_expert.paligemma.model.language_model.layers.{i}.self_attn.q_proj.weight"] = ( + q_proj_weight_reshaped + ) + + k_proj_weight_reshaped = llm_attention_kv_einsum[i, 0, 0].transpose() + state_dict[f"paligemma_with_expert.paligemma.model.language_model.layers.{i}.self_attn.k_proj.weight"] = ( + k_proj_weight_reshaped + ) + v_proj_weight_reshaped = llm_attention_kv_einsum[i, 1, 0].transpose() + state_dict[f"paligemma_with_expert.paligemma.model.language_model.layers.{i}.self_attn.v_proj.weight"] = ( + v_proj_weight_reshaped + ) + + o_proj_weight_reshaped = ( + llm_attention_attn_vec_einsum[i] + .transpose(2, 0, 1) + 
.reshape( + config.text_config.num_attention_heads * config.text_config.head_dim, config.text_config.hidden_size + ) + ) + state_dict[f"paligemma_with_expert.paligemma.model.language_model.layers.{i}.self_attn.o_proj.weight"] = ( + o_proj_weight_reshaped + ) + + gate_proj_weight = llm_mlp_gating_einsum[i, 0] + state_dict[f"paligemma_with_expert.paligemma.model.language_model.layers.{i}.mlp.gate_proj.weight"] = ( + gate_proj_weight.transpose() + ) + up_proj_weight = llm_mlp_gating_einsum[i, 1] + state_dict[f"paligemma_with_expert.paligemma.model.language_model.layers.{i}.mlp.up_proj.weight"] = ( + up_proj_weight.transpose() + ) + state_dict[f"paligemma_with_expert.paligemma.model.language_model.layers.{i}.mlp.down_proj.weight"] = ( + llm_mlp_linear[i].transpose() + ) + state_dict[f"paligemma_with_expert.paligemma.model.language_model.layers.{i}.input_layernorm.weight"] = ( + llm_input_layernorm[i] + ) + state_dict[ + f"paligemma_with_expert.paligemma.model.language_model.layers.{i}.post_attention_layernorm.weight" + ] = llm_post_attention_layernorm[i] + + jax_key = f"llm/final_norm/scale{suffix}" + pytorch_key = "paligemma_with_expert.paligemma.model.language_model.norm.weight" + state_dict[pytorch_key] = state_dict.pop(jax_key) + + expert_dict = {} + final_state_dict = {} + + # Expert-related keys to extract (including pi05 Dense layer parameters) + expert_keys = [ + f"llm/final_norm_1/scale{suffix}", + f"llm/final_norm_1/Dense_0/bias{suffix}", + f"llm/final_norm_1/Dense_0/kernel{suffix}", + f"llm/layers/attn/attn_vec_einsum_1/w{suffix}", + f"llm/layers/attn/kv_einsum_1/w{suffix}", + f"llm/layers/attn/q_einsum_1/w{suffix}", + f"llm/layers/mlp_1/gating_einsum{suffix}", + f"llm/layers/mlp_1/linear{suffix}", + f"llm/layers/pre_attention_norm_1/scale{suffix}", + f"llm/layers/pre_attention_norm_1/Dense_0/bias{suffix}", + f"llm/layers/pre_attention_norm_1/Dense_0/kernel{suffix}", + f"llm/layers/pre_ffw_norm_1/scale{suffix}", + 
f"llm/layers/pre_ffw_norm_1/Dense_0/bias{suffix}", + f"llm/layers/pre_ffw_norm_1/Dense_0/kernel{suffix}", + ] + + for key, value in state_dict.items(): + if key not in expert_keys: + final_state_dict[key] = torch.from_numpy(value) + else: + expert_dict[key] = value + + return final_state_dict, expert_dict + + +def slice_gemma_state_dict(state_dict, config, *, num_expert, checkpoint_dir, pi05): + """Convert Gemma JAX parameters to PyTorch format.""" + # Add missing attributes to config if they don't exist + if not hasattr(config, "vocab_size"): + config.vocab_size = 257152 # PALIGEMMA_VOCAB_SIZE + if not hasattr(config, "hidden_size"): + config.hidden_size = config.width + if not hasattr(config, "num_hidden_layers"): + config.num_hidden_layers = config.depth + if not hasattr(config, "num_attention_heads"): + config.num_attention_heads = config.num_heads + + suffix = "/value" if f"llm/layers/attn/attn_vec_einsum_{num_expert}/w/value" in state_dict else "" + + llm_attention_attn_vec_einsum = state_dict.pop(f"llm/layers/attn/attn_vec_einsum_{num_expert}/w{suffix}") + llm_attention_kv_einsum = state_dict.pop(f"llm/layers/attn/kv_einsum_{num_expert}/w{suffix}") + llm_attention_q_einsum = state_dict.pop(f"llm/layers/attn/q_einsum_{num_expert}/w{suffix}") + + llm_mlp_gating_einsum = state_dict.pop(f"llm/layers/mlp_{num_expert}/gating_einsum{suffix}") + llm_mlp_linear = state_dict.pop(f"llm/layers/mlp_{num_expert}/linear{suffix}") + + # Check if we have Dense layers (for pi05/adaptive normalization) or scale layers (for regular pi0) + if "pi05" in checkpoint_dir: + # Pi05 with adaptive normalization + llm_input_layernorm_bias = state_dict.pop(f"llm/layers/pre_attention_norm_{num_expert}/Dense_0/bias{suffix}") + llm_post_attention_layernorm_bias = state_dict.pop(f"llm/layers/pre_ffw_norm_{num_expert}/Dense_0/bias{suffix}") + llm_input_layernorm_kernel = state_dict.pop( + f"llm/layers/pre_attention_norm_{num_expert}/Dense_0/kernel{suffix}" + ) + 
llm_post_attention_layernorm_kernel = state_dict.pop( + f"llm/layers/pre_ffw_norm_{num_expert}/Dense_0/kernel{suffix}" + ) + else: + # Regular pi0 with standard RMSNorm + llm_input_layernorm = state_dict.pop(f"llm/layers/pre_attention_norm_{num_expert}/scale{suffix}") + llm_post_attention_layernorm = state_dict.pop(f"llm/layers/pre_ffw_norm_{num_expert}/scale{suffix}") + + for i in range(config.num_hidden_layers): + q_proj_weight_reshaped = ( + llm_attention_q_einsum[i] + .transpose(0, 2, 1) + .reshape(config.num_attention_heads * config.head_dim, config.hidden_size) + ) + state_dict[f"paligemma_with_expert.gemma_expert.model.layers.{i}.self_attn.q_proj.weight"] = ( + q_proj_weight_reshaped + ) + + k_proj_weight_reshaped = llm_attention_kv_einsum[i, 0, 0].transpose() + state_dict[f"paligemma_with_expert.gemma_expert.model.layers.{i}.self_attn.k_proj.weight"] = ( + k_proj_weight_reshaped + ) + v_proj_weight_reshaped = llm_attention_kv_einsum[i, 1, 0].transpose() + state_dict[f"paligemma_with_expert.gemma_expert.model.layers.{i}.self_attn.v_proj.weight"] = ( + v_proj_weight_reshaped + ) + + o_proj_weight_reshaped = ( + llm_attention_attn_vec_einsum[i] + .reshape(config.num_attention_heads * config.head_dim, config.hidden_size) + .transpose(1, 0) + ) + state_dict[f"paligemma_with_expert.gemma_expert.model.layers.{i}.self_attn.o_proj.weight"] = ( + o_proj_weight_reshaped + ) + + gate_proj_weight = llm_mlp_gating_einsum[i, 0] + state_dict[f"paligemma_with_expert.gemma_expert.model.layers.{i}.mlp.gate_proj.weight"] = ( + gate_proj_weight.transpose() + ) + up_proj_weight = llm_mlp_gating_einsum[i, 1] + state_dict[f"paligemma_with_expert.gemma_expert.model.layers.{i}.mlp.up_proj.weight"] = ( + up_proj_weight.transpose() + ) + state_dict[f"paligemma_with_expert.gemma_expert.model.layers.{i}.mlp.down_proj.weight"] = llm_mlp_linear[ + i + ].transpose() + + if "pi05" in checkpoint_dir: + # Pi05 with adaptive normalization - use Dense layer parameters directly + 
state_dict[f"paligemma_with_expert.gemma_expert.model.layers.{i}.input_layernorm.dense.bias"] = ( + llm_input_layernorm_bias[i] + ) + state_dict[f"paligemma_with_expert.gemma_expert.model.layers.{i}.post_attention_layernorm.dense.bias"] = ( + llm_post_attention_layernorm_bias[i] + ) + state_dict[f"paligemma_with_expert.gemma_expert.model.layers.{i}.input_layernorm.dense.weight"] = ( + llm_input_layernorm_kernel[i].transpose() + ) + state_dict[f"paligemma_with_expert.gemma_expert.model.layers.{i}.post_attention_layernorm.dense.weight"] = ( + llm_post_attention_layernorm_kernel[i].transpose() + ) + else: + # Regular pi0 with standard RMSNorm + state_dict[f"paligemma_with_expert.gemma_expert.model.layers.{i}.input_layernorm.weight"] = ( + llm_input_layernorm[i] + ) + state_dict[f"paligemma_with_expert.gemma_expert.model.layers.{i}.post_attention_layernorm.weight"] = ( + llm_post_attention_layernorm[i] + ) + + # Handle final norm layer + if "pi05" in checkpoint_dir: + # Pi05 with adaptive normalization - use Dense layer parameters directly + final_norm_bias = state_dict.pop(f"llm/final_norm_{num_expert}/Dense_0/bias{suffix}") + final_norm_kernel = state_dict.pop(f"llm/final_norm_{num_expert}/Dense_0/kernel{suffix}") + state_dict["paligemma_with_expert.gemma_expert.model.norm.dense.bias"] = final_norm_bias + state_dict["paligemma_with_expert.gemma_expert.model.norm.dense.weight"] = final_norm_kernel.transpose() + else: + # Regular pi0 with standard RMSNorm + state_dict["paligemma_with_expert.gemma_expert.model.norm.weight"] = state_dict.pop( + f"llm/final_norm_{num_expert}/scale{suffix}" + ) + + # state_dict["paligemma_with_expert.gemma_expert.lm_head.weight"] = embedding_vector # weights are tied. 
+ + final_state_dict = {} + for key, value in state_dict.items(): + if not isinstance(value, torch.Tensor): + final_state_dict[key] = torch.from_numpy(value) + else: + final_state_dict[key] = value + + return final_state_dict + + +def slice_initial_orbax_checkpoint(checkpoint_dir: str, restore_precision: str | None = None): + """Load and process params by restoring via JAX model loader first. + This respects dtype conversions that occur during model restore. + """ + # Use repository restore utility to load a pure dict of params (value suffix removed) + params = openpi.models.model.restore_params( + f"{checkpoint_dir}/params/", restore_type=np.ndarray, dtype=restore_precision + ) + + return {"paligemma_params": traversals.flatten_mapping(params["PaliGemma"], sep="/"), "projection_params": params} + + +def load_jax_model_and_print_keys(checkpoint_dir: str): + """ + Load JAX model from checkpoint and print all parameter keys. + + Args: + checkpoint_dir: Path to the checkpoint directory + """ + checkpoint_dir = os.path.abspath(checkpoint_dir) if not checkpoint_dir.startswith("gs://") else checkpoint_dir + # Initialize checkpointer + checkpointer = ocp.PyTreeCheckpointer() + metadata = checkpointer.metadata(f"{checkpoint_dir}/params") + print(utils.array_tree_to_info(metadata)) + + +def convert_pi0_checkpoint( + checkpoint_dir: str, precision: str, output_path: str, model_config: openpi.models.pi0_config.Pi0Config +): + """ + Convert PI0 JAX checkpoint to PyTorch format. 
+ + Args: + checkpoint_dir: Path to the JAX checkpoint + precision: Model precision (float32, bfloat16, float16) + output_path: Path to save the converted PyTorch model + model_config: Model config + """ + print(f"Converting PI0 checkpoint from {checkpoint_dir} to {output_path}") + print(f"Model config: {model_config}") + + # Break down orbax ckpts by restoring via JAX to respect dtype + initial_params = slice_initial_orbax_checkpoint(checkpoint_dir=checkpoint_dir, restore_precision="float32") + + # Process projection params + if model_config.pi05: + keys = [ + "action_in_proj", + "action_out_proj", + "time_mlp_in", + "time_mlp_out", + ] + else: + keys = [ + "state_proj", + "action_in_proj", + "action_out_proj", + "action_time_mlp_in", + "action_time_mlp_out", + ] + + projection_params = {} + for key in keys: + kernel_params = initial_params["projection_params"][key]["kernel"] + bias_params = initial_params["projection_params"][key]["bias"] + if isinstance(kernel_params, dict): + weight = kernel_params["value"] + bias = bias_params["value"] + else: + weight = kernel_params + bias = bias_params + + pytorch_weight_key = f"{key}.weight" + pytorch_bias_key = f"{key}.bias" + + projection_params[pytorch_weight_key] = torch.from_numpy(np.array(weight)).T + projection_params[pytorch_bias_key] = torch.from_numpy(np.array(bias)) + + # Create configs based on checkpoint path + # All models use the same PaliGemma config structure + class PaliGemmaConfig: + def __init__(self): + self.vision_config = type( + "obj", + (object,), + { + "hidden_size": 1152, + "num_hidden_layers": 27, + "num_attention_heads": 16, + "intermediate_size": 4304, + "patch_size": 14, + "projection_dim": 2048, + }, + )() + self.text_config = type( + "obj", + (object,), + { + "hidden_size": 2048, + "num_hidden_layers": 18, + "num_attention_heads": 8, + "head_dim": 256, + "intermediate_size": 16384, + }, + )() + + paligemma_config = PaliGemmaConfig() + action_expert_config = 
openpi.models.gemma.get_config("gemma_300m") + + # Process PaliGemma weights + paligemma_params, expert_params = slice_paligemma_state_dict(initial_params["paligemma_params"], paligemma_config) + + # Process Gemma weights from expert_params + gemma_params = slice_gemma_state_dict( + expert_params, action_expert_config, num_expert=1, checkpoint_dir=checkpoint_dir, pi05=model_config.pi05 + ) + + # Instantiate model + pi0_model = openpi.models_pytorch.pi0_pytorch.PI0Pytorch(model_config) + + # Combine all parameters (no prefix needed for our model structure) + all_params = {**paligemma_params, **gemma_params, **projection_params} + + # Load state dict + pi0_model.load_state_dict(all_params, strict=False) + + if precision == "float32": + pi0_model = pi0_model.to(torch.float32) + elif precision == "bfloat16": + pi0_model = pi0_model.to(torch.bfloat16) + else: + raise ValueError(f"Invalid precision: {precision}") + + # Save the converted model using safetensors + os.makedirs(output_path, exist_ok=True) + + # Save model weights as SafeTensors using save_model to handle tied weights + safetensors.torch.save_model(pi0_model, os.path.join(output_path, "model.safetensors")) + + # Copy assets folder if it exists + assets_source = pathlib.Path(checkpoint_dir).parent / "assets" + if assets_source.exists(): + assets_dest = pathlib.Path(output_path) / "assets" + if assets_dest.exists(): + shutil.rmtree(assets_dest) + shutil.copytree(assets_source, assets_dest) + + # Save config as JSON for reference + config_dict = { + "action_dim": model_config.action_dim, + "action_horizon": model_config.action_horizon, + "paligemma_variant": model_config.paligemma_variant, + "action_expert_variant": model_config.action_expert_variant, + "precision": precision, + } + with open(os.path.join(output_path, "config.json"), "w") as f: + json.dump(config_dict, f, indent=2) + + print("Model conversion completed successfully!") + print(f"Model saved to {output_path}") + + +def main( + checkpoint_dir: 
str, + config_name: str, + output_path: str | None = None, + precision: Literal["float32", "bfloat16", "float16"] = "bfloat16", + *, + inspect_only: bool = False, +): + """Load JAX model and optionally convert to PyTorch. + + Args: + checkpoint_dir: Path to the JAX checkpoint directory + output_path: Path to save converted PyTorch model (required for conversion) + precision: Precision for model conversion + inspect_only: Only inspect parameter keys, don't convert + """ + model_config = _config.get_config(config_name).model + if not isinstance(model_config, openpi.models.pi0_config.Pi0Config): + raise ValueError(f"Config {config_name} is not a Pi0Config") + if inspect_only: + load_jax_model_and_print_keys(checkpoint_dir) + else: + if not output_path: + print("Error: --output_path is required for conversion. Use --inspect_only to only view keys.") + return + convert_pi0_checkpoint(checkpoint_dir, precision, output_path, model_config) + + +if __name__ == "__main__": + tyro.cli(main) diff --git a/policy/openpi-InternData-A1/examples/droid/README.md b/policy/openpi-InternData-A1/examples/droid/README.md new file mode 100644 index 0000000..6a7edee --- /dev/null +++ b/policy/openpi-InternData-A1/examples/droid/README.md @@ -0,0 +1,84 @@ +# DROID Policies in openpi + +We offer instructions for: +- [Running inference for our best $pi_{0.5}$-DROID policy](./README.md#running-droid-inference) +- [Running inference for other pre-trained DROID policies ($\pi_0$, $\pi_0$-FAST, ...)](./README.md#running-roboarena-baseline-policies) +- [Pre-training *generalist* policies on the *full* DROID dataset](./README_train.md#training-on-droid) +- [Fine-tuning expert $\pi_{0.5}$ on your custom DROID dataset](./README_train.md#fine-tuning-on-custom-droid-datasets) + +## Running DROID Inference + +This example shows how to run the fine-tuned $\pi_{0.5}$-DROID model on the [DROID robot platform](https://github.com/droid-dataset/droid). 
Based on the [public RoboArena benchmark](https://robo-arena.github.io/leaderboard), this is currently our strongest generalist DROID policy. + + +### Step 1: Start a policy server + +Since the DROID control laptop does not have a powerful GPU, we will start a remote policy server on a different machine with a more powerful GPU and then query it from the DROID control laptop during inference. + +1. On a machine with a powerful GPU (~NVIDIA 4090), clone and install the `openpi` repository following the instructions in the [README](https://github.com/Physical-Intelligence/openpi). +2. Start the OpenPI server via the following command: + +```bash +uv run scripts/serve_policy.py policy:checkpoint --policy.config=pi05_droid --policy.dir=gs://openpi-assets/checkpoints/pi05_droid +``` + +You can also run the equivalent command below: + +```bash +uv run scripts/serve_policy.py --env=DROID +``` + +### Step 2: Run the DROID robot + +1. Make sure you have the most recent version of the DROID package installed on both the DROID control laptop and the NUC. +2. On the control laptop, activate your DROID conda environment. +3. Clone the openpi repo and install the openpi client, which we will use to connect to the policy server (this has very few dependencies and should be very fast to install): with the DROID conda environment activated, run `cd $OPENPI_ROOT/packages/openpi-client && pip install -e .`. +4. Install `tyro`, which we will use for command line parsing: `pip install tyro`. +5. Copy the `main.py` file from this directory to the `$DROID_ROOT/scripts` directory. +6. Replace the camera IDs in the `main.py` file with the IDs of your cameras (you can find the camera IDs by running `ZED_Explorer` in the command line, which will open a tool that shows you all connected cameras and their IDs -- you can also use it to make sure that the cameras are well-positioned to see the scene you want the robot to interact with). +7. Run the `main.py` file. 
Make sure to point the IP and host address to the policy server. (To make sure the server machine is reachable from the DROID laptop, you can run `ping ` from the DROID laptop.) Also make sure to specify the external camera to use for the policy (we only input one external camera), choose from ["left", "right"]. + +```bash +python3 scripts/main.py --remote_host= --remote_port= --external_camera="left" +``` + +The script will ask you to enter a free-form language instruction for the robot to follow. Make sure to point the cameras at the scene you want the robot to interact with. You _do not_ need to carefully control camera angle, object positions, etc. The policy is fairly robust in our experience. Happy prompting! + +## Troubleshooting + +| Issue | Solution | +|-------|----------| +| Cannot reach policy server | Make sure the server is running and the IP and port are correct. You can check that the server machine is reachable by running `ping ` from the DROID laptop. | +| Cannot find cameras | Make sure the camera IDs are correct and that the cameras are connected to the DROID laptop. Sometimes replugging the cameras can help. You can check all connected cameras by running `ZED_Explore` in the command line. | +| Policy inference is slow / inconsistent | Try using a wired internet connection for the DROID laptop to reduce latency (0.5 - 1 sec latency per chunk is normal). | +| Policy does not perform the task well | In our experiments, the policy could perform simple table top manipulation tasks (pick-and-place) across a wide range of environments, camera positions, and lighting conditions. If the policy does not perform the task well, you can try modifying the scene or object placement to make the task easier. Also make sure that the camera view you are passing to the policy can see all relevant objects in the scene (the policy is only conditioned on a single external camera + wrist camera, make sure you are feeding the desired camera to the policy). 
Use `ZED_Explore` to check that the camera view you are passing to the policy can see all relevant objects in the scene. Finally, the policy is far from perfect and will fail on more complex manipulation tasks, but it usually makes a decent effort. :) | + + +## Running Other Policies + +We provide configs for running the baseline DROID policies from the [RoboArena](https://robo-arena.github.io/) paper. Simply run the commands below to start inference servers for the respective policies. Then follow the instructions above to run evaluation on the DROID robot. + +``` +# Train from pi0-FAST, using FAST tokenizer +uv run scripts/serve_policy.py policy:checkpoint --policy.config=pi0_fast_droid --policy.dir=gs://openpi-assets/checkpoints/pi0_fast_droid + +# Train from pi0, using flow matching +uv run scripts/serve_policy.py policy:checkpoint --policy.config=pi0_droid --policy.dir=gs://openpi-assets/checkpoints/pi0_droid + +# Trained from PaliGemma, using RT-2 / OpenVLA style binning tokenizer. +uv run scripts/serve_policy.py policy:checkpoint --policy.config=paligemma_binning_droid --policy.dir=gs://openpi-assets/checkpoints/roboarena/paligemma_binning_droid + +# Trained from PaliGemma, using FAST tokenizer (using universal FAST+ tokenizer). +uv run scripts/serve_policy.py policy:checkpoint --policy.config=paligemma_fast_droid --policy.dir=gs://openpi-assets/checkpoints/roboarena/paligemma_fast_droid + +# Trained from PaliGemma, using FAST tokenizer (tokenizer trained on DROID dataset). +uv run scripts/serve_policy.py policy:checkpoint --policy.config=paligemma_fast_specialist_droid --policy.dir=gs://openpi-assets/checkpoints/roboarena/paligemma_fast_specialist_droid + +# Trained from PaliGemma, using FSQ tokenizer. +uv run scripts/serve_policy.py policy:checkpoint --policy.config=paligemma_vq_droid --policy.dir=gs://openpi-assets/checkpoints/roboarena/paligemma_vq_droid + +# pi0-style diffusion / flow VLA, trained on DROID from PaliGemma. 
+uv run scripts/serve_policy.py policy:checkpoint --policy.config=paligemma_diffusion_droid --policy.dir=gs://openpi-assets/checkpoints/roboarena/paligemma_diffusion_droid +``` + +You can find the inference configs in [roboarena_config.py](../../src/openpi/training/misc/roboarena_config.py). diff --git a/policy/openpi-InternData-A1/examples/droid/README_train.md b/policy/openpi-InternData-A1/examples/droid/README_train.md new file mode 100644 index 0000000..c8ad660 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/droid/README_train.md @@ -0,0 +1,106 @@ +# Training on DROID + +Here we describe how to fine-tune the pi0.5 model on the *full* DROID dataset. This is an approximate open-source reproduction of the pi05-DROID training pipeline. +(small differences in data loading and the used action space) -- For a tutorial on how to fine-tune your model with a smaller, custom dataset collected on the DROID platform, see below. + +In contrast to the rest of openpi, which uses LeRobot for data loading, we need to use RLDS as the data format for full DROID training (since at the moment LeRobot isn't scalable enough +for larger datasets like DROID -- they are working on improving it though). Below, we provide instructions for updating your openpi environment for RLDS data loading and where to download the DROID dataset. + +## Install + +We need a few additional dependencies for RLDS data loading. Run: +```bash +uv sync --group rlds +``` + +## Download DROID dataset + +You can download the DROID dataset with the following command (after installing the `gsutil` google cloud CLI): +``` +gsutil -m cp -r gs://gresearch/robotics/droid/1.0.1 /droid/1.0.1 +``` + +Note that downloading version 1.0.1 is important (not v1.0.0): it contains the complete set of language annotations (~75k episodes) while v1.0.0 only has annotations for 30k episodes. 
If for some reason you would like to use another version, modify the line `version="1.0.1"` in the `DroidRldsDataset` object [here](src/openpi/training/droid_rlds_dataset.py). + +You will need 1.8TB of disk storage to download the DROID RLDS dataset. + +## Run + +First, change the `rlds_data_dir` path in your `TrainConfig` to the directory that you downloaded the `droid` dataset into (see [src/openpi/training/config.py](src/openpi/training/config.py)). + +Then, compute normalization statistics (this will take ~10 minutes): +```bash +uv run --group rlds scripts/compute_norm_stats.py --config-name pi05_full_droid_finetune --max-frames 10_000_000 +``` + +Run training: +```bash +XLA_PYTHON_CLIENT_MEM_FRACTION=0.9 uv run --group rlds scripts/train.py pi05_full_droid_finetune --exp-name=my_experiment --overwrite +``` + +**Note**: The original pi0.5-DROID model was trained with joint velocity actions. +Joint velocity actions are not compatible with simulated evaluation environments (much harder to simulate). +Thus, we do not recommend training with joint velocity actions and instead use joint position actions here. + + +## Compute Requirements + +Our DROID training config requires approximately 2 days on 8x H100 GPUs for convergence (100k iterations, bs256, approx. 1 epoch). +If you start from PaliGemma instead of pi0 initialization, plan with ~5 days on 8x H100s (240k iterations, i.e. 3 epochs). + +We have experimented with LoRA for cheaper finetuning, but haven't found the policies to perform well so far. + + +## Data Filtering + +Like any diverse real-robot dataset, the DROID dataset isn't perfectly "clean" and we have found data filtering to significantly improve policy performance. Concretely, the DROID dataset contains many *idle* timesteps in which the robot does not move (in part due to the VR teleoperation interface that was used during data collection, we will not go into too much detail here). 
Appropriate filtering of these idle transitions can improve policy performance. + +By default, our openpi training recipe implements the same idle filter used to train all pi-DROID models. We implement it by pre-computing which dataset indices to sample during training. You can check [compute_droid_nonidle_ranges.py](examples/droid/compute_droid_nonidle_ranges.py) for how we compute these indices. Roughly speaking, we filter any time steps for which the next chunk of actions would be largely idle. During training, our code automatically pulls our pre-computed list of indices from cloud storage and applies them. If you want to modify the idle filter / create your custom sampling logic, you can modify our script to generate a new index list and provide it via the `filter_dict_path=""` argument in [src/openpi/training/config.py](src/openpi/training/config.py). + +**Note**: our list of filtering indices is only valid for the `droid/1.0.1` dataset mentioned in the download section above, and will not provide valid filtering for any other version of the DROID dataset, so make sure you download the dataset above! If you have a custom DROID version, you can rerun the [compute_droid_nonidle_ranges.py](examples/droid/compute_droid_nonidle_ranges.py) script to generate a new list of sampling indices. + +## RoboArena + +Consider submitting your DROID policies to the [RoboArena benchmark](https://robo-arena.github.io/), which allows you to evaluate your policies on diverse tasks & scenes, **in the real world**! :) + +If you have questions about RoboArena, please email [karl.pertsch@gmail.com](mailto:karl.pertsch@gmail.com). + + +# Fine-Tuning on Custom DROID Datasets + +Here we describe how to fine-tune a model on a custom (smaller) dataset collected on the DROID platform. Like for other datasets, we will first convert the custom DROID dataset to LeRobot and then fine-tune a model (pi05-droid) on it. 
+ +Note: We use LeRobot here, since we assume the custom DROID fine-tuning dataset to be relatively small (<10s of hours). For larger datasets (like the full DROID dataset) we recommend using RLDS for it's better efficiency (see the example above). + + +## Step 1: Converting your custom DROID dataset to LeRobot + +We will use a small subset of the real DROID dataset for this example. This is a subset of just 30 demonstrations -- we assume that you will use your own dataset instead, but here is the command to download our subset (1.6GB): +``` +gsutil -m cp -r gs://gresearch/robotics/droid_raw/1.0.1/IRIS/success/2023-12-04 +``` + +We will also download the language annotations for the DROID dataset so we can pair our demonstrations with language instructions. Again, for your own data you can manually enter your language instructions and don't need to download our annotations. To download the DROID language annotations (12MB), run: +``` +gsutil -m cp -r gs://gresearch/robotics/droid_raw/1.0.1/aggregated-annotations-030724.json +``` + +For your own dataset, make sure that each episode's directory contains a folder called `recordings/MP4` -- if not, you need to first run the MP4 video extraction (from SVO files) using the script [here](https://github.com/droid-dataset/droid/blob/main/scripts/convert/svo_to_mp4.py). + +Now, we will use the `convert_droid_to_lerobot.py` script to create a LeRobot version of this dataset (takes <5min for the 30 demonstrations): +``` +uv run examples/droid/convert_droid_data_to_lerobot.py --data_dir +``` + +## Step 2: Run fine-tuning with your custom dataset + +Now we can run fine-tuning with our converted custom dataset. We provide an example config for fine-tuning `pi05_droid` on the custom dataset we created. +You can modify the config easily to work with other base models, or use your custom DROID dataset in `config.py` (seach for `pi05_droid_finetune`). 
+ +To launch training: +``` +uv run scripts/train.py pi05_droid_finetune --exp-name=my_experiment --overwrite +``` + +Once trained, you can follow the instructions in [`examples/droid/README.md`](examples/droid/README.md) to serve the policy and run it on the robot. + diff --git a/policy/openpi-InternData-A1/examples/droid/compute_droid_nonidle_ranges.py b/policy/openpi-InternData-A1/examples/droid/compute_droid_nonidle_ranges.py new file mode 100644 index 0000000..fd73ea5 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/droid/compute_droid_nonidle_ranges.py @@ -0,0 +1,103 @@ +""" +Iterates through the DROID dataset and creates a json mapping from episode unique IDs to ranges of time steps +that should be sampled during training (all others are filtered out). + +Filtering logic: +We look for ranges of consecutive steps that contain at most min_idle_len consecutive idle frames +(default to 7 -- as most DROID action-chunking policies run the first 8 actions generated in each chunk, filtering +this way means the policy will not get stuck outputting stationary actions). Additionally, we also only keep non-idle +ranges of length at least min_non_idle_len (default to 16 frames = ~1 second), while also removing the last +filter_last_n_in_ranges frames from the end of each range (as those all correspond to action chunks with many idle actions). + +This leaves us with trajectory segments consisting of contiguous, significant movement. Training on this filtered set +yields policies that output fewer stationary actions (i.e., get "stuck" in states less). 
+""" + +import json +import os +from pathlib import Path + +import numpy as np +import tensorflow as tf +import tensorflow_datasets as tfds +from tqdm import tqdm + +os.environ["CUDA_VISIBLE_DEVICES"] = "" # Set to the GPU you want to use, or leave empty for CPU + +builder = tfds.builder_from_directory( + # path to the `droid` directory (not its parent) + builder_dir="", +) +ds = builder.as_dataset(split="train", shuffle_files=False) +tf.data.experimental.ignore_errors(ds) + +keep_ranges_path = "" + +min_idle_len = 7 # If more than this number of consecutive idle frames, filter all of them out +min_non_idle_len = 16 # If fewer than this number of consecutive non-idle frames, filter all of them out +filter_last_n_in_ranges = 10 # When using a filter dict, remove this many frames from the end of each range + +keep_ranges_map = {} +if Path(keep_ranges_path).exists(): + with Path(keep_ranges_path).open("r") as f: + keep_ranges_map = json.load(f) + print(f"Resuming from {len(keep_ranges_map)} episodes already processed") + +for ep_idx, ep in enumerate(tqdm(ds)): + recording_folderpath = ep["episode_metadata"]["recording_folderpath"].numpy().decode() + file_path = ep["episode_metadata"]["file_path"].numpy().decode() + + key = f"{recording_folderpath}--{file_path}" + if key in keep_ranges_map: + continue + + joint_velocities = [step["action_dict"]["joint_velocity"].numpy() for step in ep["steps"]] + joint_velocities = np.array(joint_velocities) + + is_idle_array = np.hstack( + [np.array([False]), np.all(np.abs(joint_velocities[1:] - joint_velocities[:-1]) < 1e-3, axis=1)] + ) + + # Find what steps go from idle to non-idle and vice-versa + is_idle_padded = np.concatenate( + [[False], is_idle_array, [False]] + ) # Start and end with False, so idle at first step is a start of motion + + is_idle_diff = np.diff(is_idle_padded.astype(int)) + is_idle_true_starts = np.where(is_idle_diff == 1)[0] # +1 transitions --> going from idle to non-idle + is_idle_true_ends = 
np.where(is_idle_diff == -1)[0] # -1 transitions --> going from non-idle to idle + + # Find which steps correspond to idle segments of length at least min_idle_len + true_segment_masks = (is_idle_true_ends - is_idle_true_starts) >= min_idle_len + is_idle_true_starts = is_idle_true_starts[true_segment_masks] + is_idle_true_ends = is_idle_true_ends[true_segment_masks] + + keep_mask = np.ones(len(joint_velocities), dtype=bool) + for start, end in zip(is_idle_true_starts, is_idle_true_ends, strict=True): + keep_mask[start:end] = False + + # Get all non-idle ranges of at least 16 + # Same logic as above, but for keep_mask, allowing us to filter out contiguous ranges of length < min_non_idle_len + keep_padded = np.concatenate([[False], keep_mask, [False]]) + + keep_diff = np.diff(keep_padded.astype(int)) + keep_true_starts = np.where(keep_diff == 1)[0] # +1 transitions --> going from filter out to keep + keep_true_ends = np.where(keep_diff == -1)[0] # -1 transitions --> going from keep to filter out + + # Find which steps correspond to non-idle segments of length at least min_non_idle_len + true_segment_masks = (keep_true_ends - keep_true_starts) >= min_non_idle_len + keep_true_starts = keep_true_starts[true_segment_masks] + keep_true_ends = keep_true_ends[true_segment_masks] + + # Add mapping from episode unique ID key to list of non-idle ranges to keep + keep_ranges_map[key] = [] + for start, end in zip(keep_true_starts, keep_true_ends, strict=True): + keep_ranges_map[key].append((int(start), int(end) - filter_last_n_in_ranges)) + + if ep_idx % 1000 == 0: + with Path(keep_ranges_path).open("w") as f: + json.dump(keep_ranges_map, f) + +print("Done!") +with Path(keep_ranges_path).open("w") as f: + json.dump(keep_ranges_map, f) diff --git a/policy/openpi-InternData-A1/examples/droid/convert_droid_data_to_lerobot.py b/policy/openpi-InternData-A1/examples/droid/convert_droid_data_to_lerobot.py new file mode 100644 index 0000000..a6078f5 --- /dev/null +++ 
b/policy/openpi-InternData-A1/examples/droid/convert_droid_data_to_lerobot.py @@ -0,0 +1,477 @@ +""" +Minimal example script for converting a dataset collected on the DROID platform to LeRobot format. + +Usage: +uv run examples/droid/convert_droid_data_to_lerobot.py --data_dir /path/to/your/data + +If you want to push your dataset to the Hugging Face Hub, you can use the following command: +uv run examples/droid/convert_droid_data_to_lerobot.py --data_dir /path/to/your/data --push_to_hub + +The resulting dataset will get saved to the $LEROBOT_HOME directory. +""" + +from collections import defaultdict +import copy +import glob +import json +from pathlib import Path +import shutil + +import cv2 +import h5py +from lerobot.common.datasets.lerobot_dataset import HF_LEROBOT_HOME +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +import numpy as np +from PIL import Image +from tqdm import tqdm +import tyro + +REPO_NAME = "your_hf_username/my_droid_dataset" # Name of the output dataset, also used for the Hugging Face Hub + + +def resize_image(image, size): + image = Image.fromarray(image) + return np.array(image.resize(size, resample=Image.BICUBIC)) + + +def main(data_dir: str, *, push_to_hub: bool = False): + # Clean up any existing dataset in the output directory + output_path = HF_LEROBOT_HOME / REPO_NAME + if output_path.exists(): + shutil.rmtree(output_path) + data_dir = Path(data_dir) + + # Create LeRobot dataset, define features to store + # We will follow the DROID data naming conventions here. 
+ # LeRobot assumes that dtype of image data is `image` + dataset = LeRobotDataset.create( + repo_id=REPO_NAME, + robot_type="panda", + fps=15, # DROID data is typically recorded at 15fps + features={ + # We call this "left" since we will only use the left stereo camera (following DROID RLDS convention) + "exterior_image_1_left": { + "dtype": "image", + "shape": (180, 320, 3), # This is the resolution used in the DROID RLDS dataset + "names": ["height", "width", "channel"], + }, + "exterior_image_2_left": { + "dtype": "image", + "shape": (180, 320, 3), + "names": ["height", "width", "channel"], + }, + "wrist_image_left": { + "dtype": "image", + "shape": (180, 320, 3), + "names": ["height", "width", "channel"], + }, + "joint_position": { + "dtype": "float32", + "shape": (7,), + "names": ["joint_position"], + }, + "gripper_position": { + "dtype": "float32", + "shape": (1,), + "names": ["gripper_position"], + }, + "actions": { + "dtype": "float32", + "shape": (8,), # We will use joint *velocity* actions here (7D) + gripper position (1D) + "names": ["actions"], + }, + }, + image_writer_threads=10, + image_writer_processes=5, + ) + + # Load language annotations + # Note: we load the DROID language annotations for this example, but you can manually define them for your own data + with (data_dir / "aggregated-annotations-030724.json").open() as f: + language_annotations = json.load(f) + + # Loop over raw DROID fine-tuning datasets and write episodes to the LeRobot dataset + # We assume the following directory structure: + # RAW_DROID_PATH/ + # - <...>/ + # - recordings/ + # - MP4/ + # - .mp4 # single-view video of left stereo pair camera + # - trajectory.hdf5 + # - <...>/ + episode_paths = list(data_dir.glob("**/trajectory.h5")) + print(f"Found {len(episode_paths)} episodes for conversion") + + # We will loop over each dataset_name and write episodes to the LeRobot dataset + for episode_path in tqdm(episode_paths, desc="Converting episodes"): + # Load raw data + 
recording_folderpath = episode_path.parent / "recordings" / "MP4" + trajectory = load_trajectory(str(episode_path), recording_folderpath=str(recording_folderpath)) + + # To load the language instruction, we need to parse out the episode_id from the metadata file + # Again, you can modify this step for your own data, to load your own language instructions + metadata_filepath = next(iter(episode_path.parent.glob("metadata_*.json"))) + episode_id = metadata_filepath.name.split(".")[0].split("_")[-1] + language_instruction = language_annotations.get(episode_id, {"language_instruction1": "Do something"})[ + "language_instruction1" + ] + print(f"Converting episode with language instruction: {language_instruction}") + + # Write to LeRobot dataset + for step in trajectory: + camera_type_dict = step["observation"]["camera_type"] + wrist_ids = [k for k, v in camera_type_dict.items() if v == 0] + exterior_ids = [k for k, v in camera_type_dict.items() if v != 0] + dataset.add_frame( + { + # Note: need to flip BGR --> RGB for loaded images + "exterior_image_1_left": resize_image( + step["observation"]["image"][exterior_ids[0]][..., ::-1], (320, 180) + ), + "exterior_image_2_left": resize_image( + step["observation"]["image"][exterior_ids[1]][..., ::-1], (320, 180) + ), + "wrist_image_left": resize_image(step["observation"]["image"][wrist_ids[0]][..., ::-1], (320, 180)), + "joint_position": np.asarray( + step["observation"]["robot_state"]["joint_positions"], dtype=np.float32 + ), + "gripper_position": np.asarray( + step["observation"]["robot_state"]["gripper_position"][None], dtype=np.float32 + ), + # Important: we use joint velocity actions here since pi05-droid was pre-trained on joint velocity actions + "actions": np.concatenate( + [step["action"]["joint_velocity"], step["action"]["gripper_position"][None]], dtype=np.float32 + ), + "task": language_instruction, + } + ) + dataset.save_episode() + + # Optionally push to the Hugging Face Hub + if push_to_hub: + 
dataset.push_to_hub( + tags=["libero", "panda", "rlds"], + private=False, + push_videos=True, + license="apache-2.0", + ) + + +########################################################################################################## +################ The rest of this file are functions to parse the raw DROID data ######################### +################ You don't need to worry about understanding this part ######################### +################ It was copied from here: https://github.com/JonathanYang0127/r2d2_rlds_dataset_builder/blob/parallel_convert/r2_d2/r2_d2.py +########################################################################################################## + + +camera_type_dict = { + "hand_camera_id": 0, + "varied_camera_1_id": 1, + "varied_camera_2_id": 1, +} + +camera_type_to_string_dict = { + 0: "hand_camera", + 1: "varied_camera", + 2: "fixed_camera", +} + + +def get_camera_type(cam_id): + if cam_id not in camera_type_dict: + return None + type_int = camera_type_dict[cam_id] + return camera_type_to_string_dict[type_int] + + +class MP4Reader: + def __init__(self, filepath, serial_number): + # Save Parameters # + self.serial_number = serial_number + self._index = 0 + + # Open Video Reader # + self._mp4_reader = cv2.VideoCapture(filepath) + if not self._mp4_reader.isOpened(): + raise RuntimeError("Corrupted MP4 File") + + def set_reading_parameters( + self, + image=True, # noqa: FBT002 + concatenate_images=False, # noqa: FBT002 + resolution=(0, 0), + resize_func=None, + ): + # Save Parameters # + self.image = image + self.concatenate_images = concatenate_images + self.resolution = resolution + self.resize_func = cv2.resize + self.skip_reading = not image + if self.skip_reading: + return + + def get_frame_resolution(self): + width = self._mp4_reader.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH) + height = self._mp4_reader.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT) + return (width, height) + + def get_frame_count(self): + if self.skip_reading: + return 0 + 
return int(self._mp4_reader.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)) + + def set_frame_index(self, index): + if self.skip_reading: + return + + if index < self._index: + self._mp4_reader.set(cv2.CAP_PROP_POS_FRAMES, index - 1) + self._index = index + + while self._index < index: + self.read_camera(ignore_data=True) + + def _process_frame(self, frame): + frame = copy.deepcopy(frame) + if self.resolution == (0, 0): + return frame + return self.resize_func(frame, self.resolution) + + def read_camera(self, ignore_data=False, correct_timestamp=None): # noqa: FBT002 + # Skip if Read Unnecesary # + if self.skip_reading: + return {} + + # Read Camera # + success, frame = self._mp4_reader.read() + + self._index += 1 + if not success: + return None + if ignore_data: + return None + + # Return Data # + data_dict = {} + + if self.concatenate_images or "stereo" not in self.serial_number: + data_dict["image"] = {self.serial_number: self._process_frame(frame)} + else: + single_width = frame.shape[1] // 2 + data_dict["image"] = { + self.serial_number + "_left": self._process_frame(frame[:, :single_width, :]), + self.serial_number + "_right": self._process_frame(frame[:, single_width:, :]), + } + + return data_dict + + def disable_camera(self): + if hasattr(self, "_mp4_reader"): + self._mp4_reader.release() + + +class RecordedMultiCameraWrapper: + def __init__(self, recording_folderpath, camera_kwargs={}): # noqa: B006 + # Save Camera Info # + self.camera_kwargs = camera_kwargs + + # Open Camera Readers # + mp4_filepaths = glob.glob(recording_folderpath + "/*.mp4") + all_filepaths = mp4_filepaths + + self.camera_dict = {} + for f in all_filepaths: + serial_number = f.split("/")[-1][:-4] + cam_type = get_camera_type(serial_number) + camera_kwargs.get(cam_type, {}) + + if f.endswith(".mp4"): + Reader = MP4Reader # noqa: N806 + else: + raise ValueError + + self.camera_dict[serial_number] = Reader(f, serial_number) + + def read_cameras(self, index=None, camera_type_dict={}, 
timestamp_dict={}): # noqa: B006 + full_obs_dict = defaultdict(dict) + + # Read Cameras In Randomized Order # + all_cam_ids = list(self.camera_dict.keys()) + # random.shuffle(all_cam_ids) + + for cam_id in all_cam_ids: + if "stereo" in cam_id: + continue + try: + cam_type = camera_type_dict[cam_id] + except KeyError: + print(f"{self.camera_dict} -- {camera_type_dict}") + raise ValueError(f"Camera type {cam_id} not found in camera_type_dict") # noqa: B904 + curr_cam_kwargs = self.camera_kwargs.get(cam_type, {}) + self.camera_dict[cam_id].set_reading_parameters(**curr_cam_kwargs) + + timestamp = timestamp_dict.get(cam_id + "_frame_received", None) + if index is not None: + self.camera_dict[cam_id].set_frame_index(index) + + data_dict = self.camera_dict[cam_id].read_camera(correct_timestamp=timestamp) + + # Process Returned Data # + if data_dict is None: + return None + for key in data_dict: + full_obs_dict[key].update(data_dict[key]) + + return full_obs_dict + + +def get_hdf5_length(hdf5_file, keys_to_ignore=[]): # noqa: B006 + length = None + + for key in hdf5_file: + if key in keys_to_ignore: + continue + + curr_data = hdf5_file[key] + if isinstance(curr_data, h5py.Group): + curr_length = get_hdf5_length(curr_data, keys_to_ignore=keys_to_ignore) + elif isinstance(curr_data, h5py.Dataset): + curr_length = len(curr_data) + else: + raise ValueError + + if length is None: + length = curr_length + assert curr_length == length + + return length + + +def load_hdf5_to_dict(hdf5_file, index, keys_to_ignore=[]): # noqa: B006 + data_dict = {} + + for key in hdf5_file: + if key in keys_to_ignore: + continue + + curr_data = hdf5_file[key] + if isinstance(curr_data, h5py.Group): + data_dict[key] = load_hdf5_to_dict(curr_data, index, keys_to_ignore=keys_to_ignore) + elif isinstance(curr_data, h5py.Dataset): + data_dict[key] = curr_data[index] + else: + raise ValueError + + return data_dict + + +class TrajectoryReader: + def __init__(self, filepath, read_images=True): # noqa: 
FBT002 + self._hdf5_file = h5py.File(filepath, "r") + is_video_folder = "observations/videos" in self._hdf5_file + self._read_images = read_images and is_video_folder + self._length = get_hdf5_length(self._hdf5_file) + self._video_readers = {} + self._index = 0 + + def length(self): + return self._length + + def read_timestep(self, index=None, keys_to_ignore=[]): # noqa: B006 + # Make Sure We Read Within Range # + if index is None: + index = self._index + else: + assert not self._read_images + self._index = index + assert index < self._length + + # Load Low Dimensional Data # + keys_to_ignore = [*keys_to_ignore.copy(), "videos"] + timestep = load_hdf5_to_dict(self._hdf5_file, self._index, keys_to_ignore=keys_to_ignore) + + # Increment Read Index # + self._index += 1 + + # Return Timestep # + return timestep + + def close(self): + self._hdf5_file.close() + + +def load_trajectory( + filepath=None, + read_cameras=True, # noqa: FBT002 + recording_folderpath=None, + camera_kwargs={}, # noqa: B006 + remove_skipped_steps=False, # noqa: FBT002 + num_samples_per_traj=None, + num_samples_per_traj_coeff=1.5, +): + read_recording_folderpath = read_cameras and (recording_folderpath is not None) + + traj_reader = TrajectoryReader(filepath) + if read_recording_folderpath: + camera_reader = RecordedMultiCameraWrapper(recording_folderpath, camera_kwargs) + + horizon = traj_reader.length() + timestep_list = [] + + # Choose Timesteps To Save # + if num_samples_per_traj: + num_to_save = num_samples_per_traj + if remove_skipped_steps: + num_to_save = int(num_to_save * num_samples_per_traj_coeff) + max_size = min(num_to_save, horizon) + indices_to_save = np.sort(np.random.choice(horizon, size=max_size, replace=False)) + else: + indices_to_save = np.arange(horizon) + + # Iterate Over Trajectory # + for i in indices_to_save: + # Get HDF5 Data # + timestep = traj_reader.read_timestep(index=i) + + # If Applicable, Get Recorded Data # + if read_recording_folderpath: + timestamp_dict = 
timestep["observation"]["timestamp"]["cameras"] + camera_type_dict = { + k: camera_type_to_string_dict[v] for k, v in timestep["observation"]["camera_type"].items() + } + camera_obs = camera_reader.read_cameras( + index=i, camera_type_dict=camera_type_dict, timestamp_dict=timestamp_dict + ) + camera_failed = camera_obs is None + + # Add Data To Timestep If Successful # + if camera_failed: + break + timestep["observation"].update(camera_obs) + + # Filter Steps # + step_skipped = not timestep["observation"]["controller_info"].get("movement_enabled", True) + delete_skipped_step = step_skipped and remove_skipped_steps + + # Save Filtered Timesteps # + if delete_skipped_step: + del timestep + else: + timestep_list.append(timestep) + + # Remove Extra Transitions # + timestep_list = np.array(timestep_list) + if (num_samples_per_traj is not None) and (len(timestep_list) > num_samples_per_traj): + ind_to_keep = np.random.choice(len(timestep_list), size=num_samples_per_traj, replace=False) + timestep_list = timestep_list[ind_to_keep] + + # Close Readers # + traj_reader.close() + + # Return Data # + return timestep_list + + +if __name__ == "__main__": + tyro.cli(main) diff --git a/policy/openpi-InternData-A1/examples/droid/main.py b/policy/openpi-InternData-A1/examples/droid/main.py new file mode 100644 index 0000000..93b7654 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/droid/main.py @@ -0,0 +1,246 @@ +# ruff: noqa + +import contextlib +import dataclasses +import datetime +import faulthandler +import os +import signal +import time +from moviepy.editor import ImageSequenceClip +import numpy as np +from openpi_client import image_tools +from openpi_client import websocket_client_policy +import pandas as pd +from PIL import Image +from droid.robot_env import RobotEnv +import tqdm +import tyro + +faulthandler.enable() + +# DROID data collection frequency -- we slow down execution to match this frequency +DROID_CONTROL_FREQUENCY = 15 + + +@dataclasses.dataclass +class 
@dataclasses.dataclass
class Args:
    """Command-line arguments for the DROID policy-evaluation client."""

    # Hardware parameters: serial IDs of the robot's cameras.
    left_camera_id: str = ""  # e.g., "24259877"
    right_camera_id: str = ""  # e.g., "24514023"
    wrist_camera_id: str = ""  # e.g., "13062452"

    # Policy parameters.
    # Which external camera is fed to the policy; must be "left" or "right".
    external_camera: str | None = None

    # Rollout parameters.
    max_timesteps: int = 600
    # How many actions to execute from a predicted action chunk before querying the policy server again.
    # 8 is usually a good default (equals 0.5 seconds of action execution).
    open_loop_horizon: int = 8

    # Remote server parameters.
    remote_host: str = "0.0.0.0"  # IP address of the policy server, e.g., "192.168.1.100"
    remote_port: int = 8000  # port of the policy server; openpi servers default to 8000


# We are using Ctrl+C to optionally terminate rollouts early -- however, if we press Ctrl+C while the policy server is
# waiting for a new action chunk, it will raise an exception and the server connection dies.
# This context manager temporarily prevents Ctrl+C and delays it until after the server call is complete.
@contextlib.contextmanager
def prevent_keyboard_interrupt():
    """Temporarily block Ctrl+C, re-raising KeyboardInterrupt after the guarded code completes.

    This protects the websocket round-trip to the policy server: interrupting
    mid-call would kill the server connection, so the interrupt is delayed
    until the call returns.
    """
    interrupted = False
    original_handler = signal.getsignal(signal.SIGINT)

    def handler(signum, frame):
        nonlocal interrupted
        interrupted = True

    signal.signal(signal.SIGINT, handler)
    try:
        yield
    finally:
        signal.signal(signal.SIGINT, original_handler)
        if interrupted:
            raise KeyboardInterrupt


def parse_success_input(raw: str) -> float | None:
    """Parse the operator's success rating into a fraction in [0, 1].

    Accepts "y" (100%), "n" (0%), or a number in [0, 100].
    Returns None when the input is invalid (non-numeric or out of range).
    """
    if raw == "y":
        return 1.0
    if raw == "n":
        return 0.0
    try:
        value = float(raw) / 100
    except ValueError:
        return None
    return value if 0 <= value <= 1 else None


def main(args: Args):
    """Run interactive policy rollouts on the DROID robot and log results to CSV."""
    # Make sure external camera is specified by user -- we only use one external camera for the policy.
    assert (
        args.external_camera is not None and args.external_camera in ["left", "right"]
    ), f"Please specify an external camera to use for the policy, choose from ['left', 'right'], but got {args.external_camera}"

    # Initialize the Panda environment. Using joint velocity action space and gripper position action space is very important.
    env = RobotEnv(action_space="joint_velocity", gripper_action_space="position")
    print("Created the droid env!")

    # Connect to the policy server.
    policy_client = websocket_client_policy.WebsocketClientPolicy(args.remote_host, args.remote_port)

    df = pd.DataFrame(columns=["success", "duration", "video_filename"])

    while True:
        instruction = input("Enter instruction: ")

        # Rollout state: how many actions of the current chunk we have executed,
        # and the chunk itself (fetched lazily from the server).
        actions_from_chunk_completed = 0
        pred_action_chunk = None

        # Prepare to save video of rollout.
        timestamp = datetime.datetime.now().strftime("%Y_%m_%d_%H:%M:%S")
        video = []
        bar = tqdm.tqdm(range(args.max_timesteps))
        print("Running rollout... press Ctrl+C to stop early.")
        t_step = 0  # BUG FIX: keep defined even if the loop body never runs (max_timesteps == 0)
        for t_step in bar:
            start_time = time.time()
            try:
                # Get the current observation.
                curr_obs = _extract_observation(
                    args,
                    env.get_observation(),
                    # Save the first observation to disk for live viewing.
                    save_to_disk=t_step == 0,
                )

                video.append(curr_obs[f"{args.external_camera}_image"])

                # Send websocket request to policy server if it's time to predict a new chunk.
                if actions_from_chunk_completed == 0 or actions_from_chunk_completed >= args.open_loop_horizon:
                    actions_from_chunk_completed = 0

                    # We resize images on the robot laptop to minimize the amount of data sent to the policy server
                    # and improve latency.
                    request_data = {
                        "observation/exterior_image_1_left": image_tools.resize_with_pad(
                            curr_obs[f"{args.external_camera}_image"], 224, 224
                        ),
                        "observation/wrist_image_left": image_tools.resize_with_pad(curr_obs["wrist_image"], 224, 224),
                        "observation/joint_position": curr_obs["joint_position"],
                        "observation/gripper_position": curr_obs["gripper_position"],
                        "prompt": instruction,
                    }

                    # Wrap the server call in a context manager to prevent Ctrl+C from interrupting it;
                    # Ctrl+C will be handled after the server call is complete.
                    with prevent_keyboard_interrupt():
                        # this returns action chunk [10, 8] of 10 joint velocity actions (7) + gripper position (1)
                        pred_action_chunk = policy_client.infer(request_data)["actions"]
                        assert pred_action_chunk.shape == (10, 8)

                # Select current action to execute from chunk.
                action = pred_action_chunk[actions_from_chunk_completed]
                actions_from_chunk_completed += 1

                # Binarize gripper action (last dimension) to fully open/closed.
                if action[-1].item() > 0.5:
                    action = np.concatenate([action[:-1], np.ones((1,))])
                else:
                    action = np.concatenate([action[:-1], np.zeros((1,))])

                # Clip all dimensions of action to [-1, 1].
                action = np.clip(action, -1, 1)

                env.step(action)

                # Sleep to match DROID data collection frequency.
                elapsed_time = time.time() - start_time
                if elapsed_time < 1 / DROID_CONTROL_FREQUENCY:
                    time.sleep(1 / DROID_CONTROL_FREQUENCY - elapsed_time)
            except KeyboardInterrupt:
                break

        # BUG FIX: guard against an empty rollout (Ctrl+C before the first frame was captured);
        # np.stack([]) raises.
        if video:
            save_filename = "video_" + timestamp
            ImageSequenceClip(list(np.stack(video)), fps=10).write_videofile(save_filename + ".mp4", codec="libx264")
        else:
            save_filename = ""

        # BUG FIX: previously "y" was mapped to 1.0 and then divided by 100, recording 0.01 instead of 1.0,
        # and an out-of-range numeric answer was printed but still accepted. Re-prompt until valid.
        success: float | None = None
        while success is None:
            raw = input(
                "Did the rollout succeed? (enter y for 100%, n for 0%), or a numeric value 0-100 based on the evaluation spec"
            )
            success = parse_success_input(raw)
            if success is None:
                print(f"Success must be y, n, or a number in [0, 100], but got: {raw}")

        # BUG FIX: DataFrame.append was removed in pandas 2.0; use pd.concat instead.
        df = pd.concat(
            [
                df,
                pd.DataFrame(
                    [
                        {
                            "success": success,
                            "duration": t_step,
                            "video_filename": save_filename,
                        }
                    ]
                ),
            ],
            ignore_index=True,
        )

        if input("Do one more eval? (enter y or n) ").lower() != "y":
            break
        env.reset()

    os.makedirs("results", exist_ok=True)
    timestamp = datetime.datetime.now().strftime("%I:%M%p_%B_%d_%Y")
    csv_filename = os.path.join("results", f"eval_{timestamp}.csv")
    df.to_csv(csv_filename)
    print(f"Results saved to {csv_filename}")


def _extract_observation(args: Args, obs_dict, *, save_to_disk=False):
    """Pull camera images and proprioceptive state out of a raw DROID observation dict.

    Returns a dict with HxWx3 RGB arrays for left/right/wrist cameras and numpy
    arrays for cartesian_position (6,), joint_position (7,), gripper_position (1,).
    Optionally saves a combined camera view to disk for live monitoring.
    """
    image_observations = obs_dict["image"]
    left_image, right_image, wrist_image = None, None, None
    for key in image_observations:
        # Note the "left" below refers to the left camera in the stereo pair.
        # The model is only trained on left stereo cams, so we only feed those.
        if args.left_camera_id in key and "left" in key:
            left_image = image_observations[key]
        elif args.right_camera_id in key and "left" in key:
            right_image = image_observations[key]
        elif args.wrist_camera_id in key and "left" in key:
            wrist_image = image_observations[key]

    # Drop the alpha dimension.
    left_image = left_image[..., :3]
    right_image = right_image[..., :3]
    wrist_image = wrist_image[..., :3]

    # Convert to RGB by reversing the channel order (source images are BGR -- TODO confirm against camera driver).
    left_image = left_image[..., ::-1]
    right_image = right_image[..., ::-1]
    wrist_image = wrist_image[..., ::-1]

    # In addition to image observations, also capture the proprioceptive state.
    robot_state = obs_dict["robot_state"]
    cartesian_position = np.array(robot_state["cartesian_position"])
    joint_position = np.array(robot_state["joint_positions"])
    gripper_position = np.array([robot_state["gripper_position"]])

    # Save the images to disk so that they can be viewed live while the robot is running.
    # Create one combined image to make live viewing easy.
    if save_to_disk:
        combined_image = np.concatenate([left_image, wrist_image, right_image], axis=1)
        combined_image = Image.fromarray(combined_image)
        combined_image.save("robot_camera_views.png")

    return {
        "left_image": left_image,
        "right_image": right_image,
        "wrist_image": wrist_image,
        "cartesian_position": cartesian_position,
        "joint_position": joint_position,
        "gripper_position": gripper_position,
    }


if __name__ == "__main__":
    args: Args = tyro.cli(Args)
    main(args)
openpi.policies import droid_policy\n", + "from openpi.policies import policy_config as _policy_config\n", + "from openpi.shared import download\n", + "from openpi.training import config as _config\n", + "from openpi.training import data_loader as _data_loader" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Policy inference\n", + "\n", + "The following example shows how to create a policy from a checkpoint and run inference on a dummy example." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = _config.get_config(\"pi0_fast_droid\")\n", + "checkpoint_dir = download.maybe_download(\"gs://openpi-assets/checkpoints/pi0_fast_droid\")\n", + "\n", + "# Create a trained policy.\n", + "policy = _policy_config.create_trained_policy(config, checkpoint_dir)\n", + "\n", + "# Run inference on a dummy example. This example corresponds to observations produced by the DROID runtime.\n", + "example = droid_policy.make_droid_example()\n", + "result = policy.infer(example)\n", + "\n", + "# Delete the policy to free up memory.\n", + "del policy\n", + "\n", + "print(\"Actions shape:\", result[\"actions\"].shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Working with a live model\n", + "\n", + "\n", + "The following example shows how to create a live model from a checkpoint and compute training loss. 
First, we are going to demonstrate how to do it with fake data.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = _config.get_config(\"pi0_aloha_sim\")\n", + "\n", + "checkpoint_dir = download.maybe_download(\"gs://openpi-assets/checkpoints/pi0_aloha_sim\")\n", + "key = jax.random.key(0)\n", + "\n", + "# Create a model from the checkpoint.\n", + "model = config.model.load(_model.restore_params(checkpoint_dir / \"params\"))\n", + "\n", + "# We can create fake observations and actions to test the model.\n", + "obs, act = config.model.fake_obs(), config.model.fake_act()\n", + "\n", + "# Sample actions from the model.\n", + "loss = model.compute_loss(key, obs, act)\n", + "print(\"Loss shape:\", loss.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, we are going to create a data loader and use a real batch of training data to compute the loss." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Reduce the batch size to reduce memory usage.\n", + "config = dataclasses.replace(config, batch_size=2)\n", + "\n", + "# Load a single batch of data. 
This is the same data that will be used during training.\n", + "# NOTE: In order to make this example self-contained, we are skipping the normalization step\n", + "# since it requires the normalization statistics to be generated using `compute_norm_stats`.\n", + "loader = _data_loader.create_data_loader(config, num_batches=1, skip_norm_stats=True)\n", + "obs, act = next(iter(loader))\n", + "\n", + "# Sample actions from the model.\n", + "loss = model.compute_loss(key, obs, act)\n", + "\n", + "# Delete the model to free up memory.\n", + "del model\n", + "\n", + "print(\"Loss shape:\", loss.shape)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/policy/openpi-InternData-A1/examples/libero/Dockerfile b/policy/openpi-InternData-A1/examples/libero/Dockerfile new file mode 100644 index 0000000..9750796 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/libero/Dockerfile @@ -0,0 +1,59 @@ +# Dockerfile for the LIBERO benchmark. + +# Build the container: +# docker build . 
-t libero -f examples/libero/Dockerfile + +# Run the container: +# docker run --rm -it --network=host -v .:/app -v /tmp/.X11-unix:/tmp/.X11-unix:ro -e DISPLAY=$DISPLAY --gpus all libero /bin/bash + +FROM nvidia/cuda:12.2.2-cudnn8-runtime-ubuntu22.04@sha256:2d913b09e6be8387e1a10976933642c73c840c0b735f0bf3c28d97fc9bc422e0 +COPY --from=ghcr.io/astral-sh/uv:0.5.1 /uv /uvx /bin/ + +RUN apt-get update && \ + apt-get install -y \ + make \ + g++ \ + clang \ + libosmesa6-dev \ + libgl1-mesa-glx \ + libglew-dev \ + libglfw3-dev \ + libgles2-mesa-dev \ + libglib2.0-0 \ + libsm6 \ + libxrender1 \ + libxext6 + +WORKDIR /app + +# Copy from the cache instead of linking since it's a mounted volume +ENV UV_LINK_MODE=copy + +# Write the virtual environment outside of the project directory so it doesn't +# leak out of the container when we mount the application code. +ENV UV_PROJECT_ENVIRONMENT=/.venv + +# Copy the requirements files so we can install dependencies. +# The rest of the project is mounted as a volume, so we don't need to rebuild on changes. +# This strategy is best for development-style usage. +COPY ./examples/libero/requirements.txt /tmp/requirements.txt +COPY ./third_party/libero/requirements.txt /tmp/requirements-libero.txt +COPY ./packages/openpi-client/pyproject.toml /tmp/openpi-client/pyproject.toml + +# Install python dependencies. +RUN uv venv --python 3.8 $UV_PROJECT_ENVIRONMENT +RUN uv pip sync /tmp/requirements.txt /tmp/requirements-libero.txt /tmp/openpi-client/pyproject.toml --extra-index-url https://download.pytorch.org/whl/cu113 --index-strategy=unsafe-best-match +ENV PYTHONPATH=/app:/app/packages/openpi-client/src:/app/third_party/libero + +# Create a default config file to avoid an input prompt from LIBERO's init script. 
+# https://github.com/Lifelong-Robot-Learning/LIBERO/blob/master/libero/libero/__init__.py +ENV LIBERO_CONFIG_PATH=/tmp/libero +RUN mkdir -p /tmp/libero && cat <<'EOF' > /tmp/libero/config.yaml +benchmark_root: /app/third_party/libero/libero/libero +bddl_files: /app/third_party/libero/libero/libero/bddl_files +init_states: /app/third_party/libero/libero/libero/init_files +datasets: /app/third_party/libero/libero/datasets +assets: /app/third_party/libero/libero/libero/assets +EOF + +CMD ["/bin/bash", "-c", "source /.venv/bin/activate && python examples/libero/main.py $CLIENT_ARGS"] diff --git a/policy/openpi-InternData-A1/examples/libero/README.md b/policy/openpi-InternData-A1/examples/libero/README.md new file mode 100644 index 0000000..a2d1d93 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/libero/README.md @@ -0,0 +1,71 @@ +# LIBERO Benchmark + +This example runs the LIBERO benchmark: https://github.com/Lifelong-Robot-Learning/LIBERO + +Note: When updating requirements.txt in this directory, there is an additional flag `--extra-index-url https://download.pytorch.org/whl/cu113` that must be added to the `uv pip compile` command. + +This example requires git submodules to be initialized. Don't forget to run: + +```bash +git submodule update --init --recursive +``` + +## With Docker (recommended) + +```bash +# Grant access to the X11 server: +sudo xhost +local:docker + +# To run with the default checkpoint and task suite: +SERVER_ARGS="--env LIBERO" docker compose -f examples/libero/compose.yml up --build + +# To run with glx for Mujoco instead (use this if you have egl errors): +MUJOCO_GL=glx SERVER_ARGS="--env LIBERO" docker compose -f examples/libero/compose.yml up --build +``` + +You can customize the loaded checkpoint by providing additional `SERVER_ARGS` (see `scripts/serve_policy.py`), and the LIBERO task suite by providing additional `CLIENT_ARGS` (see `examples/libero/main.py`). 
+For example: + +```bash +# To load a custom checkpoint (located in the top-level openpi/ directory): +export SERVER_ARGS="--env LIBERO policy:checkpoint --policy.config pi05_libero --policy.dir ./my_custom_checkpoint" + +# To run the libero_10 task suite: +export CLIENT_ARGS="--args.task-suite-name libero_10" +``` + +## Without Docker (not recommended) + +Terminal window 1: + +```bash +# Create virtual environment +uv venv --python 3.8 examples/libero/.venv +source examples/libero/.venv/bin/activate +uv pip sync examples/libero/requirements.txt third_party/libero/requirements.txt --extra-index-url https://download.pytorch.org/whl/cu113 --index-strategy=unsafe-best-match +uv pip install -e packages/openpi-client +uv pip install -e third_party/libero +export PYTHONPATH=$PYTHONPATH:$PWD/third_party/libero + +# Run the simulation +python examples/libero/main.py + +# To run with glx for Mujoco instead (use this if you have egl errors): +MUJOCO_GL=glx python examples/libero/main.py +``` + +Terminal window 2: + +```bash +# Run the server +uv run scripts/serve_policy.py --env LIBERO +``` + +## Results + +If you want to reproduce the following numbers, you can evaluate the checkpoint at `gs://openpi-assets/checkpoints/pi05_libero/`. This +checkpoint was trained in openpi with the `pi05_libero` config. + +| Model | Libero Spatial | Libero Object | Libero Goal | Libero 10 | Average | +|-------|---------------|---------------|-------------|-----------|---------| +| π0.5 @ 30k (finetuned) | 98.8 | 98.2 | 98.0 | 92.4 | 96.85 diff --git a/policy/openpi-InternData-A1/examples/libero/compose.yml b/policy/openpi-InternData-A1/examples/libero/compose.yml new file mode 100644 index 0000000..3498ce5 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/libero/compose.yml @@ -0,0 +1,54 @@ +# Run with: +# docker compose -f examples/libero/compose.yml up --build +services: + runtime: + image: libero + depends_on: + - openpi_server + build: + context: ../.. 
+ dockerfile: examples/libero/Dockerfile + init: true + tty: true + network_mode: host + privileged: true + volumes: + - $PWD:/app + - ../../data:/data + - /tmp/.X11-unix:/tmp/.X11-unix:ro + environment: + - CLIENT_ARGS + - DISPLAY=$DISPLAY + - MUJOCO_GL=${MUJOCO_GL:-egl} + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: 1 + capabilities: [gpu] + + openpi_server: + image: openpi_server + build: + context: ../.. + dockerfile: scripts/docker/serve_policy.Dockerfile + init: true + tty: true + network_mode: host + volumes: + - $PWD:/app + - ${OPENPI_DATA_HOME:-~/.cache/openpi}:/openpi_assets + environment: + - SERVER_ARGS + - OPENPI_DATA_HOME=/openpi_assets + - IS_DOCKER=true + + # Comment out this block if not running on a machine with GPUs. + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: 1 + capabilities: [gpu] diff --git a/policy/openpi-InternData-A1/examples/libero/convert_libero_data_to_lerobot.py b/policy/openpi-InternData-A1/examples/libero/convert_libero_data_to_lerobot.py new file mode 100644 index 0000000..51db6f1 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/libero/convert_libero_data_to_lerobot.py @@ -0,0 +1,104 @@ +""" +Minimal example script for converting a dataset to LeRobot format. + +We use the Libero dataset (stored in RLDS) for this example, but it can be easily +modified for any other data you have saved in a custom format. 
+ +Usage: +uv run examples/libero/convert_libero_data_to_lerobot.py --data_dir /path/to/your/data + +If you want to push your dataset to the Hugging Face Hub, you can use the following command: +uv run examples/libero/convert_libero_data_to_lerobot.py --data_dir /path/to/your/data --push_to_hub + +Note: to run the script, you need to install tensorflow_datasets: +`uv pip install tensorflow tensorflow_datasets` + +You can download the raw Libero datasets from https://huggingface.co/datasets/openvla/modified_libero_rlds +The resulting dataset will get saved to the $HF_LEROBOT_HOME directory. +Running this conversion script will take approximately 30 minutes. +""" + +import shutil + +from lerobot.common.datasets.lerobot_dataset import HF_LEROBOT_HOME +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +import tensorflow_datasets as tfds +import tyro + +REPO_NAME = "your_hf_username/libero" # Name of the output dataset, also used for the Hugging Face Hub +RAW_DATASET_NAMES = [ + "libero_10_no_noops", + "libero_goal_no_noops", + "libero_object_no_noops", + "libero_spatial_no_noops", +] # For simplicity we will combine multiple Libero datasets into one training dataset + + +def main(data_dir: str, *, push_to_hub: bool = False): + # Clean up any existing dataset in the output directory + output_path = HF_LEROBOT_HOME / REPO_NAME + if output_path.exists(): + shutil.rmtree(output_path) + + # Create LeRobot dataset, define features to store + # OpenPi assumes that proprio is stored in `state` and actions in `action` + # LeRobot assumes that dtype of image data is `image` + dataset = LeRobotDataset.create( + repo_id=REPO_NAME, + robot_type="panda", + fps=10, + features={ + "image": { + "dtype": "image", + "shape": (256, 256, 3), + "names": ["height", "width", "channel"], + }, + "wrist_image": { + "dtype": "image", + "shape": (256, 256, 3), + "names": ["height", "width", "channel"], + }, + "state": { + "dtype": "float32", + "shape": (8,), + "names": ["state"], 
+ }, + "actions": { + "dtype": "float32", + "shape": (7,), + "names": ["actions"], + }, + }, + image_writer_threads=10, + image_writer_processes=5, + ) + + # Loop over raw Libero datasets and write episodes to the LeRobot dataset + # You can modify this for your own data format + for raw_dataset_name in RAW_DATASET_NAMES: + raw_dataset = tfds.load(raw_dataset_name, data_dir=data_dir, split="train") + for episode in raw_dataset: + for step in episode["steps"].as_numpy_iterator(): + dataset.add_frame( + { + "image": step["observation"]["image"], + "wrist_image": step["observation"]["wrist_image"], + "state": step["observation"]["state"], + "actions": step["action"], + "task": step["language_instruction"].decode(), + } + ) + dataset.save_episode() + + # Optionally push to the Hugging Face Hub + if push_to_hub: + dataset.push_to_hub( + tags=["libero", "panda", "rlds"], + private=False, + push_videos=True, + license="apache-2.0", + ) + + +if __name__ == "__main__": + tyro.cli(main) diff --git a/policy/openpi-InternData-A1/examples/libero/main.py b/policy/openpi-InternData-A1/examples/libero/main.py new file mode 100644 index 0000000..dc015a6 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/libero/main.py @@ -0,0 +1,219 @@ +import collections +import dataclasses +import logging +import math +import pathlib + +import imageio +from libero.libero import benchmark +from libero.libero import get_libero_path +from libero.libero.envs import OffScreenRenderEnv +import numpy as np +from openpi_client import image_tools +from openpi_client import websocket_client_policy as _websocket_client_policy +import tqdm +import tyro + +LIBERO_DUMMY_ACTION = [0.0] * 6 + [-1.0] +LIBERO_ENV_RESOLUTION = 256 # resolution used to render training data + + +@dataclasses.dataclass +class Args: + ################################################################################################################# + # Model server parameters + 
@dataclasses.dataclass
class Args:
    """Command-line arguments for the LIBERO benchmark evaluation client."""

    #################################################################################################################
    # Model server parameters
    #################################################################################################################
    host: str = "0.0.0.0"
    port: int = 8000
    resize_size: int = 224
    replan_steps: int = 5

    #################################################################################################################
    # LIBERO environment-specific parameters
    #################################################################################################################
    task_suite_name: str = (
        "libero_spatial"  # Task suite. Options: libero_spatial, libero_object, libero_goal, libero_10, libero_90
    )
    num_steps_wait: int = 10  # Number of steps to wait for objects to stabilize in sim
    num_trials_per_task: int = 50  # Number of rollouts per task

    #################################################################################################################
    # Utils
    #################################################################################################################
    video_out_path: str = "data/libero/videos"  # Path to save videos

    seed: int = 7  # Random Seed (for reproducibility)


def eval_libero(args: Args) -> None:
    """Evaluate a remotely served policy on a LIBERO task suite and save replay videos."""
    # Set random seed
    np.random.seed(args.seed)

    # Initialize LIBERO task suite
    benchmark_dict = benchmark.get_benchmark_dict()
    task_suite = benchmark_dict[args.task_suite_name]()
    num_tasks_in_suite = task_suite.n_tasks
    logging.info(f"Task suite: {args.task_suite_name}")

    pathlib.Path(args.video_out_path).mkdir(parents=True, exist_ok=True)

    # Per-suite step budgets, sized to the longest training demo of each suite.
    if args.task_suite_name == "libero_spatial":
        max_steps = 220  # longest training demo has 193 steps
    elif args.task_suite_name == "libero_object":
        max_steps = 280  # longest training demo has 254 steps
    elif args.task_suite_name == "libero_goal":
        max_steps = 300  # longest training demo has 270 steps
    elif args.task_suite_name == "libero_10":
        max_steps = 520  # longest training demo has 505 steps
    elif args.task_suite_name == "libero_90":
        max_steps = 400  # longest training demo has 373 steps
    else:
        raise ValueError(f"Unknown task suite: {args.task_suite_name}")

    client = _websocket_client_policy.WebsocketClientPolicy(args.host, args.port)

    # Start evaluation
    total_episodes, total_successes = 0, 0
    for task_id in tqdm.tqdm(range(num_tasks_in_suite)):
        # Get task
        task = task_suite.get_task(task_id)

        # Get default LIBERO initial states
        initial_states = task_suite.get_task_init_states(task_id)

        # Initialize LIBERO environment and task description
        env, task_description = _get_libero_env(task, LIBERO_ENV_RESOLUTION, args.seed)

        # Start episodes
        task_episodes, task_successes = 0, 0
        for episode_idx in tqdm.tqdm(range(args.num_trials_per_task)):
            logging.info(f"\nTask: {task_description}")

            # Reset environment
            env.reset()
            action_plan = collections.deque()

            # Set initial states
            obs = env.set_init_state(initial_states[episode_idx])

            # Setup
            t = 0
            replay_images = []
            # BUG FIX: initialize `done` -- previously it was unbound (NameError at the
            # `suffix = "success" if done ...` line) if the very first env.step raised.
            done = False

            logging.info(f"Starting episode {task_episodes+1}...")
            while t < max_steps + args.num_steps_wait:
                try:
                    # IMPORTANT: Do nothing for the first few timesteps because the simulator drops objects
                    # and we need to wait for them to fall
                    if t < args.num_steps_wait:
                        obs, reward, done, info = env.step(LIBERO_DUMMY_ACTION)
                        t += 1
                        continue

                    # Get preprocessed image
                    # IMPORTANT: rotate 180 degrees to match train preprocessing
                    img = np.ascontiguousarray(obs["agentview_image"][::-1, ::-1])
                    wrist_img = np.ascontiguousarray(obs["robot0_eye_in_hand_image"][::-1, ::-1])
                    img = image_tools.convert_to_uint8(
                        image_tools.resize_with_pad(img, args.resize_size, args.resize_size)
                    )
                    wrist_img = image_tools.convert_to_uint8(
                        image_tools.resize_with_pad(wrist_img, args.resize_size, args.resize_size)
                    )

                    # Save preprocessed image for replay video
                    replay_images.append(img)

                    if not action_plan:
                        # Finished executing previous action chunk -- compute new chunk
                        # Prepare observations dict
                        element = {
                            "observation/image": img,
                            "observation/wrist_image": wrist_img,
                            "observation/state": np.concatenate(
                                (
                                    obs["robot0_eef_pos"],
                                    _quat2axisangle(obs["robot0_eef_quat"]),
                                    obs["robot0_gripper_qpos"],
                                )
                            ),
                            "prompt": str(task_description),
                        }

                        # Query model to get action
                        action_chunk = client.infer(element)["actions"]
                        assert (
                            len(action_chunk) >= args.replan_steps
                        ), f"We want to replan every {args.replan_steps} steps, but policy only predicts {len(action_chunk)} steps."
                        action_plan.extend(action_chunk[: args.replan_steps])

                    action = action_plan.popleft()

                    # Execute action in environment
                    obs, reward, done, info = env.step(action.tolist())
                    if done:
                        task_successes += 1
                        total_successes += 1
                        break
                    t += 1

                except Exception as e:
                    logging.error(f"Caught exception: {e}")
                    break

            task_episodes += 1
            total_episodes += 1

            # Save a replay video of the episode
            suffix = "success" if done else "failure"
            task_segment = task_description.replace(" ", "_")
            imageio.mimwrite(
                pathlib.Path(args.video_out_path) / f"rollout_{task_segment}_{suffix}.mp4",
                [np.asarray(x) for x in replay_images],
                fps=10,
            )

            # Log current results
            logging.info(f"Success: {done}")
            logging.info(f"# episodes completed so far: {total_episodes}")
            logging.info(f"# successes: {total_successes} ({total_successes / total_episodes * 100:.1f}%)")

        # Log final results
        logging.info(f"Current task success rate: {float(task_successes) / float(task_episodes)}")
        logging.info(f"Current total success rate: {float(total_successes) / float(total_episodes)}")

    logging.info(f"Total success rate: {float(total_successes) / float(total_episodes)}")
    logging.info(f"Total episodes: {total_episodes}")


def _get_libero_env(task, resolution, seed):
    """Initializes and returns the LIBERO environment, along with the task description."""
    task_description = task.language
    task_bddl_file = pathlib.Path(get_libero_path("bddl_files")) / task.problem_folder / task.bddl_file
    env_args = {"bddl_file_name": task_bddl_file, "camera_heights": resolution, "camera_widths": resolution}
    env = OffScreenRenderEnv(**env_args)
    env.seed(seed)  # IMPORTANT: seed seems to affect object positions even when using fixed initial state
    return env, task_description


def _quat2axisangle(quat):
    """Convert a unit quaternion (x, y, z, w) to an axis-angle vector (axis * angle).

    Copied from robosuite: https://github.com/ARISE-Initiative/robosuite/blob/eafb81f54ffc104f905ee48a16bb15f059176ad3/robosuite/utils/transform_utils.py#L490C1-L512C55
    """
    # clip quaternion scalar part into the valid [-1, 1] range for acos
    if quat[3] > 1.0:
        quat[3] = 1.0
    elif quat[3] < -1.0:
        quat[3] = -1.0

    den = np.sqrt(1.0 - quat[3] * quat[3])
    if math.isclose(den, 0.0):
        # This is (close to) a zero degree rotation, immediately return
        return np.zeros(3)

    return (quat[:3] * 2.0 * math.acos(quat[3])) / den


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    tyro.cli(eval_libero)
+cycler==0.12.1 + # via matplotlib +docstring-parser==0.16 + # via tyro +etils==1.3.0 + # via mujoco +eval-type-backport==0.2.0 + # via tyro +evdev==1.7.1 + # via pynput +fonttools==4.55.3 + # via matplotlib +glfw==1.12.0 + # via mujoco +idna==3.10 + # via requests +imageio==2.35.1 + # via -r examples/libero/requirements.in +imageio-ffmpeg==0.5.1 + # via imageio +importlib-metadata==8.5.0 + # via typeguard +importlib-resources==6.4.5 + # via etils +kiwisolver==1.4.7 + # via matplotlib +llvmlite==0.36.0 + # via numba +markdown-it-py==3.0.0 + # via rich +matplotlib==3.5.3 + # via -r examples/libero/requirements.in +mdurl==0.1.2 + # via markdown-it-py +mujoco==3.2.3 + # via robosuite +numba==0.53.1 + # via robosuite +numpy==1.22.4 + # via + # -r examples/libero/requirements.in + # imageio + # matplotlib + # mujoco + # numba + # opencv-python + # robosuite + # scipy + # torchvision +opencv-python==4.6.0.66 + # via + # -r examples/libero/requirements.in + # robosuite +packaging==24.2 + # via matplotlib +pillow==10.4.0 + # via + # imageio + # matplotlib + # robosuite + # torchvision +psutil==6.1.0 + # via imageio +pygments==2.18.0 + # via rich +pynput==1.7.7 + # via robosuite +pyopengl==3.1.7 + # via mujoco +pyparsing==3.1.4 + # via matplotlib +python-dateutil==2.9.0.post0 + # via matplotlib +python-xlib==0.33 + # via pynput +pyyaml==6.0.2 + # via -r examples/libero/requirements.in +requests==2.32.3 + # via torchvision +rich==13.9.4 + # via tyro +robosuite==1.4.1 + # via -r examples/libero/requirements.in +scipy==1.10.1 + # via robosuite +setuptools==75.3.0 + # via + # imageio-ffmpeg + # numba +shtab==1.7.1 + # via tyro +six==1.17.0 + # via + # pynput + # python-dateutil + # python-xlib +termcolor==2.4.0 + # via robosuite +torch==1.11.0+cu113 + # via + # -r examples/libero/requirements.in + # torchaudio + # torchvision +torchaudio==0.11.0+cu113 + # via -r examples/libero/requirements.in +torchvision==0.12.0+cu113 + # via -r examples/libero/requirements.in +tqdm==4.67.1 + 
# via -r examples/libero/requirements.in +typeguard==4.4.0 + # via tyro +typing-extensions==4.12.2 + # via + # etils + # rich + # torch + # torchvision + # typeguard + # tyro +tyro==0.9.2 + # via -r examples/libero/requirements.in +urllib3==2.2.3 + # via requests +zipp==3.20.2 + # via + # etils + # importlib-metadata + # importlib-resources diff --git a/policy/openpi-InternData-A1/examples/policy_records.ipynb b/policy/openpi-InternData-A1/examples/policy_records.ipynb new file mode 100644 index 0000000..ee6f268 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/policy_records.ipynb @@ -0,0 +1,134 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pathlib\n", + "\n", + "import numpy as np\n", + "\n", + "record_path = pathlib.Path(\"../policy_records\")\n", + "num_steps = len(list(record_path.glob(\"step_*.npy\")))\n", + "\n", + "records = []\n", + "for i in range(num_steps):\n", + " record = np.load(record_path / f\"step_{i}.npy\", allow_pickle=True).item()\n", + " records.append(record)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"length of records\", len(records))\n", + "print(\"keys in records\", records[0].keys())\n", + "\n", + "for k in records[0]:\n", + " print(f\"{k} shape: {records[0][k].shape}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from PIL import Image\n", + "\n", + "\n", + "def get_image(step: int, idx: int = 0):\n", + " img = (255 * records[step][\"inputs/image\"]).astype(np.uint8)\n", + " return img[idx].transpose(1, 2, 0)\n", + "\n", + "\n", + "def show_image(step: int, idx_lst: list[int]):\n", + " imgs = [get_image(step, idx) for idx in idx_lst]\n", + " return Image.fromarray(np.hstack(imgs))\n", + "\n", + "\n", + "for i in range(2):\n", + " display(show_image(i, [0]))" + ] + }, + { + "cell_type": "code", + 
"execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "\n", + "def get_axis(name, axis):\n", + " return np.array([record[name][axis] for record in records])\n", + "\n", + "\n", + "# qpos is [..., 14] of type float:\n", + "# 0-5: left arm joint angles\n", + "# 6: left arm gripper\n", + "# 7-12: right arm joint angles\n", + "# 13: right arm gripper\n", + "names = [(\"left_joint\", 6), (\"left_gripper\", 1), (\"right_joint\", 6), (\"right_gripper\", 1)]\n", + "\n", + "\n", + "def make_data():\n", + " cur_dim = 0\n", + " in_data = {}\n", + " out_data = {}\n", + " for name, dim_size in names:\n", + " for i in range(dim_size):\n", + " in_data[f\"{name}_{i}\"] = get_axis(\"inputs/qpos\", cur_dim)\n", + " out_data[f\"{name}_{i}\"] = get_axis(\"outputs/qpos\", cur_dim)\n", + " cur_dim += 1\n", + " return pd.DataFrame(in_data), pd.DataFrame(out_data)\n", + "\n", + "\n", + "in_data, out_data = make_data()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for name in in_data.columns:\n", + " data = pd.DataFrame({f\"in_{name}\": in_data[name], f\"out_{name}\": out_data[name]})\n", + " data.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/policy/openpi-InternData-A1/examples/simple_client/Dockerfile b/policy/openpi-InternData-A1/examples/simple_client/Dockerfile new file mode 100644 index 0000000..0599163 --- /dev/null +++ 
b/policy/openpi-InternData-A1/examples/simple_client/Dockerfile @@ -0,0 +1,32 @@ +# Dockerfile for the simple client. + +# Build the container: +# docker build . -t simple_client -f examples/simple_client/Dockerfile + +# Run the container: +# docker run --rm -it --network=host -v .:/app simple_client /bin/bash + +FROM python:3.7-slim +COPY --from=ghcr.io/astral-sh/uv:0.5.1 /uv /uvx /bin/ + +WORKDIR /app + +# Copy from the cache instead of linking since it's a mounted volume +ENV UV_LINK_MODE=copy + +# Write the virtual environment outside of the project directory so it doesn't +# leak out of the container when we mount the application code. +ENV UV_PROJECT_ENVIRONMENT=/.venv + +# Copy the requirements files so we can install dependencies. +# The rest of the project is mounted as a volume, so we don't need to rebuild on changes. +# This strategy is best for development-style usage. +COPY ./examples/simple_client/requirements.txt /tmp/requirements.txt +COPY ./packages/openpi-client/pyproject.toml /tmp/openpi-client/pyproject.toml + +# Install python dependencies. +RUN uv venv --python 3.11.9 $UV_PROJECT_ENVIRONMENT +RUN uv pip sync /tmp/requirements.txt /tmp/openpi-client/pyproject.toml +ENV PYTHONPATH=/app:/app/src:/app/packages/openpi-client/src + +CMD /bin/bash -c "source /.venv/bin/activate && python examples/simple_client/main.py $SERVER_ARGS" diff --git a/policy/openpi-InternData-A1/examples/simple_client/README.md b/policy/openpi-InternData-A1/examples/simple_client/README.md new file mode 100644 index 0000000..bc381c1 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/simple_client/README.md @@ -0,0 +1,30 @@ +# Simple Client + +A minimal client that sends observations to the server and prints the inference rate. + +You can specify which runtime environment to use using the `--env` flag. 
You can see the available options by running: + +```bash +uv run examples/simple_client/main.py --help +``` + +## With Docker + +```bash +export SERVER_ARGS="--env ALOHA_SIM" +docker compose -f examples/simple_client/compose.yml up --build +``` + +## Without Docker + +Terminal window 1: + +```bash +uv run examples/simple_client/main.py --env DROID +``` + +Terminal window 2: + +```bash +uv run scripts/serve_policy.py --env DROID +``` diff --git a/policy/openpi-InternData-A1/examples/simple_client/compose.yml b/policy/openpi-InternData-A1/examples/simple_client/compose.yml new file mode 100644 index 0000000..977e361 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/simple_client/compose.yml @@ -0,0 +1,42 @@ +# Run with: +# docker compose -f examples/simple_client/compose.yml up --build +services: + runtime: + image: simple_client + depends_on: + - openpi_server + build: + context: ../.. + dockerfile: examples/simple_client/Dockerfile + init: true + tty: true + network_mode: host + volumes: + - $PWD:/app + environment: + - SERVER_ARGS + + openpi_server: + image: openpi_server + build: + context: ../.. + dockerfile: scripts/docker/serve_policy.Dockerfile + init: true + tty: true + network_mode: host + volumes: + - $PWD:/app + - ${OPENPI_DATA_HOME:-~/.cache/openpi}:/openpi_assets + environment: + - SERVER_ARGS + - OPENPI_DATA_HOME=/openpi_assets + - IS_DOCKER=true + + # Comment out this block if not running on a machine with GPUs. 
+ deploy: + resources: + reservations: + devices: + - driver: nvidia + count: 1 + capabilities: [gpu] diff --git a/policy/openpi-InternData-A1/examples/simple_client/main.py b/policy/openpi-InternData-A1/examples/simple_client/main.py new file mode 100644 index 0000000..cd7eda1 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/simple_client/main.py @@ -0,0 +1,187 @@ +import dataclasses +import enum +import logging +import pathlib +import time + +import numpy as np +from openpi_client import websocket_client_policy as _websocket_client_policy +import polars as pl +import rich +import tqdm +import tyro + +logger = logging.getLogger(__name__) + + +class EnvMode(enum.Enum): + """Supported environments.""" + + ALOHA = "aloha" + ALOHA_SIM = "aloha_sim" + DROID = "droid" + LIBERO = "libero" + + +@dataclasses.dataclass +class Args: + """Command line arguments.""" + + # Host and port to connect to the server. + host: str = "0.0.0.0" + # Port to connect to the server. If None, the server will use the default port. + port: int | None = 8000 + # API key to use for the server. + api_key: str | None = None + # Number of steps to run the policy for. + num_steps: int = 20 + # Path to save the timings to a parquet file. (e.g., timing.parquet) + timing_file: pathlib.Path | None = None + # Environment to run the policy in. 
+ env: EnvMode = EnvMode.ALOHA_SIM + + +class TimingRecorder: + """Records timing measurements for different keys.""" + + def __init__(self) -> None: + self._timings: dict[str, list[float]] = {} + + def record(self, key: str, time_ms: float) -> None: + """Record a timing measurement for the given key.""" + if key not in self._timings: + self._timings[key] = [] + self._timings[key].append(time_ms) + + def get_stats(self, key: str) -> dict[str, float]: + """Get statistics for the given key.""" + times = self._timings[key] + return { + "mean": float(np.mean(times)), + "std": float(np.std(times)), + "p25": float(np.quantile(times, 0.25)), + "p50": float(np.quantile(times, 0.50)), + "p75": float(np.quantile(times, 0.75)), + "p90": float(np.quantile(times, 0.90)), + "p95": float(np.quantile(times, 0.95)), + "p99": float(np.quantile(times, 0.99)), + } + + def print_all_stats(self) -> None: + """Print statistics for all keys in a concise format.""" + + table = rich.table.Table( + title="[bold blue]Timing Statistics[/bold blue]", + show_header=True, + header_style="bold white", + border_style="blue", + title_justify="center", + ) + + # Add metric column with custom styling + table.add_column("Metric", style="cyan", justify="left", no_wrap=True) + + # Add statistical columns with consistent styling + stat_columns = [ + ("Mean", "yellow", "mean"), + ("Std", "yellow", "std"), + ("P25", "magenta", "p25"), + ("P50", "magenta", "p50"), + ("P75", "magenta", "p75"), + ("P90", "magenta", "p90"), + ("P95", "magenta", "p95"), + ("P99", "magenta", "p99"), + ] + + for name, style, _ in stat_columns: + table.add_column(name, justify="right", style=style, no_wrap=True) + + # Add rows for each metric with formatted values + for key in sorted(self._timings.keys()): + stats = self.get_stats(key) + values = [f"{stats[key]:.1f}" for _, _, key in stat_columns] + table.add_row(key, *values) + + # Print with custom console settings + console = rich.console.Console(width=None, highlight=True) + 
console.print(table) + + def write_parquet(self, path: pathlib.Path) -> None: + """Save the timings to a parquet file.""" + logger.info(f"Writing timings to {path}") + frame = pl.DataFrame(self._timings) + path.parent.mkdir(parents=True, exist_ok=True) + frame.write_parquet(path) + + +def main(args: Args) -> None: + obs_fn = { + EnvMode.ALOHA: _random_observation_aloha, + EnvMode.ALOHA_SIM: _random_observation_aloha, + EnvMode.DROID: _random_observation_droid, + EnvMode.LIBERO: _random_observation_libero, + }[args.env] + + policy = _websocket_client_policy.WebsocketClientPolicy( + host=args.host, + port=args.port, + api_key=args.api_key, + ) + logger.info(f"Server metadata: {policy.get_server_metadata()}") + + # Send a few observations to make sure the model is loaded. + for _ in range(2): + policy.infer(obs_fn()) + + timing_recorder = TimingRecorder() + + for _ in tqdm.trange(args.num_steps, desc="Running policy"): + inference_start = time.time() + action = policy.infer(obs_fn()) + timing_recorder.record("client_infer_ms", 1000 * (time.time() - inference_start)) + for key, value in action.get("server_timing", {}).items(): + timing_recorder.record(f"server_{key}", value) + for key, value in action.get("policy_timing", {}).items(): + timing_recorder.record(f"policy_{key}", value) + + timing_recorder.print_all_stats() + + if args.timing_file is not None: + timing_recorder.write_parquet(args.timing_file) + + +def _random_observation_aloha() -> dict: + return { + "state": np.ones((14,)), + "images": { + "cam_high": np.random.randint(256, size=(3, 224, 224), dtype=np.uint8), + "cam_low": np.random.randint(256, size=(3, 224, 224), dtype=np.uint8), + "cam_left_wrist": np.random.randint(256, size=(3, 224, 224), dtype=np.uint8), + "cam_right_wrist": np.random.randint(256, size=(3, 224, 224), dtype=np.uint8), + }, + "prompt": "do something", + } + + +def _random_observation_droid() -> dict: + return { + "observation/exterior_image_1_left": np.random.randint(256, size=(224, 
224, 3), dtype=np.uint8), + "observation/wrist_image_left": np.random.randint(256, size=(224, 224, 3), dtype=np.uint8), + "observation/joint_position": np.random.rand(7), + "observation/gripper_position": np.random.rand(1), + "prompt": "do something", + } + + +def _random_observation_libero() -> dict: + return { + "observation/state": np.random.rand(8), + "observation/image": np.random.randint(256, size=(224, 224, 3), dtype=np.uint8), + "observation/wrist_image": np.random.randint(256, size=(224, 224, 3), dtype=np.uint8), + "prompt": "do something", + } + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + main(tyro.cli(Args)) diff --git a/policy/openpi-InternData-A1/examples/simple_client/requirements.in b/policy/openpi-InternData-A1/examples/simple_client/requirements.in new file mode 100644 index 0000000..f4c5c0c --- /dev/null +++ b/policy/openpi-InternData-A1/examples/simple_client/requirements.in @@ -0,0 +1,5 @@ +numpy>=1.22.4,<2.0.0 +rich +tqdm +tyro +polars \ No newline at end of file diff --git a/policy/openpi-InternData-A1/examples/simple_client/requirements.txt b/policy/openpi-InternData-A1/examples/simple_client/requirements.txt new file mode 100644 index 0000000..86143b5 --- /dev/null +++ b/policy/openpi-InternData-A1/examples/simple_client/requirements.txt @@ -0,0 +1,30 @@ +# This file was autogenerated by uv via the following command: +# uv pip compile examples/simple_client/requirements.in -o examples/simple_client/requirements.txt --python-version 3.11.9 +docstring-parser==0.16 + # via tyro +markdown-it-py==3.0.0 + # via rich +mdurl==0.1.2 + # via markdown-it-py +numpy==1.26.4 + # via -r examples/simple_client/requirements.in +polars==1.30.0 + # via -r examples/simple_client/requirements.in +pygments==2.19.1 + # via rich +rich==14.0.0 + # via + # -r examples/simple_client/requirements.in + # tyro +shtab==1.7.2 + # via tyro +tqdm==4.67.1 + # via -r examples/simple_client/requirements.in +typeguard==4.4.2 + # via tyro 
+typing-extensions==4.13.2 + # via + # typeguard + # tyro +tyro==0.9.22 + # via -r examples/simple_client/requirements.in diff --git a/policy/openpi-InternData-A1/examples/ur5/README.md b/policy/openpi-InternData-A1/examples/ur5/README.md new file mode 100644 index 0000000..a66e91d --- /dev/null +++ b/policy/openpi-InternData-A1/examples/ur5/README.md @@ -0,0 +1,142 @@ +# UR5 Example + +Below we provide an outline of how to implement the key components mentioned in the "Finetune on your data" section of the [README](../README.md) for finetuning on UR5 datasets. + +First, we will define the `UR5Inputs` and `UR5Outputs` classes, which map the UR5 environment to the model and vice versa. Check the corresponding files in `src/openpi/policies/libero_policy.py` for comments explaining each line. + +```python + +@dataclasses.dataclass(frozen=True) +class UR5Inputs(transforms.DataTransformFn): + + model_type: _model.ModelType = _model.ModelType.PI0 + + def __call__(self, data: dict) -> dict: + # First, concatenate the joints and gripper into the state vector. + state = np.concatenate([data["joints"], data["gripper"]]) + + # Possibly need to parse images to uint8 (H,W,C) since LeRobot automatically + # stores as float32 (C,H,W), gets skipped for policy inference. + base_image = _parse_image(data["base_rgb"]) + wrist_image = _parse_image(data["wrist_rgb"]) + + # Create inputs dict. 
+ inputs = { + "state": state, + "image": { + "base_0_rgb": base_image, + "left_wrist_0_rgb": wrist_image, + # Since there is no right wrist, replace with zeros + "right_wrist_0_rgb": np.zeros_like(base_image), + }, + "image_mask": { + "base_0_rgb": np.True_, + "left_wrist_0_rgb": np.True_, + # Since the "slot" for the right wrist is not used, this mask is set + # to False + "right_wrist_0_rgb": np.True_ if self.model_type == _model.ModelType.PI0_FAST else np.False_, + }, + } + + if "actions" in data: + inputs["actions"] = data["actions"] + + # Pass the prompt (aka language instruction) to the model. + if "prompt" in data: + inputs["prompt"] = data["prompt"] + + return inputs + + +@dataclasses.dataclass(frozen=True) +class UR5Outputs(transforms.DataTransformFn): + + def __call__(self, data: dict) -> dict: + # Since the robot has 7 action dimensions (6 DoF + gripper), return the first 7 dims + return {"actions": np.asarray(data["actions"][:, :7])} + +``` + +Next, we will define the `UR5DataConfig` class, which defines how to process raw UR5 data from LeRobot dataset for training. For a full example, see the `LeRobotLiberoDataConfig` config in the [training config file](https://github.com/physical-intelligence/openpi/blob/main/src/openpi/training/config.py). + +```python + +@dataclasses.dataclass(frozen=True) +class LeRobotUR5DataConfig(DataConfigFactory): + + @override + def create(self, assets_dirs: pathlib.Path, model_config: _model.BaseModelConfig) -> DataConfig: + # Boilerplate for remapping keys from the LeRobot dataset. We assume no renaming needed here. + repack_transform = _transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "base_rgb": "image", + "wrist_rgb": "wrist_image", + "joints": "joints", + "gripper": "gripper", + "prompt": "prompt", + } + ) + ] + ) + + # These transforms are the ones we wrote earlier. 
+ data_transforms = _transforms.Group( + inputs=[UR5Inputs(action_dim=model_config.action_dim, model_type=model_config.model_type)], + outputs=[UR5Outputs()], + ) + + # Convert absolute actions to delta actions. + # By convention, we do not convert the gripper action (7th dimension). + delta_action_mask = _transforms.make_bool_mask(6, -1) + data_transforms = data_transforms.push( + inputs=[_transforms.DeltaActions(delta_action_mask)], + outputs=[_transforms.AbsoluteActions(delta_action_mask)], + ) + + # Model transforms include things like tokenizing the prompt and action targets + # You do not need to change anything here for your own dataset. + model_transforms = ModelTransformFactory()(model_config) + + # We return all data transforms for training and inference. No need to change anything here. + return dataclasses.replace( + self.create_base_config(assets_dirs), + repack_transforms=repack_transform, + data_transforms=data_transforms, + model_transforms=model_transforms, + ) + +``` + +Finally, we define the TrainConfig for our UR5 dataset. Here, we define a config for fine-tuning pi0 on our UR5 dataset. See the [training config file](https://github.com/physical-intelligence/openpi/blob/main/src/openpi/training/config.py) for more examples, e.g. for pi0-FAST or for LoRA fine-tuning. + +```python +TrainConfig( + name="pi0_ur5", + model=pi0.Pi0Config(), + data=LeRobotUR5DataConfig( + repo_id="your_username/ur5_dataset", + # This config lets us reload the UR5 normalization stats from the base model checkpoint. + # Reloading normalization stats can help transfer pre-trained models to new environments. + # See the [norm_stats.md](../docs/norm_stats.md) file for more details. + assets=AssetsConfig( + assets_dir="gs://openpi-assets/checkpoints/pi0_base/assets", + asset_id="ur5e", + ), + base_config=DataConfig( + # This flag determines whether we load the prompt (i.e. the task instruction) from the + # ``task`` field in the LeRobot dataset. 
The recommended setting is True. + prompt_from_task=True, + ), + ), + # Load the pi0 base model checkpoint. + weight_loader=weight_loaders.CheckpointWeightLoader("gs://openpi-assets/checkpoints/pi0_base/params"), + num_train_steps=30_000, +) +``` + + + + + diff --git a/policy/openpi-InternData-A1/packages/openpi-client/pyproject.toml b/policy/openpi-InternData-A1/packages/openpi-client/pyproject.toml new file mode 100644 index 0000000..fba7b66 --- /dev/null +++ b/policy/openpi-InternData-A1/packages/openpi-client/pyproject.toml @@ -0,0 +1,23 @@ +[project] +name = "openpi-client" +version = "0.1.0" +requires-python = ">=3.7" +dependencies = [ + "dm-tree>=0.1.8", + "msgpack>=1.0.5", + "numpy>=1.22.4,<2.0.0", + "pillow>=9.0.0", + "tree>=0.2.4", + "websockets>=11.0", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.uv] +dev-dependencies = ["pytest>=8.3.4"] + +[tool.ruff] +line-length = 120 +target-version = "py37" diff --git a/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/__init__.py b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/__init__.py new file mode 100644 index 0000000..3dc1f76 --- /dev/null +++ b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/__init__.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/action_chunk_broker.py b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/action_chunk_broker.py new file mode 100644 index 0000000..8fa9d83 --- /dev/null +++ b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/action_chunk_broker.py @@ -0,0 +1,50 @@ +from typing import Dict + +import numpy as np +import tree +from typing_extensions import override + +from openpi_client import base_policy as _base_policy + + +class ActionChunkBroker(_base_policy.BasePolicy): + """Wraps a policy to return action chunks one-at-a-time. 
+ + Assumes that the first dimension of all action fields is the chunk size. + + A new inference call to the inner policy is only made when the current + list of chunks is exhausted. + """ + + def __init__(self, policy: _base_policy.BasePolicy, action_horizon: int): + self._policy = policy + self._action_horizon = action_horizon + self._cur_step: int = 0 + + self._last_results: Dict[str, np.ndarray] | None = None + + @override + def infer(self, obs: Dict) -> Dict: # noqa: UP006 + if self._last_results is None: + self._last_results = self._policy.infer(obs) + self._cur_step = 0 + + def slicer(x): + if isinstance(x, np.ndarray): + return x[self._cur_step, ...] + else: + return x + + results = tree.map_structure(slicer, self._last_results) + self._cur_step += 1 + + if self._cur_step >= self._action_horizon: + self._last_results = None + + return results + + @override + def reset(self) -> None: + self._policy.reset() + self._last_results = None + self._cur_step = 0 diff --git a/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/base_policy.py b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/base_policy.py new file mode 100644 index 0000000..2f42906 --- /dev/null +++ b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/base_policy.py @@ -0,0 +1,12 @@ +import abc +from typing import Dict + + +class BasePolicy(abc.ABC): + @abc.abstractmethod + def infer(self, obs: Dict) -> Dict: + """Infer actions from observations.""" + + def reset(self) -> None: + """Reset the policy to its initial state.""" + pass diff --git a/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/image_tools.py b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/image_tools.py new file mode 100644 index 0000000..7a971b9 --- /dev/null +++ b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/image_tools.py @@ -0,0 +1,58 @@ +import numpy as np +from PIL import Image + + +def convert_to_uint8(img: 
np.ndarray) -> np.ndarray: + """Converts an image to uint8 if it is a float image. + + This is important for reducing the size of the image when sending it over the network. + """ + if np.issubdtype(img.dtype, np.floating): + img = (255 * img).astype(np.uint8) + return img + + +def resize_with_pad(images: np.ndarray, height: int, width: int, method=Image.BILINEAR) -> np.ndarray: + """Replicates tf.image.resize_with_pad for multiple images using PIL. Resizes a batch of images to a target height. + + Args: + images: A batch of images in [..., height, width, channel] format. + height: The target height of the image. + width: The target width of the image. + method: The interpolation method to use. Default is bilinear. + + Returns: + The resized images in [..., height, width, channel]. + """ + # If the images are already the correct size, return them as is. + if images.shape[-3:-1] == (height, width): + return images + + original_shape = images.shape + + images = images.reshape(-1, *original_shape[-3:]) + resized = np.stack([_resize_with_pad_pil(Image.fromarray(im), height, width, method=method) for im in images]) + return resized.reshape(*original_shape[:-3], *resized.shape[-3:]) + + +def _resize_with_pad_pil(image: Image.Image, height: int, width: int, method: int) -> Image.Image: + """Replicates tf.image.resize_with_pad for one image using PIL. Resizes an image to a target height and + width without distortion by padding with zeros. + + Unlike the jax version, note that PIL uses [width, height, channel] ordering instead of [batch, h, w, c]. + """ + cur_width, cur_height = image.size + if cur_width == width and cur_height == height: + return image # No need to resize if the image is already the correct size. 
+ + ratio = max(cur_width / width, cur_height / height) + resized_height = int(cur_height / ratio) + resized_width = int(cur_width / ratio) + resized_image = image.resize((resized_width, resized_height), resample=method) + + zero_image = Image.new(resized_image.mode, (width, height), 0) + pad_height = max(0, int((height - resized_height) / 2)) + pad_width = max(0, int((width - resized_width) / 2)) + zero_image.paste(resized_image, (pad_width, pad_height)) + assert zero_image.size == (width, height) + return zero_image diff --git a/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/image_tools_test.py b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/image_tools_test.py new file mode 100644 index 0000000..8d4b4b9 --- /dev/null +++ b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/image_tools_test.py @@ -0,0 +1,37 @@ +import numpy as np + +import openpi_client.image_tools as image_tools + + +def test_resize_with_pad_shapes(): + # Test case 1: Resize image with larger dimensions + images = np.zeros((2, 10, 10, 3), dtype=np.uint8) # Input images of shape (batch_size, height, width, channels) + height = 20 + width = 20 + resized_images = image_tools.resize_with_pad(images, height, width) + assert resized_images.shape == (2, height, width, 3) + assert np.all(resized_images == 0) + + # Test case 2: Resize image with smaller dimensions + images = np.zeros((3, 30, 30, 3), dtype=np.uint8) + height = 15 + width = 15 + resized_images = image_tools.resize_with_pad(images, height, width) + assert resized_images.shape == (3, height, width, 3) + assert np.all(resized_images == 0) + + # Test case 3: Resize image with the same dimensions + images = np.zeros((1, 50, 50, 3), dtype=np.uint8) + height = 50 + width = 50 + resized_images = image_tools.resize_with_pad(images, height, width) + assert resized_images.shape == (1, height, width, 3) + assert np.all(resized_images == 0) + + # Test case 3: Resize image with odd-numbered 
padding + images = np.zeros((1, 256, 320, 3), dtype=np.uint8) + height = 60 + width = 80 + resized_images = image_tools.resize_with_pad(images, height, width) + assert resized_images.shape == (1, height, width, 3) + assert np.all(resized_images == 0) diff --git a/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/msgpack_numpy.py b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/msgpack_numpy.py new file mode 100644 index 0000000..007f755 --- /dev/null +++ b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/msgpack_numpy.py @@ -0,0 +1,57 @@ +"""Adds NumPy array support to msgpack. + +msgpack is good for (de)serializing data over a network for multiple reasons: +- msgpack is secure (as opposed to pickle/dill/etc which allow for arbitrary code execution) +- msgpack is widely used and has good cross-language support +- msgpack does not require a schema (as opposed to protobuf/flatbuffers/etc) which is convenient in dynamically typed + languages like Python and JavaScript +- msgpack is fast and efficient (as opposed to readable formats like JSON/YAML/etc); I found that msgpack was ~4x faster + than pickle for serializing large arrays using the below strategy + +The code below is adapted from https://github.com/lebedov/msgpack-numpy. The reason not to use that library directly is +that it falls back to pickle for object arrays. 
+""" + +import functools + +import msgpack +import numpy as np + + +def pack_array(obj): + if (isinstance(obj, (np.ndarray, np.generic))) and obj.dtype.kind in ("V", "O", "c"): + raise ValueError(f"Unsupported dtype: {obj.dtype}") + + if isinstance(obj, np.ndarray): + return { + b"__ndarray__": True, + b"data": obj.tobytes(), + b"dtype": obj.dtype.str, + b"shape": obj.shape, + } + + if isinstance(obj, np.generic): + return { + b"__npgeneric__": True, + b"data": obj.item(), + b"dtype": obj.dtype.str, + } + + return obj + + +def unpack_array(obj): + if b"__ndarray__" in obj: + return np.ndarray(buffer=obj[b"data"], dtype=np.dtype(obj[b"dtype"]), shape=obj[b"shape"]) + + if b"__npgeneric__" in obj: + return np.dtype(obj[b"dtype"]).type(obj[b"data"]) + + return obj + + +Packer = functools.partial(msgpack.Packer, default=pack_array) +packb = functools.partial(msgpack.packb, default=pack_array) + +Unpacker = functools.partial(msgpack.Unpacker, object_hook=unpack_array) +unpackb = functools.partial(msgpack.unpackb, object_hook=unpack_array) diff --git a/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/msgpack_numpy_test.py b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/msgpack_numpy_test.py new file mode 100644 index 0000000..4c774ba --- /dev/null +++ b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/msgpack_numpy_test.py @@ -0,0 +1,45 @@ +import numpy as np +import pytest +import tree + +from openpi_client import msgpack_numpy + + +def _check(expected, actual): + if isinstance(expected, np.ndarray): + assert expected.shape == actual.shape + assert expected.dtype == actual.dtype + assert np.array_equal(expected, actual, equal_nan=expected.dtype.kind == "f") + else: + assert expected == actual + + +@pytest.mark.parametrize( + "data", + [ + 1, # int + 1.0, # float + "hello", # string + np.bool_(True), # boolean scalar + np.array([1, 2, 3])[0], # int scalar + np.str_("asdf"), # string scalar + [1, 2, 3], # list 
+ {"key": "value"}, # dict + {"key": [1, 2, 3]}, # nested dict + np.array(1.0), # 0D array + np.array([1, 2, 3], dtype=np.int32), # 1D integer array + np.array(["asdf", "qwer"]), # string array + np.array([True, False]), # boolean array + np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32), # 2D float array + np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.int16), # 3D integer array + np.array([np.nan, np.inf, -np.inf]), # special float values + {"arr": np.array([1, 2, 3]), "nested": {"arr": np.array([4, 5, 6])}}, # nested dict with arrays + [np.array([1, 2]), np.array([3, 4])], # list of arrays + np.zeros((3, 4, 5), dtype=np.float32), # 3D zeros + np.ones((2, 3), dtype=np.float64), # 2D ones with double precision + ], +) +def test_pack_unpack(data): + packed = msgpack_numpy.packb(data) + unpacked = msgpack_numpy.unpackb(packed) + tree.map_structure(_check, data, unpacked) diff --git a/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/agent.py b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/agent.py new file mode 100644 index 0000000..a2c3ab6 --- /dev/null +++ b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/agent.py @@ -0,0 +1,17 @@ +import abc + + +class Agent(abc.ABC): + """An Agent is the thing with agency, i.e. the entity that makes decisions. + + Agents receive observations about the state of the world, and return actions + to take in response. 
+ """ + + @abc.abstractmethod + def get_action(self, observation: dict) -> dict: + """Query the agent for the next action.""" + + @abc.abstractmethod + def reset(self) -> None: + """Reset the agent to its initial state.""" diff --git a/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/agents/policy_agent.py b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/agents/policy_agent.py new file mode 100644 index 0000000..65227c4 --- /dev/null +++ b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/agents/policy_agent.py @@ -0,0 +1,18 @@ +from typing_extensions import override + +from openpi_client import base_policy as _base_policy +from openpi_client.runtime import agent as _agent + + +class PolicyAgent(_agent.Agent): + """An agent that uses a policy to determine actions.""" + + def __init__(self, policy: _base_policy.BasePolicy) -> None: + self._policy = policy + + @override + def get_action(self, observation: dict) -> dict: + return self._policy.infer(observation) + + def reset(self) -> None: + self._policy.reset() diff --git a/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/environment.py b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/environment.py new file mode 100644 index 0000000..664ac46 --- /dev/null +++ b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/environment.py @@ -0,0 +1,32 @@ +import abc + + +class Environment(abc.ABC): + """An Environment represents the robot and the environment it inhabits. + + The primary contract of environments is that they can be queried for observations + about their state, and have actions applied to them to change that state. + """ + + @abc.abstractmethod + def reset(self) -> None: + """Reset the environment to its initial state. + + This will be called once before starting each episode. 
+ """ + + @abc.abstractmethod + def is_episode_complete(self) -> bool: + """Allow the environment to signal that the episode is complete. + + This will be called after each step. It should return `True` if the episode is + complete (either successfully or unsuccessfully), and `False` otherwise. + """ + + @abc.abstractmethod + def get_observation(self) -> dict: + """Query the environment for the current state.""" + + @abc.abstractmethod + def apply_action(self, action: dict) -> None: + """Take an action in the environment.""" diff --git a/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/runtime.py b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/runtime.py new file mode 100644 index 0000000..9552be0 --- /dev/null +++ b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/runtime.py @@ -0,0 +1,92 @@ +import logging +import threading +import time + +from openpi_client.runtime import agent as _agent +from openpi_client.runtime import environment as _environment +from openpi_client.runtime import subscriber as _subscriber + + +class Runtime: + """The core module orchestrating interactions between key components of the system.""" + + def __init__( + self, + environment: _environment.Environment, + agent: _agent.Agent, + subscribers: list[_subscriber.Subscriber], + max_hz: float = 0, + num_episodes: int = 1, + max_episode_steps: int = 0, + ) -> None: + self._environment = environment + self._agent = agent + self._subscribers = subscribers + self._max_hz = max_hz + self._num_episodes = num_episodes + self._max_episode_steps = max_episode_steps + + self._in_episode = False + self._episode_steps = 0 + + def run(self) -> None: + """Runs the runtime loop continuously until stop() is called or the environment is done.""" + for _ in range(self._num_episodes): + self._run_episode() + + # Final reset, this is important for real environments to move the robot to its home position. 
+ self._environment.reset() + + def run_in_new_thread(self) -> threading.Thread: + """Runs the runtime loop in a new thread.""" + thread = threading.Thread(target=self.run) + thread.start() + return thread + + def mark_episode_complete(self) -> None: + """Marks the end of an episode.""" + self._in_episode = False + + def _run_episode(self) -> None: + """Runs a single episode.""" + logging.info("Starting episode...") + self._environment.reset() + self._agent.reset() + for subscriber in self._subscribers: + subscriber.on_episode_start() + + self._in_episode = True + self._episode_steps = 0 + step_time = 1 / self._max_hz if self._max_hz > 0 else 0 + last_step_time = time.time() + + while self._in_episode: + self._step() + self._episode_steps += 1 + + # Sleep to maintain the desired frame rate + now = time.time() + dt = now - last_step_time + if dt < step_time: + time.sleep(step_time - dt) + last_step_time = time.time() + else: + last_step_time = now + + logging.info("Episode completed.") + for subscriber in self._subscribers: + subscriber.on_episode_end() + + def _step(self) -> None: + """A single step of the runtime loop.""" + observation = self._environment.get_observation() + action = self._agent.get_action(observation) + self._environment.apply_action(action) + + for subscriber in self._subscribers: + subscriber.on_step(observation, action) + + if self._environment.is_episode_complete() or ( + self._max_episode_steps > 0 and self._episode_steps >= self._max_episode_steps + ): + self.mark_episode_complete() diff --git a/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/subscriber.py b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/subscriber.py new file mode 100644 index 0000000..7c69eda --- /dev/null +++ b/policy/openpi-InternData-A1/packages/openpi-client/src/openpi_client/runtime/subscriber.py @@ -0,0 +1,20 @@ +import abc + + +class Subscriber(abc.ABC): + """Subscribes to events in the runtime. 
class Subscriber(abc.ABC):
    """Subscribes to events in the runtime.

    Subscribers can be used to save data, visualize, etc.
    """

    @abc.abstractmethod
    def on_episode_start(self) -> None:
        """Called when an episode starts, before any steps are taken."""

    @abc.abstractmethod
    def on_step(self, observation: dict, action: dict) -> None:
        """Called after each step with the observation and the action taken for it."""

    @abc.abstractmethod
    def on_episode_end(self) -> None:
        """Called when an episode ends."""
+ """ + + def __init__(self, host: str = "0.0.0.0", port: Optional[int] = None, api_key: Optional[str] = None) -> None: + self._uri = f"ws://{host}" + if port is not None: + self._uri += f":{port}" + self._packer = msgpack_numpy.Packer() + self._api_key = api_key + self._ws, self._server_metadata = self._wait_for_server() + + def get_server_metadata(self) -> Dict: + return self._server_metadata + + def _wait_for_server(self) -> Tuple[websockets.sync.client.ClientConnection, Dict]: + logging.info(f"Waiting for server at {self._uri}...") + while True: + try: + headers = {"Authorization": f"Api-Key {self._api_key}"} if self._api_key else None + conn = websockets.sync.client.connect( + self._uri, compression=None, max_size=None, additional_headers=headers + ) + metadata = msgpack_numpy.unpackb(conn.recv()) + return conn, metadata + except ConnectionRefusedError: + logging.info("Still waiting for server...") + time.sleep(5) + + @override + def infer(self, obs: Dict) -> Dict: # noqa: UP006 + data = self._packer.pack(obs) + self._ws.send(data) + response = self._ws.recv() + if isinstance(response, str): + # we're expecting bytes; if the server sends a string, it's an error. 
+ raise RuntimeError(f"Error in inference server:\n{response}") + return msgpack_numpy.unpackb(response) + + @override + def reset(self) -> None: + pass diff --git a/policy/openpi-InternData-A1/pyproject.toml b/policy/openpi-InternData-A1/pyproject.toml new file mode 100644 index 0000000..6b36694 --- /dev/null +++ b/policy/openpi-InternData-A1/pyproject.toml @@ -0,0 +1,136 @@ +[project] +name = "openpi" +version = "0.1.0" +description = "Physical Intelligence open source repo" +readme = "README.md" +requires-python = ">=3.11" +license = { file = "LICENSE" } +dependencies = [ + "augmax>=0.3.4", + "dm-tree>=0.1.8", + "einops>=0.8.0", + "equinox>=0.11.8", + "flatbuffers>=24.3.25", + "flax==0.10.2", + "fsspec[gcs]>=2024.6.0", + "gym-aloha>=0.1.1", + "imageio>=2.36.1", + "jax[cuda12]==0.5.3", + "jaxtyping==0.2.36", + "ml_collections==1.0.0", + "numpy>=1.22.4,<2.0.0", + "numpydantic>=1.6.6", + "opencv-python>=4.10.0.84", + "openpi-client", + "orbax-checkpoint==0.11.13", + "pillow>=11.0.0", + "sentencepiece>=0.2.0", + "torch==2.7.1", + "tqdm-loggable>=0.2", + "typing-extensions>=4.12.2", + "tyro>=0.9.5", + "wandb>=0.19.1", + "filelock>=3.16.1", + "beartype==0.19.0", + "treescope>=0.1.7", + "transformers==4.53.2", + "rich>=14.0.0", + "polars>=1.30.0", +] + + +[project.urls] +Repository = "https://github.com/Physical-Intelligence/openpi" + +[dependency-groups] +dev = [ + "pytest>=8.3.4", + "ruff>=0.8.6", + "pre-commit>=4.0.1", + "ipykernel>=6.29.5", + "ipywidgets>=8.1.5", + "matplotlib>=3.10.0", + "pynvml>=12.0.0", +] +rlds = [ + "dlimp", + "tensorflow-cpu==2.15.0", + "tensorflow-datasets==4.9.9", +] + +[tool.uv] +override-dependencies = ["ml-dtypes==0.4.1", "tensorstore==0.1.74"] + +[tool.uv.sources] +openpi-client = { workspace = true } +lerobot = { git = "https://github.com/huggingface/lerobot", rev = "0cf864870cf29f4738d3ade893e6fd13fbd7cdb5" } +dlimp = { git = "https://github.com/kvablack/dlimp", rev = "ad72ce3a9b414db2185bc0b38461d4101a65477a" } + +[tool.uv.workspace] 
+members = ["packages/*"] + +[tool.ruff] +line-length = 120 +target-version = "py311" +extend-exclude = ["docker", "third_party", "src/openpi/models_pytorch/transformers_replace/*"] + +[tool.ruff.lint] +# https://docs.astral.sh/ruff/rules/ +select = [ + "B", + "C4", + "DTZ", + "E4", + "E7", + "E9", + "F", + "FBT", + "FURB", + "I", + "ICN", + "ISC", + "LOG", + "N", + "PD", + "PERF", + "PIE", + "PLC", + "PLE", + "PLR1", + "PLR5", + "PLW", + "PT", + "Q", + "RET", + "RUF", + "SIM", + "SLF", + "T10", + "T20", + "UP", + "W", +] +ignore = [ + "F722", # Conflicts with array typing. + "T201", # We use print statements. + "PD008", # Lots of false positives. + "ISC001", # Disabling to support ruff format. + "LOG015", # Use logger.info. +] +unfixable = [ + "B905", # Fix defaults to strict=False, which is not what we want. +] + +[tool.ruff.lint.isort] +force-single-line = true +force-sort-within-sections = true +single-line-exclusions = ["collections.abc", "typing", "typing_extensions"] +known-third-party = ["wandb"] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.pytest.ini_options] +markers = ["manual: should be run manually."] +testpaths = ["src", "scripts", "packages"] diff --git a/policy/openpi-InternData-A1/requirements_wo_torch_lerobot.txt b/policy/openpi-InternData-A1/requirements_wo_torch_lerobot.txt new file mode 100644 index 0000000..331c8e7 --- /dev/null +++ b/policy/openpi-InternData-A1/requirements_wo_torch_lerobot.txt @@ -0,0 +1,36 @@ +augmax>=0.3.4 +dm-tree>=0.1.8 +einops>=0.8.0 +equinox>=0.11.8 +flatbuffers>=24.3.25 +flax==0.10.2 +fsspec[gcs]>=2024.6.0 +gym-aloha>=0.1.1 +imageio>=2.36.1 +jax[cuda12]==0.5.3 +jaxtyping==0.2.36 +ml_collections==1.0.0 +numpy>=1.22.4,<2.0.0 +numpydantic>=1.6.6 +opencv-python>=4.10.0.84 +orbax-checkpoint==0.11.13 +pillow>=11.0.0 +sentencepiece>=0.2.0 +tqdm-loggable>=0.2 +typing-extensions>=4.12.2 +tyro>=0.9.5 +wandb>=0.19.1 +filelock>=3.16.1 +beartype==0.19.0 +treescope>=0.1.7 
def create_torch_dataloader(
    data_config: _config.DataConfig,
    action_horizon: int,
    batch_size: int,
    model_config: _model.BaseModelConfig,
    num_workers: int,
    max_frames: int | None = None,
) -> tuple[_data_loader.TorchDataLoader, int]:
    """Build a Torch dataloader over the norm-stats mixture dataset.

    Args:
        data_config: NOTE(review): annotated as a single DataConfig, but the body
            indexes `data_config[0]` — it apparently receives a sequence of
            configs; confirm against `config.data[0].create(...)` in the caller.
        action_horizon: number of future actions per sample.
        batch_size: per-worker batch size.
        model_config: model configuration forwarded to the dataset factory.
        num_workers: Torch dataloader worker count.
        max_frames: if set and smaller than the dataset, subsample (shuffled)
            down to roughly this many frames.

    Returns:
        The dataloader and the number of batches it will yield.
    """
    dataset = _mixture_dataset.create_mixture_dataset_calculate_norm_stats(data_config, action_horizon, model_config)
    # Apply repack + data transforms, and drop string-valued entries (stats are numeric only).
    dataset = _mixture_dataset.TransformedDataset(
        dataset,
        [
            *data_config[0].repack_transforms.inputs,
            *data_config[0].data_transforms.inputs,
            RemoveStrings(),
        ],
    )
    # Shuffle only when subsampling, so a capped run still sees a random slice.
    if max_frames is not None and max_frames < len(dataset):
        num_batches = max_frames // batch_size
        shuffle = True
    else:
        num_batches = len(dataset) // batch_size
        shuffle = False
    data_loader = _data_loader.TorchDataLoader(
        dataset,
        local_batch_size=batch_size,
        num_workers=num_workers,
        shuffle=shuffle,
        num_batches=num_batches,
    )
    return data_loader, num_batches
robot_name == "genie1": + config = TrainConfig( + name="genie1", + model=pi0_config.Pi0Config(), + data=[ + MultiLeRobotReala2dDataConfig( + repo_dir=dataset_path, + task_id=None, + use_gripper_aug=False, + stats_dir='', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + asset_id=robot_name, + robot_name=robot_name, + repack_transforms=transforms.Group( + inputs=[ + transforms.RepackTransform( + { + "state_dict": { + "joint": "observation.states.joint.position", + "gripper": "observation.states.effector.position", + }, + "action_dict": { + "joint": "actions.joint.position", + "gripper": "actions.effector.position", + }, + "prompt": "task" + } + ) + ] + ) + ), + ], + # pretrain model path + weight_loader=weight_loaders.CheckpointWeightLoader("checkpoints/jax/pi0_base/params"), + pytorch_weight_path="checkpoints/pytorch/pi0_base", + num_train_steps=30_000, + num_workers=4, + fsdp_devices=4, + batch_size=8, + ) + data_config = config.data[0].create(config.model) + print("done") + output_path = os.path.join(save_path, robot_name, task_name) + stats_json_path = os.path.join(output_path, "norm_stats.json") + if os.path.isfile(stats_json_path): + with open(stats_json_path, 'r', encoding='utf-8') as f: + json.load(f) + return True + + data_loader, num_batches = create_torch_dataloader( + data_config, config.model.action_horizon, config.batch_size, config.model, config.num_workers, max_frames=None + ) + + keys = ["state", "actions"] + stats = {key: normalize.RunningStats() for key in keys} + + step_id = 0 + for batch in tqdm.tqdm(data_loader, total=num_batches, desc="Computing stats"): + step_id += 1 + for key in keys: + stats[key].update(np.asarray(batch[key])) + + norm_stats = {key: stats.get_statistics() for key, stats in stats.items()} + + print(f"Writing stats to: {output_path}") + normalize.save(output_path, norm_stats) + +def check_lerobot_repo(repo_dir: str): + if os.path.isdir(os.path.join(repo_dir, "data")) and os.path.isdir(os.path.join(repo_dir, 
"meta")) and os.path.isdir(os.path.join(repo_dir, "videos")): + print(repo_dir, "true") + return True + else: + print(repo_dir, "false") + return False + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument("--task_path", type=str, default="data/InternData-A1/real/genie1/Put_the_pen_from_the_table_into_the_pen_holder/*") + parser.add_argument("--robot_name", type=str, default="genie1") + parser.add_argument("--save_path", type=str, default="stats/real") + + args, unknown = parser.parse_known_args() + dataset_path=args.task_path + save_path = args.save_path + parts = dataset_path.split("/") + robot_idx = next((i for i, p in enumerate(parts) if p == args.robot_name), None) + if robot_idx is None: + raise ValueError( + f"Cannot find robot name in path. Expected {args.robot_name}, " + f"but got path: {dataset_path}" + ) + + if robot_idx + 1 >= len(parts): + raise ValueError( + f"Path ends at robot name '{parts[robot_idx]}', cannot determine task_name: {local_path}" + ) + robot_name = parts[robot_idx] + task_name = parts[robot_idx + 1] + try: + main(dataset_path, robot_name, task_name, save_path) + except: + print(dataset_path) + diff --git a/policy/openpi-InternData-A1/scripts/compute_norm_stats_sim.py b/policy/openpi-InternData-A1/scripts/compute_norm_stats_sim.py new file mode 100644 index 0000000..79f1071 --- /dev/null +++ b/policy/openpi-InternData-A1/scripts/compute_norm_stats_sim.py @@ -0,0 +1,314 @@ +"""Compute normalization statistics for interndata-a1 sim tasks. + +This script is used to compute the normalization statistics for interndata-a1 sim tasks. It +will compute the mean and standard deviation of the data in the dataset and save it +to the config assets directory. 
+""" +import os +import glob +import numpy as np +import tqdm +import tyro +import json + +import openpi.models.model as _model +import openpi.shared.normalize as normalize +import openpi.training.config as _config +import openpi.training.mixture_dataset as _mixture_dataset +import openpi.training.data_loader as _data_loader +import openpi.transforms as transforms + +### training config ### +import openpi.training.weight_loaders as weight_loaders +import openpi.models.pi0_config as pi0_config +from openpi.training.config import MultiSimGenieDataConfig, MultiSimSplitAlohaDataConfig, MultiSimFrankaDataConfig, MultiDataConfig, DataConfig, TrainConfig + +from pdb import set_trace + +class RemoveStrings(transforms.DataTransformFn): + def __call__(self, x: dict) -> dict: + return {k: v for k, v in x.items() if not np.issubdtype(np.asarray(v).dtype, np.str_)} + + +def create_torch_dataloader( + data_config: _config.DataConfig, + action_horizon: int, + batch_size: int, + model_config: _model.BaseModelConfig, + num_workers: int, + max_frames: int | None = None, +) -> tuple[_data_loader.Dataset, int]: + dataset = _mixture_dataset.create_mixture_dataset_calculate_norm_stats(data_config, action_horizon, model_config) + dataset = _mixture_dataset.TransformedDataset( + dataset, + [ + *data_config[0].repack_transforms.inputs, + *data_config[0].data_transforms.inputs, + RemoveStrings(), + ], + ) + if max_frames is not None and max_frames < len(dataset): + num_batches = max_frames // batch_size + shuffle = True + else: + num_batches = len(dataset) // batch_size + shuffle = False + data_loader = _data_loader.TorchDataLoader( + dataset, + local_batch_size=batch_size, + num_workers=num_workers, + shuffle=shuffle, + num_batches=num_batches, + ) + return data_loader, num_batches + + +def main(dataset_path, task_category, robot_name, task_name, collect_name, save_path): + if robot_name == "lift2" or robot_name == "split_aloha": + config = TrainConfig( + name="lift2", + 
model=pi0_config.Pi0Config(), + data=[ + MultiSimSplitAlohaDataConfig( + repo_dir=dataset_path, + task_id=None, + use_gripper_aug=True, + gripper_aug_config={ + "gripper_action_keys": ["master_actions.left_gripper.openness", "master_actions.right_gripper.openness"], + "gripper_dim": -1, + "gripper_threshold_method": "std_multiplier", + "gripper_threshold_multiplier": 1.0, + "gripper_min_threshold": 0.001, + "gripper_max_threshold": 1.0, + }, + stats_dir='', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + asset_id=robot_name, + robot_name=robot_name, + repack_transforms=transforms.Group( + inputs=[ + transforms.RepackTransform( + { + "state_dict": { + "left_joint": "states.left_joint.position", + "right_joint": "states.right_joint.position", + "left_gripper": "states.left_gripper.position", + "right_gripper": "states.right_gripper.position" + }, + "action_dict": { + "left_joint": "actions.left_joint.position", + "right_joint": "actions.right_joint.position", + "left_gripper": "actions.left_gripper.position", + "right_gripper": "actions.right_gripper.position", + "left_gripper_openness": "master_actions.left_gripper.openness", + "right_gripper_openness": "master_actions.right_gripper.openness" + }, + "prompt": "task" + } + ) + ] + ) + ), + ], + weight_loader=weight_loaders.CheckpointWeightLoader("checkpoints/jax/pi0_base/params"), + pytorch_weight_path="checkpoints/pytorch/pi0_base", + num_train_steps=30_000, + num_workers=4, + fsdp_devices=4, + batch_size=8, + ) + elif robot_name == "genie1": + config = TrainConfig( + name="genie1", + model=pi0_config.Pi0Config(), + data=[ + MultiSimGenieDataConfig( + repo_dir=dataset_path, + task_id=None, + use_gripper_aug=True, + gripper_aug_config={ + "gripper_action_keys": ["master_actions.left_gripper.openness", "master_actions.right_gripper.openness"], + "gripper_dim": -1, + "gripper_threshold_method": "std_multiplier", + "gripper_threshold_multiplier": 1.0, + "gripper_min_threshold": 0.001, + 
"gripper_max_threshold": 1.0, + }, + stats_dir='', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + asset_id=robot_name, + robot_name=robot_name, + repack_transforms=transforms.Group( + inputs=[ + transforms.RepackTransform( + { + "state_dict": { + "left_joint": "states.left_joint.position", + "right_joint": "states.right_joint.position", + "left_gripper": "states.left_gripper.position", + "right_gripper": "states.right_gripper.position" + }, + "action_dict": { + "left_joint": "actions.left_joint.position", + "right_joint": "actions.right_joint.position", + "left_gripper": "actions.left_gripper.position", + "right_gripper": "actions.right_gripper.position", + "left_gripper_openness": "master_actions.left_gripper.openness", + "right_gripper_openness": "master_actions.right_gripper.openness" + }, + "prompt": "task" + } + ) + ] + ) + ), + ], + weight_loader=weight_loaders.CheckpointWeightLoader("checkpoints/jax/pi0_base/params"), + pytorch_weight_path="checkpoints/pytorch/pi0_base", + num_train_steps=30_000, + num_workers=4, + fsdp_devices=4, + batch_size=8, + ) + elif "franka" in robot_name: + config = TrainConfig( + name="franka", + model=pi0_config.Pi0Config(), + data=[ + MultiSimFrankaDataConfig( + repo_dir=dataset_path, + task_id=None, + use_gripper_aug=True, + gripper_aug_config={ + "gripper_action_keys": ["actions.gripper.openness"], + "gripper_dim": -1, + "gripper_threshold_method": "std_multiplier", + "gripper_threshold_multiplier": 1.0, + "gripper_min_threshold": 0.001, + "gripper_max_threshold": 1.0, + }, + stats_dir='', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + asset_id=robot_name, + robot_name=robot_name, + repack_transforms=transforms.Group( + inputs=[ + transforms.RepackTransform( + { + "state_dict": { + "joint_position": "states.joint.position", + "gripper_pose": "states.gripper.pose", + "gripper_position": "states.gripper.position", + }, + "action_dict": { + "gripper_pose": "actions.gripper.pose", + 
"gripper_position": "actions.gripper.position", + "gripper_openness": "actions.gripper.openness", + }, + "prompt": "task" + } + ) + ] + ) + ), + ], + weight_loader=weight_loaders.CheckpointWeightLoader("checkpoints/jax/pi0_base/params"), + pytorch_weight_path="checkpoints/pytorch/pi0_base", + num_train_steps=30_000, + num_workers=4, + fsdp_devices=4, + batch_size=8, + ) + + data_config = config.data[0].create(config.model) + print("done") + output_path = os.path.join(save_path, task_category, robot_name, task_name, collect_name) + stats_json_path = os.path.join(output_path, "norm_stats.json") + if os.path.isfile(stats_json_path): + with open(stats_json_path, 'r', encoding='utf-8') as f: + json.load(f) + return True + + data_loader, num_batches = create_torch_dataloader( + data_config, config.model.action_horizon, config.batch_size, config.model, config.num_workers, max_frames=None + ) + + keys = ["state", "actions"] + stats = {key: normalize.RunningStats() for key in keys} + + step_id = 0 + for batch in tqdm.tqdm(data_loader, total=num_batches, desc="Computing stats"): + step_id += 1 + for key in keys: + stats[key].update(np.asarray(batch[key])) + if step_id > 10000: + break + + norm_stats = {key: stats.get_statistics() for key, stats in stats.items()} + + print(f"Writing stats to: {output_path}") + normalize.save(output_path, norm_stats) + +def check_lerobot_repo(repo_dir: str): + if os.path.isdir(os.path.join(repo_dir, "data")) and os.path.isdir(os.path.join(repo_dir, "meta")) and os.path.isdir(os.path.join(repo_dir, "videos")): + print(repo_dir, "true") + return True + else: + print(repo_dir, "false") + return False + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument("--root_data_dir", type=str, default="data/InternData-A1/sim") + parser.add_argument("--task_category", type=str, default="pick_and_place_tasks") + parser.add_argument("--save_path", type=str, default="stats/sim") + 
parser.add_argument("--start_ratio", type=float, default=0.0) + parser.add_argument("--end_ratio", type=float, default=1) + args, unknown = parser.parse_known_args() + root_data_dir = os.path.join(args.root_data_dir, args.task_category) + + dataset_paths = glob.glob(os.path.join(root_data_dir, "*", "*")) + dataset_paths.sort() + valid_paths = [ + p for p in dataset_paths + if check_lerobot_repo(p) + ] + + start_idx = int(len(valid_paths) * args.start_ratio) + end_idx = int(len(valid_paths) * args.end_ratio) + 1 + valid_paths = valid_paths[start_idx:end_idx] + for dataset_path in tqdm.tqdm(valid_paths): + task_category = dataset_path.split('/')[-3] + robot_name = dataset_path.split('/')[-2] + task_name = dataset_path.split('/')[-1] + collect_name = "" + try: + main(dataset_path, task_category, robot_name, task_name, collect_name, args.save_path) + except: + print(dataset_path) + + dataset_paths_w_subtask = glob.glob(os.path.join(root_data_dir, "*", "*","*")) + dataset_paths_w_subtask.sort() + valid_paths_w_subtask = [ + p for p in dataset_paths_w_subtask + if check_lerobot_repo(p) + ] + start_idx = int(len(valid_paths_w_subtask) * args.start_ratio) + end_idx = int(len(valid_paths_w_subtask) * args.end_ratio) + 1 + valid_paths_w_subtask = valid_paths_w_subtask[start_idx:end_idx] + for dataset_path in tqdm.tqdm(valid_paths_w_subtask): + task_category = dataset_path.split('/')[-4] + robot_name = dataset_path.split('/')[-3] + task_name = dataset_path.split('/')[-2] + collect_name = dataset_path.split('/')[-1] + try: + main(dataset_path, task_category, robot_name, task_name, collect_name, args.save_path) + except: + print(dataset_path) diff --git a/policy/openpi-InternData-A1/scripts/compute_norm_stats_sim2real.py b/policy/openpi-InternData-A1/scripts/compute_norm_stats_sim2real.py new file mode 100644 index 0000000..82265d2 --- /dev/null +++ b/policy/openpi-InternData-A1/scripts/compute_norm_stats_sim2real.py @@ -0,0 +1,181 @@ +"""Compute normalization statistics for 
real-world tasks. + +This script is used to compute the normalization statistics for a given real-world task. It +will compute the mean and standard deviation of the data in the dataset and save it +to the config directory. +""" +import os +import glob +import numpy as np +import tqdm +import tyro +import json + +import openpi.models.model as _model +import openpi.shared.normalize as normalize +import openpi.training.config as _config +import openpi.training.mixture_dataset as _mixture_dataset +import openpi.training.data_loader as _data_loader +import openpi.transforms as transforms + +### training config ### +import openpi.training.weight_loaders as weight_loaders +import openpi.models.pi0_config as pi0_config +from openpi.training.config import MultiSim2RealSplitAlohaDataConfig, MultiDataConfig, DataConfig, TrainConfig + +from pdb import set_trace + +class RemoveStrings(transforms.DataTransformFn): + def __call__(self, x: dict) -> dict: + return {k: v for k, v in x.items() if not np.issubdtype(np.asarray(v).dtype, np.str_)} + + +def create_torch_dataloader( + data_config: _config.DataConfig, + action_horizon: int, + batch_size: int, + model_config: _model.BaseModelConfig, + num_workers: int, + max_frames: int | None = None, +) -> tuple[_data_loader.Dataset, int]: + dataset = _mixture_dataset.create_mixture_dataset_calculate_norm_stats(data_config, action_horizon, model_config) + dataset = _mixture_dataset.TransformedDataset( + dataset, + [ + *data_config[0].repack_transforms.inputs, + *data_config[0].data_transforms.inputs, + RemoveStrings(), + ], + ) + if max_frames is not None and max_frames < len(dataset): + num_batches = max_frames // batch_size + shuffle = True + else: + num_batches = len(dataset) // batch_size + shuffle = False + data_loader = _data_loader.TorchDataLoader( + dataset, + local_batch_size=batch_size, + num_workers=num_workers, + shuffle=shuffle, + num_batches=num_batches, + ) + return data_loader, num_batches + + +def main(dataset_path, 
robot_name, task_name, save_path): + if robot_name == "lift2": + config = TrainConfig( + name="lift2", + model=pi0_config.Pi0Config(), + data=[ + MultiSim2RealSplitAlohaDataConfig( + repo_dir=dataset_path, + task_id=None, + use_gripper_aug=False, + stats_dir='', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + asset_id=robot_name, + robot_name=robot_name, + repack_transforms=transforms.Group( + inputs=[ + transforms.RepackTransform( + { + "state_dict": { + "left_joint": "states.left_joint.position", + "right_joint": "states.right_joint.position", + "left_gripper": "states.left_gripper.position", + "right_gripper": "states.right_gripper.position" + }, + "action_dict": { + "left_joint": "actions.left_joint.position", + "right_joint": "actions.right_joint.position", + "left_gripper": "actions.left_gripper.position", + "right_gripper": "actions.right_gripper.position", + "left_gripper_openness": "master_actions.left_gripper.openness", + "right_gripper_openness": "master_actions.right_gripper.openness" + }, + "prompt": "task" + } + ) + ] + ) + ), + ], + # pretrain model path + weight_loader=weight_loaders.CheckpointWeightLoader("checkpoints/jax/pi0_base/params"), + pytorch_weight_path="checkpoints/pytorch/pi0_base", + num_train_steps=30_000, + num_workers=4, + fsdp_devices=4, + batch_size=8, + ) + + data_config = config.data[0].create(config.model) + print("done") + output_path = os.path.join(save_path, robot_name, task_name) + stats_json_path = os.path.join(output_path, "norm_stats.json") + if os.path.isfile(stats_json_path): + with open(stats_json_path, 'r', encoding='utf-8') as f: + json.load(f) + return True + + data_loader, num_batches = create_torch_dataloader( + data_config, config.model.action_horizon, config.batch_size, config.model, config.num_workers, max_frames=None + ) + + keys = ["state", "actions"] + stats = {key: normalize.RunningStats() for key in keys} + + step_id = 0 + for batch in tqdm.tqdm(data_loader, total=num_batches, 
desc="Computing stats"): + step_id += 1 + for key in keys: + stats[key].update(np.asarray(batch[key])) + if step_id > 10000: + break + + norm_stats = {key: stats.get_statistics() for key, stats in stats.items()} + + print(f"Writing stats to: {output_path}") + normalize.save(output_path, norm_stats) + +def check_lerobot_repo(repo_dir: str): + if os.path.isdir(os.path.join(repo_dir, "data")) and os.path.isdir(os.path.join(repo_dir, "meta")) and os.path.isdir(os.path.join(repo_dir, "videos")): + print(repo_dir, "true") + return True + else: + print(repo_dir, "false") + return False + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument("--task_path", type=str, default="data/InternData-A1/sim/long_horizon_tasks/lift2/sort_the_rubbish/*") + parser.add_argument("--robot_name", type=str, default="lift2") + parser.add_argument("--save_path", type=str, default="stats/sim2real") + + args, unknown = parser.parse_known_args() + dataset_path=args.task_path + save_path = args.save_path + parts = dataset_path.split("/") + robot_idx = next((i for i, p in enumerate(parts) if p == args.robot_name), None) + if robot_idx is None: + raise ValueError( + f"Cannot find robot name in path. 
Expected {args.robot_name}, " + f"but got path: {dataset_path}" + ) + + if robot_idx + 1 >= len(parts): + raise ValueError( + f"Path ends at robot name '{parts[robot_idx]}', cannot determine task_name: {local_path}" + ) + robot_name = parts[robot_idx] + task_name = parts[robot_idx + 1] + try: + main(dataset_path, robot_name, task_name, save_path) + except: + print(dataset_path) + diff --git a/policy/openpi-InternData-A1/scripts/docker/compose.yml b/policy/openpi-InternData-A1/scripts/docker/compose.yml new file mode 100644 index 0000000..564d276 --- /dev/null +++ b/policy/openpi-InternData-A1/scripts/docker/compose.yml @@ -0,0 +1,29 @@ +# Run with: +# docker compose -f scripts/docker/compose.yml up --build +services: + openpi_server: + image: openpi_server + build: + context: ../.. + dockerfile: scripts/docker/serve_policy.Dockerfile + init: true + tty: true + network_mode: host + # Populate configured openpi data home to /openpi_assets inside the container. + # Populate aws credential inside the container. + volumes: + - $PWD:/app + - ${OPENPI_DATA_HOME:-~/.cache/openpi}:/openpi_assets + environment: + - SERVER_ARGS + - OPENPI_DATA_HOME=/openpi_assets + - IS_DOCKER=true + + # Comment out this block if not running on a machine with GPUs. 
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
diff --git a/policy/openpi-InternData-A1/scripts/docker/install_docker_ubuntu22.sh b/policy/openpi-InternData-A1/scripts/docker/install_docker_ubuntu22.sh
new file mode 100755
index 0000000..38873b3
--- /dev/null
+++ b/policy/openpi-InternData-A1/scripts/docker/install_docker_ubuntu22.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# Add Docker's official GPG key:
+sudo apt-get update
+sudo apt-get install -y ca-certificates curl
+sudo install -m 0755 -d /etc/apt/keyrings
+sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+sudo chmod a+r /etc/apt/keyrings/docker.asc
+
+# Add the repository to Apt sources:
+echo \
+  "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+  $(. /etc/os-release && echo "$VERSION_CODENAME") stable" |
+  sudo tee /etc/apt/sources.list.d/docker.list >/dev/null
+sudo apt-get update
+
+sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+
+# Add current user to the 'docker' group, which allows them to use docker commands (docker build, docker run, etc).
+# See https://docs.docker.com/engine/install/linux-postinstall/
+username=$(whoami)
+sudo usermod -aG docker $username
+
+# Configure docker to start automatically on system boot.
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
+
+# https://forums.docker.com/t/docker-credential-desktop-exe-executable-file-not-found-in-path-using-wsl2/100225/5
+# Use '-f' so sed only runs when the config file actually exists; a bare
+# '[ path ]' test is always true and made sed error on fresh installs.
+if [ -f ~/.docker/config.json ]; then
+    sed -i 's/credsStore/credStore/g' ~/.docker/config.json
+fi
+
+echo ""
+echo "********************************************************************"
+echo "**** Restart to allow Docker permission changes to take effect. ****"
+echo "********************************************************************"
+echo ""
diff --git a/policy/openpi-InternData-A1/scripts/docker/install_nvidia_container_toolkit.sh b/policy/openpi-InternData-A1/scripts/docker/install_nvidia_container_toolkit.sh
new file mode 100755
index 0000000..a4c67f1
--- /dev/null
+++ b/policy/openpi-InternData-A1/scripts/docker/install_nvidia_container_toolkit.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# Installs the NVIDIA Container Toolkit, which allows Docker containers to access NVIDIA GPUs.
+# NVIDIA's official documentation: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
+
+curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg &&
+  curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list |
+  sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' |
+  sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
+
+# NVIDIA's documentation omits 'sudo' in the following command, but it is required.
+sudo sed -i -e '/experimental/ s/^#//g' /etc/apt/sources.list.d/nvidia-container-toolkit.list
+sudo apt-get update
+sudo apt-get install -y nvidia-container-toolkit
+
+sudo nvidia-ctk runtime configure --runtime=docker
+sudo systemctl restart docker
diff --git a/policy/openpi-InternData-A1/scripts/docker/serve_policy.Dockerfile b/policy/openpi-InternData-A1/scripts/docker/serve_policy.Dockerfile
new file mode 100644
index 0000000..bd88a7e
--- /dev/null
+++ b/policy/openpi-InternData-A1/scripts/docker/serve_policy.Dockerfile
@@ -0,0 +1,38 @@
+# Dockerfile for serving a PI policy.
+# Based on UV's instructions: https://docs.astral.sh/uv/guides/integration/docker/#developing-in-a-container
+
+# Build the container:
+# docker build . -t openpi_server -f scripts/docker/serve_policy.Dockerfile
+
+# Run the container:
+# docker run --rm -it --network=host -v .:/app --gpus=all openpi_server /bin/bash
+
+FROM nvidia/cuda:12.2.2-cudnn8-runtime-ubuntu22.04@sha256:2d913b09e6be8387e1a10976933642c73c840c0b735f0bf3c28d97fc9bc422e0
+COPY --from=ghcr.io/astral-sh/uv:0.5.1 /uv /uvx /bin/
+
+WORKDIR /app
+
+# Needed because LeRobot uses git-lfs.
+RUN apt-get update && apt-get install -y git git-lfs linux-headers-generic build-essential clang
+
+# Copy from the cache instead of linking since it's a mounted volume
+ENV UV_LINK_MODE=copy
+
+# Write the virtual environment outside of the project directory so it doesn't
+# leak out of the container when we mount the application code.
+ENV UV_PROJECT_ENVIRONMENT=/.venv
+
+# Install the project's dependencies using the lockfile and settings
+RUN uv venv --python 3.11.9 $UV_PROJECT_ENVIRONMENT
+RUN --mount=type=cache,target=/root/.cache/uv \
+    --mount=type=bind,source=uv.lock,target=uv.lock \
+    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
+    --mount=type=bind,source=packages/openpi-client/pyproject.toml,target=packages/openpi-client/pyproject.toml \
+    --mount=type=bind,source=packages/openpi-client/src,target=packages/openpi-client/src \
+    GIT_LFS_SKIP_SMUDGE=1 uv sync --frozen --no-install-project --no-dev
+
+# Copy transformers_replace files while preserving directory structure
+COPY src/openpi/models_pytorch/transformers_replace/ /tmp/transformers_replace/
+RUN /.venv/bin/python -c "import transformers; print(transformers.__file__)" | xargs dirname | xargs -I{} cp -r /tmp/transformers_replace/* {} && rm -rf /tmp/transformers_replace
+
+CMD /bin/bash -c "uv run scripts/serve_policy.py $SERVER_ARGS"
diff --git a/policy/openpi-InternData-A1/scripts/download_paligemma.py b/policy/openpi-InternData-A1/scripts/download_paligemma.py
new file mode 100644
index 0000000..fa2a9c1
--- /dev/null
+++ b/policy/openpi-InternData-A1/scripts/download_paligemma.py
@@ -0,0 +1,27 @@ +import os +from pathlib import Path + +def download_from_gcs(gcs_uri: str, local_path: str): + local_path = Path(local_path) + local_path.parent.mkdir(parents=True, exist_ok=True) + + if os.system("which gsutil > /dev/null 2>&1") == 0: + cmd = f"gsutil cp {gcs_uri} {local_path}" + else: + gcs_http = gcs_uri.replace("gs://", "https://storage.googleapis.com/") + cmd = f"wget -O {local_path} {gcs_http}" + + print(f"⬇️ Executing: {cmd}") + ret = os.system(cmd) + if ret == 0: + print("✅ Download complete:", local_path) + else: + raise RuntimeError(f"Download failed: {gcs_uri}") + + return local_path + + +if __name__ == "__main__": + gcs_uri = "gs://vertex-model-garden-paligemma-us/paligemma/pt_224.npz" + save_path = "checkpoints/jax/paligemma/pt_224.npz" + download_from_gcs(gcs_uri, save_path) \ No newline at end of file diff --git a/policy/openpi-InternData-A1/scripts/serve_policy.py b/policy/openpi-InternData-A1/scripts/serve_policy.py new file mode 100644 index 0000000..30f121a --- /dev/null +++ b/policy/openpi-InternData-A1/scripts/serve_policy.py @@ -0,0 +1,122 @@ +import dataclasses +import enum +import logging +import socket + +import tyro + +from openpi.policies import policy as _policy +from openpi.policies import policy_config as _policy_config +from openpi.serving import websocket_policy_server +from openpi.training import config as _config + + +class EnvMode(enum.Enum): + """Supported environments.""" + + ALOHA = "aloha" + ALOHA_SIM = "aloha_sim" + DROID = "droid" + LIBERO = "libero" + + +@dataclasses.dataclass +class Checkpoint: + """Load a policy from a trained checkpoint.""" + + # Training config name (e.g., "pi0_aloha_sim"). + config: str + # Checkpoint directory (e.g., "checkpoints/pi0_aloha_sim/exp/10000"). 
+ dir: str + + +@dataclasses.dataclass +class Default: + """Use the default policy for the given environment.""" + + +@dataclasses.dataclass +class Args: + """Arguments for the serve_policy script.""" + + # Environment to serve the policy for. This is only used when serving default policies. + env: EnvMode = EnvMode.ALOHA_SIM + + # If provided, will be used in case the "prompt" key is not present in the data, or if the model doesn't have a default + # prompt. + default_prompt: str | None = None + + # Port to serve the policy on. + port: int = 8000 + # Record the policy's behavior for debugging. + record: bool = False + + # Specifies how to load the policy. If not provided, the default policy for the environment will be used. + policy: Checkpoint | Default = dataclasses.field(default_factory=Default) + + +# Default checkpoints that should be used for each environment. +DEFAULT_CHECKPOINT: dict[EnvMode, Checkpoint] = { + EnvMode.ALOHA: Checkpoint( + config="pi05_aloha", + dir="gs://openpi-assets/checkpoints/pi05_base", + ), + EnvMode.ALOHA_SIM: Checkpoint( + config="pi0_aloha_sim", + dir="gs://openpi-assets/checkpoints/pi0_aloha_sim", + ), + EnvMode.DROID: Checkpoint( + config="pi05_droid", + dir="gs://openpi-assets/checkpoints/pi05_droid", + ), + EnvMode.LIBERO: Checkpoint( + config="pi05_libero", + dir="gs://openpi-assets/checkpoints/pi05_libero", + ), +} + + +def create_default_policy(env: EnvMode, *, default_prompt: str | None = None) -> _policy.Policy: + """Create a default policy for the given environment.""" + if checkpoint := DEFAULT_CHECKPOINT.get(env): + return _policy_config.create_trained_policy( + _config.get_config(checkpoint.config), checkpoint.dir, default_prompt=default_prompt + ) + raise ValueError(f"Unsupported environment mode: {env}") + + +def create_policy(args: Args) -> _policy.Policy: + """Create a policy from the given arguments.""" + match args.policy: + case Checkpoint(): + return _policy_config.create_trained_policy( + 
_config.get_config(args.policy.config), args.policy.dir, default_prompt=args.default_prompt + ) + case Default(): + return create_default_policy(args.env, default_prompt=args.default_prompt) + + +def main(args: Args) -> None: + policy = create_policy(args) + policy_metadata = policy.metadata + + # Record the policy's behavior. + if args.record: + policy = _policy.PolicyRecorder(policy, "policy_records") + + hostname = socket.gethostname() + local_ip = socket.gethostbyname(hostname) + logging.info("Creating server (host: %s, ip: %s)", hostname, local_ip) + + server = websocket_policy_server.WebsocketPolicyServer( + policy=policy, + host="0.0.0.0", + port=args.port, + metadata=policy_metadata, + ) + server.serve_forever() + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO, force=True) + main(tyro.cli(Args)) diff --git a/policy/openpi-InternData-A1/scripts/train.py b/policy/openpi-InternData-A1/scripts/train.py new file mode 100644 index 0000000..5bc7576 --- /dev/null +++ b/policy/openpi-InternData-A1/scripts/train.py @@ -0,0 +1,290 @@ +import dataclasses +import functools +import logging +import platform +from typing import Any + +import etils.epath as epath +import flax.nnx as nnx +from flax.training import common_utils +import flax.traverse_util as traverse_util +import jax +import jax.experimental +import jax.numpy as jnp +import numpy as np +import optax +import tqdm_loggable.auto as tqdm +import wandb + +import openpi.models.model as _model +import openpi.shared.array_typing as at +import openpi.shared.nnx_utils as nnx_utils +import openpi.training.checkpoints as _checkpoints +import openpi.training.config as _config +import openpi.training.data_loader as _data_loader +import openpi.training.optimizer as _optimizer +import openpi.training.sharding as sharding +import openpi.training.utils as training_utils +import openpi.training.weight_loaders as _weight_loaders +from memory_profiler import profile +import psutil +from 
openpi.shared.online_compute_norm_stats import compute_norm_stats + +def init_logging(): + """Custom logging format for better readability.""" + level_mapping = {"DEBUG": "D", "INFO": "I", "WARNING": "W", "ERROR": "E", "CRITICAL": "C"} + + class CustomFormatter(logging.Formatter): + def format(self, record): + record.levelname = level_mapping.get(record.levelname, record.levelname) + return super().format(record) + + formatter = CustomFormatter( + fmt="%(asctime)s.%(msecs)03d [%(levelname)s] %(message)-80s (%(process)d:%(filename)s:%(lineno)s)", + datefmt="%H:%M:%S", + ) + + logger = logging.getLogger() + logger.setLevel(logging.INFO) + logger.handlers[0].setFormatter(formatter) + + +def init_wandb(config: _config.TrainConfig, *, resuming: bool, log_code: bool = False, enabled: bool = True): + if not enabled: + wandb.init(mode="disabled") + return + + ckpt_dir = config.checkpoint_dir + if not ckpt_dir.exists(): + raise FileNotFoundError(f"Checkpoint directory {ckpt_dir} does not exist.") + if resuming: + run_id = (ckpt_dir / "wandb_id.txt").read_text().strip() + wandb.init(id=run_id, resume="must", project=config.project_name) + else: + wandb.init( + name=config.exp_name, + config=dataclasses.asdict(config), + project=config.project_name, + ) + (ckpt_dir / "wandb_id.txt").write_text(wandb.run.id) + + if log_code: + wandb.run.log_code(epath.Path(__file__).parent.parent) + + +def _load_weights_and_validate(loader: _weight_loaders.WeightLoader, params_shape: at.Params) -> at.Params: + """Loads and validates the weights. Returns a loaded subset of the weights.""" + loaded_params = loader.load(params_shape) + at.check_pytree_equality(expected=params_shape, got=loaded_params, check_shapes=True, check_dtypes=True) + + # Remove jax.ShapeDtypeStruct from the loaded params. This makes sure that only the loaded params are returned. 
+ return traverse_util.unflatten_dict( + {k: v for k, v in traverse_util.flatten_dict(loaded_params).items() if not isinstance(v, jax.ShapeDtypeStruct)} + ) + + +@at.typecheck +def init_train_state( + config: _config.TrainConfig, init_rng: at.KeyArrayLike, mesh: jax.sharding.Mesh, *, resume: bool +) -> tuple[training_utils.TrainState, Any]: + tx = _optimizer.create_optimizer(config.optimizer, config.lr_schedule, weight_decay_mask=None) + + def init(rng: at.KeyArrayLike, partial_params: at.Params | None = None) -> training_utils.TrainState: + rng, model_rng = jax.random.split(rng) + # initialize the model (and its parameters). + model = config.model.create(model_rng) + + # Merge the partial params into the model. + if partial_params is not None: + graphdef, state = nnx.split(model) + # This will produce an error if the partial params are not a subset of the state. + state.replace_by_pure_dict(partial_params) + model = nnx.merge(graphdef, state) + + params = nnx.state(model) + # Convert frozen params to bfloat16. + params = nnx_utils.state_map(params, config.freeze_filter, lambda p: p.replace(p.value.astype(jnp.bfloat16))) + + return training_utils.TrainState( + step=0, + params=params, + model_def=nnx.graphdef(model), + tx=tx, + opt_state=tx.init(params.filter(config.trainable_filter)), + ema_decay=config.ema_decay, + ema_params=None if config.ema_decay is None else params, + ) + + train_state_shape = jax.eval_shape(init, init_rng) + state_sharding = sharding.fsdp_sharding(train_state_shape, mesh, log=True) + + if resume: + return train_state_shape, state_sharding + + partial_params = _load_weights_and_validate(config.weight_loader, train_state_shape.params.to_pure_dict()) + replicated_sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec()) + + # Initialize the train state and mix in the partial params. + train_state = jax.jit( + init, + donate_argnums=(1,), # donate the partial params buffer. 
+ in_shardings=replicated_sharding, + out_shardings=state_sharding, + )(init_rng, partial_params) + + return train_state, state_sharding + + +@at.typecheck +def train_step( + config: _config.TrainConfig, + rng: at.KeyArrayLike, + state: training_utils.TrainState, + batch: tuple[_model.Observation, _model.Actions], +) -> tuple[training_utils.TrainState, dict[str, at.Array]]: + model = nnx.merge(state.model_def, state.params) + model.train() + + @at.typecheck + def loss_fn( + model: _model.BaseModel, rng: at.KeyArrayLike, observation: _model.Observation, actions: _model.Actions + ): + chunked_loss = model.compute_loss(rng, observation, actions, train=True) + return jnp.mean(chunked_loss) + + train_rng = jax.random.fold_in(rng, state.step) + observation, actions = batch + + # Filter out frozen params. + diff_state = nnx.DiffState(0, config.trainable_filter) + loss, grads = nnx.value_and_grad(loss_fn, argnums=diff_state)(model, train_rng, observation, actions) + + params = state.params.filter(config.trainable_filter) + updates, new_opt_state = state.tx.update(grads, state.opt_state, params) + new_params = optax.apply_updates(params, updates) + + # Update the model in place and return the new full state. + nnx.update(model, new_params) + new_params = nnx.state(model) + + new_state = dataclasses.replace(state, step=state.step + 1, params=new_params, opt_state=new_opt_state) + if state.ema_decay is not None: + new_state = dataclasses.replace( + new_state, + ema_params=jax.tree.map( + lambda old, new: state.ema_decay * old + (1 - state.ema_decay) * new, state.ema_params, new_params + ), + ) + + # Filter out params that aren't kernels. 
+ kernel_params = nnx.state( + model, + nnx.All( + nnx.Param, + nnx.Not(nnx_utils.PathRegex(".*/(bias|scale|pos_embedding|input_embedding)")), + lambda _, x: x.value.ndim > 1, + ), + ) + info = { + "loss": loss, + "grad_norm": optax.global_norm(grads), + "param_norm": optax.global_norm(kernel_params), + } + return new_state, info + + +def main(config: _config.TrainConfig): + init_logging() + logging.info(f"Running on: {platform.node()}") + + if config.batch_size % jax.device_count() != 0: + raise ValueError( + f"Batch size {config.batch_size} must be divisible by the number of devices {jax.device_count()}." + ) + + jax.config.update("jax_compilation_cache_dir", str(epath.Path("~/.cache/jax").expanduser())) + + rng = jax.random.key(config.seed) + train_rng, init_rng = jax.random.split(rng) + + mesh = sharding.make_mesh(config.fsdp_devices) + data_sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec(sharding.DATA_AXIS)) + replicated_sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec()) + + checkpoint_manager, resuming = _checkpoints.initialize_checkpoint_dir( + config.checkpoint_dir, + keep_period=config.keep_period, + overwrite=config.overwrite, + resume=config.resume, + ) + init_wandb(config, resuming=resuming, enabled=config.wandb_enabled) + + if config.online_compute_norm_stats: + global_norm_stats = compute_norm_stats(config.name) + else: + global_norm_stats = None + + data_loader = _data_loader.create_data_loader_multi( + config, + sharding=data_sharding, + shuffle=True, + global_norm_stats=global_norm_stats, + ) + # @profile + data_iter = iter(data_loader) + batch = next(data_iter) + logging.info(f"Initialized data loader:\n{training_utils.array_tree_to_info(batch)}") + print(psutil.Process().memory_info().rss/1024**2) + # set_trace() + # Log images from first batch to sanity check. 
+ images_to_log = [ + wandb.Image(np.concatenate([np.array(img[i]) for img in batch[0].images.values()], axis=1)) + for i in range(min(5, len(next(iter(batch[0].images.values()))))) + ] + wandb.log({"camera_views": images_to_log}, step=0) + + train_state, train_state_sharding = init_train_state(config, init_rng, mesh, resume=resuming) + jax.block_until_ready(train_state) + logging.info(f"Initialized train state:\n{training_utils.array_tree_to_info(train_state.params)}") + + if resuming: + train_state = _checkpoints.restore_state(checkpoint_manager, train_state, data_loader) + + ptrain_step = jax.jit( + functools.partial(train_step, config), + in_shardings=(replicated_sharding, train_state_sharding, data_sharding), + out_shardings=(train_state_sharding, replicated_sharding), + donate_argnums=(1,), + ) + + start_step = int(train_state.step) + pbar = tqdm.tqdm( + range(start_step, config.num_train_steps), + initial=start_step, + total=config.num_train_steps, + dynamic_ncols=True, + ) + + infos = [] + for step in pbar: + with sharding.set_mesh(mesh): + train_state, info = ptrain_step(train_rng, train_state, batch) + infos.append(info) + if step % config.log_interval == 0: + stacked_infos = common_utils.stack_forest(infos) + reduced_info = jax.device_get(jax.tree.map(jnp.mean, stacked_infos)) + info_str = ", ".join(f"{k}={v:.4f}" for k, v in reduced_info.items()) + pbar.write(f"Step {step}: {info_str}") + wandb.log(reduced_info, step=step) + infos = [] + batch = next(data_iter) + + if (step % config.save_interval == 0 and step > start_step) or step == config.num_train_steps - 1: + _checkpoints.save_state(checkpoint_manager, train_state, data_loader, step) + + logging.info("Waiting for checkpoint manager to finish") + checkpoint_manager.wait_until_finished() + + +if __name__ == "__main__": + main(_config.cli()) diff --git a/policy/openpi-InternData-A1/scripts/train_jax_multinode.py b/policy/openpi-InternData-A1/scripts/train_jax_multinode.py new file mode 100755 index 
0000000..20c7997 --- /dev/null +++ b/policy/openpi-InternData-A1/scripts/train_jax_multinode.py @@ -0,0 +1,341 @@ +""" +Multi-host training entrypoint (JAX). + +How to run multi-host (example: 2 nodes): + # node0 + export JAX_COORDINATOR_ADDRESS=node0:12345 + export JAX_PROCESS_COUNT=2 + export JAX_PROCESS_INDEX=0 + uv run python scripts/train.py --exp_name + + # node1 + export JAX_COORDINATOR_ADDRESS=node0:12345 + export JAX_PROCESS_COUNT=2 + export JAX_PROCESS_INDEX=1 + uv run python scripts/train.py --exp_name + +Notes: + - Initialize distributed BEFORE any device query. + - Only process_index==0 performs side-effects (wandb, checkpoints, progress bar). + - Total devices across hosts must be divisible by config.fsdp_devices. +""" + +import dataclasses +import functools +import logging +import platform +import os +from typing import Any + +import etils.epath as epath +import flax.nnx as nnx +from flax.training import common_utils +import flax.traverse_util as traverse_util +import jax +import jax.experimental +import jax.numpy as jnp +import numpy as np +import optax +import tqdm_loggable.auto as tqdm +import wandb + +import openpi.models.model as _model +import openpi.shared.array_typing as at +import openpi.shared.nnx_utils as nnx_utils +import openpi.training.checkpoints as _checkpoints +import openpi.training.config as _config +import openpi.training.data_loader as _data_loader +import openpi.training.optimizer as _optimizer +import openpi.training.sharding as sharding +import openpi.training.utils as training_utils +import openpi.training.weight_loaders as _weight_loaders +from pdb import set_trace + + +def maybe_initialize_distributed() -> bool: + coordinator = os.environ.get("JAX_COORDINATOR_ADDRESS") + process_count = int(os.environ.get("JAX_PROCESS_COUNT", "1")) + process_index = int(os.environ.get("JAX_PROCESS_INDEX", "0")) + if process_count > 1 and coordinator: + jax.distributed.initialize( + coordinator_address=coordinator, + 
num_processes=process_count, + process_id=process_index, + ) + return True + return False + + +def init_logging(): + """Custom logging format for better readability.""" + level_mapping = {"DEBUG": "D", "INFO": "I", "WARNING": "W", "ERROR": "E", "CRITICAL": "C"} + + class CustomFormatter(logging.Formatter): + def format(self, record): + record.levelname = level_mapping.get(record.levelname, record.levelname) + return super().format(record) + + formatter = CustomFormatter( + fmt="%(asctime)s.%(msecs)03d [%(levelname)s] %(message)-80s (%(process)d:%(filename)s:%(lineno)s)", + datefmt="%H:%M:%S", + ) + + logger = logging.getLogger() + logger.setLevel(logging.INFO) + if not logger.handlers: + ch = logging.StreamHandler() + ch.setFormatter(formatter) + logger.addHandler(ch) + else: + logger.handlers[0].setFormatter(formatter) + + +def init_wandb(config: _config.TrainConfig, *, resuming: bool, log_code: bool = False, enabled: bool = True): + if not enabled: + wandb.init(mode="disabled") + return + + ckpt_dir = config.checkpoint_dir + if not ckpt_dir.exists(): + raise FileNotFoundError(f"Checkpoint directory {ckpt_dir} does not exist.") + if resuming: + run_id = (ckpt_dir / "wandb_id.txt").read_text().strip() + wandb.init(id=run_id, resume="must", project=config.project_name) + else: + wandb.init( + name=config.exp_name, + config=dataclasses.asdict(config), + project=config.project_name, + ) + (ckpt_dir / "wandb_id.txt").write_text(wandb.run.id) + + if log_code: + wandb.run.log_code(epath.Path(__file__).parent.parent) + + +def _load_weights_and_validate(loader: _weight_loaders.WeightLoader, params_shape: at.Params) -> at.Params: + """Loads and validates the weights. Returns a loaded subset of the weights.""" + loaded_params = loader.load(params_shape) + at.check_pytree_equality(expected=params_shape, got=loaded_params, check_shapes=True, check_dtypes=True) + + # Remove jax.ShapeDtypeStruct from the loaded params. This makes sure that only the loaded params are returned. 
+ return traverse_util.unflatten_dict( + {k: v for k, v in traverse_util.flatten_dict(loaded_params).items() if not isinstance(v, jax.ShapeDtypeStruct)} + ) + + +@at.typecheck +def init_train_state( + config: _config.TrainConfig, init_rng: at.KeyArrayLike, mesh: jax.sharding.Mesh, *, resume: bool +) -> tuple[training_utils.TrainState, Any]: + tx = _optimizer.create_optimizer(config.optimizer, config.lr_schedule, weight_decay_mask=None) + + def init(rng: at.KeyArrayLike, partial_params: at.Params | None = None) -> training_utils.TrainState: + rng, model_rng = jax.random.split(rng) + # initialize the model (and its parameters). + model = config.model.create(model_rng) + + # Merge the partial params into the model. + if partial_params is not None: + graphdef, state = nnx.split(model) + # This will produce an error if the partial params are not a subset of the state. + state.replace_by_pure_dict(partial_params) + model = nnx.merge(graphdef, state) + + params = nnx.state(model) + # Convert frozen params to bfloat16. + params = nnx_utils.state_map(params, config.freeze_filter, lambda p: p.replace(p.value.astype(jnp.bfloat16))) + + return training_utils.TrainState( + step=0, + params=params, + model_def=nnx.graphdef(model), + tx=tx, + opt_state=tx.init(params.filter(config.trainable_filter)), + ema_decay=config.ema_decay, + ema_params=None if config.ema_decay is None else params, + ) + + train_state_shape = jax.eval_shape(init, init_rng) + state_sharding = sharding.fsdp_sharding(train_state_shape, mesh, log=True) + + if resume: + return train_state_shape, state_sharding + + partial_params = _load_weights_and_validate(config.weight_loader, train_state_shape.params.to_pure_dict()) + replicated_sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec()) + + # Initialize the train state and mix in the partial params. + train_state = jax.jit( + init, + donate_argnums=(1,), # donate the partial params buffer. 
+ in_shardings=replicated_sharding, + out_shardings=state_sharding, + )(init_rng, partial_params) + + return train_state, state_sharding + + +@at.typecheck +def train_step( + config: _config.TrainConfig, + rng: at.KeyArrayLike, + state: training_utils.TrainState, + batch: tuple[_model.Observation, _model.Actions], +) -> tuple[training_utils.TrainState, dict[str, at.Array]]: + model = nnx.merge(state.model_def, state.params) + model.train() + + @at.typecheck + def loss_fn( + model: _model.BaseModel, rng: at.KeyArrayLike, observation: _model.Observation, actions: _model.Actions + ): + chunked_loss = model.compute_loss(rng, observation, actions, train=True) + return jnp.mean(chunked_loss) + # set_trace() + train_rng = jax.random.fold_in(rng, state.step) + observation, actions = batch + + # Filter out frozen params. + diff_state = nnx.DiffState(0, config.trainable_filter) + loss, grads = nnx.value_and_grad(loss_fn, argnums=diff_state)(model, train_rng, observation, actions) + + params = state.params.filter(config.trainable_filter) + updates, new_opt_state = state.tx.update(grads, state.opt_state, params) + new_params = optax.apply_updates(params, updates) + + # Update the model in place and return the new full state. + nnx.update(model, new_params) + new_params = nnx.state(model) + + new_state = dataclasses.replace(state, step=state.step + 1, params=new_params, opt_state=new_opt_state) + if state.ema_decay is not None: + new_state = dataclasses.replace( + new_state, + ema_params=jax.tree.map( + lambda old, new: state.ema_decay * old + (1 - state.ema_decay) * new, state.ema_params, new_params + ), + ) + + # Filter out params that aren't kernels. 
+ kernel_params = nnx.state( + model, + nnx.All( + nnx.Param, + nnx.Not(nnx_utils.PathRegex(".*/(bias|scale|pos_embedding|input_embedding)")), + lambda _, x: x.value.ndim > 1, + ), + ) + info = { + "loss": loss, + "grad_norm": optax.global_norm(grads), + "param_norm": optax.global_norm(kernel_params), + } + return new_state, info + + +def main(config: _config.TrainConfig): + init_logging() + logging.info(f"Running on: {platform.node()}") + + # Initialize multi-host distributed if environment variables are set + distributed_initialized = maybe_initialize_distributed() + is_main = jax.process_index() == 0 + + if config.batch_size % jax.device_count() != 0: + raise ValueError( + f"Batch size {config.batch_size} must be divisible by the number of devices {jax.device_count()}." + ) + + jax.config.update("jax_compilation_cache_dir", str(epath.Path("~/.cache/jax").expanduser())) + + rng = jax.random.key(config.seed) + train_rng, init_rng = jax.random.split(rng) + + mesh = sharding.make_mesh(config.fsdp_devices) + data_sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec(sharding.DATA_AXIS)) + replicated_sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec()) + + checkpoint_manager, resuming = _checkpoints.initialize_checkpoint_dir( + config.checkpoint_dir, + keep_period=config.keep_period, + overwrite=config.overwrite, + resume=config.resume, + ) + init_wandb(config, resuming=resuming, enabled=(config.wandb_enabled and is_main)) + + data_loader = _data_loader.create_data_loader_multi( + config, + sharding=data_sharding, + shuffle=True, + ) + data_iter = iter(data_loader) + batch = next(data_iter) + logging.info(f"Initialized data loader:\n{training_utils.array_tree_to_info(batch)}") + + # Note: Wandb image logging is disabled in multi-node setup to avoid potential hanging issues + # caused by concurrent access to sharded arrays across processes. 
+ + train_state, train_state_sharding = init_train_state(config, init_rng, mesh, resume=resuming) + jax.block_until_ready(train_state) + logging.info(f"Initialized train state:\n{training_utils.array_tree_to_info(train_state.params)}") + + if resuming: + train_state = _checkpoints.restore_state(checkpoint_manager, train_state, data_loader) + + ptrain_step = jax.jit( + functools.partial(train_step, config), + in_shardings=(replicated_sharding, train_state_sharding, data_sharding), + out_shardings=(train_state_sharding, replicated_sharding), + donate_argnums=(1,), + ) + + start_step = int(train_state.step) + step_iter = range(start_step, config.num_train_steps) + pbar = ( + tqdm.tqdm( + step_iter, + initial=start_step, + total=config.num_train_steps, + dynamic_ncols=True, + ) + if is_main + else None + ) + + infos = [] + for step in step_iter: + with sharding.set_mesh(mesh): + train_state, info = ptrain_step(train_rng, train_state, batch) + if is_main and pbar is not None: + pbar.update(1) + infos.append(info) + if step % config.log_interval == 0: + # print("log!") + stacked_infos = common_utils.stack_forest(infos) + reduced_info = jax.device_get(jax.tree.map(jnp.mean, stacked_infos)) + if is_main: + info_str = ", ".join(f"{k}={v:.4f}" for k, v in reduced_info.items()) + if pbar is not None: + pbar.write(f"Step {step}: {info_str}") + else: + logging.info(f"Step {step}: {info_str}") + if config.wandb_enabled: + wandb.log(reduced_info, step=step) + infos = [] + batch = next(data_iter) + if ((step % config.save_interval == 0 and step > start_step) or step == config.num_train_steps - 1): + _checkpoints.save_state(checkpoint_manager, train_state, data_loader, step) + + if is_main: + if pbar is not None: + pbar.close() + logging.info("Waiting for checkpoint manager to finish") + checkpoint_manager.wait_until_finished() + + if distributed_initialized: + jax.distributed.shutdown() + + +if __name__ == "__main__": + main(_config.cli()) diff --git 
a/policy/openpi-InternData-A1/scripts/train_pytorch.py b/policy/openpi-InternData-A1/scripts/train_pytorch.py new file mode 100644 index 0000000..d7643e5 --- /dev/null +++ b/policy/openpi-InternData-A1/scripts/train_pytorch.py @@ -0,0 +1,632 @@ +""" +PyTorch training entrypoint for PI0/PI05 with multi-GPU and multi-node (DDP) support. +This script mirrors the behavior of the JAX trainer (`scripts/train.py`) but runs +entirely in PyTorch using the `PI0Pytorch` model and your existing config/data +pipeline from `src/openpi/training/config.py` and `src/openpi/training/data_loader.py`. + +Usage +Single GPU: + python scripts/train_pytorch.py <config_name> --exp_name <exp_name> --save_interval <interval> + Example: + python scripts/train_pytorch.py debug --exp_name pytorch_ddp_test + python scripts/train_pytorch.py debug --exp_name pytorch_ddp_test --resume # Resume from latest checkpoint +Multi-GPU (single node): + torchrun --standalone --nnodes=1 --nproc_per_node=<num_gpus> scripts/train_pytorch.py <config_name> --exp_name <exp_name> + Example: + torchrun --standalone --nnodes=1 --nproc_per_node=2 scripts/train_pytorch.py pi0_aloha_sim --exp_name pytorch_ddp_test + torchrun --standalone --nnodes=1 --nproc_per_node=2 scripts/train_pytorch.py pi0_aloha_sim --exp_name pytorch_ddp_test --resume
Multi-Node Training: + torchrun \ + --nnodes=<num_nodes> --nproc_per_node=<gpus_per_node> --node_rank=<node_rank> \ + --master_addr=<master_addr> --master_port=<master_port> \ + scripts/train_pytorch.py <config_name> --exp_name=<exp_name> --save_interval <interval> + +""" + +import dataclasses +import gc +import logging +import os +import platform +import shutil +import time + +import jax +import numpy as np +import safetensors.torch +import torch +import torch.distributed as dist +import torch.nn.parallel +import tqdm +import wandb + +import openpi.models.pi0_config +import openpi.models_pytorch.pi0_pytorch +import openpi.shared.normalize as _normalize +import openpi.training.config as _config +import openpi.training.data_loader as _data + + +def init_logging(): + level_mapping = {"DEBUG": "D", "INFO": "I", "WARNING": "W", "ERROR": "E", 
"CRITICAL": "C"} + + class CustomFormatter(logging.Formatter): + def format(self, record): + record.levelname = level_mapping.get(record.levelname, record.levelname) + return super().format(record) + + formatter = CustomFormatter( + fmt="%(asctime)s.%(msecs)03d [%(levelname)s] %(message)-80s (%(process)d:%(filename)s:%(lineno)s)", + datefmt="%H:%M:%S", + ) + logger = logging.getLogger() + logger.setLevel(logging.INFO) + if not logger.handlers: + ch = logging.StreamHandler() + ch.setFormatter(formatter) + logger.addHandler(ch) + else: + logger.handlers[0].setFormatter(formatter) + + +def init_wandb(config: _config.TrainConfig, *, resuming: bool, enabled: bool = True): + """Initialize wandb logging.""" + if not enabled: + wandb.init(mode="disabled") + return + + ckpt_dir = config.checkpoint_dir + if not ckpt_dir.exists(): + raise FileNotFoundError(f"Checkpoint directory {ckpt_dir} does not exist.") + + if resuming: + run_id = (ckpt_dir / "wandb_id.txt").read_text().strip() + wandb.init(id=run_id, resume="must", project=config.project_name) + else: + wandb.init( + name=config.exp_name, + config=dataclasses.asdict(config), + project=config.project_name, + ) + (ckpt_dir / "wandb_id.txt").write_text(wandb.run.id) + + +def setup_ddp(): + world_size = int(os.environ.get("WORLD_SIZE", "1")) + use_ddp = world_size > 1 + if use_ddp and not torch.distributed.is_initialized(): + backend = "nccl" if torch.cuda.is_available() else "gloo" + torch.distributed.init_process_group(backend=backend, init_method="env://") + + # Set up debugging environment variables for DDP issues + if os.environ.get("TORCH_DISTRIBUTED_DEBUG") is None: + os.environ["TORCH_DISTRIBUTED_DEBUG"] = "INFO" + + local_rank = int(os.environ.get("LOCAL_RANK", os.environ.get("RANK", "0"))) + device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu") + if torch.cuda.is_available(): + torch.cuda.set_device(device) + return use_ddp, local_rank, device + + +def cleanup_ddp(): + if 
torch.distributed.is_initialized(): + torch.distributed.barrier() + torch.distributed.destroy_process_group() + + +def set_seed(seed: int, local_rank: int): + torch.manual_seed(seed + local_rank) + np.random.seed(seed + local_rank) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed + local_rank) + + +def build_datasets(config: _config.TrainConfig): + # Use the unified data loader with PyTorch framework + data_loader = _data.create_data_loader(config, framework="pytorch", shuffle=True) + return data_loader, data_loader.data_config() + + +def get_model_state_dict(model): + """Get state dict from model, handling DDP wrapper.""" + return ( + model.module.state_dict() + if isinstance(model, torch.nn.parallel.DistributedDataParallel) + else model.state_dict() + ) + + +def get_model_parameters(model): + """Get parameters from model, handling DDP wrapper.""" + return ( + model.module.parameters() + if isinstance(model, torch.nn.parallel.DistributedDataParallel) + else model.parameters() + ) + + +def save_checkpoint(model, optimizer, global_step, config, is_main, data_config): + """Save a checkpoint with model state, optimizer state, and metadata.""" + if not is_main: + return + + # Only save if it's time to save or if it's the final step + if (global_step % config.save_interval == 0 and global_step > 0) or global_step == config.num_train_steps - 1: + # Create temporary directory for atomic checkpoint saving + final_ckpt_dir = config.checkpoint_dir / f"{global_step}" + tmp_ckpt_dir = config.checkpoint_dir / f"tmp_{global_step}" + + # Remove any existing temp directory and create new one + if tmp_ckpt_dir.exists(): + shutil.rmtree(tmp_ckpt_dir) + tmp_ckpt_dir.mkdir(parents=True, exist_ok=True) + + # Save model state using safetensors (handle shared tensors) + model_to_save = model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model + safetensors.torch.save_model(model_to_save, tmp_ckpt_dir / "model.safetensors") + + # Save 
optimizer state using PyTorch format + torch.save(optimizer.state_dict(), tmp_ckpt_dir / "optimizer.pt") + + # Save training metadata (avoid saving full config to prevent JAX/Flax compatibility issues) + metadata = { + "global_step": global_step, + "config": dataclasses.asdict(config), + "timestamp": time.time(), + } + torch.save(metadata, tmp_ckpt_dir / "metadata.pt") + + # save norm stats + norm_stats = data_config.norm_stats + if norm_stats is not None and data_config.asset_id is not None: + _normalize.save(tmp_ckpt_dir / "assets" / data_config.asset_id, norm_stats) + + # Atomically move temp directory to final location + if final_ckpt_dir.exists(): + shutil.rmtree(final_ckpt_dir) + tmp_ckpt_dir.rename(final_ckpt_dir) + + logging.info(f"Saved checkpoint at step {global_step} -> {final_ckpt_dir}") + + # Log checkpoint to wandb + if config.wandb_enabled: + wandb.log({"checkpoint_step": global_step}, step=global_step) + + +def load_checkpoint(model, optimizer, checkpoint_dir, device): + """Load the latest checkpoint and return the global step.""" + checkpoint_steps = [ + int(d.name) + for d in checkpoint_dir.iterdir() + if d.is_dir() and d.name.isdigit() and not d.name.startswith("tmp_") + ] + + if not checkpoint_steps: + raise FileNotFoundError(f"No checkpoints found in {checkpoint_dir}") + + latest_step = max(checkpoint_steps) + ckpt_dir = checkpoint_dir / f"{latest_step}" + + # Clear memory before loading checkpoints + if torch.cuda.is_available(): + torch.cuda.empty_cache() + gc.collect() + log_memory_usage(device, latest_step, "before_loading_checkpoint") + + try: + # Load model state with error handling + logging.info("Loading model state...") + safetensors_path = ckpt_dir / "model.safetensors" + + if safetensors_path.exists(): + model_to_load = model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model + safetensors.torch.load_model(model_to_load, safetensors_path, device=str(device)) + logging.info("Loaded model state from 
safetensors format") + else: + raise FileNotFoundError(f"No model checkpoint found at {ckpt_dir}") + + torch.cuda.empty_cache() + gc.collect() + log_memory_usage(device, latest_step, "after_loading_model") + + # Load optimizer state with error handling + logging.info("Loading optimizer state...") + optimizer_path = ckpt_dir / "optimizer.pt" + + if optimizer_path.exists(): + optimizer_state_dict = torch.load(optimizer_path, map_location=device, weights_only=False) + logging.info("Loaded optimizer state from pt format") + else: + raise FileNotFoundError(f"No optimizer checkpoint found at {ckpt_dir}") + + optimizer.load_state_dict(optimizer_state_dict) + del optimizer_state_dict + torch.cuda.empty_cache() + gc.collect() + log_memory_usage(device, latest_step, "after_loading_optimizer") + + # Load metadata + logging.info("Loading metadata...") + metadata = torch.load(ckpt_dir / "metadata.pt", map_location=device, weights_only=False) + global_step = metadata.get("global_step", latest_step) + del metadata + torch.cuda.empty_cache() + gc.collect() + log_memory_usage(device, latest_step, "after_loading_metadata") + + logging.info(f"Successfully loaded all checkpoint components from step {latest_step}") + return global_step + + except RuntimeError as e: + if "out of memory" in str(e): + # Clear memory and provide detailed error message + torch.cuda.empty_cache() + gc.collect() + logging.error(f"Out of memory error while loading checkpoint: {e!s}") + log_memory_usage(device, latest_step, "after_oom_error") + raise RuntimeError( + "Out of memory while loading checkpoint. 
Try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True" + ) from e + raise + + +def get_latest_checkpoint_step(checkpoint_dir): + """Get the latest checkpoint step number from a checkpoint directory.""" + checkpoint_steps = [ + int(d.name) + for d in checkpoint_dir.iterdir() + if d.is_dir() and d.name.isdigit() and not d.name.startswith("tmp_") + ] + return max(checkpoint_steps) if checkpoint_steps else None + + +def log_memory_usage(device, step, phase="unknown"): + """Log detailed memory usage information.""" + if not torch.cuda.is_available(): + return + + memory_allocated = torch.cuda.memory_allocated(device) / 1e9 + memory_reserved = torch.cuda.memory_reserved(device) / 1e9 + memory_free = torch.cuda.memory_reserved(device) - torch.cuda.memory_allocated(device) + memory_free = memory_free / 1e9 + + # Get more detailed memory info + memory_stats = torch.cuda.memory_stats(device) + max_memory_allocated = memory_stats.get("allocated_bytes.all.peak", 0) / 1e9 + max_memory_reserved = memory_stats.get("reserved_bytes.all.peak", 0) / 1e9 + + # Get DDP info if available + ddp_info = "" + if dist.is_initialized(): + ddp_info = f" | DDP: rank={dist.get_rank()}, world_size={dist.get_world_size()}" + + logging.info( + f"Step {step} ({phase}): GPU memory - allocated: {memory_allocated:.2f}GB, reserved: {memory_reserved:.2f}GB, free: {memory_free:.2f}GB, peak_allocated: {max_memory_allocated:.2f}GB, peak_reserved: {max_memory_reserved:.2f}GB{ddp_info}" + ) + + +def train_loop(config: _config.TrainConfig): + use_ddp, local_rank, device = setup_ddp() + is_main = (not use_ddp) or (dist.get_rank() == 0) + set_seed(config.seed, local_rank) + + # Initialize checkpoint directory and wandb + resuming = False + if config.resume: + # Find checkpoint directory based on experiment name + exp_checkpoint_dir = config.checkpoint_dir + if exp_checkpoint_dir.exists(): + # Use validation to find the latest working checkpoint + latest_step = 
get_latest_checkpoint_step(exp_checkpoint_dir) + if latest_step is not None: + resuming = True + logging.info( + f"Resuming from experiment checkpoint directory: {exp_checkpoint_dir} at step {latest_step}" + ) + else: + raise FileNotFoundError(f"No valid checkpoints found in {exp_checkpoint_dir} for resume") + else: + raise FileNotFoundError(f"Experiment checkpoint directory {exp_checkpoint_dir} does not exist for resume") + elif config.overwrite and config.checkpoint_dir.exists(): + shutil.rmtree(config.checkpoint_dir) + logging.info(f"Overwriting checkpoint directory: {config.checkpoint_dir}") + + # Create checkpoint directory with experiment name + if not resuming: + # For new runs, create experiment-specific checkpoint directory + exp_checkpoint_dir = config.checkpoint_dir + exp_checkpoint_dir.mkdir(parents=True, exist_ok=True) + logging.info(f"Created experiment checkpoint directory: {exp_checkpoint_dir}") + else: + # For resume, checkpoint_dir is already set to the experiment directory + logging.info(f"Using existing experiment checkpoint directory: {config.checkpoint_dir}") + + # Initialize wandb (only on main process) + if is_main: + init_wandb(config, resuming=resuming, enabled=config.wandb_enabled) + + # Build data loader using the unified data loader + # Calculate effective batch size per GPU for DDP + # For N GPUs, each GPU should get batch_size/N samples, so total across all GPUs is batch_size + world_size = torch.distributed.get_world_size() if use_ddp else 1 + effective_batch_size = config.batch_size // world_size + logging.info( + f"Using batch size per GPU: {effective_batch_size} (total batch size across {world_size} GPUs: {config.batch_size})" + ) + + # Pass the original batch size to data loader - it will handle DDP splitting internally + loader, data_config = build_datasets(config) + + # Log sample images to wandb on first batch + if is_main and config.wandb_enabled and not resuming: + # Create a separate data loader for sample batch to avoid 
consuming the main loader + sample_data_loader = _data.create_data_loader(config, framework="pytorch", shuffle=False) + sample_batch = next(iter(sample_data_loader)) + # Convert observation and actions to torch tensors + observation, actions = sample_batch + sample_batch = observation.to_dict() + sample_batch["actions"] = actions + + # Create sample images for wandb + images_to_log = [] + # Get batch size from the first image tensor + batch_size = next(iter(sample_batch["image"].values())).shape[0] + for i in range(min(5, batch_size)): + # Concatenate all camera views horizontally for this batch item + # Convert from NCHW to NHWC format for wandb + img_concatenated = torch.cat([img[i].permute(1, 2, 0) for img in sample_batch["image"].values()], axis=1) + img_concatenated = img_concatenated.cpu().numpy() + images_to_log.append(wandb.Image(img_concatenated)) + + wandb.log({"camera_views": images_to_log}, step=0) + + # Clear sample batch from memory aggressively + del sample_batch, observation, actions, images_to_log, img_concatenated + del sample_data_loader # Also delete the sample data loader + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + logging.info("Cleared sample batch and data loader from memory") + + # Build model + if not isinstance(config.model, openpi.models.pi0_config.Pi0Config): + # Convert dataclass to Pi0Config if needed + model_cfg = openpi.models.pi0_config.Pi0Config( + dtype=config.pytorch_training_precision, + action_dim=config.model.action_dim, + action_horizon=config.model.action_horizon, + max_token_len=config.model.max_token_len, + paligemma_variant=getattr(config.model, "paligemma_variant", "gemma_2b"), + action_expert_variant=getattr(config.model, "action_expert_variant", "gemma_300m"), + pi05=getattr(config.model, "pi05", False), + ) + else: + model_cfg = config.model + # Update dtype to match pytorch_training_precision + object.__setattr__(model_cfg, "dtype", config.pytorch_training_precision) + + model = 
openpi.models_pytorch.pi0_pytorch.PI0Pytorch(model_cfg).to(device) + + if hasattr(model, "gradient_checkpointing_enable"): + enable_gradient_checkpointing = True + model.gradient_checkpointing_enable() + logging.info("Enabled gradient checkpointing for memory optimization") + else: + enable_gradient_checkpointing = False + logging.info("Gradient checkpointing is not supported for this model") + + # Log initial memory usage after model creation + if is_main and torch.cuda.is_available(): + log_memory_usage(device, 0, "after_model_creation") + + # Enable memory optimizations for large-scale training + if world_size >= 8: + torch.backends.cudnn.benchmark = True + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + # Set memory allocation configuration + os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128,expandable_segments:True" + logging.info("Enabled memory optimizations for 8+ GPU training") + + if use_ddp: + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[device.index] if device.type == "cuda" else None, + find_unused_parameters=False, # Disable for memory efficiency + gradient_as_bucket_view=True, # Enable for memory efficiency + static_graph=world_size >= 8, # Enable for 8+ GPUs + ) + + # Load weights from weight_loader if specified (for fine-tuning) + # if config.pytorch_weight_path is not None: + # logging.info(f"Loading weights from: {config.pytorch_weight_path}") + + # model_path = os.path.join(config.pytorch_weight_path, "model.safetensors") + # safetensors.torch.load_model( + # (model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model), model_path + # ) + # logging.info(f"Loaded PyTorch weights from {config.pytorch_weight_path}") + + # Optimizer + learning rate schedule from config + warmup_steps = config.lr_schedule.warmup_steps + peak_lr = config.lr_schedule.peak_lr + decay_steps = config.lr_schedule.decay_steps + end_lr = config.lr_schedule.decay_lr 
+ + # Create optimizer with config parameters + optim = torch.optim.AdamW( + model.parameters(), + lr=peak_lr, + betas=(config.optimizer.b1, config.optimizer.b2), + eps=config.optimizer.eps, + weight_decay=config.optimizer.weight_decay, + ) + + # Load checkpoint if resuming + global_step = 0 + if resuming: + global_step = load_checkpoint(model, optim, config.checkpoint_dir, device) + logging.info(f"Resumed training from step {global_step}") + + def lr_schedule(step: int): + if step < warmup_steps: + # Match JAX behavior: start from peak_lr / (warmup_steps + 1) + init_lr = peak_lr / (warmup_steps + 1) + return init_lr + (peak_lr - init_lr) * step / warmup_steps + # cosine decay + progress = min(1.0, (step - warmup_steps) / max(1, decay_steps - warmup_steps)) + cos = 0.5 * (1 + np.cos(np.pi * progress)) + return end_lr + (peak_lr - end_lr) * cos + + model.train() + start_time = time.time() + infos = [] # Collect stats over log interval + if is_main: + logging.info( + f"Running on: {platform.node()} | world_size={torch.distributed.get_world_size() if use_ddp else 1}" + ) + logging.info( + f"Training config: batch_size={config.batch_size}, effective_batch_size={effective_batch_size}, num_train_steps={config.num_train_steps}" + ) + logging.info(f"Memory optimizations: gradient_checkpointing={enable_gradient_checkpointing}") + logging.info( + f"LR schedule: warmup={warmup_steps}, peak_lr={peak_lr:.2e}, decay_steps={decay_steps}, end_lr={end_lr:.2e}" + ) + logging.info( + f"Optimizer: {type(config.optimizer).__name__}, weight_decay={config.optimizer.weight_decay}, clip_norm={config.optimizer.clip_gradient_norm}" + ) + logging.info("EMA is not supported for PyTorch training") + logging.info(f"Training precision: {model_cfg.dtype}") + + # Training loop - iterate until we reach num_train_steps + pbar = ( + tqdm.tqdm(total=config.num_train_steps, initial=global_step, desc="Training", disable=not is_main) + if is_main + else None + ) + + while global_step < 
config.num_train_steps: + # Set epoch for distributed training + if use_ddp and hasattr(loader, "set_epoch"): + loader.set_epoch(global_step // len(loader)) + + for observation, actions in loader: + # Check if we've reached the target number of steps + if global_step >= config.num_train_steps: + break + + # The unified data loader returns (observation, actions) tuple + observation = jax.tree.map(lambda x: x.to(device), observation) # noqa: PLW2901 + actions = actions.to(torch.float32) # noqa: PLW2901 + actions = actions.to(device) # noqa: PLW2901 + + # Update LR + for pg in optim.param_groups: + pg["lr"] = lr_schedule(global_step) + + # Forward pass + losses = model(observation, actions) + # Ensure losses is a tensor and handle different return types + if isinstance(losses, list | tuple): + losses = torch.stack(losses) + elif not isinstance(losses, torch.Tensor): + losses = torch.tensor(losses, device=device, dtype=torch.float32) + + loss = losses.mean() + + # Backward pass + loss.backward() + + # Log memory usage after backward pass + if global_step < 5 and is_main and torch.cuda.is_available(): + log_memory_usage(device, global_step, "after_backward") + + # Gradient clipping + grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=config.optimizer.clip_gradient_norm) + + # Optimizer step + optim.step() + optim.zero_grad(set_to_none=True) + + # Clear gradients more aggressively + for param in model.parameters(): + if param.grad is not None: + param.grad.detach_() + param.grad = None + + # Collect stats + if is_main: + infos.append( + { + "loss": loss.item(), + "learning_rate": optim.param_groups[0]["lr"], + "grad_norm": float(grad_norm) if isinstance(grad_norm, torch.Tensor) else grad_norm, + } + ) + + if is_main and (global_step % config.log_interval == 0): + elapsed = time.time() - start_time + + # Average stats over log interval + avg_loss = sum(info["loss"] for info in infos) / len(infos) + avg_lr = sum(info["learning_rate"] for info in infos) 
/ len(infos) + + avg_grad_norm = None + if any("grad_norm" in info for info in infos): + vals = [ + info["grad_norm"] for info in infos if "grad_norm" in info and info["grad_norm"] is not None + ] + if len(vals) > 0: + avg_grad_norm = sum(vals) / len(vals) + logging.info( + f"step={global_step} loss={avg_loss:.4f} lr={avg_lr:.2e} grad_norm={avg_grad_norm:.2f} time={elapsed:.1f}s" + if avg_grad_norm is not None + else f"step={global_step} loss={avg_loss:.4f} lr={avg_lr:.2e} time={elapsed:.1f}s" + ) + + # Log to wandb + if config.wandb_enabled and len(infos) > 0: + log_payload = { + "loss": avg_loss, + "learning_rate": avg_lr, + "step": global_step, + "time_per_step": elapsed / config.log_interval, + } + if avg_grad_norm is not None: + log_payload["grad_norm"] = avg_grad_norm + wandb.log(log_payload, step=global_step) + + start_time = time.time() + infos = [] # Reset stats collection + + global_step += 1 + # Save checkpoint using the new mechanism + save_checkpoint(model, optim, global_step, config, is_main, data_config) + + # Update progress bar + if pbar is not None: + pbar.update(1) + pbar.set_postfix( + {"loss": f"{loss.item():.4f}", "lr": f"{optim.param_groups[0]['lr']:.2e}", "step": global_step} + ) + + # Close progress bar + if pbar is not None: + pbar.close() + + # Finish wandb run + if is_main and config.wandb_enabled: + wandb.finish() + + cleanup_ddp() + + +def main(): + init_logging() + config = _config.cli() + train_loop(config) + + +if __name__ == "__main__": + main() diff --git a/policy/openpi-InternData-A1/scripts/train_test.py b/policy/openpi-InternData-A1/scripts/train_test.py new file mode 100644 index 0000000..9e0bb7b --- /dev/null +++ b/policy/openpi-InternData-A1/scripts/train_test.py @@ -0,0 +1,30 @@ +import dataclasses +import os +import pathlib + +import pytest + +os.environ["JAX_PLATFORMS"] = "cpu" + +from openpi.training import config as _config + +from . 
import train + + +@pytest.mark.parametrize("config_name", ["debug"]) +def test_train(tmp_path: pathlib.Path, config_name: str): + config = dataclasses.replace( + _config._CONFIGS_DICT[config_name], # noqa: SLF001 + batch_size=2, + checkpoint_base_dir=str(tmp_path / "checkpoint"), + exp_name="test", + overwrite=False, + resume=False, + num_train_steps=2, + log_interval=1, + ) + train.main(config) + + # test resuming + config = dataclasses.replace(config, resume=True, num_train_steps=4) + train.main(config) diff --git a/policy/openpi-InternData-A1/scripts/training_scripts/multi_node.sh b/policy/openpi-InternData-A1/scripts/training_scripts/multi_node.sh new file mode 100755 index 0000000..2c873a7 --- /dev/null +++ b/policy/openpi-InternData-A1/scripts/training_scripts/multi_node.sh @@ -0,0 +1,209 @@ +#!/usr/bin/env bash +set -ex + +cd YOUR_PATH/openpi + +export USE_TF=0 +export USE_TORCH=0 +export USE_JAX=1 +export IMAGEIO_FFMPEG_EXE=ffmpeg +# JAX GPU memory fraction +export XLA_PYTHON_CLIENT_MEM_FRACTION="${XLA_PYTHON_CLIENT_MEM_FRACTION:-0.9}" + +# ============================================================================ +# NCCL Configuration +# ============================================================================ +export NCCL_ASYNC_ERROR_HANDLING=1 +export NCCL_TIMEOUT=3600 +export NCCL_DEBUG="${NCCL_DEBUG:-WARN}" + +# ============================================================================ +# Platform-Injected Configuration +# ============================================================================ +# The platform automatically injects these when DISTRIBUTED_JOB=true: +# - NCCL_IB_HCA, NCCL_IB_GID_INDEX, NCCL_SOCKET_IFNAME +# - NODE_RANK, NODE_COUNT, MASTER_ADDR, PROC_PER_NODE +# - CUDA_VISIBLE_DEVICES +# We trust and use these platform configurations directly. 
+# ============================================================================ + +echo "" +echo "==========================================" +echo "Platform Configuration" +echo "==========================================" +echo "NODE_RANK: ${NODE_RANK:-}" +echo "NODE_COUNT: ${NODE_COUNT:-}" +echo "MASTER_ADDR: ${MASTER_ADDR:-}" +echo "NCCL_IB_HCA: ${NCCL_IB_HCA:-}" +echo "NCCL_IB_GID_INDEX: ${NCCL_IB_GID_INDEX:-}" +echo "NCCL_SOCKET_IFNAME: ${NCCL_SOCKET_IFNAME:-}" +echo "==========================================" +echo "" + +# ============================================================================ +# NCCL Transport Configuration +# ============================================================================ +# Use platform-injected configuration if available, otherwise fallback +# ============================================================================ + +if [ -n "${NCCL_IB_HCA:-}" ]; then + # Platform has configured InfiniBand + echo "[NCCL] ✓ Using platform-injected InfiniBand configuration" + + # Only set NCCL_NET if not already set + if [ -z "${NCCL_NET:-}" ]; then + export NCCL_NET="IB" + fi + + # Set IB timeout if not already set + if [ -z "${NCCL_IB_TIMEOUT:-}" ]; then + export NCCL_IB_TIMEOUT=23 + fi + + echo "[NCCL] NCCL_NET: ${NCCL_NET}" + echo "[NCCL] NCCL_IB_HCA: ${NCCL_IB_HCA}" + echo "[NCCL] NCCL_IB_GID_INDEX: ${NCCL_IB_GID_INDEX}" + echo "[NCCL] NCCL_IB_TIMEOUT: ${NCCL_IB_TIMEOUT}" + +elif [ -n "${NCCL_SOCKET_IFNAME:-}" ]; then + # Platform has configured Socket + echo "[NCCL] ✓ Using platform-injected Socket configuration" + + if [ -z "${NCCL_NET:-}" ]; then + export NCCL_NET="Socket" + fi + + echo "[NCCL] NCCL_NET: ${NCCL_NET}" + echo "[NCCL] NCCL_SOCKET_IFNAME: ${NCCL_SOCKET_IFNAME}" + +else + # No platform injection - use OPENPI_NCCL_NET preference + echo "[NCCL] ⚠️ No platform-injected NCCL configuration" + + if [ "${OPENPI_NCCL_NET:-IB}" = "IB" ]; then + echo "[NCCL] ✗ InfiniBand requested but not configured by platform" + echo 
"[NCCL] ✗ Falling back to Socket transport"
        export NCCL_NET="Socket"
        export NCCL_IB_DISABLE=1
    else
        # InfiniBand not available; use plain TCP sockets for NCCL.
        export NCCL_NET="Socket"
        export NCCL_IB_DISABLE=1
        echo "[NCCL] Using Socket transport"
    fi
fi

echo ""

# ============================================================================
# JAX Distributed Configuration
# ============================================================================
# Map platform variables to JAX variables
# ============================================================================

echo "=========================================="
echo "JAX Distributed Configuration"
echo "=========================================="

# Default coordinator port; may be pre-set by the launching platform.
JAX_COORDINATOR_PORT="${JAX_COORDINATOR_PORT:-12345}"

# Set JAX coordinator address (derived from MASTER_ADDR when not given explicitly)
if [ -z "${JAX_COORDINATOR_ADDRESS:-}" ] && [ -n "${MASTER_ADDR:-}" ]; then
    export JAX_COORDINATOR_ADDRESS="${MASTER_ADDR}:${JAX_COORDINATOR_PORT}"
    echo "[JAX] ✓ Coordinator: ${JAX_COORDINATOR_ADDRESS} (from MASTER_ADDR)"
elif [ -n "${JAX_COORDINATOR_ADDRESS:-}" ]; then
    echo "[JAX] ✓ Coordinator: ${JAX_COORDINATOR_ADDRESS}"
else
    echo "[JAX] ✗ WARNING: No coordinator address set!"
fi

# Set JAX process count (derived from NODE_COUNT when not given explicitly)
if [ -z "${JAX_PROCESS_COUNT:-}" ] && [ -n "${NODE_COUNT:-}" ]; then
    export JAX_PROCESS_COUNT="${NODE_COUNT}"
    echo "[JAX] ✓ Process count: ${JAX_PROCESS_COUNT} (from NODE_COUNT)"
elif [ -n "${JAX_PROCESS_COUNT:-}" ]; then
    echo "[JAX] ✓ Process count: ${JAX_PROCESS_COUNT}"
fi

# Set JAX process index (derived from NODE_RANK when not given explicitly)
if [ -z "${JAX_PROCESS_INDEX:-}" ] && [ -n "${NODE_RANK:-}" ]; then
    export JAX_PROCESS_INDEX="${NODE_RANK}"
    echo "[JAX] ✓ Process index: ${JAX_PROCESS_INDEX} (from NODE_RANK)"
elif [ -n "${JAX_PROCESS_INDEX:-}" ]; then
    echo "[JAX] ✓ Process index: ${JAX_PROCESS_INDEX}"
fi

echo "=========================================="
echo ""

# ============================================================================
# Python Environment
# ============================================================================
export PYTHONPATH=YOUR_PATH/openpi/src:YOUR_PATH/openpi/packages/openpi-client/src:YOUR_PATH/openpi/third_party/lerobot:${PYTHONPATH}
conda activate pi0

# ============================================================================
# Configuration Summary
# ============================================================================

echo "=========================================="
echo "Configuration Summary"
echo "=========================================="
echo "NCCL_NET: ${NCCL_NET:-}"
echo "NCCL_IB_HCA: ${NCCL_IB_HCA:-}"
echo "NCCL_IB_GID_INDEX: ${NCCL_IB_GID_INDEX:-}"
echo "NCCL_SOCKET_IFNAME: ${NCCL_SOCKET_IFNAME:-}"
echo "JAX_COORDINATOR: ${JAX_COORDINATOR_ADDRESS:-}"
echo "JAX_PROCESS_COUNT: ${JAX_PROCESS_COUNT:-}"
echo "JAX_PROCESS_INDEX: ${JAX_PROCESS_INDEX:-}"
echo "=========================================="
echo ""

# ============================================================================
# Display Host Information
# ============================================================================

# Quick sanity check: prints local devices and the distributed env as JAX sees it.
python - <<'EOF'
import socket
import os
import jax
hostname = socket.gethostname()
devices = jax.local_devices()
device_count = len(devices)
device_ids = [d.id for d in devices]
print(f"[JAX] host={hostname}, devices={device_count}xgpu, ids={device_ids}")
print(f"[JAX] JAX_COORDINATOR_ADDRESS={os.environ.get('JAX_COORDINATOR_ADDRESS', '')}")
print(f"[JAX] JAX_PROCESS_COUNT={os.environ.get('JAX_PROCESS_COUNT', '')}")
print(f"[JAX] JAX_PROCESS_INDEX={os.environ.get('JAX_PROCESS_INDEX', '')}")
EOF

# ============================================================================
# Launch Training
# ============================================================================

# Determine experiment name based on transport
if [ "${OPENPI_DEBUG_SINGLE_GPU:-0}" = "1" ]; then
    EXP_NAME="${EXP_NAME:-dev_jax_single_gpu}"
    echo "[DEBUG] Running in single-GPU mode"
else
    EXP_NAME="${EXP_NAME:-dev_jax_multinode_ib}"
fi

echo ""
echo "=========================================="
echo "Starting Training"
echo "=========================================="
echo "Experiment: $EXP_NAME"
echo "=========================================="
echo ""

# Raise the open-file limit: the data loader opens many files/sockets per worker.
ulimit -n 1000000

python scripts/train_jax_multinode.py \
    pretrain-interndata-a1 \
    --exp-name=pretrain-interndata-a1 \
    --num_workers=12 \
    --fsdp_devices=8 \
    --batch_size=512 \
    --num_train_steps=2000000 \
    --save_interval=5000

diff --git a/policy/openpi-InternData-A1/scripts/training_scripts/single_node_multi_gpu.sh b/policy/openpi-InternData-A1/scripts/training_scripts/single_node_multi_gpu.sh
new file mode 100755
index 0000000..00ec4fd
--- /dev/null
+++ b/policy/openpi-InternData-A1/scripts/training_scripts/single_node_multi_gpu.sh
@@ -0,0 +1,13 @@
set -ex

export IMAGEIO_FFMPEG_EXE=ffmpeg
export OMP_NUM_THREADS=128

export PYTHONPATH=YOUR_PATH/openpi/src:YOUR_PATH/openpi/packages/openpi-client/src:YOUR_PATH/openpi/third_party/lerobot:${PYTHONPATH}
conda activate pi0

cd YOUR_PATH/openpi
ulimit -n 1000000
# First positional argument selects the training config.
config_name=$1
+XLA_PYTHON_CLIENT_MEM_FRACTION=0.9 python scripts/train.py ${config_name} \ + --exp-name=${config_name} \ No newline at end of file diff --git a/policy/openpi-InternData-A1/src/openpi/__init__.py b/policy/openpi-InternData-A1/src/openpi/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/policy/openpi-InternData-A1/src/openpi/conftest.py b/policy/openpi-InternData-A1/src/openpi/conftest.py new file mode 100644 index 0000000..5002b62 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/conftest.py @@ -0,0 +1,17 @@ +import os + +import pynvml +import pytest + + +def set_jax_cpu_backend_if_no_gpu() -> None: + try: + pynvml.nvmlInit() + pynvml.nvmlShutdown() + except pynvml.NVMLError: + # No GPU found. + os.environ["JAX_PLATFORMS"] = "cpu" + + +def pytest_configure(config: pytest.Config) -> None: + set_jax_cpu_backend_if_no_gpu() diff --git a/policy/openpi-InternData-A1/src/openpi/models/__init__.py b/policy/openpi-InternData-A1/src/openpi/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/policy/openpi-InternData-A1/src/openpi/models/gemma.py b/policy/openpi-InternData-A1/src/openpi/models/gemma.py new file mode 100644 index 0000000..d1623d1 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models/gemma.py @@ -0,0 +1,459 @@ +# Copyright 2024 Big Vision Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Gemma adaptation for Pi, taken from big_vision. 
+ +We follow this einsum axis naming convention: + B: batch + T: query length + S: k/v length + N: num query heads + K: num k/v heads + G: num query heads per k/v head + H: head dim + D: d_model ("features") +""" + +from collections.abc import Sequence +import dataclasses +from typing import Literal, TypeAlias + +import einops +import flax.linen as nn +import jax +import jax.numpy as jnp + +import openpi.models.lora as lora +import openpi.shared.array_typing as at +import openpi.training.sharding as sharding + +PALIGEMMA_VOCAB_SIZE = 257_152 + + +@dataclasses.dataclass +class Config: + width: int + depth: int + mlp_dim: int + num_heads: int + num_kv_heads: int + head_dim: int + lora_configs: dict[str, lora.LoRAConfig] = dataclasses.field(default_factory=dict) + + +Variant = Literal["dummy", "gemma_300m", "gemma_300m_lora", "gemma_2b", "gemma_2b_lora"] + + +def get_config(variant: Variant) -> Config: + """Returns config for specified gemma variant.""" + if variant == "dummy": + return Config( + width=64, + depth=4, + mlp_dim=128, + num_heads=8, + num_kv_heads=1, + head_dim=16, + ) + if variant == "gemma_300m": + # 311M params + return Config( + width=1024, + depth=18, + mlp_dim=4096, + num_heads=8, + num_kv_heads=1, + head_dim=256, + ) + if variant == "gemma_2b": + return Config( + width=2048, + depth=18, + mlp_dim=16_384, + num_heads=8, + num_kv_heads=1, + head_dim=256, + ) + if variant == "gemma_2b_lora": + return Config( + width=2048, + depth=18, + mlp_dim=16_384, + num_heads=8, + num_kv_heads=1, + head_dim=256, + lora_configs={"attn": lora.LoRAConfig(rank=16, alpha=16.0), "ffn": lora.LoRAConfig(rank=16, alpha=16.0)}, + ) + if variant == "gemma_300m_lora": + # 311M params + return Config( + width=1024, + depth=18, + mlp_dim=4096, + num_heads=8, + num_kv_heads=1, + head_dim=256, + lora_configs={"attn": lora.LoRAConfig(rank=32, alpha=32.0), "ffn": lora.LoRAConfig(rank=32, alpha=32.0)}, + ) + raise ValueError(f"Unknown variant: {variant}") + + +@at.typecheck +class 
RMSNorm(nn.Module):
    """RMS normalization with optional adaptive (conditioned) scale/shift/gate."""

    @nn.compact
    def __call__(self, x, cond):
        dtype = x.dtype  # original dtype, could be half-precision
        var = jnp.mean(jnp.square(x.astype(jnp.float32)), axis=-1, keepdims=True)  # compute variance in float32
        normed_inputs = jnp.asarray(x * jnp.reciprocal(jnp.sqrt(var + 1e-06)))  # compute normalization in float32
        if cond is None:
            # regular RMSNorm
            scale = self.param("scale", nn.initializers.zeros_init(), (x.shape[-1]))
            normed_inputs = normed_inputs * (
                1 + scale
            )  # scale by learned parameter in float32 (matches Flax implementation)
            return normed_inputs.astype(dtype), None  # return in original dtype

        # adaptive RMSNorm: scale/shift/gate predicted from the conditioning vector
        modulation = nn.Dense(x.shape[-1] * 3, kernel_init=nn.initializers.zeros, dtype=dtype)(cond)
        scale, shift, gate = jnp.split(modulation[:, None, :], 3, axis=-1)
        normed_inputs = normed_inputs * (1 + scale) + shift  # scale and shift in float32
        return normed_inputs.astype(dtype), gate


@at.typecheck
class Embedder(nn.Module):
    """Embedder module."""

    vocab_size: int
    embed_dim: int

    def setup(self):
        self.input_embedding_table = self.param(
            "input_embedding",
            nn.initializers.normal(),
            (self.vocab_size, self.embed_dim),
        )

    def encode(self, x):
        x = self.input_embedding_table[(x,)]
        # Scale embeddings by sqrt(d_model), as in the Gemma reference implementation.
        x *= jnp.sqrt(self.embed_dim).astype(x.dtype)
        return x

    def decode(self, x):
        # Tied embeddings: logits are produced with the transposed embedding table.
        return jnp.dot(x, self.input_embedding_table.T)


@at.typecheck
class Attention(nn.Module):
    """Attention module."""

    configs: Sequence[Config]

    @nn.compact
    def __call__(self, xs, positions, attn_mask, kv_cache):
        # all experts must share the same head dim, num heads, and num kv heads for self-attention to work
        assert all(config.head_dim == self.configs[0].head_dim for config in self.configs)
        assert all(config.num_heads == self.configs[0].num_heads for config in self.configs)
        assert all(config.num_kv_heads == self.configs[0].num_kv_heads for config in self.configs)

        dtype = next(x.dtype for x in xs if x is not None)  # original dtype, could be half-precision

        # Project each active expert's tokens to q/k/v with that expert's weights.
        qkvs = []
        for i, (x, config) in enumerate(zip(xs, self.configs, strict=True)):
            if x is None:
                continue
            if config.num_kv_heads == config.num_heads:
                qkv_einsum = lora.Einsum(
                    shape=(3, config.num_heads, config.width, config.head_dim),
                    name=_name("qkv_einsum", i),
                    init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0, 1)),
                    lora_config=config.lora_configs.get("attn"),
                )
                qkvs.append(qkv_einsum("BSD,3KDH->3BSKH", x))
            else:
                q_einsum = lora.Einsum(
                    shape=(config.num_heads, config.width, config.head_dim),
                    name=_name("q_einsum", i),
                    init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0,)),
                    lora_config=config.lora_configs.get("attn"),
                )
                q = q_einsum("BTD,NDH->BTNH", x)
                kv_einsum = lora.Einsum(
                    shape=(2, config.num_kv_heads, config.width, config.head_dim),
                    name=_name("kv_einsum", i),
                    init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0, 1)),
                    lora_config=config.lora_configs.get("attn"),
                )
                k, v = kv_einsum("BSD,2KDH->2BSKH", x)
                qkvs.append((q, k, v))

        # Concatenate the experts' q/k/v along the sequence axis.
        q, k, v = (jnp.concatenate(y, axis=1) for y in zip(*qkvs, strict=True))

        q = _apply_rope(q, positions=positions)
        q *= self.configs[0].head_dim ** -0.5

        k = _apply_rope(k, positions=positions)

        # should still be half-precision here (if input was half-precision)
        assert q.dtype == k.dtype == v.dtype == dtype

        if kv_cache is not None:
            cache_k, cache_v = kv_cache
            k = jnp.concatenate([cache_k, k], axis=1)
            v = jnp.concatenate([cache_v, v], axis=1)

        q = einops.rearrange(q, "B T (K G) H -> B T K G H", K=self.configs[0].num_kv_heads)
        logits = jnp.einsum("BTKGH,BSKH->BKGTS", q, k, preferred_element_type=jnp.float32)

        if attn_mask.shape != (q.shape[0], 1, q.shape[1], k.shape[1]):
            raise ValueError(
                f"Attention mask with shape {attn_mask.shape} but shapes for q and k are: {q.shape} and {k.shape}"
            )

        # big_neg = jnp.finfo(logits.dtype).min
        big_neg = -2.3819763e38  # See gemma/modules.py
        masked_logits = jnp.where(attn_mask[:, :, None, :, :], logits, big_neg)

        probs = jax.nn.softmax(masked_logits, axis=-1).astype(dtype)

        encoded = jnp.einsum("BKGTS,BSKH->BTKGH", probs, v)
        encoded = einops.rearrange(encoded, "B T K G H -> B T (K G) H")

        # Split the attended sequence back into per-expert outputs, applying each
        # expert's own output projection to its slice of tokens.
        out = []
        start = 0
        for i, (x, config) in enumerate(zip(xs, self.configs, strict=True)):
            if x is not None:
                end = start + x.shape[1]
                out_einsum = lora.Einsum(
                    shape=(config.num_heads, config.head_dim, config.width),
                    name=_name("attn_vec_einsum", i),
                    init_fn=nn.initializers.lecun_normal(in_axis=(-3, -2), out_axis=-1),
                    lora_config=config.lora_configs.get("attn"),
                )
                out.append(out_einsum("BTNH,NHD->BTD", encoded[:, start:end]))
                start = end
            else:
                out.append(None)

        return out, (k, v)


@at.typecheck
class FeedForward(nn.Module):
    """Feed forward module."""

    features: int
    hidden_dim: int

    @nn.compact
    def __call__(self, x):
        dtype = x.dtype  # original dtype, could be half-precision
        w_gating = self.param(
            "gating_einsum",
            nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0,)),
            (2, self.features, self.hidden_dim),
        ).astype(dtype)
        ff_gate = jnp.dot(x, w_gating[0])
        gate_value = nn.gelu(ff_gate)

        ff1 = jnp.dot(x, w_gating[1])
        activations = gate_value * ff1  # gated GELU

        w_linear = self.param(
            "linear",
            nn.initializers.lecun_normal(in_axis=-2, out_axis=-1),
            (self.hidden_dim, self.features),
        ).astype(dtype)
        outputs = jnp.dot(activations, w_linear)
        assert outputs.dtype == dtype
        return outputs


@at.typecheck
class Block(nn.Module):
    """Transformer block."""

    configs: tuple[Config, ...]

    dropout: float = 0.0
    dropout_bdims: tuple[int, ...] 
= () + + @nn.compact + def __call__(self, xs, kv_cache, positions, attn_mask, adarms_cond, deterministic=True): # noqa: FBT002 + xs = sharding.activation_sharding_constraint(xs) + drop = nn.Dropout(self.dropout, self.dropout_bdims) if self.dropout else lambda x, _: x + + attn = Attention(configs=self.configs, name="attn") + + pre_attn = [] + gates = [] + for i, x in enumerate(xs): + if x is not None: + x, gate = RMSNorm(name=_name("pre_attention_norm", i))(x, adarms_cond[i]) # noqa: PLW2901 + pre_attn.append(x) + gates.append(gate if x is not None else None) + + pre_attn = sharding.activation_sharding_constraint(pre_attn) + post_attn, kv_cache = attn(pre_attn, positions, attn_mask, kv_cache) + post_attn = jax.tree.map(lambda x: drop(x, deterministic), post_attn) + post_attn = sharding.activation_sharding_constraint(post_attn) + xs = [_gated_residual(x, y, gate) for x, y, gate in zip(xs, post_attn, gates, strict=True)] + xs = sharding.activation_sharding_constraint(xs) + + out = [] + gates = [] + for i, (x, config) in enumerate(zip(xs, self.configs, strict=True)): + if x is not None: + x, gate = RMSNorm(name=_name("pre_ffw_norm", i))(x, adarms_cond[i]) # noqa: PLW2901 + x = lora.FeedForward( # noqa: PLW2901 + features=config.width, + hidden_dim=config.mlp_dim, + name=_name("mlp", i), + lora_config=config.lora_configs.get("ffn"), + )(x) + out.append(x) + gates.append(gate if x is not None else None) + + out = sharding.activation_sharding_constraint(out) + out = jax.tree.map(lambda x: drop(x, deterministic), out) + xs = [_gated_residual(x, y, gate) for x, y, gate in zip(xs, out, gates, strict=True)] + xs = sharding.activation_sharding_constraint(xs) + + return xs, kv_cache + + +KVCache: TypeAlias = tuple[at.Float[at.Array, "l b _t _k _h"], at.Float[at.Array, "l b _t _v _h"]] + + +@at.typecheck +class Module(nn.Module): + """Transformer model, supporting a mixture of different weights for different tokens.""" + + configs: Sequence[Config] # list of configs, one for 
each expert
    embed_dtype: str

    dropout: float = 0.0
    dropout_bdims: tuple[int, ...] = ()  # Every float is dropped independently.
    adarms: bool = False

    def setup(self):
        # all experts must have the same depth
        assert all(config.depth == self.configs[0].depth for config in self.configs)

        self.embedder = Embedder(
            vocab_size=PALIGEMMA_VOCAB_SIZE,
            embed_dim=self.configs[0].width,  # embedder for first expert only
            name="embedder",
        )
        block_cls = nn.remat(
            Block,
            prevent_cse=False,
            static_argnums=(5,),  # 5 = deterministic (positional args counted after self)
            policy=jax.checkpoint_policies.nothing_saveable,
        )
        self.layers = nn.scan(
            block_cls,
            variable_axes={"params": 0},
            split_rngs={"params": True, "dropout": True},
            in_axes=(
                0,
                nn.broadcast,
                nn.broadcast,
                nn.broadcast,
                nn.broadcast,
            ),  # 0=kv_cache, 1=positions, 2=mask, 3=adarms_cond, 4=deterministic
            length=self.configs[0].depth,
        )(
            configs=self.configs,
            dropout=self.dropout,
            dropout_bdims=self.dropout_bdims,
        )
        self.final_norms = [RMSNorm(name=_name("final_norm", i)) for i in range(len(self.configs))]

    @at.typecheck
    def embed(self, tokens: at.Int[at.Array, "b t"]) -> at.Float[at.Array, "b t d"]:
        """Embeds token ids with the first expert's embedder, cast to `embed_dtype`."""
        return self.embedder.encode(tokens).astype(self.embed_dtype)

    @at.typecheck
    def __call__(
        self,
        # list of token arrays, one for each expert, or None if that expert should not be run
        embedded: Sequence[at.Float[at.Array, "b _t _d"] | None],
        positions: at.Int[at.Array, "b t"],
        mask: at.Bool[at.Array, "b t s"],
        adarms_cond: Sequence[at.Float[at.Array, "b _d"] | None] | None = None,
        *,
        kv_cache: KVCache | None = None,
        deterministic: bool = True,
    ) -> tuple[Sequence[at.Float[at.Array, "b _t _d"] | None], KVCache]:
        embedded = jax.tree.map(lambda e: e.astype(self.embed_dtype), embedded)
        mask = jnp.asarray(mask)[:, None, :, :]
        if adarms_cond is None:
            adarms_cond = [None] * len(self.configs)

        embedded, kv_cache = self.layers(embedded, kv_cache, positions, mask, adarms_cond, deterministic)

        assert all(e.dtype == jnp.dtype(self.embed_dtype) for e in embedded if e is not None)

        # Final per-expert RMSNorm; index [0] drops the (unused) adaptive gate.
        return [
            f(e, a)[0] if e is not None else e for f, e, a in zip(self.final_norms, embedded, adarms_cond, strict=True)
        ], kv_cache

    def init(self, use_adarms: Sequence[bool]):
        """Convenience method for initializing all parameters, necessary due to the quirks of linen."""
        self.embed(jnp.zeros((1, 1), dtype=jnp.int32))
        self(
            [jnp.zeros((1, 1, c.width)) for c in self.configs],
            jnp.zeros((1, len(self.configs)), dtype=jnp.int32),
            jnp.zeros((1, len(self.configs), len(self.configs)), dtype=bool),
            adarms_cond=[jnp.zeros((1, c.width)) if u else None for u, c in zip(use_adarms, self.configs, strict=True)],
        )


def _apply_rope(x, *, positions, max_wavelength=10_000):
    """Applies RoPE positions [B, L] to x [B, L, H, D]."""
    freq_exponents = (2.0 / x.shape[-1]) * jnp.arange(x.shape[-1] // 2, dtype=jnp.float32)
    timescale = max_wavelength**freq_exponents
    radians = positions[..., None] / timescale[None, None, :]
    radians = radians[..., None, :]
    assert radians.dtype == jnp.float32
    # radians.shape = [...,L,1,d=D/2]
    sin, cos = jnp.sin(radians), jnp.cos(radians)
    x1, x2 = jnp.split(x, 2, axis=-1)
    res = jnp.concatenate([x1 * cos - x2 * sin, x2 * cos + x1 * sin], axis=-1)
    assert res.dtype == jnp.float32
    # The original bigvision impl allows RoPE to upcast to float32. It is then immediately downcast again to the cache
    # dtype when in inference mode (but not in training mode). I don't think any of this was intentional. Based on the
    # original DeepMind impl, as well as the widely-used transformers impl, it is ok to always downcast back to bfloat16
    # here.
    return res.astype(x.dtype)


def _name(name, i):
    # we name layers like this because we want the first expert's weights to have no suffix (e.g., "attn"), so that they 
subsequent experts will have a suffix (e.g., + # "attn_1") and their weights will be initialized from scratch. in practice, we only use two experts -- PaliGemma, + # and the action expert. + if i == 0: + return name + return f"{name}_{i}" + + +def _gated_residual(x, y, gate): + assert (x is None) == (y is None) + if x is None: + return None + if gate is None: + return x + y + return x + y * gate diff --git a/policy/openpi-InternData-A1/src/openpi/models/gemma_fast.py b/policy/openpi-InternData-A1/src/openpi/models/gemma_fast.py new file mode 100644 index 0000000..eee39b4 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models/gemma_fast.py @@ -0,0 +1,437 @@ +# Copyright 2024 Big Vision Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Gemma model implementation from big_vision/models/ppp/gemma.py (with small modifications for NNX compatibility) +Used for FAST autoregressive policies. 
+""" + +import dataclasses +from typing import Literal, TypeAlias + +import einops +import flax.linen as nn +import jax +import jax.numpy as jnp +import ml_collections + +import openpi.models.lora as lora +import openpi.shared.array_typing as at + +Variant = Literal["gemma_2b", "gemma_2b_lora"] + + +def get_config(variant): + """Returns config for specified gemma variant.""" + if variant == "gemma_2b": + return ml_collections.ConfigDict( + { + "variant": variant, + "width": 2048, + "depth": 18, + "mlp_dim": 16_384, + "num_heads": 8, + "num_kv_heads": 1, + "head_dim": 256, + "norm_eps": 1e-6, + "vocab_size": 257_152, + "scan": True, + "remat_policy": "nothing_saveable", + } + ) + if variant == "gemma_2b_lora": + return ml_collections.ConfigDict( + { + "variant": variant, + "width": 2048, + "depth": 18, + "mlp_dim": 16_384, + "num_heads": 8, + "num_kv_heads": 1, + "head_dim": 256, + "norm_eps": 1e-6, + "vocab_size": 257_152, + "scan": True, + "remat_policy": "nothing_saveable", + "lora_configs": { + "attn": lora.LoRAConfig(rank=16, alpha=16.0), + "ffn": lora.LoRAConfig(rank=16, alpha=16.0), + }, + } + ) + raise ValueError(f"Unknown variant: {variant}") + + +@at.typecheck +class Einsum(nn.Module): + shape: tuple[int, ...] 

    @nn.compact
    def __call__(self, eqn, x):
        dtype = x.dtype  # original dtype, could be half-precision
        w = self.param("w", nn.initializers.zeros_init(), self.shape).astype(dtype)
        return jnp.einsum(eqn, x, w)


@at.typecheck
class RMSNorm(nn.Module):
    """RMS normalization with a learned scale."""

    @nn.compact
    def __call__(self, x):
        dtype = x.dtype  # original dtype, could be half-precision
        scale = self.param("scale", nn.initializers.zeros_init(), (x.shape[-1]))
        var = jnp.mean(jnp.square(x.astype(jnp.float32)), axis=-1, keepdims=True)  # compute variance in float32
        normed_inputs = jnp.asarray(x * jnp.reciprocal(jnp.sqrt(var + 1e-06)))  # compute normalization in float32
        normed_inputs = normed_inputs * (
            1 + scale
        )  # scale by learned parameter in float32 (matches Flax implementation)
        return normed_inputs.astype(dtype)  # return in original dtype


@at.typecheck
class Embedder(nn.Module):
    """Embedder module."""

    vocab_size: int
    embed_dim: int

    def setup(self):
        self.input_embedding_table = self.param(
            "input_embedding",
            nn.initializers.zeros_init(),
            (self.vocab_size, self.embed_dim),
        )

    def encode(self, x):
        x = self.input_embedding_table[(x,)]
        # Scale embeddings by sqrt(d_model), per the Gemma reference implementation.
        x *= jnp.sqrt(self.embed_dim).astype(x.dtype)
        return x

    def decode(self, x):
        # Tied embeddings: logits via the transposed embedding table.
        return jnp.dot(x, self.input_embedding_table.T)


@at.typecheck
class Attention(nn.Module):
    """Attention module."""

    num_heads: int
    num_kv_heads: int
    features: int
    head_dim: int

    cache_dtype: str | None = None

    lora_config: lora.LoRAConfig | None = None

    def setup(self):
        if self.num_kv_heads == self.num_heads:
            # Fused q/k/v projection when query and kv head counts match.
            self.qkv_einsum = lora.Einsum(
                shape=(3, self.num_heads, self.features, self.head_dim),
                name="qkv_einsum",
                init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0, 1)),
                lora_config=self.lora_config,
            )
        else:
            self.q_einsum = lora.Einsum(
                shape=(self.num_heads, self.features, self.head_dim),
                name="q_einsum",
                init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0,)),
                lora_config=self.lora_config,
            )
            self.kv_einsum = lora.Einsum(
                shape=(2, self.num_kv_heads, self.features, self.head_dim),
                name="kv_einsum",
                init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0, 1)),
                lora_config=self.lora_config,
            )
        self.attn_vec_einsum = lora.Einsum(
            shape=(self.num_heads, self.head_dim, self.features),
            name="attn_vec_einsum",
            init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0,)),
            lora_config=self.lora_config,
        )

    def _init_cache(self, k, v, cache_size):
        """Initialize KV cache"""
        prefill_len = k.shape[1]
        pad_width = ((0, 0), (0, cache_size - prefill_len), (0, 0), (0, 0))
        cache_dtype = self.cache_dtype or k.dtype
        k_cache = jnp.pad(k.astype(cache_dtype), pad_width)
        v_cache = jnp.pad(v.astype(cache_dtype), pad_width)
        # Per-batch write index starts right after the prefill.
        idx = jnp.zeros((k.shape[0],), dtype=jnp.int32) + prefill_len
        return idx, k_cache, v_cache

    def _update_cache(self, k, v, idx, k_cache, v_cache):
        """Update KV cache with new values"""
        assert k.shape[1] == 1, "Only support kv-cache updates of length 1"
        indices = (0, idx[0], 0, 0)
        cache_dtype = self.cache_dtype or k.dtype
        k_new = jax.lax.dynamic_update_slice(k_cache, k.astype(cache_dtype), indices)
        v_new = jax.lax.dynamic_update_slice(v_cache, v.astype(cache_dtype), indices)
        idx_new = idx + 1
        return idx_new, k_new, v_new

    @nn.compact
    def __call__(self, x, positions, attn_mask, kv_cache, decode, deterministic=True):  # noqa: FBT002
        dtype = x.dtype  # original dtype, could be half-precision
        if self.num_kv_heads == self.num_heads:
            q, k, v = self.qkv_einsum("BSD,3KDH->3BSKH", x)
        else:
            q = self.q_einsum("BTD,NDH->BTNH", x)
            k, v = self.kv_einsum("BSD,2KDH->2BSKH", x)

        q = _apply_rope(q, positions=positions)  # promotes to float32
        q *= self.head_dim**-0.5

        k = _apply_rope(k, positions=positions)  # promotes to float32

        if kv_cache is None:
            # Prefill: allocate the cache sized to the mask's source length.
            idx, k_cache, v_cache = self._init_cache(k, v, attn_mask.shape[-1])
        else:
            # Decode step: write the single new k/v at the current index.
            idx, k_cache, v_cache = kv_cache
            idx, k_cache, v_cache = self._update_cache(k, v, idx, k_cache, v_cache)

        k, v = k_cache, v_cache
        kv_cache = (idx, k_cache, v_cache)

        q = einops.rearrange(q, "B T (K G) H -> B T K G H", K=self.num_kv_heads)
        logits = jnp.einsum("BTKGH,BSKH->BKGTS", q, k, preferred_element_type=jnp.float32)

        if attn_mask.shape != (q.shape[0], 1, q.shape[1], k.shape[1]):
            raise ValueError(
                f"Attention mask with shape {attn_mask.shape} but shapes for q and k are: {q.shape} and {k.shape}"
            )

        # big_neg = jnp.finfo(logits.dtype).min
        big_neg = -2.3819763e38  # See gemma/modules.py
        masked_logits = jnp.where(attn_mask[:, :, None, :, :], logits, big_neg)

        probs = jax.nn.softmax(masked_logits, axis=-1).astype(dtype)

        encoded = jnp.einsum("BKGTS,BSKH->BTKGH", probs, v)
        encoded = einops.rearrange(encoded, "B T K G H -> B T (K G) H")
        return self.attn_vec_einsum("BTNH,NHD->BTD", encoded), kv_cache


@at.typecheck
class Block(nn.Module):
    """Transformer block."""

    num_heads: int
    num_kv_heads: int
    embed_dim: int
    head_dim: int
    hidden_dim: int

    dropout: float = 0.0
    dropout_bdims: tuple[int, ...] 
= ()
    cache_dtype: str | None = None
    lora_configs: ml_collections.ConfigDict = dataclasses.field(default_factory=ml_collections.ConfigDict)

    def setup(self):
        self.pre_attention_norm = RMSNorm()
        self.attn = Attention(
            num_heads=self.num_heads,
            num_kv_heads=self.num_kv_heads,
            features=self.embed_dim,
            head_dim=self.head_dim,
            cache_dtype=self.cache_dtype,
            lora_config=self.lora_configs.get("attn"),
        )
        self.pre_ffw_norm = RMSNorm()
        self.mlp = lora.FeedForward(
            features=self.embed_dim, hidden_dim=self.hidden_dim, name="mlp", lora_config=self.lora_configs.get("ffn")
        )
        if self.dropout:
            self.drop = nn.Dropout(self.dropout, self.dropout_bdims)
        else:
            self.drop = lambda x, _: x

    def __call__(self, x, kv_cache, positions, attn_mask, decode, deterministic=True):  # noqa: FBT002
        x = nn.with_logical_constraint(x, ("act_batch", "act_len", "act_emb"))
        # Pre-norm attention sublayer with residual connection.
        inputs_normalized = self.pre_attention_norm(x)
        attn_output, kv_cache = self.attn(inputs_normalized, positions, attn_mask, kv_cache, decode, deterministic)
        attn_output = self.drop(attn_output, deterministic)
        attn_output += x
        residual = attn_output
        # Pre-norm feed-forward sublayer with residual connection.
        attn_output = self.pre_ffw_norm(attn_output)
        outputs = self.mlp(attn_output)
        outputs = self.drop(outputs, deterministic)
        outputs = residual + outputs
        return outputs, kv_cache


# Decode-time cache: (per-batch write index, keys, values).
KVCache: TypeAlias = tuple[at.Int[at.Array, " b"], at.Float[at.Array, "b _t _k _h"], at.Float[at.Array, "b _t _v _h"]]


@at.typecheck
class Module(nn.Module):
    """gemma model."""

    variant: str

    width: int
    depth: int
    mlp_dim: int
    num_heads: int
    num_kv_heads: int
    head_dim: int
    norm_eps: float
    vocab_size: int
    embed_dtype: str

    dropout: float = 0.0
    dropout_bdims: tuple[int, ...] = ()  # Every float is dropped independently.
    cache_dtype: str | None = None

    scan: bool = False
    remat_policy: str = "none"
    lora_configs: ml_collections.ConfigDict = dataclasses.field(default_factory=ml_collections.ConfigDict)

    @nn.compact
    def __call__(
        self,
        tokens=None,
        embedded_prefix=None,
        embed_only=False,  # noqa: FBT002
        pre_logits=None,
        positions=None,
        mask=None,
        decode=False,  # noqa: FBT002
        kv_cache=None,
        deterministic=True,  # noqa: FBT002
        return_prelogits=False,  # noqa: FBT002
    ):
        """Embed only, or complete forward pass.

        Args:
          tokens: Embedded, then and appended to `embedded_prefix`. Can be None.
          embedded_prefix: Optional prefix that is already embedded.
          embed_only: Whether to compute embeddings only.
          pre_logits: If present computes logits from pre_logits and returns.
          positions: Optional `[B, T]` allows to specify the absolute position of
            the tokens.
          mask: Optional attention mask `[B, T, S]`.
          decode: Whether to use kv-cache. Caller must pass masks and positions.
          deterministic: Forwarded to all dropout layers.
          return_prelogits: Whether to return the pre-logits.

        Returns:
          If `embed_only=False`, then `(logits, out)` will be returned.
          If `embed_only=True`, then the embeddings will be returned.
          If `return_prelogits=True`, then the pre-logits will be returned.
        """
        out = {}

        embedder = Embedder(vocab_size=self.vocab_size, embed_dim=self.width, name="embedder")

        if pre_logits is not None:
            # Shortcut: only project pre-logits through the tied embedding.
            x = out["pre_logits"] = pre_logits
            logits = out["logits"] = embedder.decode(x)
            return logits, out

        x = []
        if embedded_prefix is not None:
            x.append(embedded_prefix)
        if tokens is not None:
            x.append(embedder.encode(tokens))

        x = jnp.concatenate(x, axis=-2)
        x = x.astype(self.embed_dtype)
        batch_size, seq_len, width = x.shape

        if embed_only:
            return x

        if decode:
            assert positions is not None and mask is not None, (  # noqa: PT018
                "Must explicitly pass positions and mask for decoding."
            )

        if positions is None:
            positions = jnp.arange(seq_len).astype(jnp.int32)[None, :]
        assert positions.shape[1] == x.shape[1], (positions.shape, x.shape)

        if mask is None:
            mask = nn.attention.make_causal_mask(jnp.ones([batch_size, seq_len]))
        if mask.ndim == 3:
            mask = mask[:, None, :, :]
        # The mask's source axis determines the KV-cache size during decoding.
        cache_size = max(seq_len, mask.shape[-1])
        assert mask.shape == (batch_size, 1, seq_len, cache_size), mask.shape

        if self.remat_policy == "none":
            block_cls = Block
        else:
            block_cls = nn.remat(
                Block,
                prevent_cse=not self.scan,
                static_argnums=(5, 6),  # 5=decode, 6=deterministic
                policy=getattr(jax.checkpoint_policies, self.remat_policy),
            )

        block_kw = {
            "num_heads": self.num_heads,
            "head_dim": self.head_dim,
            "num_kv_heads": self.num_kv_heads,
            "embed_dim": width,
            "hidden_dim": self.mlp_dim,
            "dropout": self.dropout,
            "dropout_bdims": self.dropout_bdims,
            "cache_dtype": self.cache_dtype,
            "lora_configs": self.lora_configs,
        }
        layers = self.scope.push("layers")
        blocks = [
            nn.scan(
                block_cls,
                variable_axes={"params": 0},
                split_rngs={"params": True, "dropout": True},
                in_axes=(0, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast),  # 0=kv_cache, 1=positions, 2=mask
                length=self.depth,
            )(parent=layers, **block_kw)
        ]
        for block in blocks:
            x, kv_cache = block(x, kv_cache, positions, mask, decode, deterministic)

        assert x.dtype == jnp.dtype(self.embed_dtype)  # Sanity check.
        out["encoded"] = x

        x = RMSNorm(name="final_norm")(x)
        out["pre_logits"] = x
        if return_prelogits:
            return x, kv_cache, out

        x = embedder.decode(x)
        out["logits"] = x

        return x, kv_cache, out

    def init(self):
        """Convenience method for initializing all parameters, necessary due to the quirks of linen."""
        self(jnp.zeros((1, 1), dtype=jnp.int32))


def _apply_rope(x, *, positions, max_wavelength=10_000):
    """Applies RoPE positions [B, L] to x [B, L, H, D]."""
    freq_exponents = (2.0 / x.shape[-1]) * jnp.arange(x.shape[-1] // 2, dtype=jnp.float32)
    timescale = max_wavelength**freq_exponents
    radians = positions[..., None] / timescale[None, None, :]
    radians = radians[..., None, :]
    assert radians.dtype == jnp.float32
    # radians.shape = [...,L,1,d=D/2]
    sin, cos = jnp.sin(radians), jnp.cos(radians)
    x1, x2 = jnp.split(x, 2, axis=-1)
    res = jnp.concatenate([x1 * cos - x2 * sin, x2 * cos + x1 * sin], axis=-1)
    assert res.dtype == jnp.float32
    return res
diff --git a/policy/openpi-InternData-A1/src/openpi/models/lora.py b/policy/openpi-InternData-A1/src/openpi/models/lora.py
new file mode 100644
index 0000000..3dfff5b
--- /dev/null
+++ b/policy/openpi-InternData-A1/src/openpi/models/lora.py
@@ -0,0 +1,148 @@
import math
import re

import flax.linen as nn
import flax.struct as struct
import jax.numpy as jnp

import openpi.shared.array_typing as at


@struct.dataclass
class LoRAConfig:
    """Configuration for LoRA."""

    # LoRA rank.
    rank: int
    # LoRA scaling factor.
    alpha: float = 1.0
    # Initialization function for LoRA parameters.
    init_fn: nn.initializers.Initializer = nn.initializers.normal(stddev=0.01)
    # Enable rank-stabilized LoRA: https://arxiv.org/pdf/2312.03732
    rslora: bool = False
    # Axes in the weight to apply LoRA to. Should typically be the last two axes.
    axes: tuple[int, int] = (-2, -1)
    # Axis label which is used by LoRA in einsum equations. Must not be present in the original equation. 
+ label: str = "L" + + @property + def scaling_value(self) -> float: + return self.alpha / math.sqrt(self.rank) if self.rslora else self.alpha / self.rank + + +class Einsum(nn.Module): + """Einsum with LoRA support. Can be used as a drop-in replacement for the Gemma Einsum.""" + + # Shape of the weight. + shape: tuple[int, ...] + # Initialization function for the weight. + init_fn: nn.initializers.Initializer = nn.initializers.zeros + # If not None, apply LoRA to the weight. + lora_config: LoRAConfig | None = None + + def setup(self): + self.w = self.param("w", self.init_fn, self.shape) + + if config := self.lora_config: + # Setup LoRA parameters. + shape_a, shape_b = list(self.shape), list(self.shape) + shape_a[config.axes[1]] = config.rank + shape_b[config.axes[0]] = config.rank + self.w_a = self.param("lora_a", config.init_fn, shape_a) + self.w_b = self.param("lora_b", config.init_fn, shape_b) + + @nn.compact + def __call__(self, eqn: str, x): + dtype = x.dtype # original dtype, could be half-precision + result = jnp.einsum(eqn, x, self.w.astype(dtype)) + + if config := self.lora_config: + eqn_a, eqn_b = self._make_lora_eqns(eqn) + lora = jnp.einsum(eqn_a, x, self.w_a.astype(dtype)) + lora = jnp.einsum(eqn_b, lora, self.w_b.astype(dtype)) + result = result + lora * config.scaling_value + + return result + + def _make_lora_eqns(self, eqn: str) -> tuple[str, str]: + if "L" in eqn: + raise ValueError(f"L already in eqn: {eqn}") + if not (m := re.match("(.*),(.*)->(.*)", eqn)): + raise ValueError(f"Unsupported einsum eqn: {eqn}") + lhs, rhs, out = m.groups() + + assert self.lora_config is not None + a_label, b_label = (rhs[x] for x in self.lora_config.axes) + label = self.lora_config.label + + a_rhs = rhs.replace(b_label, label) + a_out = out.replace(b_label, label) + eqn_a = f"{lhs},{a_rhs}->{a_out}" + + b_rhs = rhs.replace(a_label, label) + eqn_b = f"{a_out},{b_rhs}->{out}" + + return eqn_a, eqn_b + + +class FeedForward(nn.Module): + """Feed forward module.""" + + 
features: int + hidden_dim: int + # If not None, apply LoRA to the weight. + lora_config: LoRAConfig | None = None + + def setup(self): + self.w_gating = self.param( + "gating_einsum", + nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0,)), + (2, self.features, self.hidden_dim), + ) + self.w_linear = self.param( + "linear", + nn.initializers.lecun_normal(in_axis=-2, out_axis=-1), + (self.hidden_dim, self.features), + ) + self.w_gating_lora = None + self.w_linear_lora = None + if self.lora_config: + # Setup LoRA parameters. + # TODO: follow up with a simplified init_fn api. + self.w_gating_lora = ( + self.param("gating_einsum_lora_a", self.lora_config.init_fn, (2, self.features, self.lora_config.rank)), + self.param( + "gating_einsum_lora_b", self.lora_config.init_fn, (2, self.lora_config.rank, self.hidden_dim) + ), + ) + self.w_linear_lora = ( + self.param("linear_lora_a", self.lora_config.init_fn, (self.hidden_dim, self.lora_config.rank)), + self.param("linear_lora_b", self.lora_config.init_fn, (self.lora_config.rank, self.features)), + ) + + @nn.compact + def __call__(self, x): + dtype = x.dtype # original dtype, could be half-precision + ff_gate = self._dot( + x, + self.w_gating[0], + None if self.w_gating_lora is None else (self.w_gating_lora[0][0], self.w_gating_lora[1][0]), + ) + gate_value = nn.gelu(ff_gate) + + ff1 = self._dot( + x, + self.w_gating[1], + None if self.w_gating_lora is None else (self.w_gating_lora[0][1], self.w_gating_lora[1][1]), + ) + activations = gate_value * ff1 + + outputs = self._dot(activations, self.w_linear, self.w_linear_lora) + assert outputs.dtype == dtype + return outputs + + def _dot(self, x: at.Array, w: at.Array, lora_weights: tuple[at.Array, at.Array] | None) -> at.Array: + base = jnp.dot(x, w.astype(x.dtype)) + if lora_weights is None: + return base + return base + jnp.dot(jnp.dot(x, lora_weights[0].astype(x.dtype)), lora_weights[1].astype(x.dtype)) diff --git 
a/policy/openpi-InternData-A1/src/openpi/models/lora_test.py b/policy/openpi-InternData-A1/src/openpi/models/lora_test.py new file mode 100644 index 0000000..48b65b6 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models/lora_test.py @@ -0,0 +1,94 @@ +import flax.linen as nn +import jax +import jax.numpy as jnp + +import openpi.models.lora as lora + + +def test_lora_einsum_params_shape(): + shape = (3, 8, 32, 4) # (3KDH) + einsum = lora.Einsum(shape) + lora0 = lora.Einsum(shape, lora_config=lora.LoRAConfig(rank=2)) + lora1 = lora.Einsum(shape, lora_config=lora.LoRAConfig(rank=2, axes=(1, 2))) + + key = jax.random.key(0) + x = jax.random.normal(key, (8, 64, 32)) # (BSD) + eqn = "BSD,3KDH->3BSKH" + + # Ensure that lora parameters are not initialized when LoRA is not used. + params = einsum.init(key, eqn, x) + assert "lora_a" not in params["params"] + assert "lora_b" not in params["params"] + + # Check that default axes work. + params_lora0 = lora0.init(key, eqn, x) + assert params_lora0["params"]["lora_a"].shape == (3, 8, 32, 2) + assert params_lora0["params"]["lora_b"].shape == (3, 8, 2, 4) + + # Check that user provided axes work. + params_lora1 = lora1.init(key, eqn, x) + assert params_lora1["params"]["lora_a"].shape == (3, 8, 2, 4) + assert params_lora1["params"]["lora_b"].shape == (3, 2, 32, 4) + + +def test_lora_einsum_same_output(): + shape = (3, 8, 32, 4) # (3KDH) + einsum = lora.Einsum(shape) + einsum_lora = lora.Einsum(shape, lora_config=lora.LoRAConfig(rank=2, init_fn=nn.initializers.zeros)) + + key = jax.random.key(0) + x = jax.random.normal(key, (8, 64, 32)) # (BSD) + eqn = "BSD,3KDH->3BSKH" + + params = einsum.init(key, eqn, x) + output = einsum.apply(params, eqn, x) + + params_lora = einsum_lora.init(key, eqn, x) + output_lora = einsum_lora.apply(params_lora, eqn, x) + + # Results are the same since the LoRA parameters are initialized to zeros. 
+ assert jnp.allclose(output, output_lora) + + +def test_lora_ffn_params_shape(): + ffn = lora.FeedForward(features=8, hidden_dim=32) + ffn_lora = lora.FeedForward( + features=8, + hidden_dim=32, + lora_config=lora.LoRAConfig(rank=2), + ) + + key = jax.random.key(0) + x = jax.random.normal(key, (2, 8)) + + params = ffn.init(key, x) + assert params["params"]["gating_einsum"].shape == (2, 8, 32) + assert params["params"]["linear"].shape == (32, 8) + + params_lora = ffn_lora.init(key, x) + assert params_lora["params"]["gating_einsum"].shape == (2, 8, 32) + assert params_lora["params"]["linear"].shape == (32, 8) + assert params_lora["params"]["gating_einsum_lora_a"].shape == (2, 8, 2) + assert params_lora["params"]["gating_einsum_lora_b"].shape == (2, 2, 32) + assert params_lora["params"]["linear_lora_a"].shape == (32, 2) + assert params_lora["params"]["linear_lora_b"].shape == (2, 8) + + +def test_lora_ffn_same_output(): + ffn = lora.FeedForward(features=8, hidden_dim=32) + ffn_lora = lora.FeedForward( + features=8, + hidden_dim=32, + lora_config=lora.LoRAConfig(rank=2, init_fn=nn.initializers.zeros), + ) + + key = jax.random.key(0) + x = jax.random.normal(key, (2, 8)) + + params = ffn.init(key, x) + output = ffn.apply(params, x) + + params_lora = ffn_lora.init(key, x) + output_lora = ffn_lora.apply(params_lora, x) + + assert jnp.allclose(output, output_lora) diff --git a/policy/openpi-InternData-A1/src/openpi/models/model.py b/policy/openpi-InternData-A1/src/openpi/models/model.py new file mode 100644 index 0000000..29618b4 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models/model.py @@ -0,0 +1,332 @@ +import abc +from collections.abc import Sequence +import dataclasses +import enum +import logging +import pathlib +from typing import Generic, TypeVar + +import augmax +from flax import nnx +from flax import struct +from flax import traverse_util +import jax +import jax.numpy as jnp +import numpy as np +import orbax.checkpoint as ocp +import safetensors 
+import torch
+
+from openpi.models_pytorch import pi0_pytorch
+from openpi.shared import image_tools
+import openpi.shared.array_typing as at
+
+logger = logging.getLogger("openpi")
+
+# Type variable for array types (JAX arrays, PyTorch tensors, or numpy arrays)
+ArrayT = TypeVar("ArrayT", bound=jax.Array | torch.Tensor | np.ndarray)
+
+
+class ModelType(enum.Enum):
+    """Supported model types."""
+
+    PI0 = "pi0"
+    PI0_FAST = "pi0_fast"
+    PI05 = "pi05"
+
+
+# The model always expects these images
+IMAGE_KEYS = (
+    "base_0_rgb",
+    "left_wrist_0_rgb",
+    "right_wrist_0_rgb",
+)
+
+
+# This may need change if we release a small model.
+IMAGE_RESOLUTION = (224, 224)
+
+
+# Data format
+#
+# Data transforms produce the model input as a nested dictionary which is later converted
+# into `Observation` and `Actions` objects. See below.
+#
+# In the dictionary form, this data should look like:
+# {
+#     # Observation data.
+#     "image": {
+#         "base_0_rgb": (float32|uint8)[*b, h, w, 3],  # RGB image in [-1, 1] or [0, 255]
+#         ...  # Additional camera views
+#     },
+#     "image_mask": {
+#         "base_0_rgb": bool[*b],  # True if image is valid
+#         ...  # Masks for additional views
+#     },
+#     "state": float32[*b, s],  # Low-dimensional robot state
+#     "tokenized_prompt": int32[*b, l],  # Optional, tokenized language prompt
+#     "tokenized_prompt_mask": bool[*b, l],  # Optional, mask for tokenized prompt
+#     "token_ar_mask": int32[*b, l],  # Optional, autoregressive mask for FAST model
+#     "token_loss_mask": bool[*b, l],  # Optional, loss mask for FAST model
+#
+#     # Actions data.
+#     "actions": float32[*b ah ad]
+# }
+# where:
+#     *b = batch dimensions
+#     h,w = image height/width
+#     s = state dimension
+#     l = sequence length
+#
+@at.typecheck
+@struct.dataclass
+class Observation(Generic[ArrayT]):
+    """Holds observations, i.e., inputs to the model.
+
+    See `Observation.from_dict` to see the expected dictionary form. This is the format
+    that should be produced by the data transforms.
+ """ + + # Images, in [-1, 1] float32. + images: dict[str, at.Float[ArrayT, "*b h w c"]] + # Image masks, with same keys as images. + image_masks: dict[str, at.Bool[ArrayT, "*b"]] + # Low-dimensional robot state. + state: at.Float[ArrayT, "*b s"] + + # Tokenized prompt. + tokenized_prompt: at.Int[ArrayT, "*b l"] | None = None + # Tokenized prompt mask. + tokenized_prompt_mask: at.Bool[ArrayT, "*b l"] | None = None + + # pi0-fast model specific fields. + + # Token auto-regressive mask (for FAST autoregressive model). + token_ar_mask: at.Int[ArrayT, "*b l"] | None = None + # Token loss mask (for FAST autoregressive model). + token_loss_mask: at.Bool[ArrayT, "*b l"] | None = None + + @classmethod + def from_dict(cls, data: at.PyTree[ArrayT]) -> "Observation[ArrayT]": + """This method defines the mapping between unstructured data (i.e., nested dict) to the structured Observation format.""" + # Ensure that tokenized_prompt and tokenized_prompt_mask are provided together. + if ("tokenized_prompt" in data) != ("tokenized_prompt_mask" in data): + raise ValueError("tokenized_prompt and tokenized_prompt_mask must be provided together.") + # If images are uint8, convert them to [-1, 1] float32. 
+ for key in data["image"]: + if data["image"][key].dtype == np.uint8: + data["image"][key] = data["image"][key].astype(np.float32) / 255.0 * 2.0 - 1.0 + elif hasattr(data["image"][key], "dtype") and data["image"][key].dtype == torch.uint8: + data["image"][key] = data["image"][key].to(torch.float32).permute(0, 3, 1, 2) / 255.0 * 2.0 - 1.0 + return cls( + images=data["image"], + image_masks=data["image_mask"], + state=data["state"], + tokenized_prompt=data.get("tokenized_prompt"), + tokenized_prompt_mask=data.get("tokenized_prompt_mask"), + token_ar_mask=data.get("token_ar_mask"), + token_loss_mask=data.get("token_loss_mask"), + ) + + def to_dict(self) -> at.PyTree[ArrayT]: + """Convert the Observation to a nested dict.""" + result = dataclasses.asdict(self) + result["image"] = result.pop("images") + result["image_mask"] = result.pop("image_masks") + return result + + +# Defines the format of the actions. This field is included as "actions" inside the dictionary +# produced by the data transforms. +Actions = at.Float[ArrayT, "*b ah ad"] + + +def preprocess_observation( + rng: at.KeyArrayLike | None, + observation: Observation, + *, + train: bool = False, + image_keys: Sequence[str] = IMAGE_KEYS, + image_resolution: tuple[int, int] = IMAGE_RESOLUTION, +) -> Observation: + """Preprocess the observations by performing image augmentations (if train=True), resizing (if necessary), and + filling in a default image mask (if necessary). + """ + + if not set(image_keys).issubset(observation.images): + raise ValueError(f"images dict missing keys: expected {image_keys}, got {list(observation.images)}") + + batch_shape = observation.state.shape[:-1] + + out_images = {} + for key in image_keys: + image = observation.images[key] + if image.shape[1:3] != image_resolution: + logger.info(f"Resizing image {key} from {image.shape[1:3]} to {image_resolution}") + image = image_tools.resize_with_pad(image, *image_resolution) + + if train: + # Convert from [-1, 1] to [0, 1] for augmax. 
+ image = image / 2.0 + 0.5 + + transforms = [] + if "wrist" not in key: + height, width = image.shape[1:3] + transforms += [ + augmax.RandomCrop(int(width * 0.95), int(height * 0.95)), + augmax.Resize(width, height), + augmax.Rotate((-5, 5)), + ] + transforms += [ + augmax.ColorJitter(brightness=0.3, contrast=0.4, saturation=0.5), + ] + sub_rngs = jax.random.split(rng, image.shape[0]) + image = jax.vmap(augmax.Chain(*transforms))(sub_rngs, image) + + # Back to [-1, 1]. + image = image * 2.0 - 1.0 + + out_images[key] = image + + # obtain mask + out_masks = {} + for key in out_images: + if key not in observation.image_masks: + # do not mask by default + out_masks[key] = jnp.ones(batch_shape, dtype=jnp.bool) + else: + out_masks[key] = jnp.asarray(observation.image_masks[key]) + + return Observation( + images=out_images, + image_masks=out_masks, + state=observation.state, + tokenized_prompt=observation.tokenized_prompt, + tokenized_prompt_mask=observation.tokenized_prompt_mask, + token_ar_mask=observation.token_ar_mask, + token_loss_mask=observation.token_loss_mask, + ) + + +@dataclasses.dataclass(frozen=True) +class BaseModelConfig(abc.ABC): + """Configuration shared by all models. Specific models should inherit from this class, and implement the `create` + method to create the corresponding model. + """ + + # Action space dimension. + action_dim: int + # Action sequence length. + action_horizon: int + # Tokenized prompt maximum length. 
+ max_token_len: int + + @property + @abc.abstractmethod + def model_type(self) -> ModelType: + """The model type.""" + + @abc.abstractmethod + def create(self, rng: at.KeyArrayLike) -> "BaseModel": + """Create a new model, initializing parameters.""" + + def load(self, params: at.Params, *, remove_extra_params: bool = True) -> "BaseModel": + """Create a model with the given parameters.""" + model = nnx.eval_shape(self.create, jax.random.key(0)) + graphdef, state = nnx.split(model) + if remove_extra_params: + params = ocp.transform_utils.intersect_trees(state.to_pure_dict(), params) + at.check_pytree_equality(expected=state.to_pure_dict(), got=params, check_shapes=True, check_dtypes=False) + state.replace_by_pure_dict(params) + return nnx.merge(graphdef, state) + + def load_pytorch(self, train_config, weight_path: str): + logger.info(f"train_config: {train_config}") + model = pi0_pytorch.PI0Pytorch(config=train_config.model) + safetensors.torch.load_model(model, weight_path) + return model + + @abc.abstractmethod + def inputs_spec(self, *, batch_size: int = 1) -> tuple[Observation, Actions]: + """Returns the input specification for the model. Values are jax.ShapeDtypeStruct.""" + + def fake_obs(self, batch_size: int = 1) -> Observation: + observation_spec, _ = self.inputs_spec(batch_size=batch_size) + return jax.tree.map(lambda x: jnp.ones(x.shape, x.dtype), observation_spec) + + def fake_act(self, batch_size: int = 1) -> Actions: + _, action_spec = self.inputs_spec(batch_size=batch_size) + return jax.tree.map(lambda x: jnp.ones(x.shape, x.dtype), action_spec) + + +@dataclasses.dataclass +class BaseModel(nnx.Module, abc.ABC): + """Base class for all model implementations. Specific models should inherit from this class. They should call + super().__init__() to initialize the shared attributes (action_dim, action_horizon, and max_token_len). 
+ """ + + action_dim: int + action_horizon: int + max_token_len: int + + @abc.abstractmethod + def compute_loss( + self, + rng: at.KeyArrayLike, + observation: Observation, + actions: Actions, + *, + train: bool = False, + ) -> at.Float[at.Array, "*b ah"]: ... + + @abc.abstractmethod + def sample_actions(self, rng: at.KeyArrayLike, observation: Observation, **kwargs) -> Actions: ... + + +def restore_params( + params_path: pathlib.Path | str, + *, + restore_type: type[np.ndarray] | type[jax.Array] = jax.Array, + dtype: jnp.dtype | None = None, + sharding: jax.sharding.Sharding | None = None, +) -> at.Params: + """Restores unstructured params PyTree from a checkpoint. + + This works with checkpoints saved with `save_state` during openpi training (see `training/checkpoints.py`) as + well as pre-trained checkpoints released for openpi. + + Args: + params_path: The local path to the checkpoint directory. + restore_type: The type to restore the params as. Can be set to `np.ndarray` to load the params as a numpy array. + dtype: The dtype to restore all params as. If not provided, will use the original dtype from the checkpoint. + sharding: The sharding to use for the params. If not provided, the params will be replicated across all devices. + + Returns: + The restored params. 
+ """ + params_path = pathlib.Path(params_path).resolve() if not str(params_path).startswith("gs://") else params_path + + if restore_type is jax.Array and sharding is None: + mesh = jax.sharding.Mesh(jax.devices(), ("x",)) + sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec()) + + with ocp.PyTreeCheckpointer() as ckptr: + metadata = ckptr.metadata(params_path) + item = {"params": metadata["params"]} + + params = ckptr.restore( + params_path, + ocp.args.PyTreeRestore( + item=item, + restore_args=jax.tree.map( + lambda _: ocp.ArrayRestoreArgs(sharding=sharding, restore_type=restore_type, dtype=dtype), item + ), + ), + )["params"] + + # If the params were saved with `save_state` during openpi training, every key path will end with "value", which is + # added by `nnx.State`. We remove the "value" suffix here and always return what NNX calls a "pure dict". + flat_params = traverse_util.flatten_dict(params) + if all(kp[-1] == "value" for kp in flat_params): + flat_params = {kp[:-1]: v for kp, v in flat_params.items()} + return traverse_util.unflatten_dict(flat_params) diff --git a/policy/openpi-InternData-A1/src/openpi/models/model_test.py b/policy/openpi-InternData-A1/src/openpi/models/model_test.py new file mode 100644 index 0000000..495dc18 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models/model_test.py @@ -0,0 +1,94 @@ +from flax import nnx +import jax +import pytest + +from openpi.models import model as _model +from openpi.models import pi0_config +from openpi.models import pi0_fast +from openpi.shared import download +from openpi.shared import nnx_utils + + +def test_pi0_model(): + key = jax.random.key(0) + config = pi0_config.Pi0Config() + model = config.create(key) + + batch_size = 2 + obs, act = config.fake_obs(batch_size), config.fake_act(batch_size) + + loss = nnx_utils.module_jit(model.compute_loss)(key, obs, act) + assert loss.shape == (batch_size, config.action_horizon) + + actions = 
nnx_utils.module_jit(model.sample_actions)(key, obs, num_steps=10) + assert actions.shape == (batch_size, model.action_horizon, model.action_dim) + + +def test_pi0_lora_model(): + key = jax.random.key(0) + config = pi0_config.Pi0Config(paligemma_variant="gemma_2b_lora") + model = config.create(key) + + batch_size = 2 + obs, act = config.fake_obs(batch_size), config.fake_act(batch_size) + + loss = nnx_utils.module_jit(model.compute_loss)(key, obs, act) + assert loss.shape == (batch_size, config.action_horizon) + + actions = nnx_utils.module_jit(model.sample_actions)(key, obs, num_steps=10) + assert actions.shape == (batch_size, model.action_horizon, model.action_dim) + + +def test_pi0_fast_model(): + key = jax.random.key(0) + config = pi0_fast.Pi0FASTConfig() + model = config.create(key) + + batch_size = 2 + obs, act = config.fake_obs(batch_size), config.fake_act(batch_size) + + loss = nnx_utils.module_jit(model.compute_loss)(key, obs, act) + assert loss.shape == (batch_size,) + + actions = nnx_utils.module_jit(model.sample_actions)(key, obs) + assert actions.shape == (batch_size, 256) + + +def test_pi0_fast_lora_model(): + key = jax.random.key(0) + config = pi0_fast.Pi0FASTConfig(paligemma_variant="gemma_2b_lora") + model = config.create(key) + + batch_size = 2 + obs, act = config.fake_obs(batch_size), config.fake_act(batch_size) + + loss = nnx_utils.module_jit(model.compute_loss)(key, obs, act) + assert loss.shape == (batch_size,) + + actions = nnx_utils.module_jit(model.sample_actions)(key, obs) + assert actions.shape == (batch_size, 256) + + lora_filter = nnx_utils.PathRegex(".*lora.*") + model_state = nnx.state(model) + + lora_state_elems = list(model_state.filter(lora_filter)) + assert len(lora_state_elems) > 0 + + +@pytest.mark.manual +def test_model_restore(): + key = jax.random.key(0) + config = pi0_config.Pi0Config() + + batch_size = 2 + obs, act = config.fake_obs(batch_size), config.fake_act(batch_size) + + model = config.load( + 
_model.restore_params(download.maybe_download("gs://openpi-assets/checkpoints/pi0_base/params")) + ) + + loss = model.compute_loss(key, obs, act) + assert loss.shape == (batch_size, config.action_horizon) + + actions = model.sample_actions(key, obs, num_steps=10) + assert actions.shape == (batch_size, model.action_horizon, model.action_dim) diff --git a/policy/openpi-InternData-A1/src/openpi/models/pi0.py b/policy/openpi-InternData-A1/src/openpi/models/pi0.py new file mode 100644 index 0000000..ae7c459 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models/pi0.py @@ -0,0 +1,279 @@ +import logging + +import einops +import flax.nnx as nnx +import flax.nnx.bridge as nnx_bridge +import jax +import jax.numpy as jnp +from typing_extensions import override + +from openpi.models import model as _model +from openpi.models import pi0_config +import openpi.models.gemma as _gemma +import openpi.models.siglip as _siglip +from openpi.shared import array_typing as at + +logger = logging.getLogger("openpi") + + +def make_attn_mask(input_mask, mask_ar): + """Adapted from big_vision. + + Tokens can attend to valid inputs tokens which have a cumulative mask_ar + smaller or equal to theirs. This way `mask_ar` bool[?B, N] can be used to + setup several types of attention, for example: + + [[1 1 1 1 1 1]]: pure causal attention. + + [[0 0 0 1 1 1]]: prefix-lm attention. The first 3 tokens can attend between + themselves and the last 3 tokens have a causal attention. The first + entry could also be a 1 without changing behaviour. + + [[1 0 1 0 1 0 0 1 0 0]]: causal attention between 4 blocks. Tokens of a + block can attend all previous blocks and all tokens on the same block. + + Args: + input_mask: bool[B, N] true if its part of the input, false if padding. + mask_ar: bool[?B, N] mask that's true where previous tokens cannot depend on + it and false where it shares the same attention mask as the previous token. 
+ """ + mask_ar = jnp.broadcast_to(mask_ar, input_mask.shape) + cumsum = jnp.cumsum(mask_ar, axis=1) + attn_mask = cumsum[:, None, :] <= cumsum[:, :, None] + valid_mask = input_mask[:, None, :] * input_mask[:, :, None] + return jnp.logical_and(attn_mask, valid_mask) + + +@at.typecheck +def posemb_sincos( + pos: at.Real[at.Array, " b"], embedding_dim: int, min_period: float, max_period: float +) -> at.Float[at.Array, "b {embedding_dim}"]: + """Computes sine-cosine positional embedding vectors for scalar positions.""" + if embedding_dim % 2 != 0: + raise ValueError(f"embedding_dim ({embedding_dim}) must be divisible by 2") + + fraction = jnp.linspace(0.0, 1.0, embedding_dim // 2) + period = min_period * (max_period / min_period) ** fraction + sinusoid_input = jnp.einsum( + "i,j->ij", + pos, + 1.0 / period * 2 * jnp.pi, + precision=jax.lax.Precision.HIGHEST, + ) + return jnp.concatenate([jnp.sin(sinusoid_input), jnp.cos(sinusoid_input)], axis=-1) + + +class Pi0(_model.BaseModel): + def __init__(self, config: pi0_config.Pi0Config, rngs: nnx.Rngs): + super().__init__(config.action_dim, config.action_horizon, config.max_token_len) + self.pi05 = config.pi05 + paligemma_config = _gemma.get_config(config.paligemma_variant) + action_expert_config = _gemma.get_config(config.action_expert_variant) + # TODO: rewrite gemma in NNX. For now, use bridge. 
+ llm = nnx_bridge.ToNNX( + _gemma.Module( + configs=[paligemma_config, action_expert_config], + embed_dtype=config.dtype, + adarms=config.pi05, + ) + ) + llm.lazy_init(rngs=rngs, method="init", use_adarms=[False, True] if config.pi05 else [False, False]) + img = nnx_bridge.ToNNX( + _siglip.Module( + num_classes=paligemma_config.width, + variant="So400m/14", + pool_type="none", + scan=True, + dtype_mm=config.dtype, + ) + ) + img.lazy_init(next(iter(config.fake_obs().images.values())), train=False, rngs=rngs) + self.PaliGemma = nnx.Dict(llm=llm, img=img) + self.action_in_proj = nnx.Linear(config.action_dim, action_expert_config.width, rngs=rngs) + if config.pi05: + self.time_mlp_in = nnx.Linear(action_expert_config.width, action_expert_config.width, rngs=rngs) + self.time_mlp_out = nnx.Linear(action_expert_config.width, action_expert_config.width, rngs=rngs) + else: + self.state_proj = nnx.Linear(config.action_dim, action_expert_config.width, rngs=rngs) + self.action_time_mlp_in = nnx.Linear(2 * action_expert_config.width, action_expert_config.width, rngs=rngs) + self.action_time_mlp_out = nnx.Linear(action_expert_config.width, action_expert_config.width, rngs=rngs) + self.action_out_proj = nnx.Linear(action_expert_config.width, config.action_dim, rngs=rngs) + + # This attribute gets automatically set by model.train() and model.eval(). 
+ self.deterministic = True + + @at.typecheck + def embed_prefix( + self, obs: _model.Observation + ) -> tuple[at.Float[at.Array, "b s emb"], at.Bool[at.Array, "b s"], at.Bool[at.Array, " s"]]: + input_mask = [] + ar_mask = [] + tokens = [] + # embed images + for name in obs.images: + image_tokens, _ = self.PaliGemma.img(obs.images[name], train=False) + + tokens.append(image_tokens) + input_mask.append( + einops.repeat( + obs.image_masks[name], + "b -> b s", + s=image_tokens.shape[1], + ) + ) + # image tokens attend to each other + ar_mask += [False] * image_tokens.shape[1] + + # add language (aka tokenized inputs) + if obs.tokenized_prompt is not None: + tokenized_inputs = self.PaliGemma.llm(obs.tokenized_prompt, method="embed") + tokens.append(tokenized_inputs) + input_mask.append(obs.tokenized_prompt_mask) + # full attention between image and language inputs + ar_mask += [False] * tokenized_inputs.shape[1] + tokens = jnp.concatenate(tokens, axis=1) + input_mask = jnp.concatenate(input_mask, axis=1) + ar_mask = jnp.array(ar_mask) + return tokens, input_mask, ar_mask + + @at.typecheck + def embed_suffix( + self, obs: _model.Observation, noisy_actions: _model.Actions, timestep: at.Float[at.Array, " b"] + ) -> tuple[ + at.Float[at.Array, "b s emb"], + at.Bool[at.Array, "b s"], + at.Bool[at.Array, " s"], + at.Float[at.Array, "b emb"] | None, + ]: + input_mask = [] + ar_mask = [] + tokens = [] + if not self.pi05: + # add a single state token + state_token = self.state_proj(obs.state)[:, None, :] + tokens.append(state_token) + input_mask.append(jnp.ones((obs.state.shape[0], 1), dtype=jnp.bool_)) + # image/language inputs do not attend to state or actions + ar_mask += [True] + + action_tokens = self.action_in_proj(noisy_actions) + # embed timestep using sine-cosine positional encoding with sensitivity in the range [0, 1] + time_emb = posemb_sincos(timestep, self.action_in_proj.out_features, min_period=4e-3, max_period=4.0) + if self.pi05: + # time MLP (for adaRMS) + 
time_emb = self.time_mlp_in(time_emb) + time_emb = nnx.swish(time_emb) + time_emb = self.time_mlp_out(time_emb) + time_emb = nnx.swish(time_emb) + action_expert_tokens = action_tokens + adarms_cond = time_emb + else: + # mix timestep + action information using an MLP (no adaRMS) + time_tokens = einops.repeat(time_emb, "b emb -> b s emb", s=self.action_horizon) + action_time_tokens = jnp.concatenate([action_tokens, time_tokens], axis=-1) + action_time_tokens = self.action_time_mlp_in(action_time_tokens) + action_time_tokens = nnx.swish(action_time_tokens) + action_time_tokens = self.action_time_mlp_out(action_time_tokens) + action_expert_tokens = action_time_tokens + adarms_cond = None + tokens.append(action_expert_tokens) + input_mask.append(jnp.ones(action_expert_tokens.shape[:2], dtype=jnp.bool_)) + # image/language/state inputs do not attend to action tokens + ar_mask += [True] + ([False] * (self.action_horizon - 1)) + tokens = jnp.concatenate(tokens, axis=1) + input_mask = jnp.concatenate(input_mask, axis=1) + ar_mask = jnp.array(ar_mask) + return tokens, input_mask, ar_mask, adarms_cond + + @override + def compute_loss( + self, rng: at.KeyArrayLike, observation: _model.Observation, actions: _model.Actions, *, train: bool = False + ) -> at.Float[at.Array, "*b ah"]: + preprocess_rng, noise_rng, time_rng = jax.random.split(rng, 3) + observation = _model.preprocess_observation(preprocess_rng, observation, train=train) + + batch_shape = actions.shape[:-2] + noise = jax.random.normal(noise_rng, actions.shape) + time = jax.random.beta(time_rng, 1.5, 1, batch_shape) * 0.999 + 0.001 + time_expanded = time[..., None, None] + x_t = time_expanded * noise + (1 - time_expanded) * actions + u_t = noise - actions + + # one big forward pass of prefix + suffix at once + prefix_tokens, prefix_mask, prefix_ar_mask = self.embed_prefix(observation) + suffix_tokens, suffix_mask, suffix_ar_mask, adarms_cond = self.embed_suffix(observation, x_t, time) + input_mask = 
jnp.concatenate([prefix_mask, suffix_mask], axis=1) + ar_mask = jnp.concatenate([prefix_ar_mask, suffix_ar_mask], axis=0) + attn_mask = make_attn_mask(input_mask, ar_mask) + positions = jnp.cumsum(input_mask, axis=1) - 1 + (prefix_out, suffix_out), _ = self.PaliGemma.llm( + [prefix_tokens, suffix_tokens], mask=attn_mask, positions=positions, adarms_cond=[None, adarms_cond] + ) + v_t = self.action_out_proj(suffix_out[:, -self.action_horizon :]) + + return jnp.mean(jnp.square(v_t - u_t), axis=-1) + + @override + def sample_actions( + self, + rng: at.KeyArrayLike, + observation: _model.Observation, + *, + num_steps: int | at.Int[at.Array, ""] = 10, + noise: at.Float[at.Array, "b ah ad"] | None = None, + ) -> _model.Actions: + observation = _model.preprocess_observation(None, observation, train=False) + # note that we use the convention more common in diffusion literature, where t=1 is noise and t=0 is the target + # distribution. yes, this is the opposite of the pi0 paper, and I'm sorry. + dt = -1.0 / num_steps + batch_size = observation.state.shape[0] + if noise is None: + noise = jax.random.normal(rng, (batch_size, self.action_horizon, self.action_dim)) + + # first fill KV cache with a forward pass of the prefix + prefix_tokens, prefix_mask, prefix_ar_mask = self.embed_prefix(observation) + prefix_attn_mask = make_attn_mask(prefix_mask, prefix_ar_mask) + positions = jnp.cumsum(prefix_mask, axis=1) - 1 + _, kv_cache = self.PaliGemma.llm([prefix_tokens, None], mask=prefix_attn_mask, positions=positions) + + def step(carry): + x_t, time = carry + suffix_tokens, suffix_mask, suffix_ar_mask, adarms_cond = self.embed_suffix( + observation, x_t, jnp.broadcast_to(time, batch_size) + ) + # `suffix_attn_mask` is shape (b, suffix_len, suffix_len) indicating how the suffix tokens can attend to each + # other + suffix_attn_mask = make_attn_mask(suffix_mask, suffix_ar_mask) + # `prefix_attn_mask` is shape (b, suffix_len, prefix_len) indicating how the suffix tokens can attend to 
the + # prefix tokens + prefix_attn_mask = einops.repeat(prefix_mask, "b p -> b s p", s=suffix_tokens.shape[1]) + # `combined_mask` is shape (b, suffix_len, prefix_len + suffix_len) indicating how the suffix tokens (which + # generate the queries) can attend to the full prefix + suffix sequence (which generates the keys and values) + full_attn_mask = jnp.concatenate([prefix_attn_mask, suffix_attn_mask], axis=-1) + assert full_attn_mask.shape == ( + batch_size, + suffix_tokens.shape[1], + prefix_tokens.shape[1] + suffix_tokens.shape[1], + ) + # `positions` is shape (b, suffix_len) indicating the positions of the suffix tokens + positions = jnp.sum(prefix_mask, axis=-1)[:, None] + jnp.cumsum(suffix_mask, axis=-1) - 1 + + (prefix_out, suffix_out), _ = self.PaliGemma.llm( + [None, suffix_tokens], + mask=full_attn_mask, + positions=positions, + kv_cache=kv_cache, + adarms_cond=[None, adarms_cond], + ) + assert prefix_out is None + v_t = self.action_out_proj(suffix_out[:, -self.action_horizon :]) + + return x_t + dt * v_t, time + dt + + def cond(carry): + x_t, time = carry + # robust to floating-point error + return time >= -dt / 2 + + x_0, _ = jax.lax.while_loop(cond, step, (noise, 1.0)) + return x_0 diff --git a/policy/openpi-InternData-A1/src/openpi/models/pi0_config.py b/policy/openpi-InternData-A1/src/openpi/models/pi0_config.py new file mode 100644 index 0000000..b0f6b66 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models/pi0_config.py @@ -0,0 +1,108 @@ +import dataclasses +from typing import TYPE_CHECKING + +import flax.nnx as nnx +import jax +import jax.numpy as jnp +from typing_extensions import override + +from openpi.models import model as _model +import openpi.models.gemma as _gemma +from openpi.shared import array_typing as at +import openpi.shared.nnx_utils as nnx_utils + +if TYPE_CHECKING: + from openpi.models.pi0 import Pi0 + + +@dataclasses.dataclass(frozen=True) +class Pi0Config(_model.BaseModelConfig): + dtype: str = "bfloat16" + 
paligemma_variant: _gemma.Variant = "gemma_2b" + action_expert_variant: _gemma.Variant = "gemma_300m" + + # Set the model specific defaults. + action_dim: int = 32 + action_horizon: int = 50 + max_token_len: int = None # type: ignore + # Pi05 has two differences from Pi0: + # - the state input is part of the discrete language tokens rather than a continuous input that is part of the suffix + # - the action expert uses adaRMSNorm to inject the flow matching timestep + pi05: bool = False + # This config option is not used directly by the model, but it is read by the ModelTransformFactory. + discrete_state_input: bool = None # type: ignore + + def __post_init__(self): + if self.max_token_len is None: + object.__setattr__(self, "max_token_len", 200 if self.pi05 else 48) + if self.discrete_state_input is None: + object.__setattr__(self, "discrete_state_input", self.pi05) + + @property + @override + def model_type(self) -> _model.ModelType: + if self.pi05: + return _model.ModelType.PI05 + return _model.ModelType.PI0 + + @override + def create(self, rng: at.KeyArrayLike) -> "Pi0": + from openpi.models.pi0 import Pi0 + + return Pi0(self, rngs=nnx.Rngs(rng)) + + @override + def inputs_spec(self, *, batch_size: int = 1) -> tuple[_model.Observation, _model.Actions]: + image_spec = jax.ShapeDtypeStruct([batch_size, *_model.IMAGE_RESOLUTION, 3], jnp.float32) + image_mask_spec = jax.ShapeDtypeStruct([batch_size], jnp.bool_) + + with at.disable_typechecking(): + observation_spec = _model.Observation( + images={ + "base_0_rgb": image_spec, + "left_wrist_0_rgb": image_spec, + "right_wrist_0_rgb": image_spec, + }, + image_masks={ + "base_0_rgb": image_mask_spec, + "left_wrist_0_rgb": image_mask_spec, + "right_wrist_0_rgb": image_mask_spec, + }, + state=jax.ShapeDtypeStruct([batch_size, self.action_dim], jnp.float32), + tokenized_prompt=jax.ShapeDtypeStruct([batch_size, self.max_token_len], jnp.int32), + tokenized_prompt_mask=jax.ShapeDtypeStruct([batch_size, self.max_token_len], 
bool), + ) + action_spec = jax.ShapeDtypeStruct([batch_size, self.action_horizon, self.action_dim], jnp.float32) + + return observation_spec, action_spec + + def get_freeze_filter(self) -> nnx.filterlib.Filter: + """Returns the freeze filter based on the model config.""" + filters = [] + has_lora = False + gemma_params_filter = nnx_utils.PathRegex(".*llm.*") + action_expert_params_filter = nnx_utils.PathRegex(".*llm.*_1.*") + if "lora" in self.paligemma_variant: + filters.append( + gemma_params_filter, + ) + if "lora" not in self.action_expert_variant: + # If only freeze gemma params, exclude action expert params. + filters.append( + nnx.Not(action_expert_params_filter), + ) + has_lora = True + elif "lora" in self.action_expert_variant: + filters.append( + action_expert_params_filter, + ) + has_lora = True + + if has_lora: + # If any lora is used, exclude all lora params. + filters.append( + nnx.Not(nnx_utils.PathRegex(".*lora.*")), + ) + if not filters: + return nnx.Nothing + return nnx.All(*filters) diff --git a/policy/openpi-InternData-A1/src/openpi/models/pi0_fast.py b/policy/openpi-InternData-A1/src/openpi/models/pi0_fast.py new file mode 100644 index 0000000..e6b5bd1 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models/pi0_fast.py @@ -0,0 +1,313 @@ +import dataclasses +import logging +from typing import Any + +import einops +import flax.nnx as nnx +import flax.nnx.bridge as nnx_bridge +import jax +import jax.numpy as jnp +from typing_extensions import override + +from openpi.models import model as _model +import openpi.models.gemma_fast as _gemma +import openpi.models.siglip as _siglip +from openpi.shared import array_typing as at +import openpi.shared.nnx_utils as nnx_utils + +logger = logging.getLogger("openpi") + +PALIGEMMA_EOS_TOKEN = 1 + + +def make_attn_mask(input_mask, mask_ar): + """Adapted from big_vision. + + Tokens can attend to valid inputs tokens which have a cumulative mask_ar + smaller or equal to theirs. 
This way `mask_ar` bool[?B, N] can be used to + setup several types of attention, for example: + + [[1 1 1 1 1 1]]: pure causal attention. + + [[0 0 0 1 1 1]]: prefix-lm attention. The first 3 tokens can attend between + themselves and the last 3 tokens have a causal attention. The first + entry could also be a 1 without changing behaviour. + + [[1 0 1 0 1 0 0 1 0 0]]: causal attention between 4 blocks. Tokens of a + block can attend all previous blocks and all tokens on the same block. + + Args: + input_mask: bool[B, N] true if its part of the input, false if padding. + mask_ar: bool[?B, N] mask that's true where previous tokens cannot depend on + it and false where it shares the same attention mask as the previous token. + """ + mask_ar = jnp.broadcast_to(mask_ar, input_mask.shape) + cumsum = jnp.cumsum(mask_ar, axis=1) + attn_mask = cumsum[:, None, :] <= cumsum[:, :, None] + valid_mask = input_mask[:, None, :] * input_mask[:, :, None] + return jnp.logical_and(attn_mask, valid_mask) + + +@jax.vmap +def left_to_right_align(x, input_mask, attn_mask): + """Converts input from left-align to right-aligned.""" + # Due to vmap, this is operating in a single example (not batch level). 
+ assert x.ndim == 2 + assert input_mask.ndim == 1 + assert attn_mask.ndim == 2 + assert x.shape[0] == input_mask.shape[0] + assert attn_mask.shape[0] == attn_mask.shape[1], attn_mask.shape + seqlen = jnp.max(input_mask * jnp.arange(input_mask.shape[0])) + 1 + x = jnp.roll(x, -seqlen, axis=0) + input_mask = jnp.roll(input_mask, -seqlen, axis=0) + attn_mask = jnp.roll(attn_mask, -seqlen, axis=(0, 1)) + return x, input_mask, attn_mask + + +def put_along_last_axis(arr, indices, values): + """Like np.put_along_axis(..., axis=-1), since jax is missing it.""" + assert arr.ndim == indices.ndim == values.ndim, (arr.ndim, indices.ndim, values.ndim) + onehot = jax.nn.one_hot(indices, arr.shape[-1], dtype=values.dtype) + put_mask = jnp.einsum("...i,...in->...n", jnp.ones(values.shape, jnp.int32), onehot) + put_values = jnp.einsum("...i,...in->...n", values, onehot) + return jnp.where(put_mask, put_values, arr) + + +@dataclasses.dataclass(frozen=True) +class Pi0FASTConfig(_model.BaseModelConfig): + dtype: str = "bfloat16" + paligemma_variant: _gemma.Variant = "gemma_2b" + + # Set the model specific defaults. + action_dim: int = 32 + action_horizon: int = 32 + max_token_len: int = 250 + + # Tokenizer for the fast model. + fast_model_tokenizer: Any | None = None + # Keyword arguments for the fast model tokenizer. 
+ fast_model_tokenizer_kwargs: dict[str, Any] | None = None + + @property + @override + def model_type(self) -> _model.ModelType: + return _model.ModelType.PI0_FAST + + @override + def create(self, rng: at.KeyArrayLike) -> "Pi0FAST": + return Pi0FAST(self, rngs=nnx.Rngs(rng)) + + @override + def inputs_spec(self, *, batch_size: int = 1) -> tuple[_model.Observation, _model.Actions]: + image_spec = jax.ShapeDtypeStruct([batch_size, *_model.IMAGE_RESOLUTION, 3], jnp.float32) + image_mask_spec = jax.ShapeDtypeStruct([batch_size], jnp.bool_) + + with at.disable_typechecking(): + observation_spec = _model.Observation( + images={ + "base_0_rgb": image_spec, + "base_1_rgb": image_spec, + "wrist_0_rgb": image_spec, + }, + image_masks={ + "base_0_rgb": image_mask_spec, + "base_1_rgb": image_mask_spec, + "wrist_0_rgb": image_mask_spec, + }, + state=jax.ShapeDtypeStruct([batch_size, self.action_dim], jnp.float32), + tokenized_prompt=jax.ShapeDtypeStruct([batch_size, self.max_token_len], jnp.int32), + tokenized_prompt_mask=jax.ShapeDtypeStruct([batch_size, self.max_token_len], bool), + token_ar_mask=jax.ShapeDtypeStruct([batch_size, self.max_token_len], jnp.int32), + token_loss_mask=jax.ShapeDtypeStruct([batch_size, self.max_token_len], jnp.bool_), + ) + action_spec = jax.ShapeDtypeStruct([batch_size, self.action_horizon, self.action_dim], jnp.float32) + + return observation_spec, action_spec + + def get_freeze_filter(self) -> nnx.filterlib.Filter: + """Returns the freeze filter based on the model config.""" + if "lora" in self.paligemma_variant: + return nnx.All(nnx_utils.PathRegex(".*llm.*"), nnx.Not(nnx_utils.PathRegex(".*lora.*"))) + return nnx.Nothing + + +class Pi0FAST(_model.BaseModel): + def __init__(self, config: Pi0FASTConfig, rngs: nnx.Rngs): + super().__init__(config.action_dim, config.action_horizon, config.max_token_len) + paligemma_config = _gemma.get_config(config.paligemma_variant) + # TODO: rewrite gemma in NNX. For now, use bridge. 
+ llm = nnx_bridge.ToNNX( + _gemma.Module( + **paligemma_config, + embed_dtype=config.dtype, + cache_dtype=config.dtype, + ) + ) + llm.lazy_init(rngs=rngs, method="init") + img = nnx_bridge.ToNNX( + _siglip.Module( + num_classes=paligemma_config.width, + variant="So400m/14", + pool_type="none", + scan=True, + dtype_mm=config.dtype, + ) + ) + img.lazy_init(next(iter(config.fake_obs().images.values())), train=False, rngs=rngs) + self.PaliGemma = nnx.Dict(llm=llm, img=img) + + @at.typecheck + def embed_inputs( + self, obs: _model.Observation + ) -> tuple[at.Float[at.Array, "b s emb"], at.Bool[at.Array, "b s"], at.Int[at.Array, "b s"]]: + input_mask = [] + ar_mask = [] + token_embeddings = [] + # embed images + for name in obs.images: + image_token_embeddings, _ = self.PaliGemma.img(obs.images[name], train=False) + + token_embeddings.append(image_token_embeddings) + input_mask.append( + einops.repeat( + obs.image_masks[name], + "b -> b s", + s=image_token_embeddings.shape[1], + ) + ) + # image tokens attend to each other --> AR mask = 0 + ar_mask.append(0 * input_mask[-1]) + + # add tokenized inputs + assert obs.tokenized_prompt is not None, "Tokenized prompt is required" + assert obs.tokenized_prompt_mask is not None, "Tokenized prompt mask is required" + assert obs.token_ar_mask is not None, "Token auto-regressive mask is required" + tokenized_inputs_embeddings = self.PaliGemma.llm(obs.tokenized_prompt, embed_only=True) + token_embeddings.append(tokenized_inputs_embeddings) + input_mask.append(obs.tokenized_prompt_mask) + ar_mask.append(obs.token_ar_mask) + + # return embeddings, input mask, and ar mask + return ( + jnp.concatenate(token_embeddings, axis=1), + jnp.concatenate(input_mask, axis=1), + jnp.concatenate(ar_mask, axis=1), + ) + + @override + def compute_loss( + self, rng: at.KeyArrayLike, observation: _model.Observation, actions: _model.Actions, *, train: bool = False + ) -> at.Float[at.Array, "*b ah"]: + observation = _model.preprocess_observation( + rng, 
observation, train=train, image_keys=list(observation.images.keys()) + ) + + # Compute inputs: one big forward pass of prefix + suffix at once + input_token_embeddings, input_mask, ar_mask = self.embed_inputs(observation) + attn_mask = make_attn_mask(input_mask, ar_mask) + + # Compute one-hot targets: we predict *next* token, so shift the input tokens by one. + targets = jax.nn.one_hot( + observation.tokenized_prompt[:, 1:], + self.PaliGemma.llm.module.vocab_size, + ) + + # Each input predicts *next* token, so we don't input the last token. + pre_logits, _, _ = self.PaliGemma.llm( + embedded_prefix=input_token_embeddings[:, :-1], + mask=attn_mask[:, :-1, :-1], + return_prelogits=True, + ) + + # Only decode logits for the target tokens to save memory + # (decoding matmul is large because it is a seq_len x vocab_size dense layer). + logits, _ = self.PaliGemma.llm( + pre_logits=pre_logits[:, -targets.shape[1] :], + ) + logp = jax.nn.log_softmax(logits, axis=-1) + + # Compute CE loss on token targets + assert observation.token_loss_mask is not None, "Token loss mask is required" + loss_mask = observation.token_loss_mask[:, 1:] + token_pplx = jnp.sum(targets * logp, axis=-1) + return -jnp.sum(token_pplx * loss_mask, axis=-1) / jnp.clip(jnp.sum(loss_mask, -1), 1) + + @override + def sample_actions( + self, + rng: at.KeyArrayLike, + observation: _model.Observation, + *, + max_decoding_steps: int | at.Int[at.Array, ""] = 256, + temperature: float = 0.0, + ) -> _model.Actions: + # TODO: this is a hack to get the image keys. 
+ observation = _model.preprocess_observation( + None, observation, train=False, image_keys=list(observation.images.keys()) + ) + + # embed inputs + prefix_token_embeddings, prefix_mask, prefix_ar_mask = self.embed_inputs(observation) + prefix_attn_mask = make_attn_mask(prefix_mask, prefix_ar_mask) + + # left to right align all input token sequences + prefix_token_embeddings, prefix_mask, prefix_attn_mask = left_to_right_align( + prefix_token_embeddings, prefix_mask, prefix_attn_mask + ) + prefill_size = prefix_token_embeddings.shape[1] + prefill_len = jnp.sum(prefix_mask, axis=-1) + prefix_start = prefill_size - prefill_len + + # first fill KV cache with a forward pass of the prefix + # pad attention mask to set the size of the KV cache (prefill_size + max_decoding_steps) + prefix_attn_mask = jnp.pad(prefix_attn_mask, ((0, 0), (0, 0), (0, max_decoding_steps))) + prefix_positions = jnp.cumsum(prefix_mask, axis=-1) - 1 + prefix_logits, kv_cache, _ = self.PaliGemma.llm( + embedded_prefix=prefix_token_embeddings, mask=prefix_attn_mask, positions=prefix_positions, decode=True + ) + + # prepare decoding -- final logit decodes the first token + last_logit = prefix_logits[:, -1:] + output_tokens = jnp.zeros((last_logit.shape[0], max_decoding_steps)) + + def step(carry): + rng, last_logit, output_tokens, cache, _, step = carry + + # Sample token from last logit + # Split RNG for this step + rng, rng_step = jax.random.split(rng) + token = jax.lax.cond( + temperature > 0.0, + lambda _: jax.random.categorical(rng_step, last_logit / temperature, axis=-1), + lambda _: jnp.argmax(last_logit, axis=-1), + operand=None, + ) + output_tokens = put_along_last_axis(output_tokens, jnp.broadcast_to(step, (token.shape[0], 1)), token) + + # Check for early stopping --> stop if all batch elements have EOS token + has_eos = jnp.any(token == PALIGEMMA_EOS_TOKEN, axis=-1) + all_eos = jnp.all(has_eos) + + # Decode one step + token_embedding = self.PaliGemma.llm(token, embed_only=True) + 
positions = prefill_len[:, None] + step + 1 + mask = jnp.logical_and( + jnp.arange(prefill_size + max_decoding_steps)[None, None, :] >= prefix_start[:, None, None], + jnp.arange(prefill_size + max_decoding_steps)[None, None, :] + < (jnp.broadcast_to(prefill_size + step + 1, (prefix_start.shape[0], 1, 1))), + ) + last_logit, kv_cache, _ = self.PaliGemma.llm( + embedded_prefix=token_embedding, mask=mask, positions=positions, decode=True, kv_cache=cache + ) + + return rng, last_logit, output_tokens, kv_cache, all_eos, step + 1 + + def cond(carry): + _, _, _, _, all_eos, step = carry + return (~all_eos) & (step < max_decoding_steps) + + # Use lax.while_loop so we can jit the full decoding loop. + _, _, output_tokens, _, _, _ = jax.lax.while_loop( + cond, step, (rng, last_logit, output_tokens, kv_cache, False, 0) + ) + return output_tokens diff --git a/policy/openpi-InternData-A1/src/openpi/models/pi0_test.py b/policy/openpi-InternData-A1/src/openpi/models/pi0_test.py new file mode 100644 index 0000000..e5f0f84 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models/pi0_test.py @@ -0,0 +1,46 @@ +import flax.nnx as nnx +import jax + +import openpi.models.pi0_config as _pi0_config + + +def _get_frozen_state(config: _pi0_config.Pi0Config) -> nnx.State: + abstract_model = nnx.eval_shape(config.create, jax.random.key(0)) + + freeze_filter = config.get_freeze_filter() + return nnx.state(abstract_model, nnx.All(nnx.Param, freeze_filter)).flat_state() + + +def test_pi0_full_finetune(): + config = _pi0_config.Pi0Config() + state = _get_frozen_state(config) + assert len(state) == 0 + + +def test_pi0_gemma_lora(): + config = _pi0_config.Pi0Config(paligemma_variant="gemma_2b_lora") + state = _get_frozen_state(config) + assert len(state) == 9 + assert all("lora" not in p for p in state) + assert all("llm" in p for p in state) + assert all("_1" not in p for p in state) + + +def test_pi0_action_expert_lora(): + config = 
_pi0_config.Pi0Config(action_expert_variant="gemma_300m_lora") + state = _get_frozen_state(config) + # excluding embedder, rest of the params should be same as gemma_lora. + assert len(state) == 8 + assert all("lora" not in p for p in state) + assert all("llm" in p for p in state) + # all frozen params should have _1 in their path since it's the action expert. + assert all(any("_1" in p for p in path) for path in state) + + +def test_pi0_all_lora(): + config = _pi0_config.Pi0Config(paligemma_variant="gemma_2b_lora", action_expert_variant="gemma_300m_lora") + state = _get_frozen_state(config) + # sum of gemma_lora and action_expert_lora's frozen params. + assert len(state) == 17 + assert all("lora" not in p for p in state) + assert all("llm" in p for p in state) diff --git a/policy/openpi-InternData-A1/src/openpi/models/siglip.py b/policy/openpi-InternData-A1/src/openpi/models/siglip.py new file mode 100644 index 0000000..c74c99e --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models/siglip.py @@ -0,0 +1,373 @@ +# Copyright 2024 Big Vision Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""A refactored and simplified ViT adoptation for Pi, taken from big_vision.""" + +from collections.abc import Sequence + +import flax.linen as nn +import jax +import jax.numpy as jnp +import numpy as np + +import openpi.training.sharding as sharding + + +def posemb_sincos_2d(h, w, width, temperature=10_000.0, dtype=jnp.float32): + """Follows the MoCo v3 logic.""" + y, x = jnp.mgrid[:h, :w] + + assert width % 4 == 0, "Width must be mult of 4 for sincos posemb" + omega = jnp.arange(width // 4) / (width // 4 - 1) + omega = 1.0 / (temperature**omega) + y = jnp.einsum("m,d->md", y.flatten(), omega) + x = jnp.einsum("m,d->md", x.flatten(), omega) + pe = jnp.concatenate([jnp.sin(x), jnp.cos(x), jnp.sin(y), jnp.cos(y)], axis=1) + return jnp.asarray(pe, dtype)[None, :, :] + + +def get_posemb(self, typ, seqshape, width, name, dtype=jnp.float32): + if typ == "learn": + return self.param( + name, + nn.initializers.normal(stddev=1 / np.sqrt(width)), + (1, np.prod(seqshape), width), + dtype, + ) + if typ == "sincos2d": + return posemb_sincos_2d(*seqshape, width, dtype=dtype) + raise ValueError(f"Unknown posemb type: {typ}") + + +class MlpBlock(nn.Module): + """Transformer MLP / feed-forward block.""" + + mlp_dim: int | None = None # Defaults to 4x input dim + dropout: float = 0.0 + dtype_mm: str = "float32" + + @nn.compact + def __call__(self, x, deterministic=True): # noqa: FBT002 + """Applies Transformer MlpBlock module.""" + inits = { + "kernel_init": nn.initializers.xavier_uniform(), + "bias_init": nn.initializers.normal(stddev=1e-6), + } + + _, _, d = x.shape # n,l,d + x = nn.Dense(self.mlp_dim or 4 * d, dtype=self.dtype_mm, **inits)(x) + x = nn.gelu(x) + x = nn.Dropout(rate=self.dropout)(x, deterministic) + return nn.Dense(d, dtype=self.dtype_mm, **inits)(x) + + +class Encoder1DBlock(nn.Module): + """Single transformer encoder block (MHSA + MLP).""" + + mlp_dim: int | None = None # Defaults to 4x input dim + num_heads: int = 12 + dropout: float = 0.0 + dtype_mm: str = 
"float32" + + @nn.compact + def __call__(self, x, deterministic=True): # noqa: FBT002 + out = {} + x = sharding.activation_sharding_constraint(x) + y = nn.LayerNorm(dtype=self.dtype_mm)(x) + y = out["sa"] = nn.MultiHeadDotProductAttention( + num_heads=self.num_heads, + kernel_init=nn.initializers.xavier_uniform(), + deterministic=deterministic, + dtype=self.dtype_mm, + )(y, y) + y = sharding.activation_sharding_constraint(y) + y = nn.Dropout(rate=self.dropout)(y, deterministic) + x = out["+sa"] = x + y + + y = nn.LayerNorm(dtype=self.dtype_mm)(x) + y = out["mlp"] = MlpBlock( + mlp_dim=self.mlp_dim, + dropout=self.dropout, + dtype_mm=self.dtype_mm, + )(y, deterministic) + y = sharding.activation_sharding_constraint(y) + y = nn.Dropout(rate=self.dropout)(y, deterministic) + x = out["+mlp"] = x + y + x = sharding.activation_sharding_constraint(x) + return x, out + + +class Encoder(nn.Module): + """Transformer Model Encoder for sequence to sequence translation.""" + + depth: int + mlp_dim: int | None = None # Defaults to 4x input dim + num_heads: int = 12 + dropout: float = 0.0 + scan: bool = False + remat_policy: str = "nothing_saveable" + dtype_mm: str = "float32" + + @nn.compact + def __call__(self, x, deterministic=True): # noqa: FBT002 + out = {} + + if self.scan: + block = nn.remat( + Encoder1DBlock, + prevent_cse=False, + static_argnums=(2,), # 0=self, 2=deterministic + policy=getattr(jax.checkpoint_policies, self.remat_policy, None), + ) + x, scan_out = nn.scan( + block, + variable_axes={"params": 0}, + split_rngs={"params": True, "dropout": True}, + in_axes=nn.broadcast, + length=self.depth, + )( + name="encoderblock", + dtype_mm=self.dtype_mm, + mlp_dim=self.mlp_dim, + num_heads=self.num_heads, + dropout=self.dropout, + )(x, deterministic) + for lyr in range(self.depth): + out[f"block{lyr:02d}"] = jax.tree.map(lambda o, lyr=lyr: o[lyr], scan_out) + else: + # Input Encoder + for lyr in range(self.depth): + block_cur = Encoder1DBlock( + 
name=f"encoderblock_{lyr}", + dtype_mm=self.dtype_mm, + mlp_dim=self.mlp_dim, + num_heads=self.num_heads, + dropout=self.dropout, + ) + x, out[f"block{lyr:02d}"] = block_cur(x, deterministic) + out["pre_ln"] = x # Alias for last block, but without the number in it. + + return nn.LayerNorm(name="encoder_norm", dtype=self.dtype_mm)(x), out + + +class MAPHead(nn.Module): + """Multihead Attention Pooling.""" + + mlp_dim: int | None = None # Defaults to 4x input dim + num_heads: int = 12 + dtype_mm: str = "float32" + + @nn.compact + def __call__(self, x): + n, _, d = x.shape # n,l,d + probe = self.param("probe", nn.initializers.xavier_uniform(), (1, 1, d), x.dtype) + probe = jnp.tile(probe, [n, 1, 1]) + + x = nn.MultiHeadDotProductAttention( + num_heads=self.num_heads, + dtype=self.dtype_mm, + kernel_init=nn.initializers.xavier_uniform(), + )(probe, x) + + y = nn.LayerNorm(dtype=self.dtype_mm)(x) + x = x + MlpBlock(mlp_dim=self.mlp_dim, dtype=self.dtype_mm)(y) + return x[:, 0] + + +class _Module(nn.Module): + """ViT model.""" + + num_classes: int | None = None + patch_size: Sequence[int] = (16, 16) + width: int = 768 + depth: int = 12 + mlp_dim: int | None = None # Defaults to 4x input dim + num_heads: int = 12 + posemb: str = "learn" # Can also be "sincos2d" + rep_size: int | bool = False + dropout: float = 0.0 + pool_type: str = "gap" # Can also be "map" or "tok" + head_zeroinit: bool = True + scan: bool = False + # or "dots_with_no_batch_dims_saveable" for more speed (memory costly) + remat_policy: str = "nothing_saveable" + dtype_mm: str = "float32" + + @nn.compact + def __call__(self, image, *, train=False): + out = {} + + # Kevin edit: do patch extraction and posemb in float32, + # because I feel like it's a bit safer. 
+ image = jnp.asarray(image, jnp.float32) + + # Patch extraction + x = out["stem"] = nn.Conv( + self.width, + self.patch_size, + strides=self.patch_size, + padding="VALID", + name="embedding", + dtype=jnp.float32, + )(image) + + n, h, w, c = x.shape + x = jnp.reshape(x, [n, h * w, c]) + + # Add posemb before adding extra token. + x = out["with_posemb"] = x + get_posemb(self, self.posemb, (h, w), c, "pos_embedding", jnp.float32) + + if self.pool_type == "tok": + cls = self.param("cls", nn.initializers.zeros, (1, 1, c), x.dtype) + x = jnp.concatenate([jnp.tile(cls, [n, 1, 1]), x], axis=1) + + n, _, c = x.shape # n,l,d + x = nn.Dropout(rate=self.dropout)(x, not train) + + # Kevin edit: now cast back to dtype_mm (potentially half precision) + x = x.astype(self.dtype_mm) + + x, out["encoder"] = Encoder( + depth=self.depth, + mlp_dim=self.mlp_dim, + num_heads=self.num_heads, + dropout=self.dropout, + scan=self.scan, + remat_policy=self.remat_policy, + dtype_mm=self.dtype_mm, + name="Transformer", + )(x, deterministic=not train) + encoded = out["encoded"] = x + + if self.pool_type == "map": + x = out["head_input"] = MAPHead( + num_heads=self.num_heads, + mlp_dim=self.mlp_dim, + dtype=self.dtype_mm, + )(x) + elif self.pool_type == "gap": + x = out["head_input"] = jnp.mean(x, axis=1) + elif self.pool_type == "0": + x = out["head_input"] = x[:, 0] + elif self.pool_type == "tok": + x = out["head_input"] = x[:, 0] + encoded = encoded[:, 1:] + elif self.pool_type == "none": + pass + else: + raise ValueError(f"Unknown pool type: '{self.pool_type}'") + + x_2d = jnp.reshape(encoded, [n, h, w, -1]) + + if self.rep_size: + rep_size = self.width if self.rep_size is True else self.rep_size + hid = nn.Dense(rep_size, dtype=self.dtype_mm, name="pre_logits") + # NOTE: In the past we did not include tanh in pre_logits. + # For few-shot, it should not matter much, as it whitens anyways. 
+ x_2d = nn.tanh(hid(x_2d)) + x = nn.tanh(hid(x)) + + out["pre_logits_2d"] = x_2d + out["pre_logits"] = x + + if self.num_classes: + kw = {"kernel_init": nn.initializers.zeros} if self.head_zeroinit else {} + head = nn.Dense(self.num_classes, dtype=self.dtype_mm, name="head", **kw) + x_2d = out["logits_2d"] = head(x_2d) + x = out["logits"] = head(x) + + return x, out + + +def Module(num_classes=None, *, variant=None, **kw): # pylint: disable=invalid-name # noqa: N802 + """Factory function, because linen really don't like what I'm doing!""" + return _Module(num_classes, **{**decode_variant(variant), **kw}) + + +def decode_variant(variant): + """Converts a string like "B" or "B/32" into a params dict.""" + if variant is None: + return {} + + v, patch = variant, {} + if "/" in variant: + v, patch = variant.split("/") + patch = {"patch_size": (int(patch), int(patch))} + + return { + # pylint:disable=line-too-long + # Reference: Table 2 of https://arxiv.org/abs/2106.04560. + "width": { + "mu": 32, + "Ti": 192, + "S": 384, + "M": 512, + "B": 768, + "L": 1024, + "So400m": 1152, + "H": 1280, + "g": 1408, + "g-opt": 1536, + "G": 1664, + "G-opt": 1536, + "e": 1792, + }[v], + "depth": { + "mu": 1, + "Ti": 12, + "S": 12, + "M": 12, + "B": 12, + "L": 24, + "So400m": 27, + "H": 32, + "g": 40, + "g-opt": 40, + "G": 48, + "G-opt": 48, + "e": 56, + }[v], + "mlp_dim": { + "mu": 128, + "Ti": 768, + "S": 1536, + "M": 2048, + "B": 3072, + "L": 4096, + "So400m": 4304, + "H": 5120, + "g": 6144, + "g-opt": 6144, + "G": 8192, + "G-opt": 8192, + "e": 15360, + }[v], + "num_heads": { + "mu": 2, + "Ti": 3, + "S": 6, + "M": 8, + "B": 12, + "L": 16, + "So400m": 16, + "H": 16, + "g": 16, + "g-opt": 16, + "G": 16, + "G-opt": 16, + "e": 16, + }[v], + # pylint:enable=line-too-long + **patch, + } diff --git a/policy/openpi-InternData-A1/src/openpi/models/tokenizer.py b/policy/openpi-InternData-A1/src/openpi/models/tokenizer.py new file mode 100644 index 0000000..8a4966d --- /dev/null +++ 
import logging
import os

import jax
import numpy as np
import orbax.checkpoint as ocp
import sentencepiece
from transformers import AutoProcessor

import openpi.models.utils.fsq_tokenizer as fsq_tokenizer
import openpi.shared.download as download

# URI of the base PaliGemma sentencepiece model shared by every tokenizer below.
_PALIGEMMA_TOKENIZER_URI = "gs://big_vision/paligemma_tokenizer.model"


def _load_paligemma_tokenizer() -> sentencepiece.SentencePieceProcessor:
    """Download (if necessary) and load the base PaliGemma sentencepiece tokenizer."""
    path = download.maybe_download(_PALIGEMMA_TOKENIZER_URI, gs={"token": "anon"})
    with path.open("rb") as f:
        return sentencepiece.SentencePieceProcessor(model_proto=f.read())


def _discretize_state(state: np.ndarray) -> np.ndarray:
    """Discretize a normalized state (assumed range after normalization: [-1, 1]) into 256 bins."""
    return np.digitize(state, bins=np.linspace(-1, 1, 256 + 1)[:-1]) - 1


def _pad_or_truncate(max_len: int, seqs: list[list]) -> list[list]:
    """Pad each sequence in `seqs` with False up to `max_len`, or truncate them to it.

    All sequences must have the same length. A warning is logged when tokens are
    dropped, since that usually means `max_token_len` is configured too low.
    """
    seq_len = len(seqs[0])
    if seq_len < max_len:
        padding = [False] * (max_len - seq_len)
        return [s + padding for s in seqs]
    if seq_len > max_len:
        logging.warning(
            f"Token length ({seq_len}) exceeds max length ({max_len}), truncating. "
            "Consider increasing the `max_token_len` in your model config if this happens frequently."
        )
    return [s[:max_len] for s in seqs]


def _encode_state_prefix(
    tokenizer: sentencepiece.SentencePieceProcessor, prompt: str, state: np.ndarray
) -> list[int]:
    """Encode the shared "Task: ..., State: ...;" prefix used by the discrete-action tokenizers.

    Convention: the prefix includes the prompt and a string representation of the
    discretized state, followed by ';'.
    """
    cleaned_text = prompt.lower().strip().replace("_", " ")
    state_str = " ".join(map(str, _discretize_state(state)))
    prefix = f"Task: {cleaned_text}, State: {state_str};\n"
    return tokenizer.encode(prefix, add_bos=True)


def _assemble_prefix_postfix(
    max_len: int, prefix_tokens: list[int], postfix_tokens: list[int]
) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Concatenate prefix/postfix and build the token/AR/loss masks, padded to `max_len`.

    AR mask is 0 on the prefix (bidirectional attention) and 1 on the postfix
    (causal attention to all previous tokens). Loss is computed on the postfix only.
    """
    tokens = prefix_tokens + postfix_tokens
    token_mask = [True] * len(tokens)
    ar_mask = [0] * len(prefix_tokens) + [1] * len(postfix_tokens)
    loss_mask = [False] * len(prefix_tokens) + [True] * len(postfix_tokens)
    padded = _pad_or_truncate(max_len, [tokens, token_mask, ar_mask, loss_mask])
    return tuple(np.asarray(s) for s in padded)


def _extract_action_str(decoded: str) -> str | None:
    """Return the text between 'Action: ' and '|' in a decoded model output, or None if absent."""
    if "Action: " not in decoded:
        return None
    return decoded.split("Action: ")[1].split("|")[0].strip()


def _act_to_paligemma_tokens(
    tokenizer: sentencepiece.SentencePieceProcessor, skip_tokens: int, tokens: np.ndarray | list[int]
) -> np.ndarray:
    """Map action token ids onto the *last* entries of the PaliGemma vocabulary (and back).

    The mapping is an involution: applying it twice recovers the original ids.
    """
    if isinstance(tokens, list):
        tokens = np.array(tokens)
    return tokenizer.vocab_size() - 1 - skip_tokens - tokens


class PaligemmaTokenizer:
    """Text (and optionally state) tokenizer for pi0 / pi05 style models."""

    def __init__(self, max_len: int = 48):
        self._max_len = max_len
        self._tokenizer = _load_paligemma_tokenizer()

    def tokenize(self, prompt: str, state: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray]:
        """Tokenize `prompt` (plus discretized `state` in the Pi05 format).

        Returns (tokens, mask), both of shape (max_len,).
        """
        cleaned_text = prompt.strip().replace("_", " ").replace("\n", " ")
        if state is not None:
            # This is the Pi05 format, where the state is part of the discrete language input.
            state_str = " ".join(map(str, _discretize_state(state)))
            full_prompt = f"Task: {cleaned_text}, State: {state_str};\nAction: "
            tokens = self._tokenizer.encode(full_prompt, add_bos=True)
        else:
            # This is the Pi0 format, where the state is part of the continuous action expert input.
            # tokenize "\n" separately as the "start of answer" token
            tokens = self._tokenizer.encode(cleaned_text, add_bos=True) + self._tokenizer.encode("\n")

        tokens, mask = _pad_or_truncate(self._max_len, [tokens, [True] * len(tokens)])
        return np.asarray(tokens), np.asarray(mask)


class FASTTokenizer:
    """Combines the PaliGemma text tokenizer with the FAST action tokenizer."""

    def __init__(self, max_len: int = 256, fast_tokenizer_path: str = "physical-intelligence/fast"):
        self._max_len = max_len

        # Download base PaliGemma tokenizer
        self._paligemma_tokenizer = _load_paligemma_tokenizer()

        # Instantiate FAST tokenizer
        self._fast_tokenizer = AutoProcessor.from_pretrained(fast_tokenizer_path, trust_remote_code=True)
        self._fast_skip_tokens = 128  # Skip last 128 tokens in PaliGemma vocab since they are special tokens

    def tokenize(
        self, prompt: str, state: np.ndarray, actions: np.ndarray | None
    ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Return (tokens, token_mask, ar_mask, loss_mask), each of shape (max_len,)."""
        prefix_tokens = _encode_state_prefix(self._paligemma_tokenizer, prompt, state)

        if actions is not None:
            # Tokenize actions with FAST tokenizer --> map to last tokens in PaliGemma vocab
            action_tokens = self._fast_tokenizer(actions[None])[0]
            action_tokens_in_pg = self._act_tokens_to_paligemma_tokens(action_tokens)

            # Convention: postfix contains 'Action:' followed by FAST tokens, followed by '|'
            postfix_tokens = (
                self._paligemma_tokenizer.encode("Action: ")
                + action_tokens_in_pg.tolist()
                + self._paligemma_tokenizer.encode("|", add_eos=True)
            )
        else:
            postfix_tokens = []

        return _assemble_prefix_postfix(self._max_len, prefix_tokens, postfix_tokens)

    def extract_actions(self, tokens: np.ndarray, action_horizon: int, action_dim: int) -> np.ndarray:
        """Decode predicted tokens back into an (action_horizon, action_dim) array; zeros on failure."""
        decoded_tokens = self._paligemma_tokenizer.decode(tokens.tolist())

        action_str = _extract_action_str(decoded_tokens)
        if action_str is None:
            return np.zeros((action_horizon, action_dim), dtype=np.float32)

        raw_action_tokens = np.array(self._paligemma_tokenizer.encode(action_str))
        action_tokens = self._act_tokens_to_paligemma_tokens(raw_action_tokens)
        return self._fast_tokenizer.decode(
            [action_tokens.tolist()], time_horizon=action_horizon, action_dim=action_dim
        )[0]

    def _act_tokens_to_paligemma_tokens(self, tokens: np.ndarray | list[int]) -> np.ndarray:
        return _act_to_paligemma_tokens(self._paligemma_tokenizer, self._fast_skip_tokens, tokens)


###########################################################################
## The tokenizers below are used for RoboArena baseline implementations. ##
## They are *not* used for pi0-style models.                             ##
###########################################################################


class BinningTokenizer:
    """
    Standard RT-2 / OpenVLA style binning tokenizer.
    """

    def __init__(self, max_len: int = 256, n_bins: int = 256):
        self._max_len = max_len
        self._n_bins = n_bins

        # Download base PaliGemma tokenizer
        self._paligemma_tokenizer = _load_paligemma_tokenizer()

        self._fast_skip_tokens = 128  # Skip last 128 tokens in PaliGemma vocab since they are special tokens

    def tokenize(
        self, prompt: str, state: np.ndarray, actions: np.ndarray | None
    ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Tokenize a prompt and state into a sequence of tokens.

        Args:
            prompt: The text prompt to tokenize.
            state: The state array to discretize and tokenize.
            actions: Must be None. Action encoding is not currently supported.

        Returns:
            A tuple of (tokens, token_mask, ar_mask, loss_mask).

        Raises:
            NotImplementedError: If actions is not None.
        """
        prefix_tokens = _encode_state_prefix(self._paligemma_tokenizer, prompt, state)

        if actions is not None:
            raise NotImplementedError("BinningTokenizer does not support encoding actions atm (only for inference use)")
        postfix_tokens = []

        return _assemble_prefix_postfix(self._max_len, prefix_tokens, postfix_tokens)

    def extract_actions(self, tokens: np.ndarray, action_horizon: int, action_dim: int) -> np.ndarray:
        """Decode predicted tokens into binned actions mapped back to [-1, 1]; zeros on failure."""
        decoded_tokens = self._paligemma_tokenizer.decode(tokens.tolist())

        action_str = _extract_action_str(decoded_tokens)
        if action_str is None:
            return np.zeros((action_horizon, action_dim), dtype=np.float32)

        raw_action_tokens = np.array(self._paligemma_tokenizer.encode(action_str))
        action_tokens = self._act_tokens_to_paligemma_tokens(raw_action_tokens)
        if len(action_tokens) < action_horizon * action_dim:
            # Not enough tokens were generated to fill the action chunk.
            return np.zeros([action_horizon, action_dim], dtype=np.float32)
        action_tokens = action_tokens[: (action_horizon * action_dim)].reshape([action_horizon, action_dim])
        # Map bin indices back to the normalized [-1, 1] action range.
        return action_tokens / self._n_bins * 2 - 1

    def _act_tokens_to_paligemma_tokens(self, tokens: np.ndarray | list[int]) -> np.ndarray:
        return _act_to_paligemma_tokens(self._paligemma_tokenizer, self._fast_skip_tokens, tokens)


class FSQTokenizer:
    """
    FSQ tokenizer from the FAST paper baselines.
    """

    def __init__(self, max_len: int = 256, fsq_tokenizer_path: str | None = None):
        self._max_len = max_len

        assert fsq_tokenizer_path is not None, "fsq_tokenizer_path must be provided"
        # Download tokenizer checkpoint; the single entry in the directory is the step dir.
        path = download.maybe_download(fsq_tokenizer_path)
        tok_path = os.path.join(path, os.listdir(path)[0])

        # Split step from path
        step = int(tok_path.split("/")[-1])
        base_path = tok_path.rsplit("/", 1)[0]

        mgr = ocp.CheckpointManager(
            base_path,
            item_handlers={
                "params": ocp.StandardCheckpointHandler(),
                "opt_state": ocp.StandardCheckpointHandler(),
                "config": ocp.JsonCheckpointHandler(),
            },
            options=ocp.CheckpointManagerOptions(max_to_keep=1),
        )

        try:
            restored = mgr.restore(
                step, args=ocp.args.Composite(config=ocp.args.JsonRestore(), params=ocp.args.StandardRestore())
            )
            config = restored["config"]
            self._params = restored["params"]
            self._fsq_tokenizer = fsq_tokenizer.FsqAttentionTokenizer(**config)
        except Exception as e:
            raise RuntimeError(
                f"Failed to load FSQ tokenizer checkpoint from {fsq_tokenizer_path}. Error: {e!s}"
            ) from e

        # Compile tokenize and detokenize functions
        self._tokenize_fn = jax.jit(
            lambda params, x: self._fsq_tokenizer.apply({"params": params}, x, method=self._fsq_tokenizer.tokenize)
        )
        self._detokenize_fn = jax.jit(
            lambda params, x: self._fsq_tokenizer.apply({"params": params}, x, method=self._fsq_tokenizer.detokenize)
        )

        # Download base PaliGemma tokenizer
        self._paligemma_tokenizer = _load_paligemma_tokenizer()

        self._fast_skip_tokens = 128  # Skip last 128 tokens in PaliGemma vocab since they are special tokens

    def tokenize(
        self, prompt: str, state: np.ndarray, actions: np.ndarray | None
    ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Tokenize prompt + state; actions are not supported (inference-only tokenizer).

        Raises:
            NotImplementedError: If actions is not None.
        """
        prefix_tokens = _encode_state_prefix(self._paligemma_tokenizer, prompt, state)

        if actions is not None:
            raise NotImplementedError("FSQTokenizer does not support encoding actions atm (only for inference use)")
        postfix_tokens = []

        return _assemble_prefix_postfix(self._max_len, prefix_tokens, postfix_tokens)

    def extract_actions(self, tokens: np.ndarray, action_horizon: int, action_dim: int) -> np.ndarray:
        """Detokenize predicted FSQ tokens into actions; zeros on parse/decoding failure."""
        decoded_tokens = self._paligemma_tokenizer.decode(tokens.tolist())

        action_str = _extract_action_str(decoded_tokens)
        if action_str is None:
            return np.zeros((action_horizon, action_dim), dtype=np.float32)

        raw_action_tokens = np.array(self._paligemma_tokenizer.encode(action_str))
        action_tokens = self._act_tokens_to_paligemma_tokens(raw_action_tokens)
        try:
            # Move computation to CPU and compile on-demand
            device = jax.devices("cpu")[0]
            with jax.default_device(device):
                detok_act = self._detokenize_fn(self._params, action_tokens[None, ...])[0]
                return detok_act[: action_horizon * action_dim].reshape([action_horizon, action_dim])
        except Exception as e:
            logging.warning(f"Error decoding FSQ: {e}")
            return np.zeros((action_horizon, action_dim))

    def _act_tokens_to_paligemma_tokens(self, tokens: np.ndarray | list[int]) -> np.ndarray:
        return _act_to_paligemma_tokens(self._paligemma_tokenizer, self._fast_skip_tokens, tokens)
# === src/openpi/models/tokenizer_test.py ===
import numpy as np

from openpi.models import tokenizer as _tokenizer


def test_tokenize():
    tokenizer = _tokenizer.PaligemmaTokenizer(max_len=10)
    tokens, masks = tokenizer.tokenize("Hello, world!")

    assert tokens.shape == (10,)
    assert masks.shape == (10,)


def test_fast_tokenizer():
    prompt = "Hello, world!"
    state = np.random.rand(5).astype(np.float32)
    action = np.random.rand(3, 2).astype(np.float32)
    tokenizer = _tokenizer.FASTTokenizer(max_len=256)
    tokens, token_masks, ar_masks, loss_masks = tokenizer.tokenize(prompt, state, action)

    assert tokens.shape == (256,)
    assert token_masks.shape == (256,)
    assert ar_masks.shape == (256,)
    assert loss_masks.shape == (256,)

    act = tokenizer.extract_actions(tokens, 3, 2)
    assert act.shape == (3, 2)


# === src/openpi/models/utils/fsq_tokenizer.py ===
import math
from typing import Any, Literal

import chex
from einops import einops
from flax import linen as nn
from flax.linen.module import Module
from flax.linen.module import compact
from flax.struct import dataclass
from flax.typing import Array
import jax
import jax.numpy as jnp


class FsqCodebook(nn.Module):
    """Finite Scalar Quantization codebook: projects down, quantizes per-dim, projects up."""

    input_dim: int
    # "custom" is a valid value (FsqAttentionTokenizer passes it); the original
    # annotation `Literal["fsq", "lfq"]` incorrectly excluded it.
    codebook_type: Literal["fsq", "lfq", "custom"]
    target_codebook_size: int

    _bins_per_dim: tuple[int, ...] | None = None

    @property
    def bins_per_dim(self) -> tuple[int, ...]:
        """Number of quantization bins per latent dimension (explicit override wins)."""
        if self._bins_per_dim is not None:
            return self._bins_per_dim

        if self.codebook_type == "fsq":
            return self._get_bins_fsq(self.target_codebook_size)
        elif self.codebook_type == "lfq":  # noqa: RET505
            return self._get_bins_lfq(self.target_codebook_size)
        elif self.codebook_type == "custom":
            return self._get_bins_custom(self.target_codebook_size)
        else:
            raise ValueError(f"Codebook type {self.codebook_type} not supported.")

    @property
    def place_values(self) -> jnp.ndarray:
        """Mixed-radix place values used to pack per-dim digits into a single token id."""
        place_values = [1]
        for b in self.bins_per_dim[:-1]:
            place_values.append(place_values[-1] * b)
        return jnp.array(place_values)

    @staticmethod
    def _get_bins_fsq(target_codebook_size: int) -> tuple[int, ...]:
        """
        Get bins per dimension based on codebook size, from the original FSQ paper.
        """
        if target_codebook_size == 2**8:
            return (8, 6, 5)
        elif target_codebook_size == 2**10:  # noqa: RET505
            return (8, 5, 5, 5)
        elif target_codebook_size == 2**12:
            return (7, 5, 5, 5, 5)
        elif target_codebook_size == 2**14:
            return (8, 8, 8, 6, 5)
        elif target_codebook_size == 2**16:
            return (8, 8, 8, 5, 5, 5)
        else:
            raise ValueError(f"Codebook size {target_codebook_size} not supported.")

    @staticmethod
    def _get_bins_custom(target_codebook_size: int) -> tuple[int, ...]:
        """Two-dimensional bin table: codebook size split evenly over two dims."""
        if target_codebook_size == 2**8:
            return (16, 16)
        elif target_codebook_size == 2**10:  # noqa: RET505
            return (32, 32)
        elif target_codebook_size == 2**12:
            return (64, 64)
        elif target_codebook_size == 2**14:
            return (128, 128)
        elif target_codebook_size == 2**16:
            return (256, 256)
        # Fail loudly like the other bin tables instead of returning None, which
        # would only surface later as an obscure TypeError in math.prod/jnp.array.
        raise ValueError(f"Codebook size {target_codebook_size} not supported.")

    @staticmethod
    def _get_bins_lfq(target_codebook_size: int) -> tuple[int, ...]:
        """
        Get bins per dimension according to the Lookup-Free Quantization paper (2 bins per dimension)
        """
        assert target_codebook_size & (target_codebook_size - 1) == 0, "Codebook size should be a power of two for LFQ"

        return (2,) * int(math.log2(target_codebook_size))

    def setup(self):
        self.proj_down = nn.Dense(len(self.bins_per_dim))
        self.proj_up = nn.Dense(self.input_dim)

    def __call__(self, inputs: jnp.ndarray) -> tuple[jnp.ndarray, jnp.ndarray]:
        tokens, z = self.encode(inputs)
        output = self.decode(tokens, z_grad=z)
        return tokens, output

    def encode(self, inputs: jnp.ndarray) -> tuple[jnp.ndarray, jnp.ndarray]:
        """Project to the latent space, bound with tanh, and round into per-dim bins."""
        bases = jnp.array(self.bins_per_dim)

        x = self.proj_down(inputs)
        z = jnp.tanh(x)

        # Quantize
        digits = jnp.round((z + 1) * (bases - 1) / 2).astype(jnp.int32)
        tokens = self.undigitize(digits)

        return tokens, z

    def decode(self, tokens: jnp.ndarray, z_grad: jax.Array | None = None) -> jnp.ndarray:
        """Map token ids back to latents; `z_grad` enables the straight-through estimator."""
        bases = jnp.array(self.bins_per_dim)
        digits = self.digitize(tokens)

        z_q = digits / (bases - 1) * 2 - 1

        if z_grad is not None:
            chex.assert_equal_shape([z_q, z_grad])
            z_q = jax.lax.stop_gradient(z_q - z_grad) + z_grad

        return self.proj_up(z_q)

    def undigitize(self, digits: jnp.ndarray) -> jnp.ndarray:
        """Pack per-dim digits into a single mixed-radix token id."""
        return jnp.sum(digits * jnp.array(self.place_values), axis=-1)

    def digitize(self, tokens: jnp.ndarray) -> jnp.ndarray:
        """Unpack a mixed-radix token id back into per-dim digits."""
        return (tokens[..., None] // jnp.array(self.place_values)) % jnp.array(self.bins_per_dim)

    @property
    def vocab_size(self) -> int:
        return math.prod(self.bins_per_dim)


class ResNetDownBlock(nn.Module):
    """1-D residual block with optional strided downsampling on the skip path."""

    stride: int = 1
    n_filters: int = 64
    dropout_rate: float = 0.0
    group_size: int = 32

    @nn.compact
    def __call__(self, x: jnp.ndarray, *, train: bool = True) -> jnp.ndarray:
        skip = x

        # Project the skip when the spatial or channel shape changes.
        if self.stride > 1 or x.shape[-1] != self.n_filters:
            skip = nn.Conv(self.n_filters, (self.stride,), (self.stride,), "SAME")(skip)

        x = nn.Conv(self.n_filters, (3,), (self.stride,), "SAME")(x)
        x = nn.GroupNorm(num_groups=self.n_filters // self.group_size)(x)
        x = nn.Dropout(self.dropout_rate)(x, deterministic=not train)
        x = nn.relu(x)
        x = nn.Conv(self.n_filters, (3,), (1,), "SAME")(x)

        return skip + x


class ResNetUpBlock(nn.Module):
    """1-D residual block with optional transposed-conv upsampling on the skip path."""

    stride: int = 1
    n_filters: int = 64
    dropout_rate: float = 0.0
    group_size: int = 32

    @nn.compact
    def __call__(self, x: jnp.ndarray, *, train: bool = True) -> jnp.ndarray:
        skip = x

        if self.stride > 1:
            skip = nn.ConvTranspose(self.n_filters, (self.stride,), (self.stride,), "SAME")(skip)

        x = nn.ConvTranspose(self.n_filters, (3,), (self.stride,), "SAME")(x)
        x = nn.GroupNorm(num_groups=self.n_filters // self.group_size)(x)
        x = nn.Dropout(self.dropout_rate)(x, deterministic=not train)
        x = nn.relu(x)
        x = nn.ConvTranspose(self.n_filters, (3,), (1,), "SAME")(x)

        return skip + x


@dataclass
class LfqCodebookOutput:
    # Packed token ids, pre-quantization latents, straight-through quantized
    # latents, per-token log-probs, and the commitment loss term.
    tokens: jnp.ndarray
    z: jnp.ndarray
    z_q: jnp.ndarray
    token_log_probs: jnp.ndarray
    commit_loss: jnp.ndarray


class LookupFreeQuantization(nn.Module):
    """Lookup-Free Quantization: each latent dim is quantized to {-1, +1}."""

    num_dims: int
    latent_dim: int

    def setup(self):
        self.codebook = jnp.array([-1, 1])
        self.activation = nn.tanh

        self.project_down = nn.Dense(self.num_dims)
        self.project_up = nn.Dense(self.latent_dim)

    def encode(self, z: jnp.ndarray) -> jnp.ndarray:
        z = self.project_down(z)
        token_squared_distances = jnp.square(z[..., None] - self.codebook)
        token_bits = jnp.argmin(token_squared_distances, axis=-1)
        return jnp.sum(token_bits * (2 ** jnp.arange(self.num_dims)), axis=-1)

    def decode(self, tokens: jnp.ndarray) -> jnp.ndarray:
        # NOTE(review): `tokens & 2**k` yields 0 or 2**k (not 0/1), so indexing the
        # 2-entry codebook relies on JAX clamping out-of-range indices to the last
        # entry. That happens to give the right answer under jit, but confirm before
        # reusing this outside JAX; `(tokens >> k) & 1` would be the explicit form.
        token_bits = (tokens[..., None] & (2 ** jnp.arange(self.num_dims))).astype(jnp.int32)
        return self.project_up(self.codebook[token_bits])

    def loss(self, x: jnp.ndarray) -> LfqCodebookOutput:
        z = self.project_down(x)
        z = self.activation(z)

        token_squared_distances = jnp.square(z[..., None] - self.codebook)
        tokens = jnp.argmin(token_squared_distances, axis=-1)

        token_bit_log_probs = -token_squared_distances
        # Compute token log probs for tokens 0..2^num_dims-1 by summing corresponding log-probs
        token_bit_expansions = jnp.bitwise_and(
            jnp.arange(2**self.num_dims)[None, :], 2 ** jnp.arange(self.num_dims)[:, None]
        ).astype(jnp.int32)
        token_log_probs = (
            token_bit_log_probs[..., 0] @ (1 - token_bit_expansions)
            + token_bit_log_probs[..., 1] @ token_bit_expansions
        )  # (batch_size, num_tokens, 2 ** num_dims)
        token_log_probs = jax.lax.stop_gradient(jax.nn.log_softmax(token_log_probs, axis=-1))
        chex.assert_shape(token_log_probs, (*x.shape[:-1], 2**self.num_dims))

        z_q = self.codebook[tokens]
        commit_loss = jnp.square(z - z_q).mean()
        # Straight-through estimator: gradients flow through z.
        z_q = jax.lax.stop_gradient(z_q - z) + z

        z_q = self.project_up(z_q)
        z = self.project_up(z)

        tokens = jnp.sum(tokens * (len(self.codebook) ** jnp.arange(self.num_dims)), axis=-1)
        # NOTE(review): token_log_probs is computed above but a zero scalar is
        # returned in its place — confirm whether that is intentional.
        return LfqCodebookOutput(
            tokens=tokens,
            z=z,
            z_q=z_q,
            token_log_probs=jnp.zeros(()),
            commit_loss=commit_loss,
        )


def make_block_causal_attention_matrix(q: jnp.ndarray, k: jnp.ndarray, bs_q: int, bs_k: int) -> jnp.ndarray:
    """Block-causal mask: query block i may attend to key blocks 0..i."""
    return nn.make_attention_mask(q, k, pairwise_fn=lambda x, y: jnp.greater_equal(x // bs_k, y // bs_q))


class GeGLU(Module):
    """Gated Linear Unit with GELU (GeGLU) activation function.
    GeGLU is a Flax layer that combines a linear transformation with a GELU
    activation function in a gating mechanism. It is often used in Transformer models
    to provide non-linear capabilities while preserving a strong linear component.

    Attributes:
        output_dim: the number of output features (-1 keeps the input width).
    """

    output_dim: int = -1

    @compact
    def __call__(self, inputs: Array) -> Array:
        """Applies the GeGLU activation to the inputs.
        Args:
            inputs: the nd-array to apply the GeGLU activation function to.
        Returns:
            The transformed input.
        """
        output_dim = inputs.shape[-1] if self.output_dim == -1 else self.output_dim

        # Single dense produces both the value and the gate halves.
        x = nn.Dense(output_dim * 2)(inputs)
        x, gate = x[..., :output_dim], x[..., output_dim:]
        return x * nn.gelu(gate)


class CrossAttentionLayer(nn.Module):
    """Pre-norm transformer layer: self-attention, cross-attention, then GeGLU MLP."""

    dropout_rate: float = 0.0
    num_heads: int | None = None  # defaults to d_embed // 64 when None
    causal: bool = False
    mlp_ratio: float = 4.0

    @nn.compact
    def __call__(
        self,
        x: jnp.ndarray,
        y: jnp.ndarray,
        *,
        mask_self: jnp.ndarray | None = None,
        mask_cross: jnp.ndarray | None = None,
        train: bool = True,
    ) -> jnp.ndarray:
        d_embed = x.shape[-1]
        seq_len_q = x.shape[-2]
        seq_len_k = y.shape[-2]

        if self.causal:
            # One block size will be 1
            bs_q = max(seq_len_q // seq_len_k, 1)
            bs_k = max(seq_len_k // seq_len_q, 1)

            # Causal mode overrides any masks passed by the caller.
            mask_self = nn.make_causal_mask(x[..., 0])
            mask_cross = make_block_causal_attention_matrix(x[..., 0], y[..., 0], bs_q, bs_k)

        # Self-attention block
        skip = x
        x = nn.LayerNorm()(x)
        x = nn.MultiHeadDotProductAttention(
            num_heads=self.num_heads or d_embed // 64,
            dropout_rate=self.dropout_rate,
            deterministic=not train,
        )(x, x, x, mask=mask_self)
        x = skip + x

        # Cross-attention block
        skip = x
        x = nn.LayerNorm()(x)
        x = nn.MultiHeadDotProductAttention(
            num_heads=self.num_heads or d_embed // 64,
            dropout_rate=self.dropout_rate,
            deterministic=not train,
        )(x, y, y, mask=mask_cross)
        x = skip + x

        # MLP block
        skip = x
        x = nn.LayerNorm()(x)
        x = nn.Dense(int(d_embed * self.mlp_ratio))(x)
        x = nn.Dropout(self.dropout_rate)(x, deterministic=not train)
        x = GeGLU()(x)
        x = nn.Dense(d_embed)(x)
        return skip + x


def sinusoidal_pe_init(_, shape: tuple[int, int]) -> jnp.ndarray:
    """Flax param initializer producing standard sinusoidal positional encodings.

    Assumes an even embedding dimension (sin/cos halves are concatenated).
    """
    seq_len, d_embed = shape

    position = jnp.arange(0, seq_len, 1)
    div_term = jnp.exp(jnp.arange(0, d_embed, 2) * -(jnp.log(10000.0) / d_embed))
    return jnp.concatenate(
        [
            jnp.sin(position[:, jnp.newaxis] * div_term),
            jnp.cos(position[:, jnp.newaxis] * div_term),
        ],
        axis=-1,
    )


class TokenizerEncoderDecoder(nn.Module):
    """Cross-attention stack mapping `num_cross_tokens` inputs to `num_tokens` queries."""

    num_tokens: int
    num_cross_tokens: int
    num_layers: int
    causal: bool

    mlp_ratio: float = 4.0
    use_state_conditioning: bool = False

    @nn.compact
    def __call__(
        self,
        y: jnp.ndarray,
        *,
        train: bool = True,
        state_conditioning: jnp.ndarray | None = None,
        mask: jnp.ndarray | None = None,
    ) -> jnp.ndarray:
        # Learned query embeddings, initialized with sinusoidal PE and broadcast over batch dims.
        x = self.param("q_embed", sinusoidal_pe_init, (self.num_tokens, y.shape[-1]))
        x = jax.numpy.broadcast_to(x, y.shape[:-2] + x.shape[-2:])

        if mask is not None:
            # mask is (batch_dims..., num_cross_tokens)
            chex.assert_equal_shape([y[..., 0], mask])
            attn_mask = einops.repeat(mask, "... kv -> ... 1 q kv", q=self.num_tokens)
        else:
            attn_mask = jnp.ones((*y.shape[:-2], 1, self.num_tokens, self.num_cross_tokens))

        if self.use_state_conditioning:
            assert state_conditioning is not None, "State conditioning is required for this model."
            # Append the projected state as one extra (always-attendable) cross token.
            state_embed = nn.Dense(y.shape[-1], name="state_proj")(state_conditioning)[..., None, :]
            y = jnp.concatenate([y, state_embed], axis=-2)
            attn_mask = jnp.concatenate([attn_mask, jnp.ones_like(attn_mask[..., 0:1])], axis=-1)

        y = y + self.param("y_pos_enc", sinusoidal_pe_init, y.shape[-2:])

        for _ in range(self.num_layers):
            x = CrossAttentionLayer(causal=self.causal, mlp_ratio=self.mlp_ratio)(
                x, y, train=train, mask_self=None, mask_cross=attn_mask
            )

        return x


class FsqAttentionTokenizer(nn.Module):
    """Attention-based FSQ action tokenizer (FAST paper baseline)."""

    embed_dim: int
    data_dim: int
    data_horizon: int
    num_tokens: int
    num_layers: int
    target_codebook_size: int
    causal: bool = False
    mlp_ratio: float = 2.0

    # Optional clipping bound applied to actions before tokenization.
    bound: float | None = None

    use_state_conditioning: bool = False

    @property
    def vocab_size(self) -> int:
        # NOTE(review): the codebook in setup() is built with codebook_type="custom",
        # whose bin table differs from the FSQ one used here (e.g. 256 vs 240 codes
        # for 2**8), so token ids can exceed this value — confirm whether the
        # mismatch is intentional before relying on it for embedding sizes.
        return math.prod(FsqCodebook._get_bins_fsq(self.target_codebook_size))  # noqa: SLF001

    def setup(self):
        self.proj = nn.Dense(self.embed_dim)
        self.encoder = TokenizerEncoderDecoder(
            num_tokens=self.num_tokens,
            num_cross_tokens=self.data_horizon,
            num_layers=self.num_layers,
            causal=self.causal,
            use_state_conditioning=self.use_state_conditioning,
            mlp_ratio=self.mlp_ratio,
        )
        self.codebook = FsqCodebook(
            input_dim=self.embed_dim,
            target_codebook_size=self.target_codebook_size,
            codebook_type="custom",
        )
        self.decoder = TokenizerEncoderDecoder(
            num_tokens=self.data_horizon,
            num_cross_tokens=self.num_tokens,
            num_layers=self.num_layers,
            causal=self.causal,
            use_state_conditioning=self.use_state_conditioning,
            mlp_ratio=self.mlp_ratio,
        )

        self.proj_mean = nn.Dense(self.data_dim)
        self.out_scale = self.param("out_scale", lambda _: jnp.full((), 1.0))

    def tokenize(
        self, action: jnp.ndarray, *, obs: jnp.ndarray | None = None, train: bool = False
    ) -> tuple[jnp.ndarray, jnp.ndarray]:
        """Encode an action chunk into (tokens, pre-quantization latents)."""
        if self.bound is not None:
            action = jnp.clip(action, -self.bound, self.bound)

        x = self.proj(action)
        x = self.encoder(x, train=train, state_conditioning=obs)

        return self.codebook.encode(x)

    def detokenize(self, tokens: jnp.ndarray, *, obs: jnp.ndarray | None = None) -> jnp.ndarray:
        """Decode tokens back into an action chunk."""
        x = self.decoder(self.codebook.decode(tokens), state_conditioning=obs)
        mean = self.proj_mean(x)
        return mean * self.out_scale

    def loss(
        self, action: jnp.ndarray, *, obs: jnp.ndarray | None = None, train: bool = True
    ) -> tuple[jnp.ndarray, dict[str, jnp.ndarray]]:
        """Reconstruction loss through the quantization bottleneck; returns (mse, metrics)."""
        # Encode
        x = self.proj(action)
        z = self.encoder(x, train=train, state_conditioning=obs)

        # Quantize
        tokens, z = self.codebook(z)

        # Decode
        x = self.decoder(z, train=train, state_conditioning=obs)
        mean = self.proj_mean(x) * self.out_scale

        mse = jnp.mean(jnp.square(action - mean))
        mae = jnp.mean(jnp.abs(action - mean))

        return mse, {
            "mse": mse,
            "mae": mae,
        }

    def __call__(self, *args: Any, **kwargs: Any) -> tuple[jnp.ndarray, dict[str, jnp.ndarray]]:
        """
        Dummy for .init
        """
        return self.loss(*args, **kwargs)
b/policy/openpi-InternData-A1/src/openpi/models/vit.py new file mode 100644 index 0000000..b7901d0 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models/vit.py @@ -0,0 +1,307 @@ +# Copyright 2024 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""ViT implementation adapted from https://github.com/google-research/vision_transformer/blob/main/vit_jax/models_vit.py.""" + +from collections.abc import Callable +from typing import Any + +import flax.linen as nn +import jax +import jax.numpy as jnp + +from openpi.models import resnet as models_resnet + +Array = Any +PRNGKey = Any +Shape = tuple[int] +Dtype = Any + + +class IdentityLayer(nn.Module): + """Identity layer, convenient for giving a name to an array.""" + + @nn.compact + def __call__(self, x): + return x + + +class AddPositionEmbs(nn.Module): + """Adds learned positional embeddings to the inputs. + + Attributes: + posemb_init: positional embedding initializer. + """ + + posemb_init: Callable[[PRNGKey, Shape, Dtype], Array] + param_dtype: Dtype = jnp.float32 + + @nn.compact + def __call__(self, inputs): + """Applies the AddPositionEmbs module. + + Args: + inputs: Inputs to the layer. + + Returns: + Output tensor with shape `(bs, timesteps, in_dim)`. + """ + # inputs.shape is (batch_size, seq_len, emb_dim). 
+ assert inputs.ndim == 3, f"Number of dimensions should be 3, but it is: {inputs.ndim}" + pos_emb_shape = (1, inputs.shape[1], inputs.shape[2]) + pe = self.param("pos_embedding", self.posemb_init, pos_emb_shape, self.param_dtype) + return inputs + pe + + +class MlpBlock(nn.Module): + """Transformer MLP / feed-forward block.""" + + mlp_dim: int + dtype: Dtype = jnp.float32 + param_dtype: Dtype = jnp.float32 + out_dim: int | None = None + dropout_rate: float = 0.1 + kernel_init: Callable[[PRNGKey, Shape, Dtype], Array] = nn.initializers.xavier_uniform() + bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = nn.initializers.normal(stddev=1e-6) + + @nn.compact + def __call__(self, inputs, *, deterministic): + """Applies Transformer MlpBlock module.""" + actual_out_dim = inputs.shape[-1] if self.out_dim is None else self.out_dim + x = nn.Dense( + features=self.mlp_dim, + dtype=self.dtype, + param_dtype=self.param_dtype, + kernel_init=self.kernel_init, + bias_init=self.bias_init, + )( # pytype: disable=wrong-arg-types + inputs + ) + x = nn.gelu(x) + x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=deterministic) + output = nn.Dense( + features=actual_out_dim, + dtype=self.dtype, + param_dtype=self.param_dtype, + kernel_init=self.kernel_init, + bias_init=self.bias_init, + )( # pytype: disable=wrong-arg-types + x + ) + return nn.Dropout(rate=self.dropout_rate)(output, deterministic=deterministic) + + +class Encoder1DBlock(nn.Module): + """Transformer encoder layer. + + Attributes: + inputs: input data. + mlp_dim: dimension of the mlp on top of attention block. + dtype: the dtype of the computation (default: float32). + dropout_rate: dropout rate. + attention_dropout_rate: dropout for attention heads. + deterministic: bool, deterministic or not (to apply dropout). 
+ num_heads: Number of heads in nn.MultiHeadDotProductAttention + """ + + mlp_dim: int + num_heads: int + dtype: Dtype = jnp.float32 + dropout_rate: float = 0.1 + attention_dropout_rate: float = 0.1 + + @nn.compact + def __call__(self, inputs, deterministic): + """Applies Encoder1DBlock module. + + Args: + inputs: Inputs to the layer. + deterministic: Dropout will not be applied when set to true. + + Returns: + output after transformer encoder block. + """ + + # Attention block. + assert inputs.ndim == 3, f"Expected (batch, seq, hidden) got {inputs.shape}" + x = nn.LayerNorm(dtype=self.dtype)(inputs) + x = nn.MultiHeadDotProductAttention( + dtype=self.dtype, + kernel_init=nn.initializers.xavier_uniform(), + broadcast_dropout=False, + deterministic=deterministic, + dropout_rate=self.attention_dropout_rate, + num_heads=self.num_heads, + # why isn't this true by default??? + force_fp32_for_softmax=True, + )(x, x) + x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=deterministic) + x = x + inputs + + # MLP block. + y = nn.LayerNorm(dtype=self.dtype)(x) + y = MlpBlock(mlp_dim=self.mlp_dim, dtype=self.dtype, dropout_rate=self.dropout_rate)( + y, deterministic=deterministic + ) + + return x + y, None + + +class Encoder(nn.Module): + """Transformer Model Encoder for sequence to sequence translation. + + Attributes: + num_layers: number of layers + mlp_dim: dimension of the mlp on top of attention block + num_heads: Number of heads in nn.MultiHeadDotProductAttention + dropout_rate: dropout rate. + attention_dropout_rate: dropout rate in self attention. + """ + + dtype: jax.typing.DTypeLike + num_layers: int + mlp_dim: int + num_heads: int + dropout_rate: float = 0.1 + attention_dropout_rate: float = 0.1 + add_position_embedding: bool = True + + @nn.compact + def __call__(self, x, *, train): + """Applies Transformer model on the inputs. + + Args: + x: Inputs to the layer. + train: Set to `True` when training. + + Returns: + output of a transformer encoder. 
+ """ + assert x.ndim == 3 # (batch, len, emb) + + if self.add_position_embedding: + x = AddPositionEmbs( + posemb_init=nn.initializers.normal(stddev=0.02), # from BERT. + name="posembed_input", + )(x) + x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=not train) + + x = x.astype(self.dtype) + # Input Encoder + block = nn.remat(Encoder1DBlock, prevent_cse=False, static_argnums=(2,)) + x, _ = nn.scan( + block, + variable_axes={"params": 0}, + split_rngs={"params": True, "dropout": True}, + in_axes=nn.broadcast, + length=self.num_layers, + )( + name="encoderblock", + mlp_dim=self.mlp_dim, + dropout_rate=self.dropout_rate, + attention_dropout_rate=self.attention_dropout_rate, + dtype=self.dtype, + num_heads=self.num_heads, + )(x, not train) + return nn.LayerNorm(name="encoder_norm", dtype=self.dtype)(x) + + +class VisionTransformer(nn.Module): + """VisionTransformer.""" + + dtype: jax.typing.DTypeLike + num_classes: int + patches: Any + transformer: Any + hidden_size: int + resnet: Any | None = None + representation_size: int | None = None + classifier: str = "token" + head_bias_init: float = 0.0 + encoder: type[nn.Module] = Encoder + model_name: str | None = None + + @nn.compact + def __call__(self, inputs, *, train): + x = inputs + # (Possibly partial) ResNet root. + if self.resnet is not None: + width = int(64 * self.resnet.width_factor) + + # Root block. + x = models_resnet.StdConv( + features=width, kernel_size=(7, 7), strides=(2, 2), use_bias=False, name="conv_root" + )(x) + x = nn.GroupNorm(name="gn_root")(x) + x = nn.relu(x) + x = nn.max_pool(x, window_shape=(3, 3), strides=(2, 2), padding="SAME") + + # ResNet stages. 
class VisionTransformer(nn.Module):
    """VisionTransformer.

    Patch-embeds an image (optionally after a partial ResNet root), runs the
    Transformer encoder, then applies the configured classifier head.
    """

    dtype: jax.typing.DTypeLike
    num_classes: int
    patches: Any        # expects a `.size` usable as Conv kernel/stride (patch size)
    transformer: Any    # kwargs dict for the encoder, or None to skip it
    hidden_size: int
    resnet: Any | None = None  # optional hybrid ResNet root config (width_factor, num_layers)
    representation_size: int | None = None
    classifier: str = "token"  # one of: token, gap, unpooled, token_unpooled
    head_bias_init: float = 0.0
    encoder: type[nn.Module] = Encoder
    model_name: str | None = None

    @nn.compact
    def __call__(self, inputs, *, train):
        x = inputs
        # (Possibly partial) ResNet root.
        if self.resnet is not None:
            width = int(64 * self.resnet.width_factor)

            # Root block: 7x7/2 std-conv + GroupNorm + ReLU + 3x3/2 max-pool.
            x = models_resnet.StdConv(
                features=width, kernel_size=(7, 7), strides=(2, 2), use_bias=False, name="conv_root"
            )(x)
            x = nn.GroupNorm(name="gn_root")(x)
            x = nn.relu(x)
            x = nn.max_pool(x, window_shape=(3, 3), strides=(2, 2), padding="SAME")

            # ResNet stages: first stage keeps resolution, later stages halve it
            # and double the channel width.
            if self.resnet.num_layers:
                x = models_resnet.ResNetStage(
                    block_size=self.resnet.num_layers[0], nout=width, first_stride=(1, 1), name="block1"
                )(x)
                for i, block_size in enumerate(self.resnet.num_layers[1:], 1):
                    x = models_resnet.ResNetStage(
                        block_size=block_size, nout=width * 2**i, first_stride=(2, 2), name=f"block{i + 1}"
                    )(x)

        n, h, w, c = x.shape

        # We can merge s2d+emb into a single conv; it's the same.
        # Patch embedding: non-overlapping conv with kernel == stride == patch size.
        x = nn.Conv(
            features=self.hidden_size,
            kernel_size=self.patches.size,
            strides=self.patches.size,
            padding="VALID",
            name="embedding",
        )(x)

        # Here, x is a grid of embeddings.

        # (Possibly partial) Transformer.
        if self.transformer is not None:
            n, h, w, c = x.shape
            # Flatten the spatial grid into a token sequence.
            x = jnp.reshape(x, [n, h * w, c])

            # If we want to add a class token, add it here (prepended, so it ends
            # up at index 0 for the "token" classifier below).
            if self.classifier in ["token", "token_unpooled"]:
                cls = self.param("cls", nn.initializers.zeros, (1, 1, c))
                cls = jnp.tile(cls, [n, 1, 1])
                x = jnp.concatenate([cls, x], axis=1)

            x = self.encoder(name="Transformer", **self.transformer, dtype=self.dtype)(x, train=train)

        if self.classifier == "token":
            # Use the class token's final embedding.
            x = x[:, 0]
        elif self.classifier == "gap":
            # Global average pool over all non-batch, non-channel axes.
            x = jnp.mean(x, axis=list(range(1, x.ndim - 1)))  # (1,) or (1,2)
        elif self.classifier in ["unpooled", "token_unpooled"]:
            pass
        else:
            raise ValueError(f"Invalid classifier={self.classifier}")

        if self.representation_size is not None:
            x = nn.Dense(features=self.representation_size, name="pre_logits")(x)
            x = nn.tanh(x)
        else:
            # Keep the "pre_logits" name in the param tree even when it is a no-op,
            # so checkpoints remain compatible across both configurations.
            x = IdentityLayer(name="pre_logits")(x)

        if self.num_classes:
            # Zero-initialized head: the model starts out predicting head_bias_init.
            x = nn.Dense(
                features=self.num_classes,
                name="head",
                kernel_init=nn.initializers.zeros,
                bias_init=nn.initializers.constant(self.head_bias_init),
            )(x)
        return x
from typing import Literal

import torch
from torch import nn
from transformers import Cache  # fixed: was `import pytest` / `pytest.Cache`, clearly meant the HF KV cache type
from transformers import GemmaForCausalLM
from transformers import PaliGemmaForConditionalGeneration
from transformers.models.auto import CONFIG_MAPPING
from transformers.models.gemma import modeling_gemma


class PaliGemmaWithExpertModel(nn.Module):
    """PaliGemma VLM paired with a smaller Gemma "action expert".

    The prefix stream (image + language tokens) runs through PaliGemma's language
    model; the suffix stream (state/action tokens) runs through the Gemma expert.
    In the joint forward pass the two streams are processed layer-by-layer with
    their per-layer keys/values concatenated, so suffix tokens can attend to
    prefix tokens within a single attention call.
    """

    def __init__(
        self,
        vlm_config,
        action_expert_config,
        use_adarms=None,
        precision: Literal["bfloat16", "float32"] = "bfloat16",
    ):
        """Build both towers from openpi gemma variant configs.

        Args:
            vlm_config: openpi gemma config for the PaliGemma language tower.
            action_expert_config: openpi gemma config for the action expert.
            use_adarms: two flags [vlm, expert] enabling adaRMS conditioning.
            precision: parameter dtype; selected params stay float32 (see below).
        """
        if use_adarms is None:
            use_adarms = [False, False]
        super().__init__()

        # Mirror the openpi variant config into a HF PaliGemma config.
        vlm_config_hf = CONFIG_MAPPING["paligemma"]()
        vlm_config_hf._vocab_size = 257152  # noqa: SLF001
        vlm_config_hf.image_token_index = 257152
        vlm_config_hf.text_config.hidden_size = vlm_config.width
        vlm_config_hf.text_config.intermediate_size = vlm_config.mlp_dim
        vlm_config_hf.text_config.num_attention_heads = vlm_config.num_heads
        vlm_config_hf.text_config.head_dim = vlm_config.head_dim
        vlm_config_hf.text_config.num_hidden_layers = vlm_config.depth
        vlm_config_hf.text_config.num_key_value_heads = vlm_config.num_kv_heads
        vlm_config_hf.text_config.hidden_activation = "gelu_pytorch_tanh"
        vlm_config_hf.text_config.torch_dtype = "float32"
        vlm_config_hf.text_config.vocab_size = 257152
        vlm_config_hf.text_config.use_adarms = use_adarms[0]
        vlm_config_hf.text_config.adarms_cond_dim = vlm_config.width if use_adarms[0] else None
        vlm_config_hf.vision_config.intermediate_size = 4304
        vlm_config_hf.vision_config.projection_dim = 2048
        vlm_config_hf.vision_config.projector_hidden_act = "gelu_fast"
        vlm_config_hf.vision_config.torch_dtype = "float32"

        action_expert_config_hf = CONFIG_MAPPING["gemma"](
            head_dim=action_expert_config.head_dim,
            hidden_size=action_expert_config.width,
            intermediate_size=action_expert_config.mlp_dim,
            num_attention_heads=action_expert_config.num_heads,
            num_hidden_layers=action_expert_config.depth,
            num_key_value_heads=action_expert_config.num_kv_heads,
            vocab_size=257152,
            hidden_activation="gelu_pytorch_tanh",
            torch_dtype="float32",
            use_adarms=use_adarms[1],
            adarms_cond_dim=action_expert_config.width if use_adarms[1] else None,
        )

        self.paligemma = PaliGemmaForConditionalGeneration(config=vlm_config_hf)
        self.gemma_expert = GemmaForCausalLM(config=action_expert_config_hf)
        # The expert only ever receives pre-computed embeddings, never token ids.
        self.gemma_expert.model.embed_tokens = None

        self.to_bfloat16_for_selected_params(precision)

    def to_bfloat16_for_selected_params(self, precision: Literal["bfloat16", "float32"] = "bfloat16"):
        """Cast the model to `precision`, keeping numerically sensitive params in float32."""
        if precision == "bfloat16":
            self.to(dtype=torch.bfloat16)
        elif precision == "float32":
            self.to(dtype=torch.float32)
            return  # everything already float32; nothing to restore below
        else:
            raise ValueError(f"Invalid precision: {precision}")

        # Patch/position embeddings and all norm layers stay in float32.
        params_to_keep_float32 = [
            "vision_tower.vision_model.embeddings.patch_embedding.weight",
            "vision_tower.vision_model.embeddings.patch_embedding.bias",
            "vision_tower.vision_model.embeddings.position_embedding.weight",
            "input_layernorm",
            "post_attention_layernorm",
            "model.norm",
        ]

        for name, param in self.named_parameters():
            if any(selector in name for selector in params_to_keep_float32):
                param.data = param.data.to(dtype=torch.float32)

    def embed_image(self, image: torch.Tensor):
        """Run the SigLIP vision tower + projector; returns image token embeddings."""
        return self.paligemma.model.get_image_features(image)

    def embed_language_tokens(self, tokens: torch.Tensor):
        """Embed language token ids with PaliGemma's input embedding table."""
        return self.paligemma.language_model.embed_tokens(tokens)

    def forward(
        self,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: list[torch.FloatTensor] | Cache | None = None,
        inputs_embeds: list[torch.FloatTensor] | None = None,
        use_cache: bool | None = None,
        adarms_cond: list[torch.Tensor] | None = None,
    ):
        """Run the VLM stream, the expert stream, or both jointly.

        `inputs_embeds` is a 2-element list [prefix_embeds, suffix_embeds]; either
        entry may be None to run only the other stream.

        Returns:
            ([prefix_output, suffix_output], prefix_past_key_values)
        """
        if adarms_cond is None:
            adarms_cond = [None, None]
        if inputs_embeds[1] is None:
            # Prefix-only pass (e.g. building the image/language KV cache).
            prefix_output = self.paligemma.language_model.forward(
                inputs_embeds=inputs_embeds[0],
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                adarms_cond=adarms_cond[0] if adarms_cond is not None else None,
            )
            prefix_past_key_values = prefix_output.past_key_values
            prefix_output = prefix_output.last_hidden_state
            suffix_output = None
        elif inputs_embeds[0] is None:
            # Suffix-only pass (denoising steps reusing a cached prefix).
            suffix_output = self.gemma_expert.model.forward(
                inputs_embeds=inputs_embeds[1],
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                adarms_cond=adarms_cond[1] if adarms_cond is not None else None,
            )
            suffix_output = suffix_output.last_hidden_state
            prefix_output = None
            prefix_past_key_values = None
        else:
            # Joint pass: run both towers layer-by-layer with concatenated attention.
            models = [self.paligemma.language_model, self.gemma_expert.model]
            num_layers = self.paligemma.config.text_config.num_hidden_layers

            # Check if gradient checkpointing is enabled for any of the models.
            use_gradient_checkpointing = (
                hasattr(self.gemma_expert.model, "gradient_checkpointing")
                and self.gemma_expert.model.gradient_checkpointing
                and self.training
            ) or (hasattr(self, "gradient_checkpointing") and self.gradient_checkpointing and self.training)

            # Force-enable gradient checkpointing in training mode when supported.
            if self.training and hasattr(self.gemma_expert.model, "gradient_checkpointing"):
                if not self.gemma_expert.model.gradient_checkpointing:
                    print("Forcing gradient checkpointing to be enabled for Gemma expert model")
                    self.gemma_expert.model.gradient_checkpointing = True
                    use_gradient_checkpointing = True

            # One-shot debug dump of the checkpointing status.
            if hasattr(self, "_debug_gc_printed") and not self._debug_gc_printed:
                print(f"Gemma expert model gradient checkpointing: {use_gradient_checkpointing}")
                print(f"Model training mode: {self.training}")
                print(
                    f"Gemma expert model has gradient_checkpointing attr: {hasattr(self.gemma_expert.model, 'gradient_checkpointing')}"
                )
                if hasattr(self.gemma_expert.model, "gradient_checkpointing"):
                    print(
                        f"Gemma expert model gradient_checkpointing value: {self.gemma_expert.model.gradient_checkpointing}"
                    )
                self._debug_gc_printed = True

            # Complete per-layer computation, shaped so it can be wrapped in
            # torch.utils.checkpoint.checkpoint (pure function of its tensor args).
            def compute_layer_complete(layer_idx, inputs_embeds, attention_mask, position_ids, adarms_cond):
                models = [self.paligemma.language_model, self.gemma_expert.model]

                query_states = []
                key_states = []
                value_states = []
                gates = []
                for i, hidden_states in enumerate(inputs_embeds):
                    layer = models[i].layers[layer_idx]
                    hidden_states, gate = layer.input_layernorm(hidden_states, cond=adarms_cond[i])  # noqa: PLW2901
                    gates.append(gate)

                    input_shape = hidden_states.shape[:-1]
                    hidden_shape = (*input_shape, -1, layer.self_attn.head_dim)
                    query_state = layer.self_attn.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
                    key_state = layer.self_attn.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
                    value_state = layer.self_attn.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

                    query_states.append(query_state)
                    key_states.append(key_state)
                    value_states.append(value_state)

                # Concatenate both streams along the sequence axis and attend jointly.
                query_states = torch.cat(query_states, dim=2)
                key_states = torch.cat(key_states, dim=2)
                value_states = torch.cat(value_states, dim=2)

                # The rotary embedding module only uses the dummy tensor's device/dtype.
                dummy_tensor = torch.zeros(
                    query_states.shape[0],
                    query_states.shape[2],
                    query_states.shape[-1],
                    device=query_states.device,
                    dtype=query_states.dtype,
                )
                cos, sin = self.paligemma.model.language_model.rotary_emb(dummy_tensor, position_ids)
                query_states, key_states = modeling_gemma.apply_rotary_pos_emb(
                    query_states, key_states, cos, sin, unsqueeze_dim=1
                )

                batch_size = query_states.shape[0]
                scaling = self.paligemma.language_model.layers[layer_idx].self_attn.scaling

                # Attention computation
                att_output, _ = modeling_gemma.eager_attention_forward(
                    self.paligemma.language_model.layers[layer_idx].self_attn,
                    query_states,
                    key_states,
                    value_states,
                    attention_mask,
                    scaling,
                )
                # Get head_dim from the current layer, not from the model.
                head_dim = self.paligemma.language_model.layers[layer_idx].self_attn.head_dim
                # NOTE(review): the head count is hard-coded to 8 here — TODO confirm
                # this matches num_attention_heads for all gemma variants used.
                att_output = att_output.reshape(batch_size, -1, 1 * 8 * head_dim)

                # Split the joint attention output back into per-stream slices and
                # finish each layer (o_proj, gated residuals, MLP).
                outputs_embeds = []
                start_pos = 0
                for i, hidden_states in enumerate(inputs_embeds):
                    layer = models[i].layers[layer_idx]
                    end_pos = start_pos + hidden_states.shape[1]

                    if att_output.dtype != layer.self_attn.o_proj.weight.dtype:
                        att_output = att_output.to(layer.self_attn.o_proj.weight.dtype)
                    out_emb = layer.self_attn.o_proj(att_output[:, start_pos:end_pos])

                    # first residual
                    out_emb = modeling_gemma._gated_residual(hidden_states, out_emb, gates[i])  # noqa: SLF001
                    after_first_residual = out_emb.clone()
                    out_emb, gate = layer.post_attention_layernorm(out_emb, cond=adarms_cond[i])
                    # Convert to bfloat16 if the next layer (mlp) uses bfloat16.
                    if layer.mlp.up_proj.weight.dtype == torch.bfloat16:
                        out_emb = out_emb.to(dtype=torch.bfloat16)

                    out_emb = layer.mlp(out_emb)
                    # second residual
                    out_emb = modeling_gemma._gated_residual(after_first_residual, out_emb, gate)  # noqa: SLF001
                    outputs_embeds.append(out_emb)
                    start_pos = end_pos

                return outputs_embeds

            # Process all layers, with gradient checkpointing if enabled.
            for layer_idx in range(num_layers):
                if use_gradient_checkpointing:
                    inputs_embeds = torch.utils.checkpoint.checkpoint(
                        compute_layer_complete,
                        layer_idx,
                        inputs_embeds,
                        attention_mask,
                        position_ids,
                        adarms_cond,
                        use_reentrant=False,
                        preserve_rng_state=False,
                    )
                else:
                    inputs_embeds = compute_layer_complete(
                        layer_idx, inputs_embeds, attention_mask, position_ids, adarms_cond
                    )

            # Final per-stream norm, also checkpointable.
            def compute_final_norms(inputs_embeds, adarms_cond):
                outputs_embeds = []
                for i, hidden_states in enumerate(inputs_embeds):
                    out_emb, _ = models[i].norm(hidden_states, cond=adarms_cond[i])
                    outputs_embeds.append(out_emb)
                return outputs_embeds

            if use_gradient_checkpointing:
                outputs_embeds = torch.utils.checkpoint.checkpoint(
                    compute_final_norms, inputs_embeds, adarms_cond, use_reentrant=False, preserve_rng_state=False
                )
            else:
                outputs_embeds = compute_final_norms(inputs_embeds, adarms_cond)

            prefix_output = outputs_embeds[0]
            suffix_output = outputs_embeds[1]
            prefix_past_key_values = None

        return [prefix_output, suffix_output], prefix_past_key_values
def get_safe_dtype(target_dtype, device_type):
    """Return a dtype usable on `device_type`.

    CPU kernels used downstream do not support bfloat16, so it is mapped to
    float32 there; every other combination passes through unchanged.
    """
    if device_type == "cpu":
        # CPU doesn't support bfloat16, use float32 instead.
        if target_dtype == torch.bfloat16:
            return torch.float32
        if target_dtype == torch.float64:
            return torch.float64
    # All devices (including non-CPU) fall through to the requested dtype.
    return target_dtype


def create_sinusoidal_pos_embedding(
    time: Tensor, dimension: int, min_period: float, max_period: float, device="cpu"
) -> Tensor:
    """Computes sine-cosine positional embedding vectors for scalar positions.

    Args:
        time: 1-D tensor of shape (batch_size,) holding scalar positions.
        dimension: output embedding size; must be even (half sin, half cos).
        min_period / max_period: geometric range of the sinusoid periods.
        device: target device, as a `torch.device` or string (e.g. "cpu").

    Returns:
        Tensor of shape (batch_size, dimension).
    """
    if dimension % 2 != 0:
        raise ValueError(f"dimension ({dimension}) must be divisible by 2")

    if time.ndim != 1:
        raise ValueError("The time tensor is expected to be of shape `(batch_size, )`.")

    # Fix: accept both str and torch.device. Previously the default device="cpu"
    # crashed on `device.type` because str has no `.type` attribute.
    device = torch.device(device)
    dtype = get_safe_dtype(torch.float64, device.type)
    # Periods spaced geometrically between min_period and max_period.
    fraction = torch.linspace(0.0, 1.0, dimension // 2, dtype=dtype, device=device)
    period = min_period * (max_period / min_period) ** fraction

    # Compute the outer product of angular frequencies and times.
    scaling_factor = 1.0 / period * 2 * math.pi
    sin_input = scaling_factor[None, :] * time[:, None]
    return torch.cat([torch.sin(sin_input), torch.cos(sin_input)], dim=1)


def sample_beta(alpha, beta, bsize, device):
    """Draw `bsize` samples from a Beta(alpha, beta) distribution on `device`."""
    alpha_t = torch.as_tensor(alpha, dtype=torch.float32, device=device)
    beta_t = torch.as_tensor(beta, dtype=torch.float32, device=device)
    dist = torch.distributions.Beta(alpha_t, beta_t)
    return dist.sample((bsize,))


def make_att_2d_masks(pad_masks, att_masks):
    """Copied from big_vision.

    Tokens can attend to valid inputs tokens which have a cumulative mask_ar
    smaller or equal to theirs. This way `mask_ar` int[B, N] can be used to
    setup several types of attention, for example:

      [[1 1 1 1 1 1]]: pure causal attention.

      [[0 0 0 1 1 1]]: prefix-lm attention. The first 3 tokens can attend between
          themselves and the last 3 tokens have a causal attention. The first
          entry could also be a 1 without changing behaviour.

      [[1 0 1 0 1 0 0 1 0 0]]: causal attention between 4 blocks. Tokens of a
          block can attend all previous blocks and all tokens on the same block.

    Args:
        pad_masks: bool[B, N] true if part of the input, false if padding.
        att_masks: int[B, N] mask that's 1 where previous tokens cannot depend on
            it and 0 where it shares the same attention mask as the previous token.

    Returns:
        bool[B, N, N] pairwise attention mask.
    """
    if att_masks.ndim != 2:
        raise ValueError(f"att_masks must be 2D (batch, seq), got ndim={att_masks.ndim}")
    if pad_masks.ndim != 2:
        raise ValueError(f"pad_masks must be 2D (batch, seq), got ndim={pad_masks.ndim}")

    # Token j is visible to token i iff cumsum[j] <= cumsum[i] and both are non-padding.
    cumsum = torch.cumsum(att_masks, dim=1)
    att_2d_masks = cumsum[:, None, :] <= cumsum[:, :, None]
    pad_2d_masks = pad_masks[:, None, :] * pad_masks[:, :, None]
    return att_2d_masks & pad_2d_masks
class PI0Pytorch(nn.Module):
    """PyTorch pi0 / pi0.5 flow-matching action policy.

    A PaliGemma VLM encodes the image/language "prefix"; a smaller Gemma action
    expert processes the state/noisy-action "suffix". Training regresses the
    flow-matching velocity `u_t = noise - actions`; inference integrates the
    learned velocity field from t=1 (noise) to t=0 (actions) with Euler steps.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # pi0.5 uses adaRMS time conditioning and no state-projection token.
        self.pi05 = config.pi05

        paligemma_config = _gemma.get_config(config.paligemma_variant)
        action_expert_config = _gemma.get_config(config.action_expert_variant)

        self.paligemma_with_expert = PaliGemmaWithExpertModel(
            paligemma_config,
            action_expert_config,
            use_adarms=[False, True] if self.pi05 else [False, False],
            precision=config.dtype,
        )

        # 32 is the padded action/state feature width used by the projections.
        self.action_in_proj = nn.Linear(32, action_expert_config.width)
        self.action_out_proj = nn.Linear(action_expert_config.width, 32)

        if self.pi05:
            # Time MLP producing the adaRMS conditioning vector.
            self.time_mlp_in = nn.Linear(action_expert_config.width, action_expert_config.width)
            self.time_mlp_out = nn.Linear(action_expert_config.width, action_expert_config.width)
        else:
            # pi0: state becomes an extra token; time is fused into action tokens.
            self.state_proj = nn.Linear(32, action_expert_config.width)
            self.action_time_mlp_in = nn.Linear(2 * action_expert_config.width, action_expert_config.width)
            self.action_time_mlp_out = nn.Linear(action_expert_config.width, action_expert_config.width)

        torch.set_float32_matmul_precision("high")
        self.sample_actions = torch.compile(self.sample_actions, mode="max-autotune")

        # Initialize gradient checkpointing flag
        self.gradient_checkpointing_enabled = False

        # Hard fail early if the patched transformers files are missing.
        # NOTE(review): this message embeds a user-specific conda path — should be
        # made generic, but it is runtime text so it is left untouched here.
        msg = "transformers_replace is not installed correctly. Please install it with `uv pip install transformers==4.53.2` and `cp -r ./src/openpi/models_pytorch/transformers_replace/* /home/tianyang/miniconda3/envs/lam3d/lib/python3.11/site-packages/transformers/`."
        try:
            from transformers.models.siglip import check

            if not check.check_whether_transformers_replace_is_installed_correctly():
                raise ValueError(msg)
        except ImportError:
            raise ValueError(msg) from None

    def gradient_checkpointing_enable(self):
        """Enable gradient checkpointing for memory optimization."""
        self.gradient_checkpointing_enabled = True
        self.paligemma_with_expert.paligemma.language_model.gradient_checkpointing = True
        self.paligemma_with_expert.paligemma.vision_tower.gradient_checkpointing = True
        self.paligemma_with_expert.gemma_expert.model.gradient_checkpointing = True

        logging.info("Enabled gradient checkpointing for PI0Pytorch model")

    def gradient_checkpointing_disable(self):
        """Disable gradient checkpointing."""
        self.gradient_checkpointing_enabled = False
        self.paligemma_with_expert.paligemma.language_model.gradient_checkpointing = False
        self.paligemma_with_expert.paligemma.vision_tower.gradient_checkpointing = False
        self.paligemma_with_expert.gemma_expert.model.gradient_checkpointing = False

        logging.info("Disabled gradient checkpointing for PI0Pytorch model")

    def is_gradient_checkpointing_enabled(self):
        """Check if gradient checkpointing is enabled."""
        return self.gradient_checkpointing_enabled

    def _apply_checkpoint(self, func, *args, **kwargs):
        """Helper method to apply gradient checkpointing if enabled (training only)."""
        if self.gradient_checkpointing_enabled and self.training:
            return torch.utils.checkpoint.checkpoint(
                func, *args, use_reentrant=False, preserve_rng_state=False, **kwargs
            )
        return func(*args, **kwargs)

    def _prepare_attention_masks_4d(self, att_2d_masks):
        """Expand bool [B, N, N] masks to additive [B, 1, N, N]: 0 where visible,
        a large negative number where masked (pre-softmax bias)."""
        att_2d_masks_4d = att_2d_masks[:, None, :, :]
        return torch.where(att_2d_masks_4d, 0.0, -2.3819763e38)

    def _preprocess_observation(self, observation, *, train=True):
        """Normalize the observation and unpack it into plain tensors/lists."""
        observation = _preprocessing.preprocess_observation_pytorch(observation, train=train)
        return (
            list(observation.images.values()),
            list(observation.image_masks.values()),
            observation.tokenized_prompt,
            observation.tokenized_prompt_mask,
            observation.state,
        )

    def sample_noise(self, shape, device):
        """Standard-normal noise used as the flow-matching source distribution."""
        return torch.normal(
            mean=0.0,
            std=1.0,
            size=shape,
            dtype=torch.float32,
            device=device,
        )

    def sample_time(self, bsize, device):
        """Sample training times in (0, 1]: Beta(1.5, 1.0) rescaled to [0.001, 1.0]."""
        time_beta = sample_beta(1.5, 1.0, bsize, device)
        time = time_beta * 0.999 + 0.001
        return time.to(dtype=torch.float32, device=device)

    def embed_prefix(
        self, images, img_masks, lang_tokens, lang_masks
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Embed images with SigLIP and language tokens with embedding layer to prepare
        for PaliGemma transformer processing.

        Returns (embs, pad_masks, att_masks) where att_masks is all zeros, i.e.
        full bidirectional attention within the prefix.
        """
        embs = []
        pad_masks = []
        att_masks = []

        # Process images
        for img, img_mask in zip(images, img_masks, strict=True):

            def image_embed_func(img):
                return self.paligemma_with_expert.embed_image(img)

            img_emb = self._apply_checkpoint(image_embed_func, img)

            bsize, num_img_embs = img_emb.shape[:2]

            embs.append(img_emb)
            # Per-camera mask broadcast over all of that camera's patch tokens.
            pad_masks.append(img_mask[:, None].expand(bsize, num_img_embs))

            # Create attention masks so that image tokens attend to each other
            att_masks += [0] * num_img_embs

        # Process language tokens
        def lang_embed_func(lang_tokens):
            lang_emb = self.paligemma_with_expert.embed_language_tokens(lang_tokens)
            lang_emb_dim = lang_emb.shape[-1]
            # sqrt(dim) scaling, as in the Gemma input embedding convention.
            return lang_emb * math.sqrt(lang_emb_dim)

        lang_emb = self._apply_checkpoint(lang_embed_func, lang_tokens)

        embs.append(lang_emb)
        pad_masks.append(lang_masks)

        # full attention between image and language inputs
        num_lang_embs = lang_emb.shape[1]
        att_masks += [0] * num_lang_embs

        embs = torch.cat(embs, dim=1)
        pad_masks = torch.cat(pad_masks, dim=1)
        att_masks = torch.tensor(att_masks, dtype=torch.bool, device=pad_masks.device)

        # Get batch size from the first dimension of the concatenated tensors
        bsize = pad_masks.shape[0]
        att_masks = att_masks[None, :].expand(bsize, len(att_masks))

        return embs, pad_masks, att_masks

    def embed_suffix(self, state, noisy_actions, timestep):
        """Embed state, noisy_actions, timestep to prepare for Expert Gemma processing.

        Returns (embs, pad_masks, att_masks, adarms_cond); adarms_cond is None for
        pi0 (time fused into action tokens) and the time-MLP output for pi0.5.
        """
        embs = []
        pad_masks = []
        att_masks = []

        if not self.pi05:
            if self.state_proj.weight.dtype == torch.float32:
                state = state.to(torch.float32)

            # Embed state
            def state_proj_func(state):
                return self.state_proj(state)

            state_emb = self._apply_checkpoint(state_proj_func, state)

            embs.append(state_emb[:, None, :])
            bsize = state_emb.shape[0]
            device = state_emb.device

            state_mask = torch.ones(bsize, 1, dtype=torch.bool, device=device)
            pad_masks.append(state_mask)

            # Set attention masks so that image and language inputs do not attend to state or actions
            att_masks += [1]

        # Embed timestep using sine-cosine positional encoding with sensitivity in the range [0, 1]
        time_emb = create_sinusoidal_pos_embedding(
            timestep, self.action_in_proj.out_features, min_period=4e-3, max_period=4.0, device=timestep.device
        )
        time_emb = time_emb.type(dtype=timestep.dtype)

        # Fuse timestep + action information using an MLP
        def action_proj_func(noisy_actions):
            return self.action_in_proj(noisy_actions)

        action_emb = self._apply_checkpoint(action_proj_func, noisy_actions)

        if not self.pi05:
            # pi0: concatenate time onto every action token and fuse with an MLP.
            time_emb = time_emb[:, None, :].expand_as(action_emb)
            action_time_emb = torch.cat([action_emb, time_emb], dim=2)

            # Apply MLP layers
            def mlp_func(action_time_emb):
                x = self.action_time_mlp_in(action_time_emb)
                x = F.silu(x)  # swish == silu
                return self.action_time_mlp_out(x)

            action_time_emb = self._apply_checkpoint(mlp_func, action_time_emb)
            adarms_cond = None
        else:
            # time MLP (for adaRMS)
            def time_mlp_func(time_emb):
                x = self.time_mlp_in(time_emb)
                x = F.silu(x)  # swish == silu
                x = self.time_mlp_out(x)
                return F.silu(x)

            time_emb = self._apply_checkpoint(time_mlp_func, time_emb)
            action_time_emb = action_emb
            adarms_cond = time_emb

        # Add to input tokens
        embs.append(action_time_emb)

        bsize, action_time_dim = action_time_emb.shape[:2]
        action_time_mask = torch.ones(bsize, action_time_dim, dtype=torch.bool, device=timestep.device)
        pad_masks.append(action_time_mask)

        # Set attention masks so that image, language and state inputs do not attend to action tokens
        att_masks += [1] + ([0] * (self.config.action_horizon - 1))

        embs = torch.cat(embs, dim=1)
        pad_masks = torch.cat(pad_masks, dim=1)
        att_masks = torch.tensor(att_masks, dtype=embs.dtype, device=embs.device)
        att_masks = att_masks[None, :].expand(bsize, len(att_masks))

        return embs, pad_masks, att_masks, adarms_cond

    def forward(self, observation, actions, noise=None, time=None) -> Tensor:
        """Do a full training forward pass and compute the loss (batch_size x num_steps x num_motors)"""
        images, img_masks, lang_tokens, lang_masks, state = self._preprocess_observation(observation, train=True)

        if noise is None:
            noise = self.sample_noise(actions.shape, actions.device)

        if time is None:
            time = self.sample_time(actions.shape[0], actions.device)

        # Flow-matching interpolation: x_t linearly blends noise (t=1) and actions
        # (t=0); the regression target is the constant velocity u_t = noise - actions.
        time_expanded = time[:, None, None]
        x_t = time_expanded * noise + (1 - time_expanded) * actions
        u_t = noise - actions

        prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(images, img_masks, lang_tokens, lang_masks)
        suffix_embs, suffix_pad_masks, suffix_att_masks, adarms_cond = self.embed_suffix(state, x_t, time)
        # Match the embedding dtype to the transformer weights when running bf16.
        if (
            self.paligemma_with_expert.paligemma.language_model.layers[0].self_attn.q_proj.weight.dtype
            == torch.bfloat16
        ):
            suffix_embs = suffix_embs.to(dtype=torch.bfloat16)
            prefix_embs = prefix_embs.to(dtype=torch.bfloat16)

        pad_masks = torch.cat([prefix_pad_masks, suffix_pad_masks], dim=1)
        att_masks = torch.cat([prefix_att_masks, suffix_att_masks], dim=1)

        att_2d_masks = make_att_2d_masks(pad_masks, att_masks)
        # Positions count only non-padding tokens.
        position_ids = torch.cumsum(pad_masks, dim=1) - 1

        # Prepare attention masks
        att_2d_masks_4d = self._prepare_attention_masks_4d(att_2d_masks)

        # Apply gradient checkpointing if enabled
        def forward_func(prefix_embs, suffix_embs, att_2d_masks_4d, position_ids, adarms_cond):
            (_, suffix_out), _ = self.paligemma_with_expert.forward(
                attention_mask=att_2d_masks_4d,
                position_ids=position_ids,
                past_key_values=None,
                inputs_embeds=[prefix_embs, suffix_embs],
                use_cache=False,
                adarms_cond=[None, adarms_cond],
            )
            return suffix_out

        suffix_out = self._apply_checkpoint(
            forward_func, prefix_embs, suffix_embs, att_2d_masks_4d, position_ids, adarms_cond
        )

        # Only the trailing action tokens carry the velocity prediction.
        suffix_out = suffix_out[:, -self.config.action_horizon :]
        suffix_out = suffix_out.to(dtype=torch.float32)

        # Apply gradient checkpointing to final action projection if enabled
        def action_out_proj_func(suffix_out):
            return self.action_out_proj(suffix_out)

        v_t = self._apply_checkpoint(action_out_proj_func, suffix_out)

        # Per-element loss; reduction is left to the caller.
        return F.mse_loss(u_t, v_t, reduction="none")

    @torch.no_grad()
    def sample_actions(self, device, observation, noise=None, num_steps=10) -> Tensor:
        """Do a full inference forward and compute the action (batch_size x num_steps x num_motors)"""
        bsize = observation.state.shape[0]
        if noise is None:
            actions_shape = (bsize, self.config.action_horizon, self.config.action_dim)
            noise = self.sample_noise(actions_shape, device)

        images, img_masks, lang_tokens, lang_masks, state = self._preprocess_observation(observation, train=False)

        prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(images, img_masks, lang_tokens, lang_masks)
        prefix_att_2d_masks = make_att_2d_masks(prefix_pad_masks, prefix_att_masks)
        prefix_position_ids = torch.cumsum(prefix_pad_masks, dim=1) - 1

        # Compute image and language key value cache (prefix-only pass).
        prefix_att_2d_masks_4d = self._prepare_attention_masks_4d(prefix_att_2d_masks)
        self.paligemma_with_expert.paligemma.language_model.config._attn_implementation = "eager"  # noqa: SLF001

        _, past_key_values = self.paligemma_with_expert.forward(
            attention_mask=prefix_att_2d_masks_4d,
            position_ids=prefix_position_ids,
            past_key_values=None,
            inputs_embeds=[prefix_embs, None],
            use_cache=True,
        )

        # Euler-integrate the velocity field from t=1 (noise) down to t=0 (actions).
        dt = -1.0 / num_steps
        dt = torch.tensor(dt, dtype=torch.float32, device=device)

        x_t = noise
        time = torch.tensor(1.0, dtype=torch.float32, device=device)
        while time >= -dt / 2:
            expanded_time = time.expand(bsize)
            v_t = self.denoise_step(
                state,
                prefix_pad_masks,
                past_key_values,
                x_t,
                expanded_time,
            )

            # Euler step - use new tensor assignment instead of in-place operation
            x_t = x_t + dt * v_t
            time += dt
        return x_t

    def denoise_step(
        self,
        state,
        prefix_pad_masks,
        past_key_values,
        x_t,
        timestep,
    ):
        """Apply one denoising step of the noise `x_t` at a given timestep."""
        suffix_embs, suffix_pad_masks, suffix_att_masks, adarms_cond = self.embed_suffix(state, x_t, timestep)

        suffix_len = suffix_pad_masks.shape[1]
        batch_size = prefix_pad_masks.shape[0]
        prefix_len = prefix_pad_masks.shape[1]

        # Suffix tokens may attend to every non-padding prefix token (cached KVs)...
        prefix_pad_2d_masks = prefix_pad_masks[:, None, :].expand(batch_size, suffix_len, prefix_len)

        # ...and to each other per the suffix block-attention structure.
        suffix_att_2d_masks = make_att_2d_masks(suffix_pad_masks, suffix_att_masks)

        full_att_2d_masks = torch.cat([prefix_pad_2d_masks, suffix_att_2d_masks], dim=2)

        # Suffix positions continue from the end of each sample's prefix.
        prefix_offsets = torch.sum(prefix_pad_masks, dim=-1)[:, None]
        position_ids = prefix_offsets + torch.cumsum(suffix_pad_masks, dim=1) - 1

        # Prepare attention masks
        full_att_2d_masks_4d = self._prepare_attention_masks_4d(full_att_2d_masks)
        self.paligemma_with_expert.gemma_expert.model.config._attn_implementation = "eager"  # noqa: SLF001

        outputs_embeds, _ = self.paligemma_with_expert.forward(
            attention_mask=full_att_2d_masks_4d,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=[None, suffix_embs],
            use_cache=False,
            adarms_cond=[None, adarms_cond],
        )

        suffix_out = outputs_embeds[1]
        suffix_out = suffix_out[:, -self.config.action_horizon :]
        suffix_out = suffix_out.to(dtype=torch.float32)
        return self.action_out_proj(suffix_out)
+ """ + if not set(image_keys).issubset(observation.images): + raise ValueError(f"images dict missing keys: expected {image_keys}, got {list(observation.images)}") + + batch_shape = observation.state.shape[:-1] + + out_images = {} + for key in image_keys: + image = observation.images[key] + + # TODO: This is a hack to handle both [B, C, H, W] and [B, H, W, C] formats + # Handle both [B, C, H, W] and [B, H, W, C] formats + is_channels_first = image.shape[1] == 3 # Check if channels are in dimension 1 + + if is_channels_first: + # Convert [B, C, H, W] to [B, H, W, C] for processing + image = image.permute(0, 2, 3, 1) + + if image.shape[1:3] != image_resolution: + logger.info(f"Resizing image {key} from {image.shape[1:3]} to {image_resolution}") + image = image_tools.resize_with_pad_torch(image, *image_resolution) + + if train: + # Convert from [-1, 1] to [0, 1] for PyTorch augmentations + image = image / 2.0 + 0.5 + + # Apply PyTorch-based augmentations + if "wrist" not in key: + # Geometric augmentations for non-wrist cameras + height, width = image.shape[1:3] + + # Random crop and resize + crop_height = int(height * 0.95) + crop_width = int(width * 0.95) + + # Random crop + max_h = height - crop_height + max_w = width - crop_width + if max_h > 0 and max_w > 0: + # Use tensor operations instead of .item() for torch.compile compatibility + start_h = torch.randint(0, max_h + 1, (1,), device=image.device) + start_w = torch.randint(0, max_w + 1, (1,), device=image.device) + image = image[:, start_h : start_h + crop_height, start_w : start_w + crop_width, :] + + # Resize back to original size + image = torch.nn.functional.interpolate( + image.permute(0, 3, 1, 2), # [b, h, w, c] -> [b, c, h, w] + size=(height, width), + mode="bilinear", + align_corners=False, + ).permute(0, 2, 3, 1) # [b, c, h, w] -> [b, h, w, c] + + # Random rotation (small angles) + # Use tensor operations instead of .item() for torch.compile compatibility + angle = torch.rand(1, device=image.device) * 
10 - 5 # Random angle between -5 and 5 degrees + if torch.abs(angle) > 0.1: # Only rotate if angle is significant + # Convert to radians + angle_rad = angle * torch.pi / 180.0 + + # Create rotation matrix + cos_a = torch.cos(angle_rad) + sin_a = torch.sin(angle_rad) + + # Apply rotation using grid_sample + grid_x = torch.linspace(-1, 1, width, device=image.device) + grid_y = torch.linspace(-1, 1, height, device=image.device) + + # Create meshgrid + grid_y, grid_x = torch.meshgrid(grid_y, grid_x, indexing="ij") + + # Expand to batch dimension + grid_x = grid_x.unsqueeze(0).expand(image.shape[0], -1, -1) + grid_y = grid_y.unsqueeze(0).expand(image.shape[0], -1, -1) + + # Apply rotation transformation + grid_x_rot = grid_x * cos_a - grid_y * sin_a + grid_y_rot = grid_x * sin_a + grid_y * cos_a + + # Stack and reshape for grid_sample + grid = torch.stack([grid_x_rot, grid_y_rot], dim=-1) + + image = torch.nn.functional.grid_sample( + image.permute(0, 3, 1, 2), # [b, h, w, c] -> [b, c, h, w] + grid, + mode="bilinear", + padding_mode="zeros", + align_corners=False, + ).permute(0, 2, 3, 1) # [b, c, h, w] -> [b, h, w, c] + + # Color augmentations for all cameras + # Random brightness + # Use tensor operations instead of .item() for torch.compile compatibility + brightness_factor = 0.7 + torch.rand(1, device=image.device) * 0.6 # Random factor between 0.7 and 1.3 + image = image * brightness_factor + + # Random contrast + # Use tensor operations instead of .item() for torch.compile compatibility + contrast_factor = 0.6 + torch.rand(1, device=image.device) * 0.8 # Random factor between 0.6 and 1.4 + mean = image.mean(dim=[1, 2, 3], keepdim=True) + image = (image - mean) * contrast_factor + mean + + # Random saturation (convert to HSV, modify S, convert back) + # For simplicity, we'll just apply a random scaling to the color channels + # Use tensor operations instead of .item() for torch.compile compatibility + saturation_factor = 0.5 + torch.rand(1, device=image.device) * 
1.0 # Random factor between 0.5 and 1.5 + gray = image.mean(dim=-1, keepdim=True) + image = gray + (image - gray) * saturation_factor + + # Clamp values to [0, 1] + image = torch.clamp(image, 0, 1) + + # Back to [-1, 1] + image = image * 2.0 - 1.0 + + # Convert back to [B, C, H, W] format if it was originally channels-first + if is_channels_first: + image = image.permute(0, 3, 1, 2) # [B, H, W, C] -> [B, C, H, W] + + out_images[key] = image + + # obtain mask + out_masks = {} + for key in out_images: + if key not in observation.image_masks: + # do not mask by default + out_masks[key] = torch.ones(batch_shape, dtype=torch.bool, device=observation.state.device) + else: + out_masks[key] = observation.image_masks[key] + + # Create a simple object with the required attributes instead of using the complex Observation class + class SimpleProcessedObservation: + def __init__(self, **kwargs): + for key, value in kwargs.items(): + setattr(self, key, value) + + return SimpleProcessedObservation( + images=out_images, + image_masks=out_masks, + state=observation.state, + tokenized_prompt=observation.tokenized_prompt, + tokenized_prompt_mask=observation.tokenized_prompt_mask, + token_ar_mask=observation.token_ar_mask, + token_loss_mask=observation.token_loss_mask, + ) diff --git a/policy/openpi-InternData-A1/src/openpi/models_pytorch/transformers_replace/models/gemma/configuration_gemma.py b/policy/openpi-InternData-A1/src/openpi/models_pytorch/transformers_replace/models/gemma/configuration_gemma.py new file mode 100644 index 0000000..a42abe1 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models_pytorch/transformers_replace/models/gemma/configuration_gemma.py @@ -0,0 +1,173 @@ +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# This file was automatically generated from src/transformers/models/gemma/modular_gemma.py. +# Do NOT edit this file manually as any edits will be overwritten by the generation of +# the file from the modular. 
class GemmaConfig(PretrainedConfig):
    r"""
    Configuration class for [`GemmaModel`]. Instantiating it with the defaults
    yields a Gemma-7B-like configuration
    (e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b)).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to
    control the model outputs; see [`PretrainedConfig`] for details.

    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size; number of distinct tokens accepted by [`GemmaModel`].
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 24576):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads per attention layer.
        num_key_value_heads (`int`, *optional*, defaults to 16):
            Number of key/value heads for Grouped Query Attention. Equal to
            `num_attention_heads` gives MHA, 1 gives MQA, anything in between GQA.
            When converting an MHA checkpoint to GQA, each group's key/value head
            should be built by mean-pooling the original heads in that group
            (https://huggingface.co/papers/2305.13245). Defaults to
            `num_attention_heads` when unset.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            Legacy activation; overridden by `hidden_activation`.
        hidden_activation (`str` or `function`, *optional*):
            Decoder activation. Defaults to `"gelu_pytorch_tanh"` (an
            approximation of `"gelu"`) when unset.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            Maximum sequence length the model may be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Std of the truncated-normal initializer for weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            Epsilon used by the RMS normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model returns the last key/value attentions. Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie input/output embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            Base period of the RoPE embeddings.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether q/k/v/o projections use a bias term.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout ratio for attention probabilities.
        use_adarms (`bool`, *optional*, defaults to `False`):
            Whether to use adaptive RMSNorm (AdaRMS) conditioning.
        adarms_cond_dim (`int`, *optional*, defaults to `None`):
            Dimension of the AdaRMS condition vector; defaults to `hidden_size`
            when `use_adarms` is enabled.

    ```python
    >>> from transformers import GemmaModel, GemmaConfig
    >>> configuration = GemmaConfig()
    >>> model = GemmaModel(configuration)
    >>> configuration = model.config
    ```"""

    model_type = "gemma"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=256000,
        hidden_size=3072,
        intermediate_size=24576,
        num_hidden_layers=28,
        num_attention_heads=16,
        num_key_value_heads=16,
        head_dim=256,
        hidden_act="gelu_pytorch_tanh",
        hidden_activation=None,
        max_position_embeddings=8192,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        bos_token_id=2,
        tie_word_embeddings=True,
        rope_theta=10000.0,
        attention_bias=False,
        attention_dropout=0.0,
        use_adarms: bool = False,
        adarms_cond_dim: Optional[int] = None,
        **kwargs,
    ):
        # Core architecture sizes.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.max_position_embeddings = max_position_embeddings

        # Activations and normalization.
        self.hidden_act = hidden_act
        self.hidden_activation = hidden_activation
        self.rms_norm_eps = rms_norm_eps
        self.initializer_range = initializer_range

        # Attention behavior.
        self.rope_theta = rope_theta
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.use_cache = use_cache

        # AdaRMS conditioning (openpi extension).
        self.use_adarms = use_adarms
        self.adarms_cond_dim = adarms_cond_dim
        if self.use_adarms and self.adarms_cond_dim is None:
            # Default the condition dimension to the hidden size.
            self.adarms_cond_dim = self.hidden_size

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


__all__ = ["GemmaConfig"]
🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# This file was automatically generated from src/transformers/models/gemma/modular_gemma.py. +# Do NOT edit this file manually as any edits will be overwritten by the generation of +# the file from the modular. If any change should be done, please apply the change to the +# modular_gemma.py file directly. One of our CI enforces this. +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# coding=utf-8 +# Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class GemmaRMSNorm(nn.Module):
    """RMS normalization with optional adaptive (AdaRMS) conditioning.

    In the plain mode a learned per-channel scale `weight` is applied as
    `x_normed * (1 + weight)`. When `cond_dim` is given, a zero-initialized
    linear layer maps a condition vector to (scale, shift, gate); the gate is
    returned to the caller for gated residual connections.
    """

    def __init__(self, dim: int, eps: float = 1e-6, cond_dim: Optional[int] = None):
        super().__init__()
        self.eps = eps
        self.dim = dim
        self.cond_dim = cond_dim

        if cond_dim is not None:
            # Adaptive mode: condition -> (scale, shift, gate), hence dim * 3.
            self.dense = nn.Linear(cond_dim, dim * 3, bias=True)
            # Zero-init the weight so the layer starts as a plain RMSNorm
            # (matches the source implementation). NOTE(review): the bias is
            # left at its default init — confirm against the JAX source.
            nn.init.zeros_(self.dense.weight)
        else:
            # Plain mode: zero-init so the effective scale starts at 1.
            self.weight = nn.Parameter(torch.zeros(dim, dtype=torch.bfloat16))
            self.dense = None

    def _norm(self, x):
        """Normalize `x` by its RMS, computed in float32 for stability."""
        var = torch.mean(torch.square(x.float()), dim=-1, keepdim=True)
        return x * torch.rsqrt(var + self.eps)

    def forward(self, x, cond=None):
        """Return `(normalized, gate)`; `gate` is None in the plain mode.

        Args:
            x: Input activations `[..., dim]` (any float dtype; math in float32).
            cond: Optional condition tensor `[batch, cond_dim]` for AdaRMS.

        Raises:
            ValueError: If `cond` does not have `cond_dim` features.
        """
        dtype = x.dtype  # original dtype, possibly half precision
        normed_inputs = self._norm(x)

        if cond is None or self.dense is None:
            # Plain RMSNorm: scale by (1 + weight) in float32.
            normed_inputs = normed_inputs * (1.0 + self.weight.float())
            return normed_inputs.to(dtype), None

        if cond.shape[-1] != self.cond_dim:
            raise ValueError(f"Expected cond dimension {self.cond_dim}, got {cond.shape[-1]}")

        modulation = self.dense(cond)
        # Broadcast over the sequence axis: [batch, 1, 3*dim] vs [batch, seq, dim].
        if len(x.shape) == 3:
            modulation = modulation.unsqueeze(1)

        scale, shift, gate = torch.chunk(modulation, 3, dim=-1)

        # Affine modulation in float32; cast back to the caller's dtype.
        normed_inputs = normed_inputs * (1 + scale.to(torch.float32)) + shift.to(torch.float32)
        return normed_inputs.to(dtype), gate.to(dtype)

    def extra_repr(self):
        # Bug fix: the adaptive mode has no `self.weight`, so the previous
        # unconditional `self.weight.shape` access made repr() raise
        # AttributeError for adaptive layers.
        if self.dense is not None:
            return f"({self.dim},), eps={self.eps}, adaptive=True, cond_dim={self.cond_dim}"
        return f"{tuple(self.weight.shape)}, eps={self.eps}"


class GemmaMLP(nn.Module):
    """Standard Gemma gated MLP: down_proj(act(gate_proj(x)) * up_proj(x))."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        """Apply the gated feed-forward transform to `x`."""
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj
class GemmaRotaryEmbedding(nn.Module):
    """Precomputes RoPE inverse frequencies and emits per-position (cos, sin)."""

    def __init__(self, config: GemmaConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally called "type".
        scaling = getattr(config, "rope_scaling", None)
        if scaling is not None:
            self.rope_type = scaling.get("rope_type", scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        # [1, dim/2, 1] frequency column times [batch, 1, seq] position row.
        freq_cols = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        pos_rows = position_ids[:, None, :].float()

        # Force float32 math; MPS cannot autocast-disable, so fall back to "cpu".
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (freq_cols.float() @ pos_rows.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    half = x.shape[-1] // 2
    front, back = x[..., :half], x[..., half:]
    return torch.cat((-back, front), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*): Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which to unsqueeze `cos`/`sin` so they broadcast
            against q/k: use 1 for [batch, heads, seq, head_dim] layouts and
            2 for [batch, seq, heads, head_dim] layouts.

    Returns:
        `tuple(torch.Tensor)`: the rotated query and key tensors.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    Equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep): expands
    (batch, num_key_value_heads, seqlen, head_dim) to
    (batch, num_attention_heads, seqlen, head_dim) for GQA.
    """
    batch, kv_heads, seq_len, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    expanded = hidden_states[:, :, None, :, :].expand(batch, kv_heads, n_rep, seq_len, head_dim)
    return expanded.reshape(batch, kv_heads * n_rep, seq_len, head_dim)
def _gated_residual(x, y, gate):
    """
    Gated residual connection with None-tolerant operands.

    Args:
        x: Residual input tensor (may be None).
        y: Branch output tensor (may be None).
        gate: Optional per-channel gate applied to `y`.

    Returns:
        None if both inputs are None; the non-None operand if only one exists;
        otherwise `x + y` (or `x + y * gate` when a gate is given).
    """
    if x is None:
        return y  # also covers the both-None case
    if y is None:
        return x
    return x + y if gate is None else x + y * gate


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    """Plain softmax attention with GQA head expansion (no fused kernels)."""
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    scores = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # Crop the (possibly longer) mask to the current key length.
        scores = scores + attention_mask[:, :, :, : key_states.shape[-2]]

    # Softmax in float32 for numerical stability, then back to query dtype.
    probs = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(query.dtype)
    probs = nn.functional.dropout(probs, p=dropout, training=module.training)
    attn_output = torch.matmul(probs, value_states).transpose(1, 2).contiguous()

    return attn_output, probs


class GemmaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GemmaConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        q_out = config.num_attention_heads * self.head_dim
        kv_out = config.num_key_value_heads * self.head_dim
        self.q_proj = nn.Linear(config.hidden_size, q_out, bias=config.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size, kv_out, bias=config.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size, kv_out, bias=config.attention_bias)
        self.o_proj = nn.Linear(q_out, config.hidden_size, bias=config.attention_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        use_cache: bool = False,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        leading_shape = hidden_states.shape[:-1]
        per_head_shape = (*leading_shape, -1, self.head_dim)

        # Project and move heads to dim 1: [batch, heads, seq, head_dim].
        query_states = self.q_proj(hidden_states).view(per_head_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(per_head_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(per_head_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            if use_cache:
                # sin/cos are RoPE-specific; cache_position is needed for static caches.
                cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
                key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
            else:
                # Read-only reuse: concatenate cached keys/values without mutating the cache.
                key_states = torch.cat([past_key_value[self.layer_idx][0], key_states], dim=2)
                value_states = torch.cat([past_key_value[self.layer_idx][1], value_states], dim=2)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        # Merge heads back and project to the residual stream.
        attn_output = attn_output.reshape(*leading_shape, -1).contiguous()
        return self.o_proj(attn_output), attn_weights


class GemmaDecoderLayer(GradientCheckpointingLayer):
    """One decoder block: (AdaRMS-)norm -> attention -> norm -> MLP with gated residuals."""

    def __init__(self, config: GemmaConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = GemmaAttention(config=config, layer_idx=layer_idx)
        self.mlp = GemmaMLP(config)
        # Adaptive norms only when AdaRMS is enabled on the config.
        cond_dim = getattr(config, 'adarms_cond_dim', None) if getattr(config, 'use_adarms', False) else None
        self.input_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps, cond_dim=cond_dim)
        self.post_attention_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps, cond_dim=cond_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
        adarms_cond: Optional[torch.Tensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        # Attention sub-block with gated residual.
        residual = hidden_states
        hidden_states, gate = self.input_layernorm(hidden_states, adarms_cond)
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = _gated_residual(residual, hidden_states, gate)

        # MLP sub-block with gated residual.
        residual = hidden_states
        hidden_states, gate = self.post_attention_layernorm(hidden_states, adarms_cond)
        hidden_states = self.mlp(hidden_states)
        hidden_states = _gated_residual(residual, hidden_states, gate)

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)
        return outputs
@auto_docstring
class GemmaPreTrainedModel(PreTrainedModel):
    config_class = GemmaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GemmaDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_3 = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        """Normal-init linear/embedding weights; ones for plain RMSNorm scales."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, GemmaRMSNorm):
            # Adaptive RMSNorm layers have no `weight` attribute.
            if hasattr(module, 'weight'):
                module.weight.data.fill_(1.0)


@auto_docstring
class GemmaModel(GemmaPreTrainedModel):
    def __init__(self, config: GemmaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        # Final norm is adaptive only when AdaRMS is enabled.
        cond_dim = getattr(config, 'adarms_cond_dim', None) if getattr(config, 'use_adarms', False) else None
        self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps, cond_dim=cond_dim)
        self.rotary_emb = GemmaRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        adarms_cond: Optional[torch.Tensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        """
        adarms_cond (`torch.Tensor` of shape `(batch_size, cond_dim)`, *optional*):
            Condition for ADARMS.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        # Match the parameter dtype when the stack runs in bfloat16.
        if len(self.layers) > 0 and self.layers[0].self_attn.q_proj.weight.dtype == torch.bfloat16:
            hidden_states = hidden_states.to(torch.bfloat16)

        # Shared rotary position embeddings for all decoder layers.
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # Gemma's sqrt(hidden_size) embedding scaling is intentionally DISABLED
        # here (the multiply is commented out upstream); the tensor is kept for
        # parity with the original code. See
        # https://github.com/huggingface/transformers/pull/29402 for the
        # half-precision rounding caveat (sqrt(3072)=55.4256 -> 55.5 in fp16).
        normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
        #hidden_states = hidden_states * normalizer

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                adarms_cond=adarms_cond,
                **kwargs,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        # Final (possibly adaptive) norm; the gate is unused at the top level.
        hidden_states, _ = self.norm(hidden_states, adarms_cond)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
Optional[torch.LongTensor] = None, + logits_to_keep: Union[int, torch.Tensor] = 0, + adarms_cond: Optional[torch.Tensor] = None, + **kwargs: Unpack[KwargsForCausalLM], + ) -> CausalLMOutputWithPast: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + adarms_cond (`torch.Tensor` of shape `(batch_size, cond_dim)`, *optional*): + Condition for ADARMS. + + Example: + + ```python + >>> from transformers import AutoTokenizer, GemmaForCausalLM + + >>> model = GemmaForCausalLM.from_pretrained("google/gemma-7b") + >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b") + + >>> prompt = "What is your favorite condiment?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "What is your favorite condiment?" 
+ ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs: BaseModelOutputWithPast = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + cache_position=cache_position, + adarms_cond=adarms_cond, + **kwargs, + ) + + hidden_states = outputs.last_hidden_state + # Only compute necessary logits, and do not upcast them to float if we are not computing the loss + slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep + logits = self.lm_head(hidden_states[:, slice_indices, :]) + + loss = None + if labels is not None: + loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@auto_docstring( + custom_intro=""" + The Gemma Model transformer with a sequence classification head on top (linear layer). + + [`GemmaForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). + """ +) +class GemmaForSequenceClassification(GemmaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = GemmaModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @can_return_tuple + @auto_docstring + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Cache] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + adarms_cond: Optional[torch.Tensor] = None, + ) -> SequenceClassifierOutputWithPast: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + + adarms_cond (`torch.Tensor` of shape `(batch_size, cond_dim)`, *optional*): + Condition for ADARMS. 
+ """ + + transformer_outputs: BaseModelOutputWithPast = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + adarms_cond=adarms_cond, + ) + hidden_states = transformer_outputs.last_hidden_state + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + last_non_pad_token = -1 + elif input_ids is not None: + # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id + non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) + token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) + last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) + else: + last_non_pad_token = -1 + logger.warning_once( + f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " + "unexpected if using padding tokens in conjunction with `inputs_embeds.`" + ) + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] + + loss = None + if labels is not None: + loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config) + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + +@auto_docstring +class GemmaForTokenClassification(GemmaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = GemmaModel(config) + if getattr(config, "classifier_dropout", None) is not None: + classifier_dropout = config.classifier_dropout + elif getattr(config, "hidden_dropout", None) is not None: + classifier_dropout = config.hidden_dropout + else: + classifier_dropout = 0.1 + self.dropout = nn.Dropout(classifier_dropout) + self.score = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @can_return_tuple + @auto_docstring + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Cache] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + adarms_cond: Optional[torch.Tensor] = None, + ) -> TokenClassifierOutput: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + 
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + + adarms_cond (`torch.Tensor` of shape `(batch_size, cond_dim)`, *optional*): + Condition for ADARMS. + """ + + outputs: BaseModelOutputWithPast = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + adarms_cond=adarms_cond, + ) + sequence_output = outputs.last_hidden_state + sequence_output = self.dropout(sequence_output) + logits = self.score(sequence_output) + + loss = None + if labels is not None: + loss = self.loss_function(logits, labels, self.config) + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +__all__ = [ + "GemmaModel", + "GemmaForCausalLM", + "GemmaForSequenceClassification", + "GemmaForTokenClassification", + "GemmaPreTrainedModel", +] diff --git a/policy/openpi-InternData-A1/src/openpi/models_pytorch/transformers_replace/models/paligemma/modeling_paligemma.py b/policy/openpi-InternData-A1/src/openpi/models_pytorch/transformers_replace/models/paligemma/modeling_paligemma.py new file mode 100644 index 0000000..fbf0e94 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models_pytorch/transformers_replace/models/paligemma/modeling_paligemma.py @@ -0,0 +1,622 @@ +# coding=utf-8 +# Copyright 2024 the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch PaliGemma model."""
+
+from dataclasses import dataclass
+from typing import Optional, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...cache_utils import Cache, HybridCache, StaticCache
+from ...generation import GenerationMixin
+from ...modeling_flash_attention_utils import FlashAttentionKwargs
+from ...modeling_outputs import BaseModelOutputWithPast
+from ...modeling_utils import PreTrainedModel
+from ...processing_utils import Unpack
+from ...utils import LossKwargs, ModelOutput, auto_docstring, can_return_tuple, is_torchdynamo_compiling, logging
+from ..auto import AutoModel
+from .configuration_paligemma import PaliGemmaConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+@dataclass
+@auto_docstring(
+    custom_intro="""
+    Base class for PaliGemma outputs, with hidden states and attentions.
+    """
+)
+class PaligemmaModelOutputWithPast(BaseModelOutputWithPast):
+    r"""
+    past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+        Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+        `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
+
+        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+        `past_key_values` input) to speed up sequential decoding.
+    image_hidden_states (`torch.FloatTensor`, *optional*):
+        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
+ image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. + """ + + image_hidden_states: Optional[torch.FloatTensor] = None + + +@dataclass +@auto_docstring( + custom_intro=""" + Base class for PaliGemma causal language model (or autoregressive) outputs. + """ +) +class PaliGemmaCausalLMOutputWithPast(ModelOutput): + r""" + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) + + Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + image_hidden_states (`torch.FloatTensor`, *optional*): + A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. + image_hidden_states of the model produced by the vision encoder after projecting last hidden state. 
+    """
+
+    loss: Optional[torch.FloatTensor] = None
+    logits: Optional[torch.FloatTensor] = None
+    past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None
+    hidden_states: Optional[tuple[torch.FloatTensor]] = None
+    attentions: Optional[tuple[torch.FloatTensor]] = None
+    image_hidden_states: Optional[torch.FloatTensor] = None
+
+
+class PaliGemmaMultiModalProjector(nn.Module):
+    def __init__(self, config: PaliGemmaConfig):
+        super().__init__()
+        self.linear = nn.Linear(config.vision_config.hidden_size, config.vision_config.projection_dim, bias=True)
+
+    def forward(self, image_features):
+        hidden_states = self.linear(image_features)
+
+        return hidden_states
+
+
+@auto_docstring
+class PaliGemmaPreTrainedModel(PreTrainedModel):
+    config_class = PaliGemmaConfig
+    base_model_prefix = ""
+    supports_gradient_checkpointing = True
+    _no_split_modules = ["PaliGemmaMultiModalProjector"]
+    _skip_keys_device_placement = "past_key_values"
+    _supports_cache_class = True
+    _supports_quantized_cache = True
+    _supports_static_cache = True
+    _supports_flash_attn_2 = True
+    _supports_sdpa = True
+    _supports_flex_attn = True
+    _supports_attention_backend = True
+
+    def _init_weights(self, module):
+        # important: this ported version of PaliGemma isn't meant for training from scratch - only
+        # inference and fine-tuning
+        std = getattr(self.config, "initializer_range", self.config.get_text_config().initializer_range)
+
+        if isinstance(module, nn.Linear):
+            module.weight.data.normal_(mean=0.0, std=std)
+            if module.bias is not None:
+                module.bias.data.zero_()
+
+
+@auto_docstring(
+    custom_intro="""
+    The base PaliGemma model, which consists of a vision backbone and a language model without a language modeling head.
+    """
+)
+class PaliGemmaModel(PaliGemmaPreTrainedModel):
+    _checkpoint_conversion_mapping = {"language_model.model": "language_model"}
+    # we are filtering the logits/labels so we shouldn't divide the loss based on num_items_in_batch
+    accepts_loss_kwargs = False
+ + def __init__(self, config: PaliGemmaConfig): + super().__init__(config) + self.vision_tower = AutoModel.from_config(config=config.vision_config) + self.multi_modal_projector = PaliGemmaMultiModalProjector(config) + self.vocab_size = config.text_config.vocab_size + + language_model = AutoModel.from_config(config=config.text_config) + self.language_model = language_model + + self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1 + self.post_init() + + # Copied from transformers.models.llava.modeling_llava.LlavaModel.get_input_embeddings with Llava->PaliGemma + def get_input_embeddings(self): + return self.language_model.get_input_embeddings() + + # Copied from transformers.models.llava.modeling_llava.LlavaModel.set_input_embeddings with Llava->PaliGemma + def set_input_embeddings(self, value): + self.language_model.set_input_embeddings(value) + + def set_decoder(self, decoder): + self.language_model = decoder + + def get_decoder(self): + return self.language_model + + def _update_causal_mask( + self, + attention_mask, + token_type_ids=None, + past_key_values=None, + cache_position=None, + input_tensor=None, + is_training: Optional[bool] = None, + ): + if self.config.text_config._attn_implementation == "flash_attention_2": + if attention_mask is not None and 0.0 in attention_mask: + return attention_mask + return None + is_training = is_training if is_training is not None else self.training + using_static_cache = isinstance(past_key_values, StaticCache) + min_dtype = torch.finfo(self.dtype).min + if input_tensor is None: + input_tensor = attention_mask + + inputs_lead_dim, sequence_length = input_tensor.shape[:2] + if using_static_cache: + target_length = past_key_values.get_max_cache_shape() + elif isinstance(past_key_values, HybridCache): + target_length = past_key_values.get_max_cache_shape() + else: + target_length = ( + attention_mask.shape[-1] + if isinstance(attention_mask, torch.Tensor) + else cache_position[0] + 
sequence_length + 1 + ) + + if attention_mask is not None and attention_mask.dim() == 4: + # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. + return attention_mask + + causal_mask = torch.full( + (sequence_length, target_length), fill_value=min_dtype, dtype=self.dtype, device=cache_position.device + ) + # Causal diagonal mask only if training, otherwise attend to the whole prefix. Training-specific attn for prefix is handled below + if sequence_length != 1: + if is_training: + causal_mask = torch.triu(causal_mask, diagonal=1) + else: + causal_mask[:, :sequence_length] = 0.0 + + causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) + causal_mask = causal_mask[None, None, :, :].expand(inputs_lead_dim, 1, -1, -1) + if attention_mask is not None: + causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit + mask_length = attention_mask.shape[-1] + + # First unmask prefix tokens during training + if is_training: + if token_type_ids is None: + raise ValueError("Token type ids must be provided during training") + causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( + token_type_ids[:, None, None, :].to(causal_mask.device) == 0, 0 + ) + + # Then apply padding mask (will mask pad tokens) + padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) + padding_mask = padding_mask == 0 + causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( + padding_mask, min_dtype + ) + + return causal_mask + + def get_image_features(self, pixel_values: torch.FloatTensor): + """ + Obtains image last hidden states from the vision tower and apply multimodal projection. + + Args: + pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`) + The tensors corresponding to the input images. 
+ Returns: + image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`). + """ + image_outputs = self.vision_tower(pixel_values) + selected_image_feature = image_outputs.last_hidden_state + image_features = self.multi_modal_projector(selected_image_feature) + return image_features + + @can_return_tuple + @auto_docstring + def forward( + self, + input_ids: torch.LongTensor = None, + pixel_values: torch.FloatTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None, + token_type_ids: Optional[torch.LongTensor] = None, + cache_position: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs: Unpack[FlashAttentionKwargs], + ) -> Union[tuple, PaligemmaModelOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`. + + Example: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, PaliGemmaForConditionalGeneration + + >>> model = PaliGemmaForConditionalGeneration.from_pretrained("google/paligemma2-3b-mix-224") + >>> processor = AutoProcessor.from_pretrained("google/paligemma2-3b-mix-224") + + >>> prompt = "Where is the cat standing?" 
+ >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, text=prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(**inputs,) + >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Where is the cat standing?\nsnow" + ```""" + + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError("You must specify exactly one of input_ids or inputs_embeds") + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + is_training = token_type_ids is not None and labels is not None + + # Replace image id woth PAD if the image token if OOV, to avoid index-errors + if input_ids is not None and self.config.image_token_id >= self.vocab_size: + special_image_mask = input_ids == self.config.image_token_id + llm_input_ids = input_ids.clone() + llm_input_ids[special_image_mask] = 0 + else: + llm_input_ids = input_ids + + if inputs_embeds is None: + inputs_embeds = self.get_input_embeddings()(llm_input_ids) + + if cache_position is None: + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) + + if position_ids is None: + position_ids = cache_position.unsqueeze(0) + 1 # Paligemma positions are 1-indexed + + # Merge text and images + if pixel_values is not None: + image_features = self.get_image_features(pixel_values) + + if input_ids is None: + special_image_mask = inputs_embeds == 
self.get_input_embeddings()( + torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) + ) + else: + special_image_mask = (input_ids == self.config.image_token_id).unsqueeze(-1) + special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device) + + if not is_torchdynamo_compiling() and inputs_embeds[special_image_mask].numel() != image_features.numel(): + image_tokens_in_text = (special_image_mask).sum(dim=1).sum(dim=0)[0] + raise ValueError( + f"Number of images does not match number of special image tokens in the input text. " + f"Got {image_tokens_in_text} image tokens in the text but {image_features.shape[0] * image_features.shape[1]} " + "tokens from image embeddings." + ) + image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) + inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) + + causal_mask = self._update_causal_mask( + attention_mask, token_type_ids, past_key_values, cache_position, inputs_embeds, is_training + ) + outputs = self.language_model( + attention_mask=causal_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=True, + cache_position=cache_position, + **kwargs, + ) + + return PaligemmaModelOutputWithPast( + last_hidden_state=outputs.last_hidden_state, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + image_hidden_states=image_features if pixel_values is not None else None, + ) + + +class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ... 
@auto_docstring(
    custom_intro="""
    The PaliGemma model which consists of a vision backbone and a language model, with a language modeling head on top.
    """
)
class PaliGemmaForConditionalGeneration(PaliGemmaPreTrainedModel, GenerationMixin):
    # Maps legacy checkpoint key prefixes (pre model/lm_head split) onto the current layout.
    _checkpoint_conversion_mapping = {
        "^language_model.model": "model.language_model",
        "^vision_tower": "model.vision_tower",
        "^multi_modal_projector": "model.multi_modal_projector",
        "^language_model.lm_head": "lm_head",
    }
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config: PaliGemmaConfig):
        super().__init__(config)
        self.model = PaliGemmaModel(config)
        # Projection from text hidden states to vocabulary logits (weights tied via _tied_weights_keys).
        self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
        self.post_init()

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model.set_decoder(decoder)

    def get_decoder(self):
        return self.model.get_decoder()

    def get_image_features(self, pixel_values):
        return self.model.get_image_features(pixel_values)

    # Make modules available through conditional class for BC
    @property
    def language_model(self):
        return self.model.language_model

    @property
    def vision_tower(self):
        return self.model.vision_tower

    @property
    def multi_modal_projector(self):
        return self.model.multi_modal_projector

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        pixel_values: torch.FloatTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> Union[tuple, PaliGemmaCausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`.

        Example:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, PaliGemmaForConditionalGeneration

        >>> model = PaliGemmaForConditionalGeneration.from_pretrained("google/paligemma2-3b-mix-224")
        >>> processor = AutoProcessor.from_pretrained("google/paligemma2-3b-mix-224")

        >>> prompt = "Where is the cat standing?"
        >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, text=prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(**inputs,)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Where is the cat standing?\nsnow"
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # The backbone is always asked for a dict output; @can_return_tuple handles tuple conversion.
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            labels=labels,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
            )

        return PaliGemmaCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        pixel_values=None,
        attention_mask=None,
        token_type_ids=None,
        use_cache=True,
        logits_to_keep=None,
        labels=None,
        **kwargs,
    ):
        # Overwritten -- custom `position_ids` and `pixel_values` handling
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            cache_position=cache_position,
            use_cache=use_cache,
            logits_to_keep=logits_to_keep,
            token_type_ids=token_type_ids,
            **kwargs,
        )

        # position_ids in Paligemma are 1-indexed
        if model_inputs.get("position_ids") is not None:
            model_inputs["position_ids"] += 1
        # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore
        # Otherwise we need pixel values to be passed to model. NOTE: use_cache=False needs pixel_values always
        if cache_position[0] == 0:
            model_inputs["pixel_values"] = pixel_values
        is_training = token_type_ids is not None and labels is not None
        if cache_position[0] == 0 and isinstance(past_key_values, HybridCache):
            input_tensor = inputs_embeds if inputs_embeds is not None else input_ids
            causal_mask = self.model._update_causal_mask(
                attention_mask, token_type_ids, past_key_values, cache_position, input_tensor, is_training
            )
            model_inputs["attention_mask"] = causal_mask

        return model_inputs

    @staticmethod
    # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            # Zero out (allow) every key position at or before the query's cache position.
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


__all__ = ["PaliGemmaForConditionalGeneration", "PaliGemmaPreTrainedModel", "PaliGemmaModel"]
a/policy/openpi-InternData-A1/src/openpi/models_pytorch/transformers_replace/models/siglip/check.py b/policy/openpi-InternData-A1/src/openpi/models_pytorch/transformers_replace/models/siglip/check.py new file mode 100644 index 0000000..4bb3c96 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models_pytorch/transformers_replace/models/siglip/check.py @@ -0,0 +1,4 @@ +import transformers + +def check_whether_transformers_replace_is_installed_correctly(): + return transformers.__version__ == "4.53.2" \ No newline at end of file diff --git a/policy/openpi-InternData-A1/src/openpi/models_pytorch/transformers_replace/models/siglip/modeling_siglip.py b/policy/openpi-InternData-A1/src/openpi/models_pytorch/transformers_replace/models/siglip/modeling_siglip.py new file mode 100644 index 0000000..3ea8acd --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/models_pytorch/transformers_replace/models/siglip/modeling_siglip.py @@ -0,0 +1,1237 @@ +# coding=utf-8 +# Copyright 2024 Google AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""PyTorch Siglip model.""" + +import math +import warnings +from dataclasses import dataclass +from typing import Any, Callable, Optional, Union + +import numpy as np +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from torch.nn.init import _calculate_fan_in_and_fan_out + +from ...activations import ACT2FN +from ...modeling_attn_mask_utils import _prepare_4d_attention_mask +from ...modeling_layers import GradientCheckpointingLayer +from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput +from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel +from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging, torch_int +from .configuration_siglip import SiglipConfig, SiglipTextConfig, SiglipVisionConfig + + +logger = logging.get_logger(__name__) + + +def _trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn( + "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2, + ) + + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. 
+ tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.0)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + + +def trunc_normal_tf_( + tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0 +) -> torch.Tensor: + """Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \\leq \text{mean} \\leq b`. + + NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the + bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0 + and the result is subsequently scaled and shifted by the mean and std args. 
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
    """
)
# Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Siglip
class SiglipVisionModelOutput(ModelOutput):
    r"""
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
        The image embeddings obtained by applying the projection layer to the pooler_output.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for text model's outputs that also contains a pooling of the last hidden states.
    """
)
# Copied from transformers.models.clip.modeling_clip.CLIPTextModelOutput with CLIP->Siglip
class SiglipTextModelOutput(ModelOutput):
    r"""
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
        The text embeddings obtained by applying the projection layer to the pooler_output.
    """

    text_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring
# Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->Siglip
class SiglipOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`SiglipTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
        The image embeddings obtained by applying the projection layer to the pooled output of [`SiglipVisionModel`].
    text_model_output (`BaseModelOutputWithPooling`):
        The output of the [`SiglipTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`SiglipVisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    text_model_output: Optional[BaseModelOutputWithPooling] = None
    vision_model_output: Optional[BaseModelOutputWithPooling] = None

    def to_tuple(self) -> tuple[Any]:
        # The nested sub-model outputs are themselves converted to tuples so the result is fully flat-serializable.
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


class SiglipVisionEmbeddings(nn.Module):
    # Patchify + learned absolute position embeddings for the SigLIP vision tower.
    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        # Non-overlapping patch projection: stride == kernel == patch_size, no padding.
        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            padding="valid",
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing and no class embeddings.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """

        num_patches = embeddings.shape[1]
        num_positions = self.position_embedding.weight.shape[0]

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embedding(self.position_ids)

        patch_pos_embed = self.position_embedding.weight.unsqueeze(0)

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        # The pretrained grid is square (num_positions = side**2); reshape to 2D before resampling.
        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        # Back to (1, new_height*new_width, dim).
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return patch_pos_embed

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
        _, _, height, width = pixel_values.shape
        # Cast input to the conv weight dtype so mixed-precision checkpoints work.
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, width, grid, grid]
        embeddings = patch_embeds.flatten(2).transpose(1, 2)

        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings
SiglipTextConfig): + super().__init__() + embed_dim = config.hidden_size + + self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) + self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + ) -> torch.Tensor: + seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] + max_position_embedding = self.position_embedding.weight.shape[0] + + if seq_length > max_position_embedding: + raise ValueError( + f"Sequence length must be less than max_position_embeddings (got `sequence length`: " + f"{seq_length} and max_position_embeddings: {max_position_embedding}" + ) + + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + + if inputs_embeds is None: + inputs_embeds = self.token_embedding(input_ids) + + position_embeddings = self.position_embedding(position_ids) + embeddings = inputs_embeds + position_embeddings + + return embeddings + + +def eager_attention_forward( + module: nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: Optional[torch.Tensor], + scaling: float, + dropout: float = 0.0, + **kwargs, +): + attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling + if attention_mask is not None: + attn_weights = attn_weights + attention_mask + + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) + + attn_output = torch.matmul(attn_weights, value) + attn_output = attn_output.transpose(1, 2).contiguous() + + 
class SiglipAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        # SigLIP attention is bidirectional (encoder-style), never causal.
        self.is_causal = False

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        batch_size, seq_length, embed_dim = hidden_states.shape

        queries = self.q_proj(hidden_states)
        keys = self.k_proj(hidden_states)
        values = self.v_proj(hidden_states)

        # (batch, seq, embed) -> (batch, heads, seq, head_dim)
        queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)

        # Dispatch to the configured attention backend; fall back to eager when the
        # backend cannot return attention weights.
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and output_attentions:
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            is_causal=self.is_causal,
            scaling=self.scale,
            # Dropout only applies during training.
            dropout=0.0 if not self.training else self.dropout,
        )

        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights


# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Siglip
class SiglipMLP(nn.Module):
    # Standard transformer feed-forward block: Linear -> activation -> Linear.
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class SiglipEncoderLayer(GradientCheckpointingLayer):
    # Pre-norm transformer block: LN -> self-attention -> residual, LN -> MLP -> residual.
    def __init__(self, config: Union[SiglipVisionConfig, SiglipTextConfig]):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.self_attn = SiglipAttention(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = SiglipMLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                Input to the layer of shape `(batch, seq_len, embed_dim)`.
            attention_mask (`torch.FloatTensor`):
                Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


@auto_docstring
class SiglipPreTrainedModel(PreTrainedModel):
    config_class = SiglipConfig
    base_model_prefix = "siglip"
    supports_gradient_checkpointing = True

    # NOTE(review): "SiglipEncoderLayer" is listed twice here — harmless duplication.
    _no_split_modules = [
        "SiglipTextEmbeddings",
        "SiglipEncoderLayer",
        "SiglipVisionEmbeddings",
        "SiglipEncoderLayer",
        "SiglipMultiheadAttentionPoolingHead",
    ]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        """Initialize the weights"""
        # Module-type-specific init schemes that mirror the original JAX/Flax SigLIP release.
        if isinstance(module, SiglipVisionEmbeddings):
            width = (
                self.config.vision_config.hidden_size
                if isinstance(self.config, SiglipConfig)
                else self.config.hidden_size
            )
            nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
        elif isinstance(module, nn.Embedding):
            default_flax_embed_init(module.weight)
        elif isinstance(module, SiglipAttention):
            nn.init.xavier_uniform_(module.q_proj.weight)
            nn.init.xavier_uniform_(module.k_proj.weight)
            nn.init.xavier_uniform_(module.v_proj.weight)
            nn.init.xavier_uniform_(module.out_proj.weight)
            nn.init.zeros_(module.q_proj.bias)
            nn.init.zeros_(module.k_proj.bias)
            nn.init.zeros_(module.v_proj.bias)
            nn.init.zeros_(module.out_proj.bias)
        elif isinstance(module, SiglipMLP):
            nn.init.xavier_uniform_(module.fc1.weight)
            nn.init.xavier_uniform_(module.fc2.weight)
            nn.init.normal_(module.fc1.bias, std=1e-6)
            nn.init.normal_(module.fc2.bias, std=1e-6)
        elif isinstance(module, SiglipMultiheadAttentionPoolingHead):
            nn.init.xavier_uniform_(module.probe.data)
            nn.init.xavier_uniform_(module.attention.in_proj_weight.data)
            nn.init.zeros_(module.attention.in_proj_bias.data)
        elif isinstance(module, SiglipModel):
            # log(1.0) == 0: logit scale starts neutral.
            logit_scale_init = torch.log(torch.tensor(1.0))
            module.logit_scale.data.fill_(logit_scale_init)
            module.logit_bias.data.zero_()
        elif isinstance(module, SiglipForImageClassification):
            nn.init.normal_(
                module.classifier.weight,
                std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            lecun_normal_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoder with AltCLIP->Siglip
class SiglipEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`SiglipEncoderLayer`].

    Args:
        config: SiglipConfig
    """

    def __init__(self, config: SiglipConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    # Ignore copy
    @can_return_tuple
    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutput:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        # Accumulators are only allocated when the corresponding output was requested.
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                # Record the input to each layer (i.e. states *before* the layer runs).
                encoder_states = encoder_states + (hidden_states,)

            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask,
                output_attentions=output_attentions,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            # Append the final layer's output so hidden_states has num_layers + 1 entries.
            encoder_states = encoder_states + (hidden_states,)

        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=encoder_states,
            attentions=all_attentions,
        )
class SiglipTextTransformer(nn.Module):
    # Text tower: embeddings -> encoder -> final LayerNorm -> linear pooling head.
    def __init__(self, config: SiglipTextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = SiglipTextEmbeddings(config)
        self.encoder = SiglipEncoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

        # Projects the pooled (last-token) state to `projection_size`.
        self.head = nn.Linear(embed_dim, config.projection_size)
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutputWithPooling:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        # note: SigLIP's text model does not use a causal mask, unlike the original CLIP model.
        # expand attention_mask
        if attention_mask is not None and not self._use_flash_attention_2:
            # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        # Assuming "sticky" EOS tokenization, last token is always EOS.
        pooled_output = last_hidden_state[:, -1, :]
        pooled_output = self.head(pooled_output)

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The text model from SigLIP without any head or projection on top.
    """
)
class SiglipTextModel(SiglipPreTrainedModel):
    config_class = SiglipTextConfig

    def __init__(self, config: SiglipTextConfig):
        super().__init__(config)
        self.text_model = SiglipTextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> from transformers import AutoTokenizer, SiglipTextModel

        >>> model = SiglipTextModel.from_pretrained("google/siglip-base-patch16-224")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224")

        >>> # important: make sure to set padding="max_length" as that's how the model was trained
        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding="max_length", return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""

        # Thin delegation to the inner transformer.
        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )


class SiglipVisionTransformer(nn.Module):
    # Vision tower: patch embeddings -> encoder -> post LayerNorm -> optional attention pooling head.
    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = SiglipVisionEmbeddings(config)
        self.encoder = SiglipEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        # The pooling head is optional (e.g. PaliGemma uses the raw patch sequence).
        self.use_head = True if not hasattr(config, "vision_use_head") else config.vision_use_head
        if self.use_head:
            self.head = SiglipMultiheadAttentionPoolingHead(config)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
    ) -> BaseModelOutputWithPooling:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        # Convert to bfloat16 if the encoder uses bfloat16
        if len(self.encoder.layers) > 0 and self.encoder.layers[0].self_attn.q_proj.weight.dtype == torch.bfloat16:
            hidden_states = hidden_states.to(torch.bfloat16)

        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.post_layernorm(last_hidden_state)

        pooler_output = self.head(last_hidden_state) if self.use_head else None

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooler_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class SiglipMultiheadAttentionPoolingHead(nn.Module):
    """Multihead Attention Pooling."""

    # NOTE(review): this definition is truncated in this chunk — it continues beyond the visible region.
    def __init__(self, config: SiglipVisionConfig):
        super().__init__()

        self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size))
        self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = 
SiglipMLP(config) + + def forward(self, hidden_state): + batch_size = hidden_state.shape[0] + probe = self.probe.repeat(batch_size, 1, 1) + + hidden_state = self.attention(probe, hidden_state, hidden_state)[0] + + residual = hidden_state + hidden_state = self.layernorm(hidden_state) + hidden_state = residual + self.mlp(hidden_state) + + return hidden_state[:, 0] + + +@auto_docstring( + custom_intro=""" + The vision model from SigLIP without any head or projection on top. + """ +) +class SiglipVisionModel(SiglipPreTrainedModel): + config_class = SiglipVisionConfig + main_input_name = "pixel_values" + + def __init__(self, config: SiglipVisionConfig): + super().__init__(config) + + self.vision_model = SiglipVisionTransformer(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.vision_model.embeddings.patch_embedding + + @can_return_tuple + @auto_docstring + def forward( + self, + pixel_values, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + interpolate_pos_encoding: bool = False, + ) -> BaseModelOutputWithPooling: + r""" + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, SiglipVisionModel + + >>> model = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224") + >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled features + ```""" + + return self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + 
interpolate_pos_encoding=interpolate_pos_encoding, + ) + + +@auto_docstring +class SiglipModel(SiglipPreTrainedModel): + config_class = SiglipConfig + + def __init__(self, config: SiglipConfig): + super().__init__(config) + + if not isinstance(config.text_config, SiglipTextConfig): + raise TypeError( + "config.text_config is expected to be of type SiglipTextConfig but is of type" + f" {type(config.text_config)}." + ) + + if not isinstance(config.vision_config, SiglipVisionConfig): + raise TypeError( + "config.vision_config is expected to be of type SiglipVisionConfig but is of type" + f" {type(config.vision_config)}." + ) + + text_config = config.text_config + vision_config = config.vision_config + + # First, initialize the text and vision models with proper attention implementation + text_model = SiglipTextModel._from_config(text_config) + vision_model = SiglipVisionModel._from_config(vision_config) + + # Second, get the text and vision submodules (for backward compatibility) + self.text_model = text_model.text_model + self.vision_model = vision_model.vision_model + + self.logit_scale = nn.Parameter(torch.randn(1)) + self.logit_bias = nn.Parameter(torch.randn(1)) + + # Initialize weights and apply final processing + self.post_init() + + @auto_docstring + def get_text_features( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by + applying the projection layer to the pooled output of [`SiglipTextModel`]. 
+ + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModel + >>> import torch + + >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224") + >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224") + + >>> # important: make sure to set padding="max_length" as that's how the model was trained + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding="max_length", return_tensors="pt") + >>> with torch.no_grad(): + ... text_features = model.get_text_features(**inputs) + ```""" + # Use SigLIP model's config for some fields (if specified) instead of those of vision & text components. + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + + text_outputs: BaseModelOutputWithPooling = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + pooled_output = text_outputs.pooler_output + + return pooled_output + + @auto_docstring + def get_image_features( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + interpolate_pos_encoding: bool = False, + ) -> torch.FloatTensor: + r""" + Returns: + image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by + applying the projection layer to the pooled output of [`SiglipVisionModel`]. 
+ + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, AutoModel + >>> import torch + + >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224") + >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> with torch.no_grad(): + ... image_features = model.get_image_features(**inputs) + ```""" + # Use SiglipModel's config for some fields (if specified) instead of those of vision & text components. + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + + vision_outputs: BaseModelOutputWithPooling = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + interpolate_pos_encoding=interpolate_pos_encoding, + ) + + pooled_output = vision_outputs.pooler_output + + return pooled_output + + @can_return_tuple + @auto_docstring + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + return_loss: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + interpolate_pos_encoding: bool = False, + ) -> SiglipOutput: + r""" + return_loss (`bool`, *optional*): + Whether or not to return the contrastive loss. 
+ + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, AutoModel + >>> import torch + + >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224") + >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> texts = ["a photo of 2 cats", "a photo of 2 dogs"] + >>> # important: we pass `padding=max_length` since the model was trained with this + >>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt") + + >>> with torch.no_grad(): + ... outputs = model(**inputs) + + >>> logits_per_image = outputs.logits_per_image + >>> probs = torch.sigmoid(logits_per_image) # these are the probabilities + >>> print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'") + 31.9% that image 0 is 'a photo of 2 cats' + ```""" + # Use SigLIP model's config for some fields (if specified) instead of those of vision & text components. 
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + + vision_outputs: BaseModelOutputWithPooling = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + interpolate_pos_encoding=interpolate_pos_encoding, + ) + + text_outputs: BaseModelOutputWithPooling = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + image_embeds = vision_outputs.pooler_output + text_embeds = text_outputs.pooler_output + + # normalized features + image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) + text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) + + # cosine similarity as logits + logits_per_text = torch.matmul(text_embeds, image_embeds.t().to(text_embeds.device)) + + logit_scale, logit_bias = self.logit_scale.to(text_embeds.device), self.logit_bias.to(text_embeds.device) + logits_per_text = logits_per_text * logit_scale.exp() + logit_bias + + logits_per_image = logits_per_text.t() + + loss = None + if return_loss: + # Adapted from https://github.com/google-research/big_vision/blob/01edb81a4716f93a48be43b3a4af14e29cdb3a7f/big_vision/trainers/proj/image_text/siglip.py#L287 + eye = torch.eye(logits_per_text.size(0), device=logits_per_text.device) + m1_diag1 = -torch.ones_like(logits_per_text) + 2 * eye + loglik = torch.nn.functional.logsigmoid(m1_diag1 * logits_per_text) + nll = -torch.sum(loglik, dim=-1) + loss = nll.mean() + + return SiglipOutput( + loss=loss, + logits_per_image=logits_per_image, + logits_per_text=logits_per_text, + text_embeds=text_embeds, + image_embeds=image_embeds, + text_model_output=text_outputs, + 
vision_model_output=vision_outputs, + ) + + +@auto_docstring( + custom_intro=""" + SigLIP vision encoder with an image classification head on top (a linear layer on top of the pooled final hidden states of + the patch tokens) e.g. for ImageNet. + """ +) +class SiglipForImageClassification(SiglipPreTrainedModel): + main_input_name = "pixel_values" + + def __init__(self, config: SiglipConfig) -> None: + super().__init__(config) + + self.num_labels = config.num_labels + + # Create the vision model with proper attention + # and take only vision_model submodule (for backward compatibility) + vision_model = SiglipVisionModel._from_config(config.vision_config) + self.vision_model = vision_model.vision_model + + # Classifier head + self.classifier = ( + nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() + ) + + # Initialize weights and apply final processing + self.post_init() + + @can_return_tuple + @auto_docstring + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + interpolate_pos_encoding: bool = False, + ) -> ImageClassifierOutput: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ + Examples: + + ```python + >>> from transformers import AutoImageProcessor, SiglipForImageClassification + >>> import torch + >>> from PIL import Image + >>> import requests + + >>> torch.manual_seed(3) # doctest: +IGNORE_RESULT + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> # note: we are loading a `SiglipModel` from the hub here, + >>> # so the head will be randomly initialized, hence the predictions will be random if seed is not set above. + >>> image_processor = AutoImageProcessor.from_pretrained("google/siglip-base-patch16-224") + >>> model = SiglipForImageClassification.from_pretrained("google/siglip-base-patch16-224") + + >>> inputs = image_processor(images=image, return_tensors="pt") + >>> outputs = model(**inputs) + >>> logits = outputs.logits + >>> # model predicts one of the two classes + >>> predicted_class_idx = logits.argmax(-1).item() + >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) + Predicted class: LABEL_1 + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + + outputs: BaseModelOutputWithPooling = self.vision_model( + pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + interpolate_pos_encoding=interpolate_pos_encoding, + ) + + sequence_output = outputs.last_hidden_state + + # average pool the patch tokens + sequence_output = torch.mean(sequence_output, dim=1) + # apply classifier + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 
and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + return ImageClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +__all__ = [ + "SiglipModel", + "SiglipPreTrainedModel", + "SiglipTextModel", + "SiglipVisionModel", + "SiglipForImageClassification", +] \ No newline at end of file diff --git a/policy/openpi-InternData-A1/src/openpi/policies/aloha_policy.py b/policy/openpi-InternData-A1/src/openpi/policies/aloha_policy.py new file mode 100644 index 0000000..f16be33 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/policies/aloha_policy.py @@ -0,0 +1,202 @@ +import dataclasses +from typing import ClassVar + +import einops +import numpy as np + +from openpi import transforms + + +def make_aloha_example() -> dict: + """Creates a random input example for the Aloha policy.""" + return { + "state": np.ones((14,)), + "images": { + "cam_high": np.random.randint(256, size=(3, 224, 224), dtype=np.uint8), + "cam_low": np.random.randint(256, size=(3, 224, 224), dtype=np.uint8), + "cam_left_wrist": np.random.randint(256, size=(3, 224, 224), dtype=np.uint8), + "cam_right_wrist": np.random.randint(256, size=(3, 224, 224), dtype=np.uint8), + }, + "prompt": "do something", + } + + +@dataclasses.dataclass(frozen=True) +class AlohaInputs(transforms.DataTransformFn): + """Inputs 
for the Aloha policy. + + Expected inputs: + - images: dict[name, img] where img is [channel, height, width]. name must be in EXPECTED_CAMERAS. + - state: [14] + - actions: [action_horizon, 14] + """ + + # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. + adapt_to_pi: bool = True + + # The expected cameras names. All input cameras must be in this set. Missing cameras will be + # replaced with black images and the corresponding `image_mask` will be set to False. + EXPECTED_CAMERAS: ClassVar[tuple[str, ...]] = ("cam_high", "cam_low", "cam_left_wrist", "cam_right_wrist") + + def __call__(self, data: dict) -> dict: + data = _decode_aloha(data, adapt_to_pi=self.adapt_to_pi) + + in_images = data["images"] + if set(in_images) - set(self.EXPECTED_CAMERAS): + raise ValueError(f"Expected images to contain {self.EXPECTED_CAMERAS}, got {tuple(in_images)}") + + # Assume that base image always exists. + base_image = in_images["cam_high"] + + images = { + "base_0_rgb": base_image, + } + image_masks = { + "base_0_rgb": np.True_, + } + + # Add the extra images. + extra_image_names = { + "left_wrist_0_rgb": "cam_left_wrist", + "right_wrist_0_rgb": "cam_right_wrist", + } + for dest, source in extra_image_names.items(): + if source in in_images: + images[dest] = in_images[source] + image_masks[dest] = np.True_ + else: + images[dest] = np.zeros_like(base_image) + image_masks[dest] = np.False_ + + inputs = { + "image": images, + "image_mask": image_masks, + "state": data["state"], + } + + # Actions are only available during training. 
+ if "actions" in data: + actions = np.asarray(data["actions"]) + actions = _encode_actions_inv(actions, adapt_to_pi=self.adapt_to_pi) + inputs["actions"] = actions + + if "prompt" in data: + inputs["prompt"] = data["prompt"] + + return inputs + + +@dataclasses.dataclass(frozen=True) +class AlohaOutputs(transforms.DataTransformFn): + """Outputs for the Aloha policy.""" + + # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. + adapt_to_pi: bool = True + + def __call__(self, data: dict) -> dict: + # Only return the first 14 dims. + actions = np.asarray(data["actions"][:, :14]) + return {"actions": _encode_actions(actions, adapt_to_pi=self.adapt_to_pi)} + + +def _joint_flip_mask() -> np.ndarray: + """Used to convert between aloha and pi joint angles.""" + return np.array([1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1]) + + +def _normalize(x, min_val, max_val): + return (x - min_val) / (max_val - min_val) + + +def _unnormalize(x, min_val, max_val): + return x * (max_val - min_val) + min_val + + +def _gripper_to_angular(value): + # Aloha transforms the gripper positions into a linear space. The following code + # reverses this transformation to be consistent with pi0 which is pretrained in + # angular space. + # + # These values are coming from the Aloha code: + # PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED + value = _unnormalize(value, min_val=0.01844, max_val=0.05800) + + # This is the inverse of the angular to linear transformation inside the Interbotix code. + def linear_to_radian(linear_position, arm_length, horn_radius): + value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position) + return np.arcsin(np.clip(value, -1.0, 1.0)) + + # The constants are taken from the Interbotix code. 
+ value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022) + + # pi0 gripper data is normalized (0, 1) between encoder counts (2405, 3110). + # There are 4096 total encoder counts and aloha uses a zero of 2048. + # Converting this to radians means that the normalized inputs are between (0.5476, 1.6296) + return _normalize(value, min_val=0.5476, max_val=1.6296) + + +def _gripper_from_angular(value): + # Convert from the gripper position used by pi0 to the gripper position that is used by Aloha. + # Note that the units are still angular but the range is different. + + # We do not scale the output since the trossen model predictions are already in radians. + # See the comment in _gripper_to_angular for a derivation of the constant + value = value + 0.5476 + + # These values are coming from the Aloha code: + # PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE + return _normalize(value, min_val=-0.6213, max_val=1.4910) + + +def _gripper_from_angular_inv(value): + # Directly inverts the gripper_from_angular function. + value = _unnormalize(value, min_val=-0.6213, max_val=1.4910) + return value - 0.5476 + + +def _decode_aloha(data: dict, *, adapt_to_pi: bool = False) -> dict: + # state is [left_arm_joint_angles, left_arm_gripper, right_arm_joint_angles, right_arm_gripper] + # dim sizes: [6, 1, 6, 1] + state = np.asarray(data["state"]) + state = _decode_state(state, adapt_to_pi=adapt_to_pi) + + def convert_image(img): + img = np.asarray(img) + # Convert to uint8 if using float images. + if np.issubdtype(img.dtype, np.floating): + img = (255 * img).astype(np.uint8) + # Convert from [channel, height, width] to [height, width, channel]. 
+ return einops.rearrange(img, "c h w -> h w c") + + images = data["images"] + images_dict = {name: convert_image(img) for name, img in images.items()} + + data["images"] = images_dict + data["state"] = state + return data + + +def _decode_state(state: np.ndarray, *, adapt_to_pi: bool = False) -> np.ndarray: + if adapt_to_pi: + # Flip the joints. + state = _joint_flip_mask() * state + # Reverse the gripper transformation that is being applied by the Aloha runtime. + state[[6, 13]] = _gripper_to_angular(state[[6, 13]]) + return state + + +def _encode_actions(actions: np.ndarray, *, adapt_to_pi: bool = False) -> np.ndarray: + if adapt_to_pi: + # Flip the joints. + actions = _joint_flip_mask() * actions + actions[:, [6, 13]] = _gripper_from_angular(actions[:, [6, 13]]) + return actions + + +def _encode_actions_inv(actions: np.ndarray, *, adapt_to_pi: bool = False) -> np.ndarray: + if adapt_to_pi: + actions = _joint_flip_mask() * actions + actions[:, [6, 13]] = _gripper_from_angular_inv(actions[:, [6, 13]]) + return actions diff --git a/policy/openpi-InternData-A1/src/openpi/policies/droid_policy.py b/policy/openpi-InternData-A1/src/openpi/policies/droid_policy.py new file mode 100644 index 0000000..666b7f7 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/policies/droid_policy.py @@ -0,0 +1,81 @@ +import dataclasses + +import einops +import numpy as np + +from openpi import transforms +from openpi.models import model as _model + + +def make_droid_example() -> dict: + """Creates a random input example for the Droid policy.""" + return { + "observation/exterior_image_1_left": np.random.randint(256, size=(224, 224, 3), dtype=np.uint8), + "observation/wrist_image_left": np.random.randint(256, size=(224, 224, 3), dtype=np.uint8), + "observation/joint_position": np.random.rand(7), + "observation/gripper_position": np.random.rand(1), + "prompt": "do something", + } + + +def _parse_image(image) -> np.ndarray: + image = np.asarray(image) + if 
np.issubdtype(image.dtype, np.floating): + image = (255 * image).astype(np.uint8) + if image.shape[0] == 3: + image = einops.rearrange(image, "c h w -> h w c") + return image + + +@dataclasses.dataclass(frozen=True) +class DroidInputs(transforms.DataTransformFn): + # Determines which model will be used. + model_type: _model.ModelType + + def __call__(self, data: dict) -> dict: + gripper_pos = np.asarray(data["observation/gripper_position"]) + if gripper_pos.ndim == 0: + # Ensure gripper position is a 1D array, not a scalar, so we can concatenate with joint positions + gripper_pos = gripper_pos[np.newaxis] + state = np.concatenate([data["observation/joint_position"], gripper_pos]) + + # Possibly need to parse images to uint8 (H,W,C) since LeRobot automatically + # stores as float32 (C,H,W), gets skipped for policy inference + base_image = _parse_image(data["observation/exterior_image_1_left"]) + wrist_image = _parse_image(data["observation/wrist_image_left"]) + + match self.model_type: + case _model.ModelType.PI0 | _model.ModelType.PI05: + names = ("base_0_rgb", "left_wrist_0_rgb", "right_wrist_0_rgb") + images = (base_image, wrist_image, np.zeros_like(base_image)) + image_masks = (np.True_, np.True_, np.False_) + case _model.ModelType.PI0_FAST: + names = ("base_0_rgb", "base_1_rgb", "wrist_0_rgb") + # We don't mask out padding images for FAST models. 
+ images = (base_image, np.zeros_like(base_image), wrist_image) + image_masks = (np.True_, np.True_, np.True_) + case _: + raise ValueError(f"Unsupported model type: {self.model_type}") + + inputs = { + "state": state, + "image": dict(zip(names, images, strict=True)), + "image_mask": dict(zip(names, image_masks, strict=True)), + } + + if "actions" in data: + inputs["actions"] = np.asarray(data["actions"]) + + if "prompt" in data: + if isinstance(data["prompt"], bytes): + data["prompt"] = data["prompt"].decode("utf-8") + inputs["prompt"] = data["prompt"] + + return inputs + + +@dataclasses.dataclass(frozen=True) +class DroidOutputs(transforms.DataTransformFn): + def __call__(self, data: dict) -> dict: + # Only return the first 8 dims. + return {"actions": np.asarray(data["actions"][:, :8])} diff --git a/policy/openpi-InternData-A1/src/openpi/policies/libero_policy.py b/policy/openpi-InternData-A1/src/openpi/policies/libero_policy.py new file mode 100644 index 0000000..10611f6 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/policies/libero_policy.py @@ -0,0 +1,100 @@ +import dataclasses + +import einops +import numpy as np + +from openpi import transforms +from openpi.models import model as _model + + +def make_libero_example() -> dict: + """Creates a random input example for the Libero policy.""" + return { + "observation/state": np.random.rand(8), + "observation/image": np.random.randint(256, size=(224, 224, 3), dtype=np.uint8), + "observation/wrist_image": np.random.randint(256, size=(224, 224, 3), dtype=np.uint8), + "prompt": "do something", + } + + +def _parse_image(image) -> np.ndarray: + image = np.asarray(image) + if np.issubdtype(image.dtype, np.floating): + image = (255 * image).astype(np.uint8) + if image.shape[0] == 3: + image = einops.rearrange(image, "c h w -> h w c") + return image + + +@dataclasses.dataclass(frozen=True) +class LiberoInputs(transforms.DataTransformFn): + """ + This class is used to convert inputs to the model to the 
expected format. It is used for both training and inference. + + For your own dataset, you can copy this class and modify the keys based on the comments below to pipe + the correct elements of your dataset into the model. + """ + + # Determines which model will be used. + # Do not change this for your own dataset. + model_type: _model.ModelType + + def __call__(self, data: dict) -> dict: + # Possibly need to parse images to uint8 (H,W,C) since LeRobot automatically + # stores as float32 (C,H,W), gets skipped for policy inference. + # Keep this for your own dataset, but if your dataset stores the images + # in a different key than "observation/image" or "observation/wrist_image", + # you should change it below. + # Pi0 models support three image inputs at the moment: one third-person view, + # and two wrist views (left and right). If your dataset does not have a particular type + # of image, e.g. wrist images, you can comment it out here and replace it with zeros like we do for the + # right wrist image below. + base_image = _parse_image(data["observation/image"]) + wrist_image = _parse_image(data["observation/wrist_image"]) + + # Create inputs dict. Do not change the keys in the dict below. + inputs = { + "state": data["observation/state"], + "image": { + "base_0_rgb": base_image, + "left_wrist_0_rgb": wrist_image, + # Pad any non-existent images with zero-arrays of the appropriate shape. + "right_wrist_0_rgb": np.zeros_like(base_image), + }, + "image_mask": { + "base_0_rgb": np.True_, + "left_wrist_0_rgb": np.True_, + # We only mask padding images for pi0 model, not pi0-FAST. Do not change this for your own dataset. + "right_wrist_0_rgb": np.True_ if self.model_type == _model.ModelType.PI0_FAST else np.False_, + }, + } + + # Pad actions to the model action dimension. Keep this for your own dataset. + # Actions are only available during training. 
+ if "actions" in data: + inputs["actions"] = data["actions"] + + # Pass the prompt (aka language instruction) to the model. + # Keep this for your own dataset (but modify the key if the instruction is not + # stored in "prompt"; the output dict always needs to have the key "prompt"). + if "prompt" in data: + inputs["prompt"] = data["prompt"] + + return inputs + + +@dataclasses.dataclass(frozen=True) +class LiberoOutputs(transforms.DataTransformFn): + """ + This class is used to convert outputs from the model back the the dataset specific format. It is + used for inference only. + + For your own dataset, you can copy this class and modify the action dimension based on the comments below. + """ + + def __call__(self, data: dict) -> dict: + # Only return the first N actions -- since we padded actions above to fit the model action + # dimension, we need to now parse out the correct number of actions in the return dict. + # For Libero, we only return the first 7 actions (since the rest is padding). + # For your own dataset, replace `7` with the action dimension of your dataset. 
+ return {"actions": np.asarray(data["actions"][:, :7])} diff --git a/policy/openpi-InternData-A1/src/openpi/policies/policy.py b/policy/openpi-InternData-A1/src/openpi/policies/policy.py new file mode 100644 index 0000000..b9b708b --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/policies/policy.py @@ -0,0 +1,135 @@ +from collections.abc import Sequence +import logging +import pathlib +import time +from typing import Any, TypeAlias + +import flax +import flax.traverse_util +import jax +import jax.numpy as jnp +import numpy as np +from openpi_client import base_policy as _base_policy +import torch +from typing_extensions import override + +from openpi import transforms as _transforms +from openpi.models import model as _model +from openpi.shared import array_typing as at +from openpi.shared import nnx_utils + +BasePolicy: TypeAlias = _base_policy.BasePolicy + + +class Policy(BasePolicy): + def __init__( + self, + model: _model.BaseModel, + *, + rng: at.KeyArrayLike | None = None, + transforms: Sequence[_transforms.DataTransformFn] = (), + output_transforms: Sequence[_transforms.DataTransformFn] = (), + sample_kwargs: dict[str, Any] | None = None, + metadata: dict[str, Any] | None = None, + pytorch_device: str = "cpu", + is_pytorch: bool = False, + ): + """Initialize the Policy. + + Args: + model: The model to use for action sampling. + rng: Random number generator key for JAX models. Ignored for PyTorch models. + transforms: Input data transformations to apply before inference. + output_transforms: Output data transformations to apply after inference. + sample_kwargs: Additional keyword arguments to pass to model.sample_actions. + metadata: Additional metadata to store with the policy. + pytorch_device: Device to use for PyTorch models (e.g., "cpu", "cuda:0"). + Only relevant when is_pytorch=True. + is_pytorch: Whether the model is a PyTorch model. If False, assumes JAX model. 
+ """ + self._model = model + self._input_transform = _transforms.compose(transforms) + self._output_transform = _transforms.compose(output_transforms) + self._sample_kwargs = sample_kwargs or {} + self._metadata = metadata or {} + self._is_pytorch_model = is_pytorch + self._pytorch_device = pytorch_device + + if self._is_pytorch_model: + self._model = self._model.to(pytorch_device) + self._model.eval() + self._sample_actions = model.sample_actions + else: + # JAX model setup + self._sample_actions = nnx_utils.module_jit(model.sample_actions) + self._rng = rng or jax.random.key(0) + + @override + def infer(self, obs: dict, *, noise: np.ndarray | None = None) -> dict: # type: ignore[misc] + # Make a copy since transformations may modify the inputs in place. + inputs = jax.tree.map(lambda x: x, obs) + inputs = self._input_transform(inputs) + if not self._is_pytorch_model: + # Make a batch and convert to jax.Array. + inputs = jax.tree.map(lambda x: jnp.asarray(x)[np.newaxis, ...], inputs) + self._rng, sample_rng_or_pytorch_device = jax.random.split(self._rng) + else: + # Convert inputs to PyTorch tensors and move to correct device + inputs = jax.tree.map(lambda x: torch.from_numpy(np.array(x)).to(self._pytorch_device)[None, ...], inputs) + sample_rng_or_pytorch_device = self._pytorch_device + + # Prepare kwargs for sample_actions + sample_kwargs = dict(self._sample_kwargs) + if noise is not None: + noise = torch.from_numpy(noise).to(self._pytorch_device) if self._is_pytorch_model else jnp.asarray(noise) + + if noise.ndim == 2: # If noise is (action_horizon, action_dim), add batch dimension + noise = noise[None, ...] 
# Make it (1, action_horizon, action_dim) + sample_kwargs["noise"] = noise + + observation = _model.Observation.from_dict(inputs) + start_time = time.monotonic() + outputs = { + "state": inputs["state"], + "actions": self._sample_actions(sample_rng_or_pytorch_device, observation, **sample_kwargs), + } + model_time = time.monotonic() - start_time + if self._is_pytorch_model: + outputs = jax.tree.map(lambda x: np.asarray(x[0, ...].detach().cpu()), outputs) + else: + outputs = jax.tree.map(lambda x: np.asarray(x[0, ...]), outputs) + + outputs = self._output_transform(outputs) + outputs["policy_timing"] = { + "infer_ms": model_time * 1000, + } + return outputs + + @property + def metadata(self) -> dict[str, Any]: + return self._metadata + + +class PolicyRecorder(_base_policy.BasePolicy): + """Records the policy's behavior to disk.""" + + def __init__(self, policy: _base_policy.BasePolicy, record_dir: str): + self._policy = policy + + logging.info(f"Dumping policy records to: {record_dir}") + self._record_dir = pathlib.Path(record_dir) + self._record_dir.mkdir(parents=True, exist_ok=True) + self._record_step = 0 + + @override + def infer(self, obs: dict) -> dict: # type: ignore[misc] + results = self._policy.infer(obs) + + data = {"inputs": obs, "outputs": results} + data = flax.traverse_util.flatten_dict(data, sep="/") + + output_path = self._record_dir / f"step_{self._record_step}" + self._record_step += 1 + + np.save(output_path, np.asarray(data)) + return results diff --git a/policy/openpi-InternData-A1/src/openpi/policies/policy_config.py b/policy/openpi-InternData-A1/src/openpi/policies/policy_config.py new file mode 100644 index 0000000..6570df0 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/policies/policy_config.py @@ -0,0 +1,94 @@ +import logging +import os +import pathlib +from typing import Any + +import jax.numpy as jnp + +import openpi.models.model as _model +import openpi.policies.policy as _policy +import openpi.shared.download as download 
+from openpi.training import checkpoints as _checkpoints +from openpi.training import config as _config +import openpi.transforms as transforms + + +def create_trained_policy( + train_config: _config.TrainConfig, + checkpoint_dir: pathlib.Path | str, + *, + repack_transforms: transforms.Group | None = None, + sample_kwargs: dict[str, Any] | None = None, + default_prompt: str | None = None, + norm_stats: dict[str, transforms.NormStats] | None = None, + pytorch_device: str | None = None, +) -> _policy.Policy: + """Create a policy from a trained checkpoint. + + Args: + train_config: The training config to use to create the model. + checkpoint_dir: The directory to load the model from. + repack_transforms: Optional transforms that will be applied before any other transforms. + sample_kwargs: The kwargs to pass to the `sample_actions` method. If not provided, the default + kwargs will be used. + default_prompt: The default prompt to use for the policy. Will inject the prompt into the input + data if it doesn't already exist. + norm_stats: The norm stats to use for the policy. If not provided, the norm stats will be loaded + from the checkpoint directory. + pytorch_device: Device to use for PyTorch models (e.g., "cpu", "cuda", "cuda:0"). + If None and is_pytorch=True, will use "cuda" if available, otherwise "cpu". + + Note: + The function automatically detects whether the model is PyTorch-based by checking for the + presence of "model.safensors" in the checkpoint directory. 
+ """ + repack_transforms = repack_transforms or transforms.Group() + checkpoint_dir = download.maybe_download(str(checkpoint_dir)) + + # Check if this is a PyTorch model by looking for model.safetensors + weight_path = os.path.join(checkpoint_dir, "model.safetensors") + is_pytorch = os.path.exists(weight_path) + + logging.info("Loading model...") + if is_pytorch: + model = train_config.model.load_pytorch(train_config, weight_path) + model.paligemma_with_expert.to_bfloat16_for_selected_params("bfloat16") + else: + model = train_config.model.load(_model.restore_params(checkpoint_dir / "params", dtype=jnp.bfloat16)) + data_config = train_config.data.create(train_config.assets_dirs, train_config.model) + if norm_stats is None: + # We are loading the norm stats from the checkpoint instead of the config assets dir to make sure + # that the policy is using the same normalization stats as the original training process. + if data_config.asset_id is None: + raise ValueError("Asset id is required to load norm stats.") + norm_stats = _checkpoints.load_norm_stats(checkpoint_dir / "assets", data_config.asset_id) + + # Determine the device to use for PyTorch models + if is_pytorch and pytorch_device is None: + try: + import torch + + pytorch_device = "cuda" if torch.cuda.is_available() else "cpu" + except ImportError: + pytorch_device = "cpu" + + return _policy.Policy( + model, + transforms=[ + *repack_transforms.inputs, + transforms.InjectDefaultPrompt(default_prompt), + *data_config.data_transforms.inputs, + transforms.Normalize(norm_stats, use_quantiles=data_config.use_quantile_norm), + *data_config.model_transforms.inputs, + ], + output_transforms=[ + *data_config.model_transforms.outputs, + transforms.Unnormalize(norm_stats, use_quantiles=data_config.use_quantile_norm), + *data_config.data_transforms.outputs, + *repack_transforms.outputs, + ], + sample_kwargs=sample_kwargs, + metadata=train_config.policy_metadata, + is_pytorch=is_pytorch, + pytorch_device=pytorch_device if 
is_pytorch else None, + ) diff --git a/policy/openpi-InternData-A1/src/openpi/policies/policy_test.py b/policy/openpi-InternData-A1/src/openpi/policies/policy_test.py new file mode 100644 index 0000000..5808e52 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/policies/policy_test.py @@ -0,0 +1,34 @@ +from openpi_client import action_chunk_broker +import pytest + +from openpi.policies import aloha_policy +from openpi.policies import policy_config as _policy_config +from openpi.training import config as _config + + +@pytest.mark.manual +def test_infer(): + config = _config.get_config("pi0_aloha_sim") + policy = _policy_config.create_trained_policy(config, "gs://openpi-assets/checkpoints/pi0_aloha_sim") + + example = aloha_policy.make_aloha_example() + result = policy.infer(example) + + assert result["actions"].shape == (config.model.action_horizon, 14) + + +@pytest.mark.manual +def test_broker(): + config = _config.get_config("pi0_aloha_sim") + policy = _policy_config.create_trained_policy(config, "gs://openpi-assets/checkpoints/pi0_aloha_sim") + + broker = action_chunk_broker.ActionChunkBroker( + policy, + # Only execute the first half of the chunk. + action_horizon=config.model.action_horizon // 2, + ) + + example = aloha_policy.make_aloha_example() + for _ in range(config.model.action_horizon): + outputs = broker.infer(example) + assert outputs["actions"].shape == (14,) diff --git a/policy/openpi-InternData-A1/src/openpi/policies/real_a2d_policy.py b/policy/openpi-InternData-A1/src/openpi/policies/real_a2d_policy.py new file mode 100644 index 0000000..e7bd186 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/policies/real_a2d_policy.py @@ -0,0 +1,221 @@ +import dataclasses +from typing import ClassVar + +import einops +import numpy as np + +from openpi import transforms +from pdb import set_trace + + +@dataclasses.dataclass(frozen=True) +class Reala2dInputs(transforms.DataTransformFn): + """Inputs for the A2D policy. 
+ """ + + adapt_to_pi: bool = True + + # The expected cameras names. All input cameras must be in this set. Missing cameras will be + # replaced with black images and the corresponding `image_mask` will be set to False. + EXPECTED_CAMERAS: ClassVar[tuple[str, ...]] = ("cam_high", "cam_low", "cam_left_wrist", "cam_right_wrist") + + def __call__(self, data: dict) -> dict: + data = _decode_a2d(data, adapt_to_pi=self.adapt_to_pi) + if "images" in data: + in_images = data["images"] + if set(in_images) - set(self.EXPECTED_CAMERAS): + raise ValueError(f"Expected images to contain {self.EXPECTED_CAMERAS}, got {tuple(in_images)}") + + # Assume that base image always exists. + base_image = in_images["cam_high"] + + images = { + "base_0_rgb": base_image, + } + image_masks = { + "base_0_rgb": np.True_, + } + + # Add the extra images. + extra_image_names = { + "left_wrist_0_rgb": "cam_left_wrist", + "right_wrist_0_rgb": "cam_right_wrist", + } + for dest, source in extra_image_names.items(): + if source in in_images: + images[dest] = in_images[source] + image_masks[dest] = np.True_ + else: + images[dest] = np.zeros_like(base_image) + image_masks[dest] = np.False_ + + inputs = { + "image": images, + "image_mask": image_masks, + "state": data["state"], + } + else: + inputs={ + "state": data["state"], + } + + # Actions are only available during training. + if "actions" in data: + actions = np.asarray(data["actions"]) + actions = _encode_actions_inv(actions, adapt_to_pi=self.adapt_to_pi) + inputs["actions"] = actions + + if "prompt" in data: + inputs["prompt"] = data["prompt"] + + return inputs + + +@dataclasses.dataclass(frozen=True) +class Reala2dOutputs(transforms.DataTransformFn): + """Outputs for the a2d policy.""" + adapt_to_pi: bool = True + + def __call__(self, data: dict) -> dict: + # Only return the first 16 dims. 
+ actions = np.asarray(data["actions"][:, :16]) + return {"actions": _encode_actions(actions, adapt_to_pi=self.adapt_to_pi)} + + +def _joint_flip_mask() -> np.ndarray: + """Used to convert between aloha and pi joint angles.""" + return np.array([1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1]) + + +def _normalize(x, min_val, max_val): + return (x - min_val) / (max_val - min_val) + + +def _unnormalize(x, min_val, max_val): + return x * (max_val - min_val) + min_val + + +def _gripper_to_angular(value): + # Aloha transforms the gripper positions into a linear space. The following code + # reverses this transformation to be consistent with pi0 which is pretrained in + # angular space. + # + # These values are coming from the Aloha code: + # PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED + value = _unnormalize(value, min_val=0.01844, max_val=0.05800) + + # This is the inverse of the angular to linear transformation inside the Interbotix code. + def linear_to_radian(linear_position, arm_length, horn_radius): + value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position) + return np.arcsin(np.clip(value, -1.0, 1.0)) + + # The constants are taken from the Interbotix code. + value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022) + + # pi0 gripper data is normalized (0, 1) between encoder counts (2405, 3110). + # There are 4096 total encoder counts and aloha uses a zero of 2048. + # Converting this to radians means that the normalized inputs are between (0.5476, 1.6296) + return _normalize(value, min_val=0.5476, max_val=1.6296) + + +def _gripper_from_angular(value): + # Convert from the gripper position used by pi0 to the gripper position that is used by Aloha. + # Note that the units are still angular but the range is different. + + # We do not scale the output since the trossen model predictions are already in radians. 
+ # See the comment in _gripper_to_angular for a derivation of the constant + value = value + 0.5476 + + # These values are coming from the Aloha code: + # PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE + return _normalize(value, min_val=-0.6213, max_val=1.4910) + + +def _gripper_from_angular_inv(value): + # Directly inverts the gripper_from_angular function. + value = _unnormalize(value, min_val=-0.6213, max_val=1.4910) + return value - 0.5476 + + +def _decode_a2d(data: dict, *, adapt_to_pi: bool = False) -> dict: + state_dict = data["state_dict"] + data["state"] = _decode_state(state_dict, adapt_to_pi=adapt_to_pi) + del data["state_dict"] + action_dict = data["action_dict"] + data["actions"] = _decode_action(action_dict, adapt_to_pi=adapt_to_pi) + del data["action_dict"] + + def convert_image(img): + img = np.asarray(img) + # Convert to uint8 if using float images. + if np.issubdtype(img.dtype, np.floating): + img = (255 * img).astype(np.uint8) + # Convert from [channel, height, width] to [height, width, channel]. 
+ return einops.rearrange(img, "c h w -> h w c") + if "images" in data: + images = data["images"] + images_dict = {name: convert_image(img) for name, img in images.items()} + + data["images"] = images_dict + return data + + +def _decode_state(state, *, adapt_to_pi: bool = False) -> np.ndarray: + joint = state["joint"] + gripper = state["gripper"] + state_left_arm_gripper = np.concatenate( + [ + joint[:7], + gripper[:1], + ], + axis=-1 + ) + state_right_arm_gripper = np.concatenate( + [ + joint[7:14], + gripper[1:2], + ], + axis=-1 + ) + state = np.concatenate( + [ + state_left_arm_gripper, + state_right_arm_gripper, + ], + axis=-1 + ) + return state + +def _decode_action(action, *, adapt_to_pi: bool = False) -> np.ndarray: + joint = action["joint"] + gripper = action["gripper"] + action_left_arm_gripper = np.concatenate( + [ + joint[:,:7], + gripper[:,:1], + ], + axis=-1 + ) + action_right_arm_gripper = np.concatenate( + [ + joint[:,7:14], + gripper[:,1:2], + ], + axis=-1 + ) + action = np.concatenate( + [ + action_left_arm_gripper, + action_right_arm_gripper, + ], + axis=-1 + ) + return action + + +def _encode_actions(actions: np.ndarray, *, adapt_to_pi: bool = False) -> np.ndarray: + return actions + + +def _encode_actions_inv(actions: np.ndarray, *, adapt_to_pi: bool = False) -> np.ndarray: + return actions diff --git a/policy/openpi-InternData-A1/src/openpi/policies/real_lift2_policy.py b/policy/openpi-InternData-A1/src/openpi/policies/real_lift2_policy.py new file mode 100644 index 0000000..2900b3a --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/policies/real_lift2_policy.py @@ -0,0 +1,205 @@ +import dataclasses +from typing import ClassVar + +import einops +import numpy as np + +from openpi import transforms +from pdb import set_trace + + +@dataclasses.dataclass(frozen=True) +class RealLift2Inputs(transforms.DataTransformFn): + """Inputs for the Lift2 policy. + """ + + adapt_to_pi: bool = True + + # The expected cameras names. 
All input cameras must be in this set. Missing cameras will be + # replaced with black images and the corresponding `image_mask` will be set to False. + EXPECTED_CAMERAS: ClassVar[tuple[str, ...]] = ("cam_high", "cam_low", "cam_left_wrist", "cam_right_wrist") + + def __call__(self, data: dict) -> dict: + data = _decode_aloha(data, adapt_to_pi=self.adapt_to_pi) + if "images" in data: + in_images = data["images"] + if set(in_images) - set(self.EXPECTED_CAMERAS): + raise ValueError(f"Expected images to contain {self.EXPECTED_CAMERAS}, got {tuple(in_images)}") + + # Assume that base image always exists. + base_image = in_images["cam_high"] + + images = { + "base_0_rgb": base_image, + } + image_masks = { + "base_0_rgb": np.True_, + } + + # Add the extra images. + extra_image_names = { + "left_wrist_0_rgb": "cam_left_wrist", + "right_wrist_0_rgb": "cam_right_wrist", + } + for dest, source in extra_image_names.items(): + if source in in_images: + images[dest] = in_images[source] + image_masks[dest] = np.True_ + else: + images[dest] = np.zeros_like(base_image) + image_masks[dest] = np.False_ + + inputs = { + "image": images, + "image_mask": image_masks, + "state": data["state"], + } + else: + inputs={ + "state": data["state"], + } + + # Actions are only available during training. + if "actions" in data: + actions = np.asarray(data["actions"]) + actions = _encode_actions_inv(actions, adapt_to_pi=self.adapt_to_pi) + inputs["actions"] = actions + + if "prompt" in data: + inputs["prompt"] = data["prompt"] + + return inputs + + +@dataclasses.dataclass(frozen=True) +class RealLift2Outputs(transforms.DataTransformFn): + """Outputs for the Lift2 policy.""" + adapt_to_pi: bool = True + + def __call__(self, data: dict) -> dict: + # Only return the first 14 dims. 
+ actions = np.asarray(data["actions"][:, :14]) + return {"actions": _encode_actions(actions, adapt_to_pi=self.adapt_to_pi)} + + +def _joint_flip_mask() -> np.ndarray: + """Used to convert between aloha and pi joint angles.""" + return np.array([1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1]) + + +def _normalize(x, min_val, max_val): + return (x - min_val) / (max_val - min_val) + + +def _unnormalize(x, min_val, max_val): + return x * (max_val - min_val) + min_val + + +def _gripper_to_angular(value): + # Aloha transforms the gripper positions into a linear space. The following code + # reverses this transformation to be consistent with pi0 which is pretrained in + # angular space. + # + # These values are coming from the Aloha code: + # PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED + value = _unnormalize(value, min_val=0.01844, max_val=0.05800) + + # This is the inverse of the angular to linear transformation inside the Interbotix code. + def linear_to_radian(linear_position, arm_length, horn_radius): + value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position) + return np.arcsin(np.clip(value, -1.0, 1.0)) + + # The constants are taken from the Interbotix code. + value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022) + + # pi0 gripper data is normalized (0, 1) between encoder counts (2405, 3110). + # There are 4096 total encoder counts and aloha uses a zero of 2048. + # Converting this to radians means that the normalized inputs are between (0.5476, 1.6296) + return _normalize(value, min_val=0.5476, max_val=1.6296) + + +def _gripper_from_angular(value): + # Convert from the gripper position used by pi0 to the gripper position that is used by Aloha. + # Note that the units are still angular but the range is different. + + # We do not scale the output since the trossen model predictions are already in radians. 
+ # See the comment in _gripper_to_angular for a derivation of the constant + value = value + 0.5476 + + # These values are coming from the Aloha code: + # PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE + return _normalize(value, min_val=-0.6213, max_val=1.4910) + + +def _gripper_from_angular_inv(value): + # Directly inverts the gripper_from_angular function. + value = _unnormalize(value, min_val=-0.6213, max_val=1.4910) + return value - 0.5476 + + +def _decode_aloha(data: dict, *, adapt_to_pi: bool = False) -> dict: + # state is [left_arm_joint_angles, left_arm_gripper, right_arm_joint_angles, right_arm_gripper] + # dim sizes: [6, 1, 6, 1] + state_dict = data["state_dict"] + data["state"] = _decode_state(state_dict, adapt_to_pi=adapt_to_pi) + del data["state_dict"] + action_dict = data["action_dict"] + data["actions"] = _decode_action(action_dict, adapt_to_pi=adapt_to_pi) + del data["action_dict"] + + def convert_image(img): + img = np.asarray(img) + # Convert to uint8 if using float images. + if np.issubdtype(img.dtype, np.floating): + img = (255 * img).astype(np.uint8) + # Convert from [channel, height, width] to [height, width, channel]. 
+ return einops.rearrange(img, "c h w -> h w c") + if "images" in data: + images = data["images"] + images_dict = {name: convert_image(img) for name, img in images.items()} + + data["images"] = images_dict + return data + + +def _decode_state(state, *, adapt_to_pi: bool = False) -> np.ndarray: + state_left_arm = state["left_joint"] + state_left_gripper = state["left_gripper"] + state_right_arm = state["right_joint"] + state_right_gripper = state["right_gripper"] + if state_left_arm.ndim - state_left_gripper.ndim == 1: + if state_left_gripper.ndim == 0: + state_left_gripper = state_left_gripper[None] + state_right_gripper = state_right_gripper[None] + state = np.concatenate([state_left_arm, state_left_gripper, state_right_arm, state_right_gripper], axis=0) + elif state_left_gripper.ndim == 1: + state_left_gripper = state_left_gripper[:, None] + state_right_gripper = state_right_gripper[:, None] + state = np.concatenate([state_left_arm, state_left_gripper, state_right_arm, state_right_gripper], axis=1) + return state + +def _decode_action(action, *, adapt_to_pi: bool = False) -> np.ndarray: + action_left_arm = action["left_joint"] + action_left_gripper = action["left_gripper"] + action_right_arm = action["right_joint"] + action_right_gripper = action["right_gripper"] + + if action_left_arm.ndim - action_left_gripper.ndim == 1: + if action_left_gripper.ndim == 0: + action_left_gripper = action_left_gripper[None] + action_right_gripper = action_right_gripper[None] + action = np.concatenate([action_left_arm, action_left_gripper, action_right_arm, action_right_gripper], axis=0) + elif action_left_gripper.ndim == 1: + action_left_gripper = action_left_gripper[:, None] + action_right_gripper = action_right_gripper[:, None] + action = np.concatenate([action_left_arm, action_left_gripper, action_right_arm, action_right_gripper], axis=1) + + return action + + +def _encode_actions(actions: np.ndarray, *, adapt_to_pi: bool = False) -> np.ndarray: + return actions + + +def 
_encode_actions_inv(actions: np.ndarray, *, adapt_to_pi: bool = False) -> np.ndarray: + return actions diff --git a/policy/openpi-InternData-A1/src/openpi/policies/sim2real_split_aloha_policy.py b/policy/openpi-InternData-A1/src/openpi/policies/sim2real_split_aloha_policy.py new file mode 100644 index 0000000..d7f0811 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/policies/sim2real_split_aloha_policy.py @@ -0,0 +1,207 @@ +import dataclasses +from typing import ClassVar + +import einops +import numpy as np + +from openpi import transforms +from pdb import set_trace + + +@dataclasses.dataclass(frozen=True) +class Sim2RealSplitAlohaInputs(transforms.DataTransformFn): + """Inputs for the Split Aloha policy. + """ + + adapt_to_pi: bool = True + + # The expected cameras names. All input cameras must be in this set. Missing cameras will be + # replaced with black images and the corresponding `image_mask` will be set to False. + EXPECTED_CAMERAS: ClassVar[tuple[str, ...]] = ("cam_high", "cam_low", "cam_left_wrist", "cam_right_wrist") + + def __call__(self, data: dict) -> dict: + data = _decode_aloha(data, adapt_to_pi=self.adapt_to_pi) + if "images" in data: + in_images = data["images"] + if set(in_images) - set(self.EXPECTED_CAMERAS): + raise ValueError(f"Expected images to contain {self.EXPECTED_CAMERAS}, got {tuple(in_images)}") + + # Assume that base image always exists. + base_image = in_images["cam_high"] + + images = { + "base_0_rgb": base_image, + } + image_masks = { + "base_0_rgb": np.True_, + } + + # Add the extra images. 
+ extra_image_names = { + "left_wrist_0_rgb": "cam_left_wrist", + "right_wrist_0_rgb": "cam_right_wrist", + } + for dest, source in extra_image_names.items(): + if source in in_images: + images[dest] = in_images[source] + image_masks[dest] = np.True_ + else: + images[dest] = np.zeros_like(base_image) + image_masks[dest] = np.False_ + + inputs = { + "image": images, + "image_mask": image_masks, + "state": data["state"], + } + else: + inputs = { + "state": data["state"], + } + + # Actions are only available during training. + if "actions" in data: + actions = np.asarray(data["actions"]) + actions = _encode_actions_inv(actions, adapt_to_pi=self.adapt_to_pi) + inputs["actions"] = actions + + if "prompt" in data: + inputs["prompt"] = data["prompt"] + # set_trace() + return inputs + + +@dataclasses.dataclass(frozen=True) +class Sim2RealSplitAlohaOutputs(transforms.DataTransformFn): + """Outputs for the Split Aloha policy.""" + + # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. + adapt_to_pi: bool = True + + def __call__(self, data: dict) -> dict: + # Only return the first 14 dims. + actions = np.asarray(data["actions"][:, :14]) + return {"actions": _encode_actions(actions, adapt_to_pi=self.adapt_to_pi)} + + +def _joint_flip_mask() -> np.ndarray: + """Used to convert between aloha and pi joint angles.""" + return np.array([1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1]) + + +def _normalize(x, min_val, max_val): + return (x - min_val) / (max_val - min_val) + + +def _unnormalize(x, min_val, max_val): + return x * (max_val - min_val) + min_val + + +def _gripper_to_angular(value): + # Aloha transforms the gripper positions into a linear space. The following code + # reverses this transformation to be consistent with pi0 which is pretrained in + # angular space. 
+ # + # These values are coming from the Aloha code: + # PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED + value = _unnormalize(value, min_val=0.01844, max_val=0.05800) + + # This is the inverse of the angular to linear transformation inside the Interbotix code. + def linear_to_radian(linear_position, arm_length, horn_radius): + value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position) + return np.arcsin(np.clip(value, -1.0, 1.0)) + + # The constants are taken from the Interbotix code. + value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022) + + # pi0 gripper data is normalized (0, 1) between encoder counts (2405, 3110). + # There are 4096 total encoder counts and aloha uses a zero of 2048. + # Converting this to radians means that the normalized inputs are between (0.5476, 1.6296) + return _normalize(value, min_val=0.5476, max_val=1.6296) + + +def _gripper_from_angular(value): + # Convert from the gripper position used by pi0 to the gripper position that is used by Aloha. + # Note that the units are still angular but the range is different. + + # We do not scale the output since the trossen model predictions are already in radians. + # See the comment in _gripper_to_angular for a derivation of the constant + value = value + 0.5476 + + # These values are coming from the Aloha code: + # PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE + return _normalize(value, min_val=-0.6213, max_val=1.4910) + + +def _gripper_from_angular_inv(value): + # Directly inverts the gripper_from_angular function. 
+ value = _unnormalize(value, min_val=-0.6213, max_val=1.4910) + return value - 0.5476 + + +def _decode_aloha(data: dict, *, adapt_to_pi: bool = False) -> dict: + state_dict = data["state_dict"] + data["state"] = _decode_state(state_dict, adapt_to_pi=adapt_to_pi) + del data["state_dict"] + action_dict = data["action_dict"] + data["actions"] = _decode_action(action_dict, adapt_to_pi=adapt_to_pi) + del data["action_dict"] + + def convert_image(img): + img = np.asarray(img) + # Convert to uint8 if using float images. + if np.issubdtype(img.dtype, np.floating): + img = (255 * img).astype(np.uint8) + # Convert from [channel, height, width] to [height, width, channel]. + return einops.rearrange(img, "c h w -> h w c") + if "images" in data: + images = data["images"] + images_dict = {name: convert_image(img) for name, img in images.items()} + + data["images"] = images_dict + return data + + +def _decode_state(state, *, adapt_to_pi: bool = False) -> np.ndarray: + state_left_arm = state["left_joint"] + state_left_gripper = state["left_gripper"] + state_right_arm = state["right_joint"] + state_right_gripper = state["right_gripper"] + if state_left_arm.ndim - state_left_gripper.ndim == 1: + if state_left_gripper.ndim == 0: + state_left_gripper = np.array([0]) + state_right_gripper = np.array([0]) + state = np.concatenate([state_left_arm, state_left_gripper, state_right_arm, state_right_gripper], axis=0) + elif state_left_gripper.ndim == 1: + state_left_gripper = np.array([[0]]) + state_right_gripper = np.array([[0]]) + state = np.concatenate([state_left_arm, state_left_gripper, state_right_arm, state_right_gripper], axis=1) + return state + +def _decode_action(action, *, adapt_to_pi: bool = False) -> np.ndarray: + action_left_arm = action["left_joint"] + action_left_gripper = action["left_gripper_openness"] + action_right_arm = action["right_joint"] + action_right_gripper = action["right_gripper_openness"] + + if action_left_arm.ndim - action_left_gripper.ndim == 1: + if 
action_left_gripper.ndim == 0: + action_left_gripper = action_left_gripper[None] + action_right_gripper = action_right_gripper[None] + action = np.concatenate([action_left_arm, action_left_gripper, action_right_arm, action_right_gripper], axis=0) + elif action_left_gripper.ndim == 1: + action_left_gripper = action_left_gripper[:, None] + action_right_gripper = action_right_gripper[:, None] + action = np.concatenate([action_left_arm, action_left_gripper, action_right_arm, action_right_gripper], axis=1) + + return action + + +def _encode_actions(actions: np.ndarray, *, adapt_to_pi: bool = False) -> np.ndarray: + + return actions + + +def _encode_actions_inv(actions: np.ndarray, *, adapt_to_pi: bool = False) -> np.ndarray: + return actions diff --git a/policy/openpi-InternData-A1/src/openpi/policies/sim_franka_policy.py b/policy/openpi-InternData-A1/src/openpi/policies/sim_franka_policy.py new file mode 100644 index 0000000..ab101f3 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/policies/sim_franka_policy.py @@ -0,0 +1,185 @@ +import dataclasses +from typing import ClassVar + +import einops +import numpy as np + +from openpi import transforms +from pdb import set_trace + + +@dataclasses.dataclass(frozen=True) +class SimFrankaInputs(transforms.DataTransformFn): + """Inputs for the Franka policy. + """ + + adapt_to_pi: bool = True + + # The expected cameras names. All input cameras must be in this set. Missing cameras will be + # replaced with black images and the corresponding `image_mask` will be set to False. 
+ EXPECTED_CAMERAS: ClassVar[tuple[str, ...]] = ("cam_high", "cam_low", "cam_left_wrist", "cam_right_wrist") + + def __call__(self, data: dict) -> dict: + data = _decode_franka(data, adapt_to_pi=self.adapt_to_pi) + if "images" in data: + in_images = data["images"] + if set(in_images) - set(self.EXPECTED_CAMERAS): + raise ValueError(f"Expected images to contain {self.EXPECTED_CAMERAS}, got {tuple(in_images)}") + + # Assume that base image always exists. + base_image = in_images["cam_high"] + + images = { + "base_0_rgb": base_image, + } + image_masks = { + "base_0_rgb": np.True_, + } + + # Add the extra images. + extra_image_names = { + "left_wrist_0_rgb": "cam_left_wrist", + "right_wrist_0_rgb": "cam_right_wrist", + } + for dest, source in extra_image_names.items(): + if source in in_images: + images[dest] = in_images[source] + image_masks[dest] = np.True_ + else: + images[dest] = np.zeros_like(base_image) + image_masks[dest] = np.False_ + + inputs = { + "image": images, + "image_mask": image_masks, + "state": data["state"], + "pose": data["pose"], + } + else: + inputs = { + "state": data["state"], + "pose": data["pose"], + } + + # Actions are only available during training. + if "actions" in data: + actions = np.asarray(data["actions"]) + actions = _encode_actions_inv(actions, adapt_to_pi=self.adapt_to_pi) + inputs["actions"] = actions + + if "prompt" in data: + inputs["prompt"] = data["prompt"] + return inputs + + +@dataclasses.dataclass(frozen=True) +class SimFrankaOutputs(transforms.DataTransformFn): + """Outputs for the Lift2 policy.""" + + adapt_to_pi: bool = True + + def __call__(self, data: dict) -> dict: + # Only return the first 7 dims. 
+ actions = np.asarray(data["actions"][:, :7]) + return {"actions": _encode_actions(actions, adapt_to_pi=self.adapt_to_pi)} + + +def _joint_flip_mask() -> np.ndarray: + """Used to convert between aloha and pi joint angles.""" + return np.array([1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1]) + + +def _normalize(x, min_val, max_val): + return (x - min_val) / (max_val - min_val) + + +def _unnormalize(x, min_val, max_val): + return x * (max_val - min_val) + min_val + + +def _gripper_to_angular(value): + # Aloha transforms the gripper positions into a linear space. The following code + # reverses this transformation to be consistent with pi0 which is pretrained in + # angular space. + # + # These values are coming from the Aloha code: + # PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED + value = _unnormalize(value, min_val=0.01844, max_val=0.05800) + + # This is the inverse of the angular to linear transformation inside the Interbotix code. + def linear_to_radian(linear_position, arm_length, horn_radius): + value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position) + return np.arcsin(np.clip(value, -1.0, 1.0)) + + # The constants are taken from the Interbotix code. + value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022) + + # pi0 gripper data is normalized (0, 1) between encoder counts (2405, 3110). + # There are 4096 total encoder counts and aloha uses a zero of 2048. + # Converting this to radians means that the normalized inputs are between (0.5476, 1.6296) + return _normalize(value, min_val=0.5476, max_val=1.6296) + + +def _gripper_from_angular(value): + # Convert from the gripper position used by pi0 to the gripper position that is used by Aloha. + # Note that the units are still angular but the range is different. + + # We do not scale the output since the trossen model predictions are already in radians. 
+ # See the comment in _gripper_to_angular for a derivation of the constant + value = value + 0.5476 + + # These values are coming from the Aloha code: + # PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE + return _normalize(value, min_val=-0.6213, max_val=1.4910) + + +def _gripper_from_angular_inv(value): + # Directly inverts the gripper_from_angular function. + value = _unnormalize(value, min_val=-0.6213, max_val=1.4910) + return value - 0.5476 + + +def _decode_franka(data: dict, *, adapt_to_pi: bool = False) -> dict: + state_dict = data["state_dict"] + data["state"], data["pose"] = _decode_state(state_dict, adapt_to_pi=adapt_to_pi) + del data["state_dict"] + action_dict = data["action_dict"] + data["actions"] = _decode_action(action_dict, adapt_to_pi=adapt_to_pi) + del data["action_dict"] + + def convert_image(img): + img = np.asarray(img) + # Convert to uint8 if using float images. + if np.issubdtype(img.dtype, np.floating): + img = (255 * img).astype(np.uint8) + # Convert from [channel, height, width] to [height, width, channel]. 
+ return einops.rearrange(img, "c h w -> h w c") + if "images" in data: + images = data["images"] + images_dict = {name: convert_image(img) for name, img in images.items()} + + data["images"] = images_dict + return data + + +def _decode_state(state, *, adapt_to_pi: bool = False) -> np.ndarray: + gripper_position = state["gripper_position"][None] + gripper_pose = state["gripper_pose"] + joint_position = state["joint_position"] + state = np.concatenate([joint_position, gripper_position], axis=0) + pose = np.concatenate([gripper_pose, gripper_position], axis=0) + return state, pose + +def _decode_action(action, *, adapt_to_pi: bool = False) -> np.ndarray: + gripper_pose = action["gripper_pose"] + gripper_openness = action["gripper_openness"][..., None] + action = np.concatenate([gripper_pose, gripper_openness], axis=1) + return action + + +def _encode_actions(actions: np.ndarray, *, adapt_to_pi: bool = False) -> np.ndarray: + return actions + + +def _encode_actions_inv(actions: np.ndarray, *, adapt_to_pi: bool = False) -> np.ndarray: + return actions diff --git a/policy/openpi-InternData-A1/src/openpi/policies/sim_split_aloha_policy.py b/policy/openpi-InternData-A1/src/openpi/policies/sim_split_aloha_policy.py new file mode 100644 index 0000000..5007da6 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/policies/sim_split_aloha_policy.py @@ -0,0 +1,208 @@ +import dataclasses +from typing import ClassVar + +import einops +import numpy as np + +from openpi import transforms +from pdb import set_trace + + +@dataclasses.dataclass(frozen=True) +class SimSplitAlohaInputs(transforms.DataTransformFn): + """Inputs for the Split Aloha policy. + """ + + adapt_to_pi: bool = True + + + # The expected cameras names. All input cameras must be in this set. Missing cameras will be + # replaced with black images and the corresponding `image_mask` will be set to False. 
+ EXPECTED_CAMERAS: ClassVar[tuple[str, ...]] = ("cam_high", "cam_low", "cam_left_wrist", "cam_right_wrist") + + def __call__(self, data: dict) -> dict: + data = _decode_aloha(data, adapt_to_pi=self.adapt_to_pi) + if "images" in data: + in_images = data["images"] + if set(in_images) - set(self.EXPECTED_CAMERAS): + raise ValueError(f"Expected images to contain {self.EXPECTED_CAMERAS}, got {tuple(in_images)}") + + # Assume that base image always exists. + base_image = in_images["cam_high"] + + images = { + "base_0_rgb": base_image, + } + image_masks = { + "base_0_rgb": np.True_, + } + + # Add the extra images. + extra_image_names = { + "left_wrist_0_rgb": "cam_left_wrist", + "right_wrist_0_rgb": "cam_right_wrist", + } + for dest, source in extra_image_names.items(): + if source in in_images: + images[dest] = in_images[source] + image_masks[dest] = np.True_ + else: + images[dest] = np.zeros_like(base_image) + image_masks[dest] = np.False_ + + inputs = { + "image": images, + "image_mask": image_masks, + "state": data["state"], + } + else: + inputs = { + "state": data["state"], + } + + # Actions are only available during training. + if "actions" in data: + actions = np.asarray(data["actions"]) + actions = _encode_actions_inv(actions, adapt_to_pi=self.adapt_to_pi) + inputs["actions"] = actions + + if "prompt" in data: + inputs["prompt"] = data["prompt"] + return inputs + + +@dataclasses.dataclass(frozen=True) +class SimSplitAlohaOutputs(transforms.DataTransformFn): + """Outputs for the Split Aloha policy.""" + + # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. + adapt_to_pi: bool = True + + def __call__(self, data: dict) -> dict: + # Only return the first 14 dims. 
+ actions = np.asarray(data["actions"][:, :14]) + return {"actions": _encode_actions(actions, adapt_to_pi=self.adapt_to_pi)} + + +def _joint_flip_mask() -> np.ndarray: + """Used to convert between aloha and pi joint angles.""" + return np.array([1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1]) + + +def _normalize(x, min_val, max_val): + return (x - min_val) / (max_val - min_val) + + +def _unnormalize(x, min_val, max_val): + return x * (max_val - min_val) + min_val + + +def _gripper_to_angular(value): + # Aloha transforms the gripper positions into a linear space. The following code + # reverses this transformation to be consistent with pi0 which is pretrained in + # angular space. + # + # These values are coming from the Aloha code: + # PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED + value = _unnormalize(value, min_val=0.01844, max_val=0.05800) + + # This is the inverse of the angular to linear transformation inside the Interbotix code. + def linear_to_radian(linear_position, arm_length, horn_radius): + value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position) + return np.arcsin(np.clip(value, -1.0, 1.0)) + + # The constants are taken from the Interbotix code. + value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022) + + # pi0 gripper data is normalized (0, 1) between encoder counts (2405, 3110). + # There are 4096 total encoder counts and aloha uses a zero of 2048. + # Converting this to radians means that the normalized inputs are between (0.5476, 1.6296) + return _normalize(value, min_val=0.5476, max_val=1.6296) + + +def _gripper_from_angular(value): + # Convert from the gripper position used by pi0 to the gripper position that is used by Aloha. + # Note that the units are still angular but the range is different. + + # We do not scale the output since the trossen model predictions are already in radians. 
+ # See the comment in _gripper_to_angular for a derivation of the constant + value = value + 0.5476 + + # These values are coming from the Aloha code: + # PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE + return _normalize(value, min_val=-0.6213, max_val=1.4910) + + +def _gripper_from_angular_inv(value): + # Directly inverts the gripper_from_angular function. + value = _unnormalize(value, min_val=-0.6213, max_val=1.4910) + return value - 0.5476 + + +def _decode_aloha(data: dict, *, adapt_to_pi: bool = False) -> dict: + # state is [left_arm_joint_angles, left_arm_gripper, right_arm_joint_angles, right_arm_gripper] + # dim sizes: [7, 1, 7, 1] + state_dict = data["state_dict"] + data["state"] = _decode_state(state_dict, adapt_to_pi=adapt_to_pi) + del data["state_dict"] + action_dict = data["action_dict"] + data["actions"] = _decode_action(action_dict, adapt_to_pi=adapt_to_pi) + del data["action_dict"] + + def convert_image(img): + img = np.asarray(img) + # Convert to uint8 if using float images. + if np.issubdtype(img.dtype, np.floating): + img = (255 * img).astype(np.uint8) + # Convert from [channel, height, width] to [height, width, channel]. 
+ return einops.rearrange(img, "c h w -> h w c") + if "images" in data: + images = data["images"] + images_dict = {name: convert_image(img) for name, img in images.items()} + + data["images"] = images_dict + return data + + +def _decode_state(state, *, adapt_to_pi: bool = False) -> np.ndarray: + state_left_arm = state["left_joint"] + state_left_gripper = state["left_gripper"] + state_right_arm = state["right_joint"] + state_right_gripper = state["right_gripper"] + if state_left_arm.ndim - state_left_gripper.ndim == 1: + if state_left_gripper.ndim == 0: + state_left_gripper = state_left_gripper[None] + state_right_gripper = state_right_gripper[None] + state = np.concatenate([state_left_arm, state_left_gripper, state_right_arm, state_right_gripper], axis=0) + elif state_left_gripper.ndim == 1: + state_left_gripper = state_left_gripper[:, None] + state_right_gripper = state_right_gripper[:, None] + state = np.concatenate([state_left_arm, state_left_gripper, state_right_arm, state_right_gripper], axis=1) + return state + +def _decode_action(action, *, adapt_to_pi: bool = False) -> np.ndarray: + action_left_arm = action["left_joint"] + action_left_gripper = action["left_gripper_openness"] + action_right_arm = action["right_joint"] + action_right_gripper = action["right_gripper_openness"] + + if action_left_arm.ndim - action_left_gripper.ndim == 1: + if action_left_gripper.ndim == 0: + action_left_gripper = action_left_gripper[None] + action_right_gripper = action_right_gripper[None] + action = np.concatenate([action_left_arm, action_left_gripper, action_right_arm, action_right_gripper], axis=0) + elif action_left_gripper.ndim == 1: + action_left_gripper = action_left_gripper[:, None] + action_right_gripper = action_right_gripper[:, None] + action = np.concatenate([action_left_arm, action_left_gripper, action_right_arm, action_right_gripper], axis=1) + + return action + + +def _encode_actions(actions: np.ndarray, *, adapt_to_pi: bool = False) -> np.ndarray: + return 
actions + + +def _encode_actions_inv(actions: np.ndarray, *, adapt_to_pi: bool = False) -> np.ndarray: + return actions diff --git a/policy/openpi-InternData-A1/src/openpi/py.typed b/policy/openpi-InternData-A1/src/openpi/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/policy/openpi-InternData-A1/src/openpi/serving/websocket_policy_server.py b/policy/openpi-InternData-A1/src/openpi/serving/websocket_policy_server.py new file mode 100644 index 0000000..bdefa98 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/serving/websocket_policy_server.py @@ -0,0 +1,90 @@ +import asyncio +import http +import logging +import time +import traceback + +from openpi_client import base_policy as _base_policy +from openpi_client import msgpack_numpy +import websockets.asyncio.server as _server +import websockets.frames + +logger = logging.getLogger(__name__) + + +class WebsocketPolicyServer: + """Serves a policy using the websocket protocol. See websocket_client_policy.py for a client implementation. + + Currently only implements the `load` and `infer` methods. 
+ """ + + def __init__( + self, + policy: _base_policy.BasePolicy, + host: str = "0.0.0.0", + port: int | None = None, + metadata: dict | None = None, + ) -> None: + self._policy = policy + self._host = host + self._port = port + self._metadata = metadata or {} + logging.getLogger("websockets.server").setLevel(logging.INFO) + + def serve_forever(self) -> None: + asyncio.run(self.run()) + + async def run(self): + async with _server.serve( + self._handler, + self._host, + self._port, + compression=None, + max_size=None, + process_request=_health_check, + ) as server: + await server.serve_forever() + + async def _handler(self, websocket: _server.ServerConnection): + logger.info(f"Connection from {websocket.remote_address} opened") + packer = msgpack_numpy.Packer() + + await websocket.send(packer.pack(self._metadata)) + + prev_total_time = None + while True: + try: + start_time = time.monotonic() + obs = msgpack_numpy.unpackb(await websocket.recv()) + + infer_time = time.monotonic() + action = self._policy.infer(obs) + infer_time = time.monotonic() - infer_time + + action["server_timing"] = { + "infer_ms": infer_time * 1000, + } + if prev_total_time is not None: + # We can only record the last total time since we also want to include the send time. + action["server_timing"]["prev_total_ms"] = prev_total_time * 1000 + + await websocket.send(packer.pack(action)) + prev_total_time = time.monotonic() - start_time + + except websockets.ConnectionClosed: + logger.info(f"Connection from {websocket.remote_address} closed") + break + except Exception: + await websocket.send(traceback.format_exc()) + await websocket.close( + code=websockets.frames.CloseCode.INTERNAL_ERROR, + reason="Internal server error. 
Traceback included in previous frame.", + ) + raise + + +def _health_check(connection: _server.ServerConnection, request: _server.Request) -> _server.Response | None: + if request.path == "/healthz": + return connection.respond(http.HTTPStatus.OK, "OK\n") + # Continue with the normal request handling. + return None diff --git a/policy/openpi-InternData-A1/src/openpi/shared/__init__.py b/policy/openpi-InternData-A1/src/openpi/shared/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/policy/openpi-InternData-A1/src/openpi/shared/array_typing.py b/policy/openpi-InternData-A1/src/openpi/shared/array_typing.py new file mode 100644 index 0000000..569eafe --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/shared/array_typing.py @@ -0,0 +1,89 @@ +import contextlib +import functools as ft +import inspect +from typing import TypeAlias, TypeVar, cast + +import beartype +import jax +import jax._src.tree_util as private_tree_util +import jax.core +from jaxtyping import ArrayLike +from jaxtyping import Bool # noqa: F401 +from jaxtyping import DTypeLike # noqa: F401 +from jaxtyping import Float +from jaxtyping import Int # noqa: F401 +from jaxtyping import Key # noqa: F401 +from jaxtyping import Num # noqa: F401 +from jaxtyping import PyTree +from jaxtyping import Real # noqa: F401 +from jaxtyping import UInt8 # noqa: F401 +from jaxtyping import config +from jaxtyping import jaxtyped +import jaxtyping._decorator +import torch + +# patch jaxtyping to handle https://github.com/patrick-kidger/jaxtyping/issues/277. +# the problem is that custom PyTree nodes are sometimes initialized with arbitrary types (e.g., `jax.ShapeDtypeStruct`, +# `jax.Sharding`, or even ) due to JAX tracing operations. this patch skips typechecking when the stack trace +# contains `jax._src.tree_util`, which should only be the case during tree unflattening. 
+_original_check_dataclass_annotations = jaxtyping._decorator._check_dataclass_annotations # noqa: SLF001 +# Redefine Array to include both JAX arrays and PyTorch tensors +Array = jax.Array | torch.Tensor + + +def _check_dataclass_annotations(self, typechecker): + if not any( + frame.frame.f_globals.get("__name__") in {"jax._src.tree_util", "flax.nnx.transforms.compilation"} + for frame in inspect.stack() + ): + return _original_check_dataclass_annotations(self, typechecker) + return None + + +jaxtyping._decorator._check_dataclass_annotations = _check_dataclass_annotations # noqa: SLF001 + +KeyArrayLike: TypeAlias = jax.typing.ArrayLike +Params: TypeAlias = PyTree[Float[ArrayLike, "..."]] + +T = TypeVar("T") + + +# runtime type-checking decorator +def typecheck(t: T) -> T: + return cast(T, ft.partial(jaxtyped, typechecker=beartype.beartype)(t)) + + +@contextlib.contextmanager +def disable_typechecking(): + initial = config.jaxtyping_disable + config.update("jaxtyping_disable", True) # noqa: FBT003 + yield + config.update("jaxtyping_disable", initial) + + +def check_pytree_equality(*, expected: PyTree, got: PyTree, check_shapes: bool = False, check_dtypes: bool = False): + """Checks that two PyTrees have the same structure and optionally checks shapes and dtypes. Creates a much nicer + error message than if `jax.tree.map` is naively used on PyTrees with different structures. 
+ """ + + if errors := list(private_tree_util.equality_errors(expected, got)): + raise ValueError( + "PyTrees have different structure:\n" + + ( + "\n".join( + f" - at keypath '{jax.tree_util.keystr(path)}': expected {thing1}, got {thing2}, so {explanation}.\n" + for path, thing1, thing2, explanation in errors + ) + ) + ) + + if check_shapes or check_dtypes: + + def check(kp, x, y): + if check_shapes and x.shape != y.shape: + raise ValueError(f"Shape mismatch at {jax.tree_util.keystr(kp)}: expected {x.shape}, got {y.shape}") + + if check_dtypes and x.dtype != y.dtype: + raise ValueError(f"Dtype mismatch at {jax.tree_util.keystr(kp)}: expected {x.dtype}, got {y.dtype}") + + jax.tree_util.tree_map_with_path(check, expected, got) diff --git a/policy/openpi-InternData-A1/src/openpi/shared/download.py b/policy/openpi-InternData-A1/src/openpi/shared/download.py new file mode 100644 index 0000000..8868825 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/shared/download.py @@ -0,0 +1,194 @@ +import concurrent.futures +import datetime +import logging +import os +import pathlib +import re +import shutil +import stat +import time +import urllib.parse + +import filelock +import fsspec +import fsspec.generic +import tqdm_loggable.auto as tqdm + +# Environment variable to control cache directory path, ~/.cache/openpi will be used by default. +_OPENPI_DATA_HOME = "OPENPI_DATA_HOME" +DEFAULT_CACHE_DIR = "~/.cache/openpi" + +logger = logging.getLogger(__name__) + + +def get_cache_dir() -> pathlib.Path: + cache_dir = pathlib.Path(os.getenv(_OPENPI_DATA_HOME, DEFAULT_CACHE_DIR)).expanduser().resolve() + cache_dir.mkdir(parents=True, exist_ok=True) + _set_folder_permission(cache_dir) + return cache_dir + + +def maybe_download(url: str, *, force_download: bool = False, **kwargs) -> pathlib.Path: + """Download a file or directory from a remote filesystem to the local cache, and return the local path. + + If the local file already exists, it will be returned directly. 
+ + It is safe to call this function concurrently from multiple processes. + See `get_cache_dir` for more details on the cache directory. + + Args: + url: URL to the file to download. + force_download: If True, the file will be downloaded even if it already exists in the cache. + **kwargs: Additional arguments to pass to fsspec. + + Returns: + Local path to the downloaded file or directory. That path is guaranteed to exist and is absolute. + """ + # Don't use fsspec to parse the url to avoid unnecessary connection to the remote filesystem. + parsed = urllib.parse.urlparse(url) + + # Short circuit if this is a local path. + if parsed.scheme == "": + path = pathlib.Path(url) + if not path.exists(): + raise FileNotFoundError(f"File not found at {url}") + return path.resolve() + + cache_dir = get_cache_dir() + + local_path = cache_dir / parsed.netloc / parsed.path.strip("/") + local_path = local_path.resolve() + + # Check if the cache should be invalidated. + invalidate_cache = False + if local_path.exists(): + if force_download or _should_invalidate_cache(cache_dir, local_path): + invalidate_cache = True + else: + return local_path + + try: + lock_path = local_path.with_suffix(".lock") + with filelock.FileLock(lock_path): + # Ensure consistent permissions for the lock file. + _ensure_permissions(lock_path) + # First, remove the existing cache if it is expired. + if invalidate_cache: + logger.info(f"Removing expired cached entry: {local_path}") + if local_path.is_dir(): + shutil.rmtree(local_path) + else: + local_path.unlink() + + # Download the data to a local cache. + logger.info(f"Downloading {url} to {local_path}") + scratch_path = local_path.with_suffix(".partial") + _download_fsspec(url, scratch_path, **kwargs) + + shutil.move(scratch_path, local_path) + _ensure_permissions(local_path) + + except PermissionError as e: + msg = ( + f"Local file permission error was encountered while downloading {url}. 
" + f"Please try again after removing the cached data using: `rm -rf {local_path}*`" + ) + raise PermissionError(msg) from e + + return local_path + + +def _download_fsspec(url: str, local_path: pathlib.Path, **kwargs) -> None: + """Download a file from a remote filesystem to the local cache, and return the local path.""" + fs, _ = fsspec.core.url_to_fs(url, **kwargs) + info = fs.info(url) + # Folders are represented by 0-byte objects with a trailing forward slash. + if is_dir := (info["type"] == "directory" or (info["size"] == 0 and info["name"].endswith("/"))): + total_size = fs.du(url) + else: + total_size = info["size"] + with tqdm.tqdm(total=total_size, unit="iB", unit_scale=True, unit_divisor=1024) as pbar: + executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + future = executor.submit(fs.get, url, local_path, recursive=is_dir) + while not future.done(): + current_size = sum(f.stat().st_size for f in [*local_path.rglob("*"), local_path] if f.is_file()) + pbar.update(current_size - pbar.n) + time.sleep(1) + pbar.update(total_size - pbar.n) + + +def _set_permission(path: pathlib.Path, target_permission: int): + """chmod requires executable permission to be set, so we skip if the permission is already match with the target.""" + if path.stat().st_mode & target_permission == target_permission: + logger.debug(f"Skipping {path} because it already has correct permissions") + return + path.chmod(target_permission) + logger.debug(f"Set {path} to {target_permission}") + + +def _set_folder_permission(folder_path: pathlib.Path) -> None: + """Set folder permission to be read, write and searchable.""" + _set_permission(folder_path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + + +def _ensure_permissions(path: pathlib.Path) -> None: + """Since we are sharing cache directory with containerized runtime as well as training script, we need to + ensure that the cache directory has the correct permissions. 
+ """ + + def _setup_folder_permission_between_cache_dir_and_path(path: pathlib.Path) -> None: + cache_dir = get_cache_dir() + relative_path = path.relative_to(cache_dir) + moving_path = cache_dir + for part in relative_path.parts: + _set_folder_permission(moving_path / part) + moving_path = moving_path / part + + def _set_file_permission(file_path: pathlib.Path) -> None: + """Set all files to be read & writable, if it is a script, keep it as a script.""" + file_rw = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH + if file_path.stat().st_mode & 0o100: + _set_permission(file_path, file_rw | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) + else: + _set_permission(file_path, file_rw) + + _setup_folder_permission_between_cache_dir_and_path(path) + for root, dirs, files in os.walk(str(path)): + root_path = pathlib.Path(root) + for file in files: + file_path = root_path / file + _set_file_permission(file_path) + + for dir in dirs: + dir_path = root_path / dir + _set_folder_permission(dir_path) + + +def _get_mtime(year: int, month: int, day: int) -> float: + """Get the mtime of a given date at midnight UTC.""" + date = datetime.datetime(year, month, day, tzinfo=datetime.UTC) + return time.mktime(date.timetuple()) + + +# Map of relative paths, defined as regular expressions, to expiration timestamps (mtime format). +# Partial matching will be used from top to bottom and the first match will be chosen. +# Cached entries will be retained only if they are newer than the expiration timestamp. +_INVALIDATE_CACHE_DIRS: dict[re.Pattern, float] = { + re.compile("openpi-assets/checkpoints/pi0_aloha_pen_uncap"): _get_mtime(2025, 2, 17), + re.compile("openpi-assets/checkpoints/pi0_libero"): _get_mtime(2025, 2, 6), + re.compile("openpi-assets/checkpoints/"): _get_mtime(2025, 2, 3), +} + + +def _should_invalidate_cache(cache_dir: pathlib.Path, local_path: pathlib.Path) -> bool: + """Invalidate the cache if it is expired. 
Return True if the cache was invalidated.""" + + assert local_path.exists(), f"File not found at {local_path}" + + relative_path = str(local_path.relative_to(cache_dir)) + for pattern, expire_time in _INVALIDATE_CACHE_DIRS.items(): + if pattern.match(relative_path): + # Remove if not newer than the expiration timestamp. + return local_path.stat().st_mtime <= expire_time + + return False diff --git a/policy/openpi-InternData-A1/src/openpi/shared/download_test.py b/policy/openpi-InternData-A1/src/openpi/shared/download_test.py new file mode 100644 index 0000000..ecb92bb --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/shared/download_test.py @@ -0,0 +1,54 @@ +import pathlib + +import pytest + +import openpi.shared.download as download + + +@pytest.fixture(scope="session", autouse=True) +def set_openpi_data_home(tmp_path_factory): + temp_dir = tmp_path_factory.mktemp("openpi_data") + with pytest.MonkeyPatch().context() as mp: + mp.setenv("OPENPI_DATA_HOME", str(temp_dir)) + yield + + +def test_download_local(tmp_path: pathlib.Path): + local_path = tmp_path / "local" + local_path.touch() + + result = download.maybe_download(str(local_path)) + assert result == local_path + + with pytest.raises(FileNotFoundError): + download.maybe_download("bogus") + + +def test_download_gs_dir(): + remote_path = "gs://openpi-assets/testdata/random" + + local_path = download.maybe_download(remote_path) + assert local_path.exists() + + new_local_path = download.maybe_download(remote_path) + assert new_local_path == local_path + + +def test_download_gs(): + remote_path = "gs://openpi-assets/testdata/random/random_512kb.bin" + + local_path = download.maybe_download(remote_path) + assert local_path.exists() + + new_local_path = download.maybe_download(remote_path) + assert new_local_path == local_path + + +def test_download_fsspec(): + remote_path = "gs://big_vision/paligemma_tokenizer.model" + + local_path = download.maybe_download(remote_path, gs={"token": "anon"}) + assert 
local_path.exists() + + new_local_path = download.maybe_download(remote_path, gs={"token": "anon"}) + assert new_local_path == local_path diff --git a/policy/openpi-InternData-A1/src/openpi/shared/image_tools.py b/policy/openpi-InternData-A1/src/openpi/shared/image_tools.py new file mode 100644 index 0000000..8cde353 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/shared/image_tools.py @@ -0,0 +1,126 @@ +import functools + +import jax +import jax.numpy as jnp +import torch +import torch.nn.functional as F # noqa: N812 + +import openpi.shared.array_typing as at + + +@functools.partial(jax.jit, static_argnums=(1, 2, 3)) +@at.typecheck +def resize_with_pad( + images: at.UInt8[at.Array, "*b h w c"] | at.Float[at.Array, "*b h w c"], + height: int, + width: int, + method: jax.image.ResizeMethod = jax.image.ResizeMethod.LINEAR, +) -> at.UInt8[at.Array, "*b {height} {width} c"] | at.Float[at.Array, "*b {height} {width} c"]: + """Replicates tf.image.resize_with_pad. Resizes an image to a target height and width without distortion + by padding with black. If the image is float32, it must be in the range [-1, 1]. 
+ """ + has_batch_dim = images.ndim == 4 + if not has_batch_dim: + images = images[None] # type: ignore + cur_height, cur_width = images.shape[1:3] + ratio = max(cur_width / width, cur_height / height) + resized_height = int(cur_height / ratio) + resized_width = int(cur_width / ratio) + resized_images = jax.image.resize( + images, (images.shape[0], resized_height, resized_width, images.shape[3]), method=method + ) + if images.dtype == jnp.uint8: + # round from float back to uint8 + resized_images = jnp.round(resized_images).clip(0, 255).astype(jnp.uint8) + elif images.dtype == jnp.float32: + resized_images = resized_images.clip(-1.0, 1.0) + else: + raise ValueError(f"Unsupported image dtype: {images.dtype}") + + pad_h0, remainder_h = divmod(height - resized_height, 2) + pad_h1 = pad_h0 + remainder_h + pad_w0, remainder_w = divmod(width - resized_width, 2) + pad_w1 = pad_w0 + remainder_w + padded_images = jnp.pad( + resized_images, + ((0, 0), (pad_h0, pad_h1), (pad_w0, pad_w1), (0, 0)), + constant_values=0 if images.dtype == jnp.uint8 else -1.0, + ) + + if not has_batch_dim: + padded_images = padded_images[0] + return padded_images + + +def resize_with_pad_torch( + images: torch.Tensor, + height: int, + width: int, + mode: str = "bilinear", +) -> torch.Tensor: + """PyTorch version of resize_with_pad. Resizes an image to a target height and width without distortion + by padding with black. If the image is float32, it must be in the range [-1, 1]. + + Args: + images: Tensor of shape [*b, h, w, c] or [*b, c, h, w] + height: Target height + width: Target width + mode: Interpolation mode ('bilinear', 'nearest', etc.) 
+ + Returns: + Resized and padded tensor with same shape format as input + """ + # Check if input is in channels-last format [*b, h, w, c] or channels-first [*b, c, h, w] + if images.shape[-1] <= 4: # Assume channels-last format + channels_last = True + # Convert to channels-first for torch operations + if images.dim() == 3: + images = images.unsqueeze(0) # Add batch dimension + images = images.permute(0, 3, 1, 2) # [b, h, w, c] -> [b, c, h, w] + else: + channels_last = False + if images.dim() == 3: + images = images.unsqueeze(0) # Add batch dimension + + batch_size, channels, cur_height, cur_width = images.shape + + # Calculate resize ratio + ratio = max(cur_width / width, cur_height / height) + resized_height = int(cur_height / ratio) + resized_width = int(cur_width / ratio) + + # Resize + resized_images = F.interpolate( + images, size=(resized_height, resized_width), mode=mode, align_corners=False if mode == "bilinear" else None + ) + + # Handle dtype-specific clipping + if images.dtype == torch.uint8: + resized_images = torch.round(resized_images).clamp(0, 255).to(torch.uint8) + elif images.dtype == torch.float32: + resized_images = resized_images.clamp(-1.0, 1.0) + else: + raise ValueError(f"Unsupported image dtype: {images.dtype}") + + # Calculate padding + pad_h0, remainder_h = divmod(height - resized_height, 2) + pad_h1 = pad_h0 + remainder_h + pad_w0, remainder_w = divmod(width - resized_width, 2) + pad_w1 = pad_w0 + remainder_w + + # Pad + constant_value = 0 if images.dtype == torch.uint8 else -1.0 + padded_images = F.pad( + resized_images, + (pad_w0, pad_w1, pad_h0, pad_h1), # left, right, top, bottom + mode="constant", + value=constant_value, + ) + + # Convert back to original format if needed + if channels_last: + padded_images = padded_images.permute(0, 2, 3, 1) # [b, c, h, w] -> [b, h, w, c] + if batch_size == 1 and images.shape[0] == 1: + padded_images = padded_images.squeeze(0) # Remove batch dimension if it was added + + return padded_images diff 
--git a/policy/openpi-InternData-A1/src/openpi/shared/image_tools_test.py b/policy/openpi-InternData-A1/src/openpi/shared/image_tools_test.py new file mode 100644 index 0000000..c19bee2 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/shared/image_tools_test.py @@ -0,0 +1,37 @@ +import jax.numpy as jnp + +from openpi.shared import image_tools + + +def test_resize_with_pad_shapes(): + # Test case 1: Resize image with larger dimensions + images = jnp.zeros((2, 10, 10, 3), dtype=jnp.uint8) # Input images of shape (batch_size, height, width, channels) + height = 20 + width = 20 + resized_images = image_tools.resize_with_pad(images, height, width) + assert resized_images.shape == (2, height, width, 3) + assert jnp.all(resized_images == 0) + + # Test case 2: Resize image with smaller dimensions + images = jnp.zeros((3, 30, 30, 3), dtype=jnp.uint8) + height = 15 + width = 15 + resized_images = image_tools.resize_with_pad(images, height, width) + assert resized_images.shape == (3, height, width, 3) + assert jnp.all(resized_images == 0) + + # Test case 3: Resize image with the same dimensions + images = jnp.zeros((1, 50, 50, 3), dtype=jnp.uint8) + height = 50 + width = 50 + resized_images = image_tools.resize_with_pad(images, height, width) + assert resized_images.shape == (1, height, width, 3) + assert jnp.all(resized_images == 0) + + # Test case 3: Resize image with odd-numbered padding + images = jnp.zeros((1, 256, 320, 3), dtype=jnp.uint8) + height = 60 + width = 80 + resized_images = image_tools.resize_with_pad(images, height, width) + assert resized_images.shape == (1, height, width, 3) + assert jnp.all(resized_images == 0) diff --git a/policy/openpi-InternData-A1/src/openpi/shared/nnx_utils.py b/policy/openpi-InternData-A1/src/openpi/shared/nnx_utils.py new file mode 100644 index 0000000..29df222 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/shared/nnx_utils.py @@ -0,0 +1,69 @@ +from collections.abc import Callable +import dataclasses +import 
functools +import inspect +import re +from typing import Any, ParamSpec, TypeVar + +import flax.nnx as nnx +import jax + +P = ParamSpec("P") +R = TypeVar("R") + + +def module_jit(meth: Callable[P, R], *jit_args, **jit_kwargs) -> Callable[P, R]: + """A higher-order function to JIT-compile `nnx.Module` methods, freezing the module's state in the process. + + Why not `nnx.jit`? For some reason, naively applying `nnx.jit` to `nnx.Module` methods, bound or unbound, uses much + more memory than necessary. I'm guessing it has something to do with the fact that it must keep track of module + mutations. Also, `nnx.jit` has some inherent overhead compared to a standard `jax.jit`, since every call must + traverse the NNX module graph. See https://github.com/google/flax/discussions/4224 for details. + + `module_jit` is an alternative that avoids these issues by freezing the module's state. The function returned by + `module_jit` acts exactly like the original method, except that the state of the module is frozen to whatever it was + when `module_jit` was called. Mutations to the module within `meth` are still allowed, but they will be discarded + after the method call completes. + """ + if not (inspect.ismethod(meth) and isinstance(meth.__self__, nnx.Module)): + raise ValueError("module_jit must only be used on bound methods of nnx.Modules.") + + graphdef, state = nnx.split(meth.__self__) + + def fun(state: nnx.State, *args: P.args, **kwargs: P.kwargs) -> R: + module = nnx.merge(graphdef, state) + return meth.__func__(module, *args, **kwargs) + + jitted_fn = jax.jit(fun, *jit_args, **jit_kwargs) + + @functools.wraps(meth) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + return jitted_fn(state, *args, **kwargs) + + return wrapper + + +@dataclasses.dataclass(frozen=True) +class PathRegex: + """NNX Filter that matches paths using a regex. + + By default, paths are joined with a `/` separator. This can be overridden by setting the `sep` argument. 
+ """ + + pattern: str | re.Pattern + sep: str = "/" + + def __post_init__(self): + if not isinstance(self.pattern, re.Pattern): + object.__setattr__(self, "pattern", re.compile(self.pattern)) + + def __call__(self, path: nnx.filterlib.PathParts, x: Any) -> bool: + joined_path = self.sep.join(str(x) for x in path) + assert isinstance(self.pattern, re.Pattern) + return self.pattern.fullmatch(joined_path) is not None + + +def state_map(state: nnx.State, filter: nnx.filterlib.Filter, fn: Callable[[Any], Any]) -> nnx.State: + """Apply a function to the leaves of the state that match the filter.""" + filtered_keys = set(state.filter(filter).flat_state()) + return state.map(lambda k, v: fn(v) if k in filtered_keys else v) diff --git a/policy/openpi-InternData-A1/src/openpi/shared/normalize.py b/policy/openpi-InternData-A1/src/openpi/shared/normalize.py new file mode 100644 index 0000000..9d29b8c --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/shared/normalize.py @@ -0,0 +1,199 @@ +import json +import pathlib + +import numpy as np +import numpydantic +import pydantic + + +@pydantic.dataclasses.dataclass +class NormStats: + mean: numpydantic.NDArray + std: numpydantic.NDArray + q01: numpydantic.NDArray | None = None # 1st quantile + q99: numpydantic.NDArray | None = None # 99th quantile + + +class RunningStats: + """Compute running statistics of a batch of vectors.""" + + def __init__(self): + self._count = 0 + self._mean = None + self._mean_of_squares = None + self._min = None + self._max = None + self._histograms = None + self._bin_edges = None + self._num_quantile_bins = 5000 # for computing quantiles on the fly + + def update(self, batch: np.ndarray) -> None: + """ + Update the running statistics with a batch of vectors. + + Args: + vectors (np.ndarray): An array where all dimensions except the last are batch dimensions. 
+ """ + batch = batch.reshape(-1, batch.shape[-1]) + num_elements, vector_length = batch.shape + if self._count == 0: + self._mean = np.mean(batch, axis=0) + self._mean_of_squares = np.mean(batch**2, axis=0) + self._min = np.min(batch, axis=0) + self._max = np.max(batch, axis=0) + self._histograms = [np.zeros(self._num_quantile_bins) for _ in range(vector_length)] + self._bin_edges = [ + np.linspace(self._min[i] - 1e-10, self._max[i] + 1e-10, self._num_quantile_bins + 1) + for i in range(vector_length) + ] + else: + if vector_length != self._mean.size: + raise ValueError("The length of new vectors does not match the initialized vector length.") + new_max = np.max(batch, axis=0) + new_min = np.min(batch, axis=0) + max_changed = np.any(new_max > self._max) + min_changed = np.any(new_min < self._min) + self._max = np.maximum(self._max, new_max) + self._min = np.minimum(self._min, new_min) + + if max_changed or min_changed: + self._adjust_histograms() + + self._count += num_elements + + batch_mean = np.mean(batch, axis=0) + batch_mean_of_squares = np.mean(batch**2, axis=0) + + # Update running mean and mean of squares. + self._mean += (batch_mean - self._mean) * (num_elements / self._count) + self._mean_of_squares += (batch_mean_of_squares - self._mean_of_squares) * (num_elements / self._count) + + self._update_histograms(batch) + + def get_statistics(self) -> NormStats: + """ + Compute and return the statistics of the vectors processed so far. + + Returns: + dict: A dictionary containing the computed statistics. 
+ """ + if self._count < 2: + raise ValueError("Cannot compute statistics for less than 2 vectors.") + + variance = self._mean_of_squares - self._mean**2 + stddev = np.sqrt(np.maximum(0, variance)) + q01, q99 = self._compute_quantiles([0.01, 0.99]) + return NormStats(mean=self._mean, std=stddev, q01=q01, q99=q99) + + def _adjust_histograms(self): + """Adjust histograms when min or max changes.""" + for i in range(len(self._histograms)): + old_edges = self._bin_edges[i] + new_edges = np.linspace(self._min[i], self._max[i], self._num_quantile_bins + 1) + + # Redistribute the existing histogram counts to the new bins + new_hist, _ = np.histogram(old_edges[:-1], bins=new_edges, weights=self._histograms[i]) + + self._histograms[i] = new_hist + self._bin_edges[i] = new_edges + + def _update_histograms(self, batch: np.ndarray) -> None: + """Update histograms with new vectors.""" + for i in range(batch.shape[1]): + hist, _ = np.histogram(batch[:, i], bins=self._bin_edges[i]) + self._histograms[i] += hist + + def _compute_quantiles(self, quantiles): + """Compute quantiles based on histograms.""" + results = [] + for q in quantiles: + target_count = q * self._count + q_values = [] + for hist, edges in zip(self._histograms, self._bin_edges, strict=True): + cumsum = np.cumsum(hist) + idx = np.searchsorted(cumsum, target_count) + q_values.append(edges[idx]) + results.append(np.array(q_values)) + return results + +class OptimizedRunningStats: + def __init__(self, num_quantile_bins=1000): # 减少bin数量 + self._count = 0 + self._sum = None + self._sum_sq = None + self._min = None + self._max = None + self._all_samples = [] # 用于存储采样数据 + self._sample_rate = 0.01 # 1%采样率 + self._num_quantile_bins = num_quantile_bins + + def update(self, batch: np.ndarray) -> None: + batch = batch.reshape(-1, batch.shape[-1]) + num_elements = batch.shape[0] + + # 更新基本统计量(向量化) + if self._count == 0: + self._sum = np.sum(batch, axis=0, dtype=np.float64) + self._sum_sq = np.sum(batch**2, axis=0, 
dtype=np.float64) + self._min = np.min(batch, axis=0) + self._max = np.max(batch, axis=0) + else: + self._sum += np.sum(batch, axis=0, dtype=np.float64) + self._sum_sq += np.sum(batch**2, axis=0, dtype=np.float64) + self._min = np.minimum(self._min, np.min(batch, axis=0)) + self._max = np.maximum(self._max, np.max(batch, axis=0)) + + # 随机采样用于分位数计算(避免存储所有数据) + if np.random.random() < self._sample_rate: + sample_idx = np.random.randint(0, num_elements, size=min(100, num_elements)) + self._all_samples.append(batch[sample_idx]) + + self._count += num_elements + + def get_statistics(self): + if self._count < 2: + raise ValueError("Cannot compute statistics for less than 2 vectors.") + + # 计算均值和标准差 + mean = self._sum / self._count + variance = (self._sum_sq / self._count) - mean**2 + stddev = np.sqrt(np.maximum(0, variance)) + + # 基于采样数据计算分位数 + if self._all_samples: + all_sampled = np.concatenate(self._all_samples, axis=0) + q01 = np.quantile(all_sampled, 0.01, axis=0) + q99 = np.quantile(all_sampled, 0.99, axis=0) + else: + q01 = np.zeros_like(mean) + q99 = np.zeros_like(mean) + + return NormStats(mean=mean, std=stddev, q01=q01, q99=q99) + +class _NormStatsDict(pydantic.BaseModel): + norm_stats: dict[str, NormStats] + + +def serialize_json(norm_stats: dict[str, NormStats]) -> str: + """Serialize the running statistics to a JSON string.""" + return _NormStatsDict(norm_stats=norm_stats).model_dump_json(indent=2) + + +def deserialize_json(data: str) -> dict[str, NormStats]: + """Deserialize the running statistics from a JSON string.""" + return _NormStatsDict(**json.loads(data)).norm_stats + + +def save(directory: pathlib.Path | str, norm_stats: dict[str, NormStats]) -> None: + """Save the normalization stats to a directory.""" + path = pathlib.Path(directory) / "norm_stats.json" + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(serialize_json(norm_stats)) + + +def load(directory: pathlib.Path | str) -> dict[str, NormStats]: + """Load the normalization 
stats from a directory.""" + path = pathlib.Path(directory) / "norm_stats.json" + if not path.exists(): + raise FileNotFoundError(f"Norm stats file not found at: {path}") + return deserialize_json(path.read_text()) diff --git a/policy/openpi-InternData-A1/src/openpi/shared/normalize_test.py b/policy/openpi-InternData-A1/src/openpi/shared/normalize_test.py new file mode 100644 index 0000000..9c36bd9 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/shared/normalize_test.py @@ -0,0 +1,43 @@ +import numpy as np + +import openpi.shared.normalize as normalize + + +def test_normalize_update(): + arr = np.arange(12).reshape(4, 3) # 4 vectors of length 3 + + stats = normalize.RunningStats() + for i in range(len(arr)): + stats.update(arr[i : i + 1]) # Update with one vector at a time + results = stats.get_statistics() + + assert np.allclose(results.mean, np.mean(arr, axis=0)) + assert np.allclose(results.std, np.std(arr, axis=0)) + + +def test_serialize_deserialize(): + stats = normalize.RunningStats() + stats.update(np.arange(12).reshape(4, 3)) # 4 vectors of length 3 + + norm_stats = {"test": stats.get_statistics()} + norm_stats2 = normalize.deserialize_json(normalize.serialize_json(norm_stats)) + assert np.allclose(norm_stats["test"].mean, norm_stats2["test"].mean) + assert np.allclose(norm_stats["test"].std, norm_stats2["test"].std) + + +def test_multiple_batch_dimensions(): + # Test with multiple batch dimensions: (2, 3, 4) where 4 is vector dimension + batch_shape = (2, 3, 4) + arr = np.random.rand(*batch_shape) + + stats = normalize.RunningStats() + stats.update(arr) # Should handle (2, 3, 4) -> reshape to (6, 4) + results = stats.get_statistics() + + # Flatten batch dimensions and compute expected stats + flattened = arr.reshape(-1, arr.shape[-1]) # (6, 4) + expected_mean = np.mean(flattened, axis=0) + expected_std = np.std(flattened, axis=0) + + assert np.allclose(results.mean, expected_mean) + assert np.allclose(results.std, expected_std) diff --git 
a/policy/openpi-InternData-A1/src/openpi/shared/online_compute_norm_stats.py b/policy/openpi-InternData-A1/src/openpi/shared/online_compute_norm_stats.py new file mode 100644 index 0000000..f318985 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/shared/online_compute_norm_stats.py @@ -0,0 +1,96 @@ +"""Compute normalization statistics for a config. + +This script is used to compute the normalization statistics for a given config. It +will compute the mean and standard deviation of the data in the dataset and save it +to the config assets directory. +""" + +import numpy as np +import tqdm +import tyro + +import openpi.models.model as _model +import openpi.shared.normalize as normalize +import openpi.training.config as _config +import openpi.training.mixture_dataset as _mixture_dataset +import openpi.training.data_loader as _data_loader +import openpi.transforms as transforms +from pdb import set_trace +import openpi.training.weight_loaders as weight_loaders +import openpi.models.pi0_config as pi0_config +# from openpi.training.config import MultiSimGenieDataConfig, MultiSimSplitAlohaDataConfig, MultiSimFrankaDataConfig, MultiLeRobotReala2dDataConfig, MultiLeRobotRealArxLift2DataConfig, MultiDataConfig, DataConfig, TrainConfig +import logging +from pdb import set_trace +from typing import List +class RemoveStrings(transforms.DataTransformFn): + def __call__(self, x: dict) -> dict: + return {k: v for k, v in x.items() if not np.issubdtype(np.asarray(v).dtype, np.str_)} + + +def create_torch_dataloader( + data_config: List[_config.DataConfig], + action_horizon: int, + batch_size: int, + model_config: _model.BaseModelConfig, + num_workers: int, + max_frames: int | None = None, +) -> tuple[_data_loader.Dataset, int]: + # if data_config.repo_id is None: + # raise ValueError("Data config must have a repo_id") + # dataset = _data_loader.create_torch_dataset(data_config, action_horizon, model_config) + dataset = 
_mixture_dataset.create_mixture_dataset_no_transform(data_config, action_horizon, model_config) + # from pdb import set_trace; set_trace() + dataset = _data_loader.TransformedDataset( + dataset, + [ + *data_config[0][0].repack_transforms.inputs, + *data_config[0][0].data_transforms.inputs, + # Remove strings since they are not supported by JAX and are not needed to compute norm stats. + RemoveStrings(), + ], + ) + if max_frames is not None and max_frames < len(dataset): + num_batches = max_frames // batch_size + shuffle = True + else: + num_batches = len(dataset) // batch_size + shuffle = False + data_loader = _data_loader.TorchDataLoader( + dataset, + local_batch_size=batch_size, + num_workers=num_workers, + shuffle=shuffle, + num_batches=num_batches, + ) + return data_loader, num_batches + +def compute_norm_stats(config_name: str, max_frames: int | None = None): + config = _config.get_config(config_name) + data_configs_list = [] + for data_config_factory in config.data: + data_configs = data_config_factory.create(config.model) + logging.info(f"data_config: {data_configs}") + data_configs_list.append(data_configs) + print("done") + data_loader, num_batches = create_torch_dataloader( + data_configs_list, config.model.action_horizon, config.batch_size, config.model, config.num_workers, max_frames=None + ) + + + keys = ["state", "actions"] + stats = {key: normalize.RunningStats() for key in keys} + + # stats = {key: normalize.OptimizedRunningStats() for key in keys} # 新的 + # set_trace() + step_id = 0 + for batch in tqdm.tqdm(data_loader, total=num_batches, desc="Computing stats"): + step_id += 1 + for key in keys: + stats[key].update(np.asarray(batch[key])) + if step_id > 10000: + break + + norm_stats = {key: stats.get_statistics() for key, stats in stats.items()} + print(norm_stats) + return norm_stats + diff --git a/policy/openpi-InternData-A1/src/openpi/training/checkpoints.py b/policy/openpi-InternData-A1/src/openpi/training/checkpoints.py new file mode 100644 
index 0000000..f32a831 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/training/checkpoints.py @@ -0,0 +1,159 @@ +from __future__ import annotations + +import asyncio +import concurrent.futures as futures +import dataclasses +import logging +from typing import Protocol + +from etils import epath +import jax +import orbax.checkpoint as ocp +import orbax.checkpoint.future as future + +from openpi.shared import array_typing as at +import openpi.shared.normalize as _normalize +import openpi.training.data_loader as _data_loader +import openpi.training.utils as training_utils + + +def initialize_checkpoint_dir( + checkpoint_dir: epath.Path | str, *, keep_period: int | None, overwrite: bool, resume: bool +) -> tuple[ocp.CheckpointManager, bool]: + checkpoint_dir = epath.Path(checkpoint_dir).resolve() + resuming = False + if checkpoint_dir.exists(): + if overwrite: + checkpoint_dir.rmtree() + checkpoint_dir.mkdir(parents=True, exist_ok=True) + logging.info(f"Wiped checkpoint directory {checkpoint_dir}") + elif resume: + resuming = True + else: + raise FileExistsError( + f"Checkpoint directory {checkpoint_dir} already exists. Use --overwrite or --resume " + "to indicate how to handle it." + ) + + checkpoint_dir.mkdir(parents=True, exist_ok=True) + + mngr = ocp.CheckpointManager( + checkpoint_dir, + item_handlers={ + "assets": CallbackHandler(), + "train_state": ocp.PyTreeCheckpointHandler(), + "params": ocp.PyTreeCheckpointHandler(), + }, + options=ocp.CheckpointManagerOptions( + max_to_keep=1, + keep_period=keep_period, + create=False, + async_options=ocp.AsyncOptions(timeout_secs=7200), + ), + ) + + # Special case: the checkpoint directory exists and the user requests to resume training, but the training run did + # not get to the first checkpoint saved. In this case, we don't actually want the train script to try and restore a + # checkpoint, since it will fail. 
+ if resuming and tuple(mngr.all_steps()) in [(), (0,)]: + logging.info("Checkpoint directory exists, but does not contain any checkpoints. Aborting resume.") + resuming = False + + return mngr, resuming + + +def save_state( + checkpoint_manager: ocp.CheckpointManager, + state: training_utils.TrainState, + data_loader: _data_loader.DataLoader, + step: int, +): + def save_assets(directory: epath.Path): + # Save the normalization stats. + data_config = data_loader.data_config() + norm_stats = data_config.norm_stats + if norm_stats is not None and data_config.asset_id is not None: + _normalize.save(directory / data_config.asset_id, norm_stats) + + # Split params that can be used for inference into a separate item. + with at.disable_typechecking(): + train_state, params = _split_params(state) + items = { + "assets": save_assets, + "train_state": train_state, + "params": {"params": params}, + } + checkpoint_manager.save(step, items) + + +def restore_state( + checkpoint_manager: ocp.CheckpointManager, + state: training_utils.TrainState, + data_loader: _data_loader.DataLoader, + step: int | None = None, +) -> training_utils.TrainState: + del data_loader + + with at.disable_typechecking(): + # Split params that can be used for inference into a separate item. + train_state, params = _split_params(state) + restored = checkpoint_manager.restore( + step, + items={ + "train_state": train_state, + "params": {"params": params}, + }, + ) + return _merge_params(restored["train_state"], restored["params"]) + + +def load_norm_stats(assets_dir: epath.Path | str, asset_id: str) -> dict[str, _normalize.NormStats] | None: + norm_stats_dir = epath.Path(assets_dir) / asset_id + norm_stats = _normalize.load(norm_stats_dir) + logging.info(f"Loaded norm stats from {norm_stats_dir}") + return norm_stats + + +class Callback(Protocol): + def __call__(self, directory: epath.Path) -> None: ... 
+ + +class CallbackHandler(ocp.AsyncCheckpointHandler): + """A CheckpointHandler for calling an arbitrary function asynchronously. Only for saving, not for restoring.""" + + def save(self, directory: epath.Path, args: CallbackSave): + if jax.process_index() == 0: + args.callback(directory) + + async def async_save(self, directory: epath.Path, args: CallbackSave) -> list[futures.Future]: + return [future.CommitFutureAwaitingContractedSignals(asyncio.to_thread(self.save, directory, args))] + + def restore(self, *args, **kwargs): + raise NotImplementedError("CallbackHandler does not support restore") + + +@ocp.args.register_with_handler(CallbackHandler, for_save=True) +@dataclasses.dataclass +class CallbackSave(ocp.args.CheckpointArgs): + callback: Callback + + +@ocp.args.register_with_handler(CallbackHandler, for_restore=True) +class CallbackRestore(ocp.args.CheckpointArgs): ... + + +def _split_params(state: training_utils.TrainState) -> tuple[training_utils.TrainState, at.Params]: + if state.ema_params is not None: + params = state.ema_params + train_state = dataclasses.replace(state, ema_params=None) + else: + params = state.params + train_state = dataclasses.replace(state, params={}) + return train_state, params + + +def _merge_params(train_state: training_utils.TrainState, params: dict[str, at.Params]) -> training_utils.TrainState: + # Revert the logic inside `_split_params`. Assumes that existence of `params` means that EMA params were used during the split. 
+ if train_state.params: + return dataclasses.replace(train_state, ema_params=params["params"]) + return dataclasses.replace(train_state, params=params["params"]) diff --git a/policy/openpi-InternData-A1/src/openpi/training/config.py b/policy/openpi-InternData-A1/src/openpi/training/config.py new file mode 100644 index 0000000..95ef6bf --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/training/config.py @@ -0,0 +1,1904 @@ +"""See _CONFIGS for the list of available configs.""" + +import abc +from collections.abc import Sequence +import dataclasses +import difflib +import logging +import os +import pathlib +from typing import Any, Literal, Protocol, TypeAlias, Dict + +import etils.epath as epath +import flax.nnx as nnx +from typing_extensions import override +import tyro + +import openpi.models.model as _model +import openpi.models.pi0_config as pi0_config +import openpi.models.pi0_fast as pi0_fast +import openpi.models.tokenizer as _tokenizer +import openpi.policies.aloha_policy as aloha_policy +import openpi.policies.droid_policy as droid_policy +import openpi.policies.libero_policy as libero_policy +import openpi.policies.real_lift2_policy as real_lift2_policy +import openpi.policies.real_a2d_policy as real_a2d_policy +import openpi.policies.sim_split_aloha_policy as sim_split_aloha_policy +import openpi.policies.sim2real_split_aloha_policy as sim2real_split_aloha_policy +import openpi.policies.sim_franka_policy as sim_franka_policy +import openpi.shared.download as _download +import openpi.shared.normalize as _normalize +import openpi.training.droid_rlds_dataset as droid_rlds_dataset +import openpi.training.misc.roboarena_config as roboarena_config +import openpi.training.optimizer as _optimizer +import openpi.training.weight_loaders as weight_loaders +import openpi.transforms as _transforms +import openpi.shared.normalize as normalize +from natsort import natsorted +from glob import glob +ModelType: TypeAlias = _model.ModelType +# Work around a tyro 
issue with using nnx.filterlib.Filter directly. +Filter: TypeAlias = nnx.filterlib.Filter + +from pdb import set_trace + +@dataclasses.dataclass(frozen=True) +class AssetsConfig: + """Determines the location of assets (e.g., norm stats) that will be used to set up the data pipeline. + + These assets will be replicated inside the checkpoint under the `assets/asset_id` directory. + + This can be used to load assets from a different checkpoint (e.g., base model checkpoint) or some other + centralized location. For example, to load the norm stats for the Trossen robot from the base model checkpoint + during fine-tuning, use: + + ``` + AssetsConfig( + assets_dir="gs://openpi-assets/checkpoints/pi0_base/assets", + asset_id="trossen", + ) + ``` + """ + + # Assets directory. If not provided, the config assets_dirs will be used. This is useful to load assets from + # a different checkpoint (e.g., base model checkpoint) or some other centralized location. + assets_dir: str | None = None + + # Asset id. If not provided, the repo id will be used. This allows users to reference assets that describe + # different robot platforms. + asset_id: str | None = None + + +@dataclasses.dataclass(frozen=True) +class DataConfig: + # LeRobot repo id. If None, fake data will be created. + repo_id: str | None = None + # Directory within the assets directory containing the data assets. + asset_id: str | None = None + # Contains precomputed normalization stats. If None, normalization will not be performed. + norm_stats: dict[str, _transforms.NormStats] | None = None + + # Used to adopt the inputs from a dataset specific format to a common format + # which is expected by the data transforms. + repack_transforms: _transforms.Group = dataclasses.field(default_factory=_transforms.Group) + # Data transforms, typically include robot specific transformations. Will be applied + # before the data is normalized. See `model.Observation` and `model.Actions` to learn about the + # normalized data. 
+ data_transforms: _transforms.Group = dataclasses.field(default_factory=_transforms.Group) + # Model specific transforms. Will be applied after the data is normalized. + model_transforms: _transforms.Group = dataclasses.field(default_factory=_transforms.Group) + # If true, will use quantile normalization. Otherwise, normal z-score normalization will be used. + use_quantile_norm: bool = False + + # Names of keys that will be used by the data loader to generate the action sequence. The length of the + # sequence is defined by the `action_horizon` field in the model config. This should be adjusted if your + # LeRobot dataset is using different keys to represent the action. + action_sequence_keys: Sequence[str] = ("actions",) + + # If true, will use the LeRobot dataset task to define the prompt. + prompt_from_task: bool = False + + # Only used for RLDS data loader (ie currently only used for DROID). + rlds_data_dir: str | None = None + # Action space for DROID dataset. + action_space: droid_rlds_dataset.DroidActionSpace | None = None + # Path to the data filter file for DROID dataset + filter_dict_path: str | None = None + + +@dataclasses.dataclass(frozen=True) +class MultiDataConfig: + # LeRobot repo id. If None, fake data will be created. + repo_dir: str | None = None + # Directory within the assets directory containing the data assets. + task_id: str | None = None + subtask_id: str | None = None + # Contains precomputed normalization stats. If None, normalization will not be performed. + norm_stats: dict[str, _transforms.NormStats] | None = None + + # Used to adopt the inputs from a dataset specific format to a common format + # which is expected by the data transforms. + repack_transforms: _transforms.Group = dataclasses.field(default_factory=_transforms.Group) + # Data transforms, typically include robot specific transformations. Will be applied + # before the data is normalized. See `model.Observation` and `model.Actions` to learn about the + # normalized data. 
+    data_transforms: _transforms.Group = dataclasses.field(default_factory=_transforms.Group)
+    # Model specific transforms. Will be applied after the data is normalized.
+    model_transforms: _transforms.Group = dataclasses.field(default_factory=_transforms.Group)
+    # If true, will use quantile normalization. Otherwise, normal z-score normalization will be used.
+    use_quantile_norm: bool = False
+
+    # Names of keys that will be used by the data loader to generate the action sequence. The length of the
+    # sequence is defined by the `action_horizon` field in the model config. This should be adjusted if your
+    # LeRobot dataset is using different keys to represent the action.
+    action_sequence_keys: Sequence[str] = ("actions",)
+
+    # If true, will use the LeRobot dataset task to define the prompt.
+    prompt_from_task: bool = False
+
+    # Only used for RLDS data loader (ie currently only used for DROID).
+    rlds_data_dir: str | None = None
+    # Action space for DROID dataset.
+    action_space: droid_rlds_dataset.DroidActionSpace | None = None
+    # Path to the data filter file for DROID dataset.
+    filter_dict_path: str | None = None
+    # Relative sampling weight of this dataset when mixing multiple datasets.
+    weight: float = 1.0
+    use_gripper_aug: bool = False
+    gripper_aug_config: dict[str, Any] | None = None
+    stats_dir: str = tyro.MISSING
+    data_ratio: float = 1.0
+    # asset_id for saving norm stats
+    asset_id: str = tyro.MISSING
+    downsample_ratio: float = 1.0
+
+
+class GroupFactory(Protocol):
+    def __call__(self, model_config: _model.BaseModelConfig) -> _transforms.Group:
+        """Create a group."""
+
+
+@dataclasses.dataclass(frozen=True)
+class ModelTransformFactory(GroupFactory):
+    """Creates model transforms for standard pi0 models."""
+
+    # If provided, will determine the default prompt that will be used by the model.
+ default_prompt: str | None = None + + def __call__(self, model_config: _model.BaseModelConfig) -> _transforms.Group: + match model_config.model_type: + case _model.ModelType.PI0: + return _transforms.Group( + inputs=[ + _transforms.InjectDefaultPrompt(self.default_prompt), + _transforms.ResizeImages(224, 224), + _transforms.TokenizePrompt( + _tokenizer.PaligemmaTokenizer(model_config.max_token_len), + ), + _transforms.PadStatesAndActions(model_config.action_dim), + ], + ) + case _model.ModelType.PI05: + assert isinstance(model_config, pi0_config.Pi0Config) + return _transforms.Group( + inputs=[ + _transforms.InjectDefaultPrompt(self.default_prompt), + _transforms.ResizeImages(224, 224), + _transforms.TokenizePrompt( + _tokenizer.PaligemmaTokenizer(model_config.max_token_len), + discrete_state_input=model_config.discrete_state_input, + ), + _transforms.PadStatesAndActions(model_config.action_dim), + ], + ) + case _model.ModelType.PI0_FAST: + tokenizer_cls = ( + _tokenizer.FASTTokenizer + if model_config.fast_model_tokenizer is None + else model_config.fast_model_tokenizer + ) + tokenizer_kwargs = ( + {} if model_config.fast_model_tokenizer_kwargs is None else model_config.fast_model_tokenizer_kwargs + ) + return _transforms.Group( + inputs=[ + _transforms.InjectDefaultPrompt(self.default_prompt), + _transforms.ResizeImages(224, 224), + _transforms.TokenizeFASTInputs( + tokenizer_cls(model_config.max_token_len, **tokenizer_kwargs), + ), + ], + outputs=[ + _transforms.ExtractFASTActions( + tokenizer_cls(model_config.max_token_len, **tokenizer_kwargs), + action_horizon=model_config.action_horizon, + action_dim=model_config.action_dim, + ) + ], + ) + + +@dataclasses.dataclass(frozen=True) +class DataConfigFactory(abc.ABC): + # The LeRobot repo id. + repo_id: str = tyro.MISSING + # Determines how the assets will be loaded. + assets: AssetsConfig = dataclasses.field(default_factory=AssetsConfig) + # Base config that will be updated by the factory. 
+    base_config: tyro.conf.Suppress[DataConfig | None] = None
+
+    @abc.abstractmethod
+    def create(self, assets_dirs: pathlib.Path, model_config: _model.BaseModelConfig) -> DataConfig:
+        """Create a data config."""
+
+    def create_base_config(self, assets_dirs: pathlib.Path, model_config: _model.BaseModelConfig) -> DataConfig:
+        repo_id = self.repo_id if self.repo_id is not tyro.MISSING else None
+        asset_id = self.assets.asset_id or repo_id
+        return dataclasses.replace(
+            self.base_config or DataConfig(),
+            repo_id=repo_id,
+            asset_id=asset_id,
+            norm_stats=self._load_norm_stats(epath.Path(self.assets.assets_dir or assets_dirs), asset_id),
+            use_quantile_norm=model_config.model_type != ModelType.PI0,
+        )
+
+    def _load_norm_stats(self, assets_dir: epath.Path, asset_id: str | None) -> dict[str, _transforms.NormStats] | None:
+        if asset_id is None:
+            return None
+        try:
+            data_assets_dir = str(assets_dir / asset_id)
+            norm_stats = _normalize.load(_download.maybe_download(data_assets_dir))
+            logging.info(f"Loaded norm stats from {data_assets_dir}")
+            return norm_stats
+        except FileNotFoundError:
+            # Fail loudly: missing norm stats would silently degrade training if skipped.
+            raise FileNotFoundError(f"Norm stats not found in {data_assets_dir}; check the assets directory.")
+        return None
+
+@dataclasses.dataclass(frozen=True)
+class MultiDataConfigFactory(abc.ABC):
+    # The LeRobot repo id.
+    repo_dir: str = tyro.MISSING
+    # Optional task id filter applied to globbed dataset paths (note: iterated in create(), so a sequence seems expected despite the str annotation — TODO confirm).
+    task_id: str | None = None
+    # Asset id used when saving norm stats.
+    # Base config that will be updated by the factory.
+ asset_id: str = tyro.MISSING + base_config: tyro.conf.Suppress[MultiDataConfig | None] = None + weight: float = 1.0 + use_gripper_aug: bool = False + gripper_aug_config: dict[str, Any] | None = None + stats_dir: str = tyro.MISSING + data_ratio: float = 1.0 + fixed_stats_dir: str | None = None + downsample_ratio: float = 1.0 + robot_name: str = tyro.MISSING + @abc.abstractmethod + def create(self, assets_dirs: pathlib.Path, model_config: _model.BaseModelConfig) -> MultiDataConfig: + """Create a data config.""" + + def create_base_config(self, repo_dir, task_id, subtask_id, stats_dir, model_config: _model.BaseModelConfig) -> MultiDataConfig: + robot_name = repo_dir.split('/')[-1] + assert robot_name == self.robot_name, f"robot_name mismatch: {robot_name} != {self.robot_name}" + task_category = repo_dir.split('/')[-2] + return dataclasses.replace( + self.base_config or MultiDataConfig(), + repo_dir=repo_dir, + task_id=task_id, + subtask_id=subtask_id, + norm_stats=self._load_norm_stats(epath.Path(stats_dir), task_category, robot_name, task_id, subtask_id), + use_quantile_norm=model_config.model_type != ModelType.PI0, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + data_ratio=self.data_ratio, + asset_id=self.asset_id, + downsample_ratio=self.downsample_ratio, + ) + + def _load_norm_stats(self, stats_dir: epath.Path, task_category, robot_name, task_id, subtask_id) -> dict[str, _transforms.NormStats] | None: + try: + if self.fixed_stats_dir is not None: + data_assets_dir = self.fixed_stats_dir + logging.info("Loaded from fixed stats dir") + else: + data_assets_dir = str(stats_dir / task_category / robot_name / task_id / subtask_id) + norm_stats = _normalize.load(_download.maybe_download(data_assets_dir)) + logging.info(f"Loaded norm stats from {data_assets_dir}") + return norm_stats + except FileNotFoundError: + logging.info(f"Norm stats not found in {data_assets_dir}, check 
stats_dir config.") + # raise FileNotFoundError(f"Norm stats not found in {data_assets_dir}, check stats_dir config.") + +@dataclasses.dataclass(frozen=True) +class MultiLeRobotRealArxLift2DataConfig(MultiDataConfigFactory): + # If true, will convert joint dimensions to deltas with respect to the current state before passing to the model. + # Gripper dimensions will remain in absolute values. + use_delta_joint_actions: bool = True + default_prompt: str | None = None + # If provided, will be injected into the input data if the "prompt" key is not present. + # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. People who + # use standard Aloha data should set this to true. + adapt_to_pi: bool = False + # Repack transforms. + repack_transforms: tyro.conf.Suppress[_transforms.Group] = dataclasses.field( + default=_transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "images": + { + "cam_high": "images.rgb.head", + "cam_left_wrist": "images.rgb.hand_left", + "cam_right_wrist": "images.rgb.hand_right" + }, + "state_dict": { + "left_joint": "states.left_joint.position", + "right_joint": "states.right_joint.position", + "left_gripper": "states.left_gripper.position", + "right_gripper": "states.right_gripper.position" + }, + "action_dict": { + "left_joint": "actions.left_joint.position", + "right_joint": "actions.right_joint.position", + "left_gripper": "actions.left_gripper.position", + "right_gripper": "actions.right_gripper.position" + }, + "prompt": "task" + } + ) + ] + ) + ) + # Action keys that will be used to read the action sequence from the dataset. 
+ action_sequence_keys: Sequence[str] = ("actions.left_joint.position","actions.right_joint.position","actions.left_gripper.position","actions.right_gripper.position") + + @override + def create(self, model_config: _model.BaseModelConfig, global_norm_stats: Dict[str, normalize.NormStats] = None) -> list[MultiDataConfig]: + data_configs = [] + data_transforms = _transforms.Group( + inputs=[real_lift2_policy.RealLift2Inputs(adapt_to_pi=self.adapt_to_pi)], + outputs=[real_lift2_policy.RealLift2Outputs(adapt_to_pi=self.adapt_to_pi)], + ) + if self.use_delta_joint_actions: + delta_action_mask = _transforms.make_bool_mask(6, -1, 6, -1) + data_transforms = data_transforms.push( + inputs=[_transforms.DeltaActions(delta_action_mask)], + outputs=[_transforms.AbsoluteActions(delta_action_mask)], + ) + + model_transforms = ModelTransformFactory(default_prompt=self.default_prompt)(model_config) + if isinstance(self.repo_dir, str) and "*" not in self.repo_dir: + local_paths = [self.repo_dir] + elif isinstance(self.repo_dir, str) and "*" in self.repo_dir: + all_local_paths = natsorted(glob(self.repo_dir)) + if self.task_id is not None: + local_paths = [ + p for p in all_local_paths + if any(str(task_id) in p for task_id in self.task_id) + ] + else: + local_paths = all_local_paths + + for local_path in local_paths: + if not check_lerobot_repo(local_path): + continue + robot_names = self.robot_name + parts = local_path.split("/") + + robot_idx = next((i for i, p in enumerate(parts) if p == robot_names), None) + if robot_idx is None: + raise ValueError( + f"Cannot find robot name in path. 
Expected {robot_names}, " + f"but got path: {local_path}" + ) + + repo_dir = "/".join(parts[:robot_idx + 1]) + + if robot_idx + 1 >= len(parts): + raise ValueError( + f"Path ends at robot name '{parts[robot_idx]}', cannot determine task_name: {local_path}" + ) + + task_name = parts[robot_idx + 1] + subtask_name = parts[robot_idx + 2] if robot_idx + 2 < len(parts) else "" + if global_norm_stats is None: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + )) + else: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + norm_stats=global_norm_stats, + )) + + + return data_configs + + +@dataclasses.dataclass(frozen=True) +class MultiLeRobotReala2dDataConfig(MultiDataConfigFactory): + # If true, will convert joint dimensions to deltas with respect to the current state before passing to the model. + # Gripper dimensions will remain in absolute values. + use_delta_joint_actions: bool = True + default_prompt: str | None = None + # If provided, will be injected into the input data if the "prompt" key is not present. + # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. 
People who + # use standard Aloha data should set this to true. + adapt_to_pi: bool = False + weight: float = 1.0 + use_gripper_aug: bool = False + gripper_aug_config: dict[str, Any] | None = None + stats_dir: str = tyro.MISSING + # Repack transforms. + repack_transforms: tyro.conf.Suppress[_transforms.Group] = dataclasses.field( + default=_transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "images": + { + "cam_high": "observation.images.head", + "cam_left_wrist": "observation.images.hand_left", + "cam_right_wrist": "observation.images.hand_right" + }, + "state_dict": { + "joint": "observation.states.joint.position", + "gripper": "observation.states.effector.position", + }, + "action_dict": { + "joint": "actions.joint.position", + "gripper": "actions.effector.position", + }, + "prompt": "task" + } + ) + ] + ) + ) + # Action keys that will be used to read the action sequence from the dataset. + action_sequence_keys: Sequence[str] = ("actions.joint.position","actions.effector.position") + + @override + def create(self, model_config: _model.BaseModelConfig, global_norm_stats: Dict[str, normalize.NormStats] = None) -> list[MultiDataConfig]: + data_configs = [] + data_transforms = _transforms.Group( + inputs=[real_a2d_policy.Reala2dInputs(adapt_to_pi=self.adapt_to_pi)], + outputs=[real_a2d_policy.Reala2dOutputs(adapt_to_pi=self.adapt_to_pi)], + ) + if self.use_delta_joint_actions: + delta_action_mask = _transforms.make_bool_mask(7, -1, 7, -1) + data_transforms = data_transforms.push( + inputs=[_transforms.DeltaActions(delta_action_mask)], + outputs=[_transforms.AbsoluteActions(delta_action_mask)], + ) + + model_transforms = ModelTransformFactory(default_prompt=self.default_prompt)(model_config) + if isinstance(self.repo_dir, str) and "*" not in self.repo_dir: + local_paths = [self.repo_dir] + elif isinstance(self.repo_dir, str) and "*" in self.repo_dir: + all_local_paths = natsorted(glob(self.repo_dir)) + if self.task_id is not None: + local_paths = [ + p 
for p in all_local_paths + if any(str(task_id) in p for task_id in self.task_id) + ] + else: + local_paths = all_local_paths + + for local_path in local_paths: + if not check_lerobot_repo(local_path): + continue + robot_names = self.robot_name + parts = local_path.split("/") + + robot_idx = next((i for i, p in enumerate(parts) if p == robot_names), None) + if robot_idx is None: + raise ValueError( + f"Cannot find robot name in path. Expected {robot_names}, " + f"but got path: {local_path}" + ) + + repo_dir = "/".join(parts[:robot_idx + 1]) + + if robot_idx + 1 >= len(parts): + raise ValueError( + f"Path ends at robot name '{parts[robot_idx]}', cannot determine task_name: {local_path}" + ) + + task_name = parts[robot_idx + 1] + subtask_name = parts[robot_idx + 2] if robot_idx + 2 < len(parts) else "" + if global_norm_stats is None: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + )) + else: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + norm_stats=global_norm_stats, + )) + + + return data_configs + +@dataclasses.dataclass(frozen=True) +class MultiSimGenieDataConfig(MultiDataConfigFactory): + # If true, will convert joint dimensions to deltas with respect to the current state before passing to the 
model. + # Gripper dimensions will remain in absolute values. + use_delta_joint_actions: bool = True + default_prompt: str | None = None + # If provided, will be injected into the input data if the "prompt" key is not present. + # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. People who + # use standard Aloha data should set this to true. + adapt_to_pi: bool = False + # Repack transforms. + repack_transforms: tyro.conf.Suppress[_transforms.Group] = dataclasses.field( + default=_transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "images": + { + "cam_high": "images.rgb.head", + "cam_left_wrist": "images.rgb.hand_left", + "cam_right_wrist": "images.rgb.hand_right" + }, + "state_dict": { + "left_joint": "states.left_joint.position", + "right_joint": "states.right_joint.position", + "left_gripper": "states.left_gripper.position", + "right_gripper": "states.right_gripper.position" + }, + "action_dict": { + "left_joint": "actions.left_joint.position", + "right_joint": "actions.right_joint.position", + "left_gripper": "actions.left_gripper.position", + "right_gripper": "actions.right_gripper.position", + "left_gripper_openness": "master_actions.left_gripper.openness", + "right_gripper_openness": "master_actions.right_gripper.openness" + }, + "prompt": "task" + } + ) + ] + ) + ) + # Action keys that will be used to read the action sequence from the dataset. 
+ action_sequence_keys: Sequence[str] = ("actions.left_joint.position","actions.right_joint.position","actions.left_gripper.position","actions.right_gripper.position","master_actions.left_gripper.openness","master_actions.right_gripper.openness") + + @override + def create(self, model_config: _model.BaseModelConfig,global_norm_stats: Dict[str, normalize.NormStats] = None) -> list[MultiDataConfig]: + data_configs = [] + data_transforms = _transforms.Group( + inputs=[sim_split_aloha_policy.SimSplitAlohaInputs(adapt_to_pi=self.adapt_to_pi)], + outputs=[sim_split_aloha_policy.SimSplitAlohaOutputs(adapt_to_pi=self.adapt_to_pi)], + ) + if self.use_delta_joint_actions: + delta_action_mask = _transforms.make_bool_mask(7, -1, 7, -1) + data_transforms = data_transforms.push( + inputs=[_transforms.DeltaActions(delta_action_mask)], + outputs=[_transforms.AbsoluteActions(delta_action_mask)], + ) + + model_transforms = ModelTransformFactory(default_prompt=self.default_prompt)(model_config) + if isinstance(self.repo_dir, str) and "*" not in self.repo_dir: + local_paths = [self.repo_dir] + elif isinstance(self.repo_dir, str) and "*" in self.repo_dir: + all_local_paths = natsorted(glob(self.repo_dir)) + if self.task_id is not None: + local_paths = [ + p for p in all_local_paths + if any(str(task_id) in p for task_id in self.task_id) + ] + else: + local_paths = all_local_paths + for local_path in local_paths: + if not check_lerobot_repo(local_path): + continue + robot_names = self.robot_name + parts = local_path.split("/") + + robot_idx = next((i for i, p in enumerate(parts) if p == robot_names), None) + if robot_idx is None: + raise ValueError( + f"Cannot find robot name in path. 
Expected {robot_names}, " + f"but got path: {local_path}" + ) + + repo_dir = "/".join(parts[:robot_idx + 1]) + + if robot_idx + 1 >= len(parts): + raise ValueError( + f"Path ends at robot name '{parts[robot_idx]}', cannot determine task_name: {local_path}" + ) + + task_name = parts[robot_idx + 1] + subtask_name = parts[robot_idx + 2] if robot_idx + 2 < len(parts) else "" + if global_norm_stats is None: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + )) + else: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + norm_stats=global_norm_stats, + )) + + return data_configs + +@dataclasses.dataclass(frozen=True) +class MultiSim2RealGenieDataConfig(MultiDataConfigFactory): + # If true, will convert joint dimensions to deltas with respect to the current state before passing to the model. + # Gripper dimensions will remain in absolute values. + use_delta_joint_actions: bool = True + default_prompt: str | None = None + # If provided, will be injected into the input data if the "prompt" key is not present. + # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. 
People who + # use standard Aloha data should set this to true. + adapt_to_pi: bool = False + # Repack transforms. + repack_transforms: tyro.conf.Suppress[_transforms.Group] = dataclasses.field( + default=_transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "images": + { + "cam_high": "images.rgb.head", + "cam_left_wrist": "images.rgb.hand_left", + "cam_right_wrist": "images.rgb.hand_right" + }, + "state_dict": { + "left_joint": "states.left_joint.position", + "right_joint": "states.right_joint.position", + "left_gripper": "states.left_gripper.position", + "right_gripper": "states.right_gripper.position" + }, + "action_dict": { + "left_joint": "actions.left_joint.position", + "right_joint": "actions.right_joint.position", + "left_gripper": "actions.left_gripper.position", + "right_gripper": "actions.right_gripper.position", + "left_gripper_openness": "master_actions.left_gripper.openness", + "right_gripper_openness": "master_actions.right_gripper.openness" + }, + "prompt": "task" + } + ) + ] + ) + ) + # Action keys that will be used to read the action sequence from the dataset. 
+ action_sequence_keys: Sequence[str] = ("actions.left_joint.position","actions.right_joint.position","actions.left_gripper.position","actions.right_gripper.position","master_actions.left_gripper.openness","master_actions.right_gripper.openness") + + @override + def create(self, model_config: _model.BaseModelConfig,global_norm_stats: Dict[str, normalize.NormStats] = None) -> list[MultiDataConfig]: + data_configs = [] + data_transforms = _transforms.Group( + inputs=[sim2real_split_aloha_policy.Sim2RealSplitAlohaInputs(adapt_to_pi=self.adapt_to_pi)], + outputs=[sim2real_split_aloha_policy.Sim2RealSplitAlohaOutputs(adapt_to_pi=self.adapt_to_pi)], + ) + if self.use_delta_joint_actions: + delta_action_mask = _transforms.make_bool_mask(7, -1, 7, -1) + data_transforms = data_transforms.push( + inputs=[_transforms.DeltaActions(delta_action_mask)], + outputs=[_transforms.AbsoluteActions(delta_action_mask)], + ) + + model_transforms = ModelTransformFactory(default_prompt=self.default_prompt)(model_config) + if isinstance(self.repo_dir, str) and "*" not in self.repo_dir: + local_paths = [self.repo_dir] + elif isinstance(self.repo_dir, str) and "*" in self.repo_dir: + all_local_paths = natsorted(glob(self.repo_dir)) + if self.task_id is not None: + local_paths = [ + p for p in all_local_paths + if any(str(task_id) in p for task_id in self.task_id) + ] + else: + local_paths = all_local_paths + + for local_path in local_paths: + if not check_lerobot_repo(local_path): + continue + robot_names = self.robot_name + parts = local_path.split("/") + + robot_idx = next((i for i, p in enumerate(parts) if p == robot_names), None) + if robot_idx is None: + raise ValueError( + f"Cannot find robot name in path. 
Expected {robot_names}, " + f"but got path: {local_path}" + ) + + repo_dir = "/".join(parts[:robot_idx + 1]) + + if robot_idx + 1 >= len(parts): + raise ValueError( + f"Path ends at robot name '{parts[robot_idx]}', cannot determine task_name: {local_path}" + ) + + task_name = parts[robot_idx + 1] + subtask_name = parts[robot_idx + 2] if robot_idx + 2 < len(parts) else "" + if global_norm_stats is None: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + )) + else: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + norm_stats=global_norm_stats, + )) + + return data_configs + + +@dataclasses.dataclass(frozen=True) +class MultiSimSplitAlohaDataConfig(MultiDataConfigFactory): + # If true, will convert joint dimensions to deltas with respect to the current state before passing to the model. + # Gripper dimensions will remain in absolute values. + use_delta_joint_actions: bool = True + default_prompt: str | None = None + # If provided, will be injected into the input data if the "prompt" key is not present. + # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. 
People who + # use standard Aloha data should set this to true. + adapt_to_pi: bool = False + + # Repack transforms. + repack_transforms: tyro.conf.Suppress[_transforms.Group] = dataclasses.field( + default=_transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "images": + { + "cam_high": "images.rgb.head", + "cam_left_wrist": "images.rgb.hand_left", + "cam_right_wrist": "images.rgb.hand_right" + }, + "state_dict": { + "left_joint": "states.left_joint.position", + "right_joint": "states.right_joint.position", + "left_gripper": "states.left_gripper.position", + "right_gripper": "states.right_gripper.position" + }, + "action_dict": { + "left_joint": "actions.left_joint.position", + "right_joint": "actions.right_joint.position", + "left_gripper": "actions.left_gripper.position", + "right_gripper": "actions.right_gripper.position", + "left_gripper_openness": "master_actions.left_gripper.openness", + "right_gripper_openness": "master_actions.right_gripper.openness" + }, + "prompt": "task" + } + ) + ] + ) + ) + # Action keys that will be used to read the action sequence from the dataset. 
+ action_sequence_keys: Sequence[str] = ("actions.left_joint.position","actions.right_joint.position","actions.left_gripper.position","actions.right_gripper.position","master_actions.left_gripper.openness","master_actions.right_gripper.openness") + + @override + def create(self, model_config: _model.BaseModelConfig,global_norm_stats: Dict[str, normalize.NormStats] = None) -> list[MultiDataConfig]: + data_configs = [] + data_transforms = _transforms.Group( + inputs=[sim_split_aloha_policy.SimSplitAlohaInputs(adapt_to_pi=self.adapt_to_pi)], + outputs=[sim_split_aloha_policy.SimSplitAlohaOutputs(adapt_to_pi=self.adapt_to_pi)], + ) + if self.use_delta_joint_actions: + delta_action_mask = _transforms.make_bool_mask(6, -1, 6, -1) + data_transforms = data_transforms.push( + inputs=[_transforms.DeltaActions(delta_action_mask)], + outputs=[_transforms.AbsoluteActions(delta_action_mask)], + ) + + model_transforms = ModelTransformFactory(default_prompt=self.default_prompt)(model_config) + if isinstance(self.repo_dir, str) and "*" not in self.repo_dir: + local_paths = [self.repo_dir] + elif isinstance(self.repo_dir, str) and "*" in self.repo_dir: + all_local_paths = natsorted(glob(self.repo_dir)) + if self.task_id is not None: + local_paths = [ + p for p in all_local_paths + if any(str(task_id) in p for task_id in self.task_id) + ] + else: + local_paths = all_local_paths + + for local_path in local_paths: + if not check_lerobot_repo(local_path): + continue + robot_names = self.robot_name + parts = local_path.split("/") + + robot_idx = next((i for i, p in enumerate(parts) if p == robot_names), None) + if robot_idx is None: + raise ValueError( + f"Cannot find robot name in path. 
Expected {robot_names}, " + f"but got path: {local_path}" + ) + + repo_dir = "/".join(parts[:robot_idx + 1]) + + if robot_idx + 1 >= len(parts): + raise ValueError( + f"Path ends at robot name '{parts[robot_idx]}', cannot determine task_name: {local_path}" + ) + + task_name = parts[robot_idx + 1] + subtask_name = parts[robot_idx + 2] if robot_idx + 2 < len(parts) else "" + if global_norm_stats is None: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + )) + else: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + norm_stats=global_norm_stats, + )) + return data_configs + + +@dataclasses.dataclass(frozen=True) +class MultiSim2RealSplitAlohaDataConfig(MultiDataConfigFactory): + # If true, will convert joint dimensions to deltas with respect to the current state before passing to the model. + # Gripper dimensions will remain in absolute values. + use_delta_joint_actions: bool = True + default_prompt: str | None = None + # If provided, will be injected into the input data if the "prompt" key is not present. + # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. 
People who + # use standard Aloha data should set this to true. + adapt_to_pi: bool = False + # Repack transforms. + repack_transforms: tyro.conf.Suppress[_transforms.Group] = dataclasses.field( + default=_transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "images": + { + "cam_high": "images.rgb.head", + "cam_left_wrist": "images.rgb.hand_left", + "cam_right_wrist": "images.rgb.hand_right" + }, + "state_dict": { + "left_joint": "states.left_joint.position", + "right_joint": "states.right_joint.position", + "left_gripper": "states.left_gripper.position", + "right_gripper": "states.right_gripper.position" + }, + "action_dict": { + "left_joint": "actions.left_joint.position", + "right_joint": "actions.right_joint.position", + "left_gripper": "actions.left_gripper.position", + "right_gripper": "actions.right_gripper.position", + "left_gripper_openness": "master_actions.left_gripper.openness", + "right_gripper_openness": "master_actions.right_gripper.openness" + }, + "prompt": "task" + } + ) + ] + ) + ) + # Action keys that will be used to read the action sequence from the dataset. 
+ action_sequence_keys: Sequence[str] = ("actions.left_joint.position","actions.right_joint.position","actions.left_gripper.position","actions.right_gripper.position","master_actions.left_gripper.openness","master_actions.right_gripper.openness") + + @override + def create(self, model_config: _model.BaseModelConfig,global_norm_stats: Dict[str, normalize.NormStats] = None) -> list[MultiDataConfig]: + data_configs = [] + data_transforms = _transforms.Group( + inputs=[sim2real_split_aloha_policy.Sim2RealSplitAlohaInputs(adapt_to_pi=self.adapt_to_pi)], + outputs=[sim2real_split_aloha_policy.Sim2RealSplitAlohaOutputs(adapt_to_pi=self.adapt_to_pi)], + ) + if self.use_delta_joint_actions: + delta_action_mask = _transforms.make_bool_mask(6, -1, 6, -1) + data_transforms = data_transforms.push( + inputs=[_transforms.DeltaActions(delta_action_mask)], + outputs=[_transforms.AbsoluteActions(delta_action_mask)], + ) + + model_transforms = ModelTransformFactory(default_prompt=self.default_prompt)(model_config) + if isinstance(self.repo_dir, str) and "*" not in self.repo_dir: + local_paths = [self.repo_dir] + elif isinstance(self.repo_dir, str) and "*" in self.repo_dir: + all_local_paths = natsorted(glob(self.repo_dir)) + if self.task_id is not None: + local_paths = [ + p for p in all_local_paths + if any(str(task_id) in p for task_id in self.task_id) + ] + else: + local_paths = all_local_paths + + for local_path in local_paths: + if not check_lerobot_repo(local_path): + continue + robot_names = self.robot_name + parts = local_path.split("/") + + robot_idx = next((i for i, p in enumerate(parts) if p == robot_names), None) + if robot_idx is None: + raise ValueError( + f"Cannot find robot name in path. 
Expected {robot_names}, " + f"but got path: {local_path}" + ) + + repo_dir = "/".join(parts[:robot_idx + 1]) + + if robot_idx + 1 >= len(parts): + raise ValueError( + f"Path ends at robot name '{parts[robot_idx]}', cannot determine task_name: {local_path}" + ) + + task_name = parts[robot_idx + 1] + subtask_name = parts[robot_idx + 2] if robot_idx + 2 < len(parts) else "" + if global_norm_stats is None: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + )) + else: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + norm_stats=global_norm_stats, + )) + return data_configs + + + +@dataclasses.dataclass(frozen=True) +class MultiSimFrankaDataConfig(MultiDataConfigFactory): + # If true, will convert joint dimensions to deltas with respect to the current state before passing to the model. + # Gripper dimensions will remain in absolute values. + use_delta_joint_actions: bool = True + default_prompt: str | None = None + # If provided, will be injected into the input data if the "prompt" key is not present. + # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. 
People who + # use standard Aloha data should set this to true. + adapt_to_pi: bool = False + + # Repack transforms. + repack_transforms: tyro.conf.Suppress[_transforms.Group] = dataclasses.field( + default=_transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "images": + { + "cam_high": "images.rgb.head", + "cam_left_wrist": "images.rgb.hand", + }, + "state_dict": { + "joint_position": "states.joint.position", + "gripper_pose": "states.gripper.pose", + "gripper_position": "states.gripper.position", + }, + "action_dict": { + "gripper_pose": "actions.gripper.pose", + "gripper_position": "actions.gripper.position", + "gripper_openness": "actions.gripper.openness", + }, + "prompt": "task" + } + ) + ] + ) + ) + # Action keys that will be used to read the action sequence from the dataset. + action_sequence_keys: Sequence[str] = ("actions.gripper.pose","actions.gripper.position","actions.gripper.openness") + + @override + def create(self, model_config: _model.BaseModelConfig,global_norm_stats: Dict[str, normalize.NormStats] = None) -> list[MultiDataConfig]: + data_configs = [] + data_transforms = _transforms.Group( + inputs=[sim_franka_policy.SimFrankaInputs(adapt_to_pi=self.adapt_to_pi)], + outputs=[sim_franka_policy.SimFrankaOutputs(adapt_to_pi=self.adapt_to_pi)], + ) + if self.use_delta_joint_actions: + delta_action_mask = _transforms.make_bool_mask(6, -1) + data_transforms = data_transforms.push( + inputs=[_transforms.DeltaActionsPose(delta_action_mask)], + outputs=[_transforms.AbsoluteActionsPose(delta_action_mask)], + ) + + model_transforms = ModelTransformFactory(default_prompt=self.default_prompt)(model_config) + if isinstance(self.repo_dir, str) and "*" not in self.repo_dir: + local_paths = [self.repo_dir] + elif isinstance(self.repo_dir, str) and "*" in self.repo_dir: + all_local_paths = natsorted(glob(self.repo_dir)) + if self.task_id is not None: + local_paths = [ + p for p in all_local_paths + if any(str(task_id) in p for task_id in 
self.task_id) + ] + else: + local_paths = all_local_paths + + for local_path in local_paths: + if not check_lerobot_repo(local_path): + continue + robot_names = self.robot_name + parts = local_path.split("/") + + robot_idx = next((i for i, p in enumerate(parts) if p == robot_names), None) + if robot_idx is None: + raise ValueError( + f"Cannot find robot name in path. Expected {robot_names}, " + f"but got path: {local_path}" + ) + + repo_dir = "/".join(parts[:robot_idx + 1]) + + if robot_idx + 1 >= len(parts): + raise ValueError( + f"Path ends at robot name '{parts[robot_idx]}', cannot determine task_name: {local_path}" + ) + + task_name = parts[robot_idx + 1] + subtask_name = parts[robot_idx + 2] if robot_idx + 2 < len(parts) else "" + if global_norm_stats is None: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + )) + else: + data_configs.append(dataclasses.replace( + self.create_base_config(repo_dir, task_name, subtask_name, self.stats_dir, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + weight=self.weight, + use_gripper_aug=self.use_gripper_aug, + gripper_aug_config=self.gripper_aug_config, + stats_dir=self.stats_dir, + norm_stats=global_norm_stats, + )) + return data_configs + + + +@dataclasses.dataclass(frozen=True) +class FakeDataConfig(DataConfigFactory): + repo_id: str = "fake" + + @override + def create(self, assets_dirs: pathlib.Path, model_config: _model.BaseModelConfig) -> DataConfig: + return DataConfig(repo_id=self.repo_id) + + 
+@dataclasses.dataclass(frozen=True) +class SimpleDataConfig(DataConfigFactory): + # Factory for the data transforms. + data_transforms: tyro.conf.Suppress[GroupFactory] = dataclasses.field(default_factory=GroupFactory) + # Factory for the model transforms. + model_transforms: tyro.conf.Suppress[GroupFactory] = dataclasses.field(default_factory=ModelTransformFactory) + + @override + def create(self, assets_dirs: pathlib.Path, model_config: _model.BaseModelConfig) -> DataConfig: + return dataclasses.replace( + self.create_base_config(assets_dirs, model_config), + data_transforms=self.data_transforms(model_config), + model_transforms=self.model_transforms(model_config), + ) + + +@dataclasses.dataclass(frozen=True) +class LeRobotAlohaDataConfig(DataConfigFactory): + # If true, will convert joint dimensions to deltas with respect to the current state before passing to the model. + # Gripper dimensions will remain in absolute values. + use_delta_joint_actions: bool = True + # If provided, will be injected into the input data if the "prompt" key is not present. + default_prompt: str | None = None + # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. People who + # use standard Aloha data should set this to true. + adapt_to_pi: bool = True + + # Repack transforms. + repack_transforms: tyro.conf.Suppress[_transforms.Group] = dataclasses.field( + default=_transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "images": {"cam_high": "observation.images.top"}, + "state": "observation.state", + "actions": "action", + } + ) + ] + ) + ) + # Action keys that will be used to read the action sequence from the dataset. 
+ action_sequence_keys: Sequence[str] = ("action",) + + @override + def create(self, assets_dirs: pathlib.Path, model_config: _model.BaseModelConfig) -> DataConfig: + data_transforms = _transforms.Group( + inputs=[aloha_policy.AlohaInputs(adapt_to_pi=self.adapt_to_pi)], + outputs=[aloha_policy.AlohaOutputs(adapt_to_pi=self.adapt_to_pi)], + ) + if self.use_delta_joint_actions: + delta_action_mask = _transforms.make_bool_mask(6, -1, 6, -1) + data_transforms = data_transforms.push( + inputs=[_transforms.DeltaActions(delta_action_mask)], + outputs=[_transforms.AbsoluteActions(delta_action_mask)], + ) + + model_transforms = ModelTransformFactory(default_prompt=self.default_prompt)(model_config) + + return dataclasses.replace( + self.create_base_config(assets_dirs, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + ) + + +@dataclasses.dataclass(frozen=True) +class LeRobotArxAlohaDataConfig(DataConfigFactory): + # If true, will convert joint dimensions to deltas with respect to the current state before passing to the model. + # Gripper dimensions will remain in absolute values. + use_delta_joint_actions: bool = True + default_prompt: str | None = None + # If provided, will be injected into the input data if the "prompt" key is not present. + # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. People who + # use standard Aloha data should set this to true. + adapt_to_pi: bool = False + + # Repack transforms. 
+ repack_transforms: tyro.conf.Suppress[_transforms.Group] = dataclasses.field( + default=_transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "images": + { + "cam_high": "images.rgb.head", + "cam_left_wrist": "images.rgb.hand_left", + "cam_right_wrist": "images.rgb.hand_right" + }, + "state": "observation.state", + "actions": "action", + "prompt": "task" + } + ) + ] + ) + ) + # Action keys that will be used to read the action sequence from the dataset. + action_sequence_keys: Sequence[str] = ("action",) + + @override + def create(self, assets_dirs: pathlib.Path, model_config: _model.BaseModelConfig) -> DataConfig: + data_transforms = _transforms.Group( + inputs=[aloha_policy.AlohaInputs(adapt_to_pi=self.adapt_to_pi)], + outputs=[aloha_policy.AlohaOutputs(adapt_to_pi=self.adapt_to_pi)], + ) + if self.use_delta_joint_actions: + delta_action_mask = _transforms.make_bool_mask(6, -1, 6, -1) + data_transforms = data_transforms.push( + inputs=[_transforms.DeltaActions(delta_action_mask)], + outputs=[_transforms.AbsoluteActions(delta_action_mask)], + ) + + model_transforms = ModelTransformFactory(default_prompt=self.default_prompt)(model_config) + return dataclasses.replace( + self.create_base_config(assets_dirs, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + ) + + + +@dataclasses.dataclass(frozen=True) +class StatsDataConfig(DataConfigFactory): + # If true, will convert joint dimensions to deltas with respect to the current state before passing to the model. + # Gripper dimensions will remain in absolute values. + use_delta_joint_actions: bool = False + default_prompt: str | None = None + # If provided, will be injected into the input data if the "prompt" key is not present. 
+ # If true, this will convert the joint and gripper values from the standard Aloha space to + # the space used by the pi internal runtime which was used to train the base model. People who + # use standard Aloha data should set this to true. + adapt_to_pi: bool = False + + # Repack transforms. + repack_transforms: tyro.conf.Suppress[_transforms.Group] = dataclasses.field( + default=_transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "images": + { + "cam_high": "images.rgb.head", + "cam_left_wrist": "images.rgb.hand_left", + "cam_right_wrist": "images.rgb.hand_right" + }, + "state": "observation.state", + "actions": "action", + "prompt": "task" + } + ) + ] + ) + ) + # Action keys that will be used to read the action sequence from the dataset. + action_sequence_keys: Sequence[str] = ("action",) + + @override + def create(self, assets_dirs: pathlib.Path, model_config: _model.BaseModelConfig) -> DataConfig: + data_transforms = _transforms.Group( + inputs=[aloha_policy.AlohaInputs(adapt_to_pi=self.adapt_to_pi)], + outputs=[aloha_policy.AlohaOutputs(adapt_to_pi=self.adapt_to_pi)], + ) + if self.use_delta_joint_actions: + delta_action_mask = _transforms.make_bool_mask(6, -1, 6, -1) + data_transforms = data_transforms.push( + inputs=[_transforms.DeltaActions(delta_action_mask)], + outputs=[_transforms.AbsoluteActions(delta_action_mask)], + ) + + model_transforms = ModelTransformFactory(default_prompt=self.default_prompt)(model_config) + return dataclasses.replace( + self.create_base_config(assets_dirs, model_config), + repack_transforms=self.repack_transforms, + data_transforms=data_transforms, + model_transforms=model_transforms, + action_sequence_keys=self.action_sequence_keys, + ) + + +@dataclasses.dataclass(frozen=True) +class LeRobotLiberoDataConfig(DataConfigFactory): + """ + This config is used to configure transforms that are applied at various parts of the data pipeline. 
+ For your own dataset, you can copy this class and modify the transforms to match your dataset based on the + comments below. + """ + + extra_delta_transform: bool = False + + @override + def create(self, assets_dirs: pathlib.Path, model_config: _model.BaseModelConfig) -> DataConfig: + # The repack transform is *only* applied to the data coming from the dataset, + # and *not* during inference. We can use it to make inputs from the dataset look + # as close as possible to those coming from the inference environment (e.g. match the keys). + # Below, we match the keys in the dataset (which we defined in the data conversion script) to + # the keys we use in our inference pipeline (defined in the inference script for libero). + # For your own dataset, first figure out what keys your environment passes to the policy server + # and then modify the mappings below so your dataset's keys get matched to those target keys. + # The repack transform simply remaps key names here. + repack_transform = _transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "observation/image": "image", + "observation/wrist_image": "wrist_image", + "observation/state": "state", + "actions": "actions", + "prompt": "prompt", + } + ) + ] + ) + + # The data transforms are applied to the data coming from the dataset *and* during inference. + # Below, we define the transforms for data going into the model (``inputs``) and the transforms + # for data coming out of the model (``outputs``) (the latter is only used during inference). + # We defined these transforms in `libero_policy.py`. You can check the detailed comments there for + # how to modify the transforms to match your dataset. Once you created your own transforms, you can + # replace the transforms below with your own. 
+ data_transforms = _transforms.Group( + inputs=[libero_policy.LiberoInputs(model_type=model_config.model_type)], + outputs=[libero_policy.LiberoOutputs()], + ) + + # One additional data transform: pi0 models are trained on delta actions (relative to the first + # state in each action chunk). IF your data has ``absolute`` actions (e.g. target joint angles) + # you can uncomment the following line to convert the actions to delta actions. The only exception + # is for the gripper actions which are always absolute. + # In the example below, we would apply the delta conversion to the first 6 actions (joints) and + # leave the 7th action (gripper) unchanged, i.e. absolute. + # In Libero, the raw actions in the dataset are already delta actions, so we *do not* need to + # apply a separate delta conversion (that's why it's commented out). Choose whether to apply this + # transform based on whether your dataset uses ``absolute`` or ``delta`` actions out of the box. + + # LIBERO already represents actions as deltas, but we have some old Pi0 checkpoints that are trained with this + # extra delta transform. + if self.extra_delta_transform: + delta_action_mask = _transforms.make_bool_mask(6, -1) + data_transforms = data_transforms.push( + inputs=[_transforms.DeltaActions(delta_action_mask)], + outputs=[_transforms.AbsoluteActions(delta_action_mask)], + ) + + # Model transforms include things like tokenizing the prompt and action targets + # You do not need to change anything here for your own dataset. + model_transforms = ModelTransformFactory()(model_config) + + # We return all data transforms for training and inference. No need to change anything here. 
+ return dataclasses.replace( + self.create_base_config(assets_dirs, model_config), + repack_transforms=repack_transform, + data_transforms=data_transforms, + model_transforms=model_transforms, + ) + + +@dataclasses.dataclass(frozen=True) +class RLDSDroidDataConfig(DataConfigFactory): + """ + Config for training on DROID, using RLDS data format (for efficient training on larger datasets). + """ + + rlds_data_dir: str | None = None + action_space: droid_rlds_dataset.DroidActionSpace | None = None + + # Filtering options. Can pass a path to a dictionary that maps episodes to timestep ranges + # to tuples denoting ranges of time steps to keep (start, end). Episodes are uniquely identified with + # f"{recording_folderpath}--{file_path}", both of which are present in the RLDS episode metadata. + # Path to the filter dictionary file. + filter_dict_path: str | None = "gs://openpi-assets/droid/droid_sample_ranges_v1_0_1.json" + + @override + def create(self, assets_dirs: pathlib.Path, model_config: _model.BaseModelConfig) -> DataConfig: + repack_transform = _transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "observation/exterior_image_1_left": "observation/image", + "observation/wrist_image_left": "observation/wrist_image", + "observation/joint_position": "observation/joint_position", + "observation/gripper_position": "observation/gripper_position", + "actions": "actions", + "prompt": "prompt", + } + ) + ] + ) + + data_transforms = _transforms.Group( + inputs=[droid_policy.DroidInputs(model_type=model_config.model_type)], + outputs=[droid_policy.DroidOutputs()], + ) + + if self.action_space == droid_rlds_dataset.DroidActionSpace.JOINT_POSITION: + # Data loader returns absolute joint position actions -- convert to delta actions for training. 
+ delta_action_mask = _transforms.make_bool_mask(7, -1) + data_transforms = data_transforms.push( + inputs=[_transforms.DeltaActions(delta_action_mask)], + outputs=[_transforms.AbsoluteActions(delta_action_mask)], + ) + + model_transforms = ModelTransformFactory()(model_config) + + assert self.rlds_data_dir is not None, "Need to set rlds data dir for RLDS data loader." + + return dataclasses.replace( + self.create_base_config(assets_dirs, model_config), + repack_transforms=repack_transform, + data_transforms=data_transforms, + model_transforms=model_transforms, + rlds_data_dir=self.rlds_data_dir, + action_space=self.action_space, + filter_dict_path=self.filter_dict_path, + ) + + +@dataclasses.dataclass(frozen=True) +class LeRobotDROIDDataConfig(DataConfigFactory): + """ + Example data config for custom DROID dataset in LeRobot format. + To convert your custom DROID dataset (<10s of hours) to LeRobot format, see examples/droid/convert_droid_data_to_lerobot.py + """ + + @override + def create(self, assets_dirs: pathlib.Path, model_config: _model.BaseModelConfig) -> DataConfig: + repack_transform = _transforms.Group( + inputs=[ + _transforms.RepackTransform( + { + "observation/exterior_image_1_left": "exterior_image_1_left", + "observation/exterior_image_2_left": "exterior_image_2_left", + "observation/wrist_image_left": "wrist_image_left", + "observation/joint_position": "joint_position", + "observation/gripper_position": "gripper_position", + "actions": "actions", + "prompt": "prompt", + } + ) + ] + ) + # We assume joint *velocity* actions, so we should *not* apply an additional delta transform. 
+ data_transforms = _transforms.Group( + inputs=[droid_policy.DroidInputs(model_type=model_config.model_type)], + outputs=[droid_policy.DroidOutputs()], + ) + model_transforms = ModelTransformFactory()(model_config) + + return dataclasses.replace( + self.create_base_config(assets_dirs, model_config), + repack_transforms=repack_transform, + data_transforms=data_transforms, + model_transforms=model_transforms, + ) + + +@dataclasses.dataclass(frozen=True) +class TrainConfig: + # Name of the config. Must be unique. Will be used to reference this config. + name: tyro.conf.Suppress[str] + # Project name. + project_name: str = "openpi" + # Experiment name. Will be used to name the metadata and checkpoint directories. + exp_name: str = tyro.MISSING + + # Defines the model config. Some attributes (action_dim, action_horizon, and max_token_len) are shared by all models + # -- see BaseModelConfig. Specific model implementations (e.g., Pi0Config) inherit from BaseModelConfig and may + # define additional attributes. + model: _model.BaseModelConfig = dataclasses.field(default_factory=pi0_config.Pi0Config) + + # A weight loader can optionally load (possibly partial) weights from disk after the model is initialized. + weight_loader: weight_loaders.WeightLoader = dataclasses.field(default_factory=weight_loaders.NoOpWeightLoader) + + # Optional path to a PyTorch checkpoint to load weights from. + pytorch_weight_path: str | None = None + + # Precision for PyTorch training. + pytorch_training_precision: Literal["bfloat16", "float32"] = "bfloat16" + + lr_schedule: _optimizer.LRScheduleConfig = dataclasses.field(default_factory=_optimizer.CosineDecaySchedule) + optimizer: _optimizer.OptimizerConfig = dataclasses.field(default_factory=_optimizer.AdamW) + ema_decay: float | None = 0.99 + + # Specifies which weights should be frozen. + freeze_filter: tyro.conf.Suppress[Filter] = dataclasses.field(default_factory=nnx.Nothing) + + # Determines the data to be trained on. 
+ data: list[DataConfigFactory] = dataclasses.field(default_factory=list) + + # Base directory for config assets (e.g., norm stats). + assets_base_dir: str = "./assets" + # Base directory for checkpoints. + checkpoint_base_dir: str = "./checkpoints" + + # Random seed that will be used by random generators during training. + seed: int = 42 + # Global batch size. + batch_size: int = 32 + # Number of workers to use for the data loader. Increasing this number will speed up data loading but + # will increase memory and CPU usage. + num_workers: int = 2 + # Number of train steps (batches) to run. + num_train_steps: int = 30_000 + + # How often (in steps) to log training metrics. + log_interval: int = 100 + # How often (in steps) to save checkpoints. + save_interval: int = 1000 + # If set, any existing checkpoints matching step % keep_period == 0 will not be deleted. + keep_period: int | None = 5000 + + # If true, will overwrite the checkpoint directory if it already exists. + overwrite: bool = False + # If true, will resume training from the last checkpoint. + resume: bool = False + + # If true, will enable wandb logging. + wandb_enabled: bool = True + + # Used to pass metadata to the policy server. + policy_metadata: dict[str, Any] | None = None + + # If the value is greater than 1, FSDP will be enabled and shard across number of specified devices; overall + # device memory will be reduced but training could potentially be slower. + # eg. if total device is 4 and fsdp devices is 2; then the model will shard to 2 devices and run + # data parallel between 2 groups of devices. 
+ fsdp_devices: int = 1 + online_compute_norm_stats: bool = False + + @property + def assets_dirs(self) -> pathlib.Path: + """Get the assets directory for this config.""" + return (pathlib.Path(self.assets_base_dir) / self.name).resolve() + + @property + def checkpoint_dir(self) -> pathlib.Path: + """Get the checkpoint directory for this config.""" + if not self.exp_name: + raise ValueError("--exp_name must be set") + return (pathlib.Path(self.checkpoint_base_dir) / self.name / self.exp_name).resolve() + + @property + def trainable_filter(self) -> nnx.filterlib.Filter: + """Get the filter for the trainable parameters.""" + return nnx.All(nnx.Param, nnx.Not(self.freeze_filter)) + + def __post_init__(self) -> None: + if self.resume and self.overwrite: + raise ValueError("Cannot resume and overwrite at the same time.") + + +# Use `get_config` if you need to get a config by name in your code. +_CONFIGS = [ + # pretrain on interndata a1 + TrainConfig( + name="pretrain-interndata-a1", + model=pi0_config.Pi0Config(), + data=[ + # genie1 + MultiSimGenieDataConfig( + repo_dir='data/InternData-A1/sim/*/genie1/*/*', + task_id=None, + use_gripper_aug=True, + gripper_aug_config={ + "gripper_action_keys": ["master_actions.left_gripper.openness", "master_actions.right_gripper.openness"], + "gripper_dim": -1, + "gripper_threshold_method": "std_multiplier", + "gripper_threshold_multiplier": 1.0, + "gripper_min_threshold": 0.001, + "gripper_max_threshold": 1.0, + }, + stats_dir='stats/sim', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + weight=10, + asset_id='genie1', + robot_name='genie1', + ), + MultiSimGenieDataConfig( + repo_dir='data/InternData-A1/sim/*/genie1/*', + task_id=None, + use_gripper_aug=True, + gripper_aug_config={ + "gripper_action_keys": ["master_actions.left_gripper.openness", "master_actions.right_gripper.openness"], + "gripper_dim": -1, + "gripper_threshold_method": "std_multiplier", + "gripper_threshold_multiplier": 1.0, + 
"gripper_min_threshold": 0.001, + "gripper_max_threshold": 1.0, + }, + stats_dir='stats/sim', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + weight=10, + asset_id='genie1', + robot_name='genie1', + ), + + # arx_lift2 + MultiSimSplitAlohaDataConfig( + repo_dir='data/InternData-A1/sim/*/lift2/*', + task_id=None, + use_gripper_aug=True, + gripper_aug_config={ + "gripper_action_keys": ["master_actions.left_gripper.openness", "master_actions.right_gripper.openness"], + "gripper_dim": -1, + "gripper_threshold_method": "std_multiplier", + "gripper_threshold_multiplier": 1.0, + "gripper_min_threshold": 0.001, + "gripper_max_threshold": 1.0, + }, + stats_dir='stats/sim', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + weight=5, + asset_id='lift2', + robot_name='lift2', + ), + MultiSimSplitAlohaDataConfig( + repo_dir='data/InternData-A1/sim/*/lift2/*/*', + task_id=None, + use_gripper_aug=True, + gripper_aug_config={ + "gripper_action_keys": ["master_actions.left_gripper.openness", "master_actions.right_gripper.openness"], + "gripper_dim": -1, + "gripper_threshold_method": "std_multiplier", + "gripper_threshold_multiplier": 1.0, + "gripper_min_threshold": 0.001, + "gripper_max_threshold": 1.0, + }, + stats_dir='stats/sim', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + weight=5, + asset_id='lift2', + robot_name='lift2', + ), + # agilex_split_aloha + MultiSimSplitAlohaDataConfig( + repo_dir='data/InternData-A1/sim/*/split_aloha/*', + task_id=None, + use_gripper_aug=True, + gripper_aug_config={ + "gripper_action_keys": ["master_actions.left_gripper.openness", "master_actions.right_gripper.openness"], + "gripper_dim": -1, + "gripper_threshold_method": "std_multiplier", + "gripper_threshold_multiplier": 1.0, + "gripper_min_threshold": 0.001, + "gripper_max_threshold": 1.0, + }, + stats_dir='stats/sim', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + asset_id='split_aloha', + weight=10, + robot_name='split_aloha', + ), 
+ MultiSimSplitAlohaDataConfig( + repo_dir='data/InternData-A1/sim/*/split_aloha/*/*', + task_id=None, + use_gripper_aug=True, + gripper_aug_config={ + "gripper_action_keys": ["master_actions.left_gripper.openness", "master_actions.right_gripper.openness"], + "gripper_dim": -1, + "gripper_threshold_method": "std_multiplier", + "gripper_threshold_multiplier": 1.0, + "gripper_min_threshold": 0.001, + "gripper_max_threshold": 1.0, + }, + stats_dir='stats/sim', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + asset_id='split_aloha', + weight=10, + robot_name='split_aloha', + ), + # franka + MultiSimFrankaDataConfig( + repo_dir='data/InternData-A1/sim/*/franka/*', + task_id=None, + use_gripper_aug=True, + gripper_aug_config={ + "gripper_action_keys": ["actions.gripper.openness"], + "gripper_dim": -1, + "gripper_threshold_method": "std_multiplier", + "gripper_threshold_multiplier": 1.0, + "gripper_min_threshold": 0.001, + "gripper_max_threshold": 1.0, + }, + stats_dir='stats/sim', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + asset_id='franka', + weight=5, + robot_name='franka', + ), + MultiSimFrankaDataConfig( + repo_dir='data/InternData-A1/sim/*/franka/*/*', + task_id=None, + use_gripper_aug=True, + gripper_aug_config={ + "gripper_action_keys": ["actions.gripper.openness"], + "gripper_dim": -1, + "gripper_threshold_method": "std_multiplier", + "gripper_threshold_multiplier": 1.0, + "gripper_min_threshold": 0.001, + "gripper_max_threshold": 1.0, + }, + stats_dir='stats/sim', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + asset_id='franka', + weight=5, + robot_name='franka', + ), + ], + # pretrain model path + weight_loader=weight_loaders.PaliGemmaWeightLoader("checkpoints/jax/paligemma/pt_224.npz"), + pytorch_weight_path="", + num_train_steps=2_000_000, + num_workers=12, + fsdp_devices=8, + batch_size=512, + save_interval=5000, + lr_schedule=_optimizer.WarmupConstantSchedule(), + ), + # finetune on real-world tasks + 
TrainConfig( + name="finetune-a2d-pen", + model=pi0_config.Pi0Config(), + data=[ + MultiLeRobotReala2dDataConfig( + repo_dir='data/InternData-A1/real/genie1/Put_the_pen_from_the_table_into_the_pen_holder/*', + task_id=["set_0"], + use_gripper_aug=False, + stats_dir='', + fixed_stats_dir='stats/real/genie1/Put_the_pen_from_the_table_into_the_pen_holder', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + asset_id='finetune-a2d-pen', + weight=1, + robot_name='genie1', + ), + ], + # pretrain model path + weight_loader=weight_loaders.CheckpointWeightLoader("checkpoints/20251014-sim-pretrain-470k/20251014-sim-pretrain-470k-8-node-bs512-nw12/680000/params"), + # weight_loader=weight_loaders.CheckpointWeightLoader("checkpoints/jax/pi0_base/params"), + pytorch_weight_path="", + num_train_steps=30_000, + num_workers=32, + fsdp_devices=8, + batch_size=128, + save_interval=5000, + ), + TrainConfig( + name="finetune-sim2real-lift2-sort-rubbish", + model=pi0_config.Pi0Config(), + data=[ + MultiSim2RealSplitAlohaDataConfig( + repo_dir='data/InternData-A1/sim/long_horizon_tasks/lift2/sort_the_rubbish/*', + task_id=None, # when task_id is None, we use all collections under the repo_dir + use_gripper_aug=False, + stats_dir='', + fixed_stats_dir='stats/sim2real/lift2/sort_the_rubbish', + base_config=MultiDataConfig( + prompt_from_task=True, + ), + asset_id='finetune-sim2real-lift2-sort-rubbish', + weight=1, + robot_name='lift2', + ), + ], + # pretrain model path + weight_loader=weight_loaders.CheckpointWeightLoader("checkpoints/20251014-sim-pretrain-470k/20251014-sim-pretrain-470k-8-node-bs512-nw12/680000/params"), + # weight_loader=weight_loaders.CheckpointWeightLoader("checkpoints/jax/pi0_base/params"), + pytorch_weight_path="", + num_train_steps=30_000, + num_workers=32, + fsdp_devices=8, + batch_size=128, + save_interval=5000, + ), + ] + + +if len({config.name for config in _CONFIGS}) != len(_CONFIGS): + raise ValueError("Config names must be unique.") +_CONFIGS_DICT 
def cli() -> TrainConfig:
    """Parse a TrainConfig from the command line, exposing each registered config as a tyro subcommand."""
    return tyro.extras.overridable_config_cli({k: (k, v) for k, v in _CONFIGS_DICT.items()})


def get_config(config_name: str) -> TrainConfig:
    """Get a config by name.

    Args:
        config_name: Name of a registered config in `_CONFIGS_DICT`.

    Returns:
        The matching TrainConfig.

    Raises:
        ValueError: If no config with that name exists; the message suggests the
            closest registered name to help catch typos.
    """
    if config_name not in _CONFIGS_DICT:
        closest = difflib.get_close_matches(config_name, _CONFIGS_DICT.keys(), n=1, cutoff=0.0)
        closest_str = f" Did you mean '{closest[0]}'? " if closest else ""
        raise ValueError(f"Config '{config_name}' not found.{closest_str}")

    return _CONFIGS_DICT[config_name]


def check_lerobot_repo(repo_dir: str) -> bool:
    """Return True if `repo_dir` looks like a LeRobot dataset root.

    A valid repo root contains the three standard subdirectories:
    `data/`, `meta/`, and `videos/`.
    """
    # Return the boolean directly instead of the original if/else returning True/False literals.
    return all(os.path.isdir(os.path.join(repo_dir, sub)) for sub in ("data", "meta", "videos"))
T_co = TypeVar("T_co", covariant=True)


class Dataset(Protocol[T_co]):
    """Structural interface for a random-access dataset."""

    def __getitem__(self, index: SupportsIndex) -> T_co:
        raise NotImplementedError("Subclasses of Dataset should implement __getitem__.")

    def __len__(self) -> int:
        raise NotImplementedError("Subclasses of Dataset should implement __len__.")


class IterableDataset(Protocol[T_co]):
    """Structural interface for a streaming (iterator-based) dataset."""

    def __iter__(self) -> Iterator[T_co]:
        raise NotImplementedError("Subclasses of IterableDataset should implement __iter__.")

    def __len__(self) -> int:
        raise NotImplementedError("Subclasses of Dataset should implement __len__.")


class DataLoader(Protocol[T_co]):
    """Structural interface for a data loader that also exposes its data config."""

    def data_config(self) -> _config.DataConfig:
        """Get the data config for this data loader."""
        raise NotImplementedError("Subclasses of DataLoader should implement data_config.")

    def __iter__(self) -> Iterator[T_co]:
        raise NotImplementedError("Subclasses of DataLoader should implement __iter__.")


class TransformedDataset(Dataset[T_co]):
    """Random-access dataset that applies a composed transform to every element."""

    def __init__(self, dataset: Dataset, transforms: Sequence[_transforms.DataTransformFn]):
        self._dataset = dataset
        self._transform = _transforms.compose(transforms)

    def __getitem__(self, index: SupportsIndex) -> T_co:
        raw = self._dataset[index]
        return self._transform(raw)

    def __len__(self) -> int:
        return len(self._dataset)


class IterableTransformedDataset(IterableDataset[T_co]):
    """Streaming dataset that applies a composed transform to each yielded sample.

    When `is_batched` is set, incoming samples are whole batches; each batch is
    unstacked, transformed element-wise, and restacked, because transforms are
    written against individual samples.
    """

    def __init__(
        self,
        dataset: IterableDataset,
        transforms: Sequence[_transforms.DataTransformFn],
        *,
        is_batched: bool = False,
    ):
        self._dataset = dataset
        self._transform = _transforms.compose(transforms)
        self._is_batched = is_batched

    def _apply_to_batch(self, sample):
        # Unstack the leading batch axis, transform each element, then restack.
        batch_size = next(v.shape[0] for v in sample.values())
        transformed = [
            self._transform(jax.tree.map(lambda x, i=i: x[i], sample)) for i in range(batch_size)
        ]
        return jax.tree.map(lambda *x: np.stack(x, axis=0), *transformed)

    def __iter__(self):
        for sample in self._dataset:
            yield self._apply_to_batch(sample) if self._is_batched else self._transform(sample)

    def __len__(self) -> int:
        return len(self._dataset)


class FakeDataset(Dataset):
    """Synthetic dataset generating random data matching a model's input/action specs.

    Used by the "fake" repo id for smoke tests without touching real data.
    """

    def __init__(self, model_config: _model.BaseModelConfig, num_samples: int):
        self._num_samples = num_samples
        self._observation_spec, self._action_spec = model_config.inputs_spec()

    def __getitem__(self, index: SupportsIndex) -> dict:
        rng = jax.random.key(index.__index__())

        def sample_like(spec: jax.ShapeDtypeStruct):
            nonlocal rng
            rng, data_rng = jax.random.split(rng)
            shape = spec.shape[1:]  # Specs carry a batch dimension; drop it for a single sample.
            if spec.dtype == jnp.float32:
                return jax.random.uniform(data_rng, shape=shape, minval=-1.0, maxval=1.0)
            if spec.dtype == jnp.int32:
                return jax.random.randint(data_rng, shape=shape, minval=0, maxval=2048)
            return jnp.zeros(shape=shape, dtype=spec.dtype)

        observation = jax.tree.map(sample_like, self._observation_spec)
        action = jax.tree.map(sample_like, self._action_spec)
        return {**observation.to_dict(), "actions": action}

    def __len__(self) -> int:
        return self._num_samples


def create_torch_dataset(
    data_config: _config.DataConfig, action_horizon: int, model_config: _model.BaseModelConfig
) -> Dataset:
    """Create a dataset for training.

    Returns a FakeDataset for the sentinel repo id "fake", otherwise a
    LeRobotDataset with per-key action-horizon delta timestamps, optionally
    wrapped to derive the prompt from the LeRobot task.
    """
    repo_id = data_config.repo_id
    if repo_id is None:
        raise ValueError("Repo ID is not set. Cannot create dataset.")
    if repo_id == "fake":
        return FakeDataset(model_config, num_samples=1024)

    meta = lerobot_dataset.LeRobotDatasetMetadata(repo_id)
    # One timestamp per future action step, expressed in seconds at the dataset fps.
    delta_timestamps = {
        key: [step / meta.fps for step in range(action_horizon)] for key in data_config.action_sequence_keys
    }
    dataset = lerobot_dataset.LeRobotDataset(data_config.repo_id, delta_timestamps=delta_timestamps)

    if data_config.prompt_from_task:
        return TransformedDataset(dataset, [_transforms.PromptFromLeRobotTask(meta.tasks)])
    return dataset
def create_rlds_dataset(
    data_config: _config.DataConfig,
    action_horizon: int,
    batch_size: int,
    *,
    shuffle: bool = False,
) -> Dataset:
    """Create an RLDS-backed dataset. At the moment, only DROID is supported."""
    return DroidRldsDataset(
        data_dir=data_config.rlds_data_dir,
        batch_size=batch_size,
        shuffle=shuffle,
        action_chunk_size=action_horizon,
        action_space=data_config.action_space,
        filter_dict_path=data_config.filter_dict_path,
    )


def _resolve_norm_stats(data_config: _config.DataConfig, *, skip_norm_stats: bool):
    """Return the normalization stats to apply, or {} when normalization is skipped.

    Raises:
        ValueError: If stats are required but missing from `data_config`.
    """
    if data_config.repo_id == "fake" or skip_norm_stats:
        return {}
    if data_config.norm_stats is None:
        raise ValueError(
            "Normalization stats not found. "
            "Make sure to run `scripts/compute_norm_stats.py --config-name=`."
        )
    return data_config.norm_stats


def _input_transform_stack(data_config: _config.DataConfig, norm_stats) -> list:
    """Build the full input transform pipeline: repack -> data -> normalize -> model."""
    return [
        *data_config.repack_transforms.inputs,
        *data_config.data_transforms.inputs,
        _transforms.Normalize(norm_stats, use_quantiles=data_config.use_quantile_norm),
        *data_config.model_transforms.inputs,
    ]


def transform_dataset(dataset: Dataset, data_config: _config.DataConfig, *, skip_norm_stats: bool = False) -> Dataset:
    """Wrap a random-access dataset with the config's input transforms.

    The norm-stats lookup and transform stack are shared with
    `transform_iterable_dataset` via private helpers (previously duplicated).
    """
    norm_stats = _resolve_norm_stats(data_config, skip_norm_stats=skip_norm_stats)
    return TransformedDataset(dataset, _input_transform_stack(data_config, norm_stats))


def transform_iterable_dataset(
    dataset: IterableDataset,
    data_config: _config.DataConfig,
    *,
    skip_norm_stats: bool = False,
    is_batched: bool = False,
) -> IterableDataset:
    """Wrap a streaming dataset with the config's input transforms.

    Args:
        is_batched: Set when the underlying dataset yields whole batches; the
            wrapper then applies transforms per element and restacks.
    """
    norm_stats = _resolve_norm_stats(data_config, skip_norm_stats=skip_norm_stats)
    return IterableTransformedDataset(
        dataset,
        _input_transform_stack(data_config, norm_stats),
        is_batched=is_batched,
    )
+ """ + data_config = config.data.create(config.assets_dirs, config.model) + logging.info(f"data_config: {data_config}") + + if data_config.rlds_data_dir is not None: + return create_rlds_data_loader( + data_config, + action_horizon=config.model.action_horizon, + batch_size=config.batch_size, + sharding=sharding, + shuffle=shuffle, + num_batches=num_batches, + skip_norm_stats=skip_norm_stats, + framework=framework, + ) + return create_torch_data_loader( + data_config, + model_config=config.model, + action_horizon=config.model.action_horizon, + batch_size=config.batch_size, + sharding=sharding, + shuffle=shuffle, + num_batches=num_batches, + num_workers=config.num_workers, + seed=config.seed, + skip_norm_stats=skip_norm_stats, + framework=framework, + ) + +def create_data_loader_multi( + config: _config.TrainConfig, + *, + sharding: jax.sharding.Sharding | None = None, + shuffle: bool = False, + num_batches: int | None = None, + skip_norm_stats: bool = False, + framework: Literal["jax", "pytorch"] = "jax", + global_norm_stats: Dict[str, normalize.NormStats] | None = None, +) -> DataLoader[tuple[_model.Observation, _model.Actions]]: + """Create a data loader for training. + + Args: + config: The training configuration. + sharding: The sharding to use for the data loader (JAX only). + shuffle: Whether to shuffle the data. + num_batches: Determines the number of batches to return. + skip_norm_stats: Whether to skip data normalization. + framework: The framework to use ("jax" or "pytorch"). 
+ """ + data_configs_list = [] + for data_config_factory in config.data: + data_configs = data_config_factory.create(config.model, global_norm_stats) + logging.info(f"data_config: {data_configs}") + data_configs_list.append(data_configs) + + return create_torch_data_loader_multi( + data_configs_list, + model_config=config.model, + action_horizon=config.model.action_horizon, + batch_size=config.batch_size, + sharding=sharding, + shuffle=shuffle, + num_batches=num_batches, + num_workers=config.num_workers, + seed=config.seed, + skip_norm_stats=skip_norm_stats, + framework=framework, + global_norm_stats=global_norm_stats, + ) + + +def create_torch_data_loader( + data_config: _config.DataConfig, + model_config: _model.BaseModelConfig, + action_horizon: int, + batch_size: int, + *, + sharding: jax.sharding.Sharding | None = None, + skip_norm_stats: bool = False, + shuffle: bool = False, + num_batches: int | None = None, + num_workers: int = 0, + seed: int = 0, + framework: str = "jax", +) -> DataLoader[tuple[_model.Observation, _model.Actions]]: + """Create a data loader for training. + + Args: + data_config: The data configuration. + action_horizon: The action horizon. + batch_size: The batch size. + sharding: The sharding to use for the data loader. If None, the data loader will + use a single device sharding. + skip_norm_stats: Whether to skip data normalization. + shuffle: Whether to shuffle the data. + num_batches: Determines the number of batches to return. If the number exceeds the + number of batches in the dataset, the data loader will loop over the dataset. + If not provided, will iterate over the dataset indefinitely. + num_workers: The number of worker processes to use. If zero, the data loader will + execute in the main process. + seed: The seed to use for shuffling the data. 
+ """ + dataset = create_torch_dataset(data_config, action_horizon, model_config) + dataset = transform_dataset(dataset, data_config, skip_norm_stats=skip_norm_stats) + + # Use TorchDataLoader for both frameworks + # For PyTorch DDP, create DistributedSampler and divide batch size by world size + # For JAX, divide by process count + sampler = None + if framework == "pytorch": + if torch.distributed.is_initialized(): + sampler = torch.utils.data.distributed.DistributedSampler( + dataset, + num_replicas=torch.distributed.get_world_size(), + rank=torch.distributed.get_rank(), + shuffle=shuffle, + drop_last=True, + ) + local_batch_size = batch_size // torch.distributed.get_world_size() + else: + local_batch_size = batch_size + else: + local_batch_size = batch_size // jax.process_count() + if jax.process_count() > 1: + sampler = JaxProcessDistributedSampler( + dataset_size=len(dataset), + num_replicas=jax.process_count(), + rank=jax.process_index(), + shuffle=shuffle, + seed=seed, + ) + logging.info(f"local_batch_size: {local_batch_size}") + data_loader = TorchDataLoader( + dataset, + local_batch_size=local_batch_size, + sharding=None if framework == "pytorch" else sharding, + shuffle=(sampler is None and shuffle), # Don't shuffle if using sampler + sampler=sampler, + num_batches=num_batches, + num_workers=num_workers, + seed=seed, + framework=framework, + ) + + return DataLoaderImpl(data_config, data_loader) + +def create_torch_data_loader_multi( + data_configs_list: list[_config.DataConfig], + model_config: _model.BaseModelConfig, + action_horizon: int, + batch_size: int, + *, + sharding: jax.sharding.Sharding | None = None, + skip_norm_stats: bool = False, + shuffle: bool = False, + num_batches: int | None = None, + num_workers: int = 0, + seed: int = 0, + framework: str = "jax", + global_norm_stats: Dict[str, normalize.NormStats] | None = None, +) -> DataLoader[tuple[_model.Observation, _model.Actions]]: + """Create a data loader for training. 
+ + Args: + data_config: The data configuration. + action_horizon: The action horizon. + batch_size: The batch size. + sharding: The sharding to use for the data loader. If None, the data loader will + use a single device sharding. + skip_norm_stats: Whether to skip data normalization. + shuffle: Whether to shuffle the data. + num_batches: Determines the number of batches to return. If the number exceeds the + number of batches in the dataset, the data loader will loop over the dataset. + If not provided, will iterate over the dataset indefinitely. + num_workers: The number of worker processes to use. If zero, the data loader will + execute in the main process. + seed: The seed to use for shuffling the data. + """ + dataset = create_mixture_dataset(data_configs_list, action_horizon, model_config) + # Use TorchDataLoader for both frameworks + # For PyTorch DDP, create DistributedSampler and divide batch size by world size + # For JAX, divide by process count + sampler = None + if framework == "pytorch": + if torch.distributed.is_initialized(): + sampler = torch.utils.data.distributed.DistributedSampler( + dataset, + num_replicas=torch.distributed.get_world_size(), + rank=torch.distributed.get_rank(), + shuffle=shuffle, + drop_last=True, + ) + local_batch_size = batch_size // torch.distributed.get_world_size() + else: + local_batch_size = batch_size + else: + local_batch_size = batch_size // jax.process_count() + if jax.process_count() > 1: + sampler = JaxProcessDistributedSampler( + dataset_size=len(dataset), + num_replicas=jax.process_count(), + rank=jax.process_index(), + shuffle=shuffle, + seed=seed, + ) + logging.info(f"local_batch_size: {local_batch_size}") + data_loader = TorchDataLoader( + dataset, + local_batch_size=local_batch_size, + sharding=None if framework == "pytorch" else sharding, + shuffle=(sampler is None and shuffle), # Don't shuffle if using sampler + sampler=sampler, + num_batches=num_batches, + num_workers=num_workers, + seed=seed, + 
def create_rlds_data_loader(
    data_config: _config.DataConfig,
    action_horizon: int,
    batch_size: int,
    *,
    sharding: jax.sharding.Sharding | None = None,
    skip_norm_stats: bool = False,
    shuffle: bool = False,
    num_batches: int | None = None,
    framework: str = "jax",
) -> DataLoader[tuple[_model.Observation, _model.Actions]]:
    """Create an RLDS data loader for training.

    Note: This data loader requires some extra dependencies -- see examples/droid/README_train.md

    Args:
        data_config: The data configuration.
        action_horizon: The action horizon.
        batch_size: The batch size.
        sharding: The sharding to use; None means default data-parallel sharding.
        skip_norm_stats: Whether to skip data normalization.
        shuffle: Whether to shuffle the data.
        num_batches: Number of batches to return; loops if larger than the dataset,
            iterates indefinitely if None.
        framework: Only "jax" is supported.

    Raises:
        NotImplementedError: If framework is "pytorch".
    """
    if framework == "pytorch":
        raise NotImplementedError("PyTorch RLDS data loader is not supported yet")
    dataset = create_rlds_dataset(data_config, action_horizon, batch_size, shuffle=shuffle)
    dataset = transform_iterable_dataset(dataset, data_config, skip_norm_stats=skip_norm_stats, is_batched=True)

    data_loader = RLDSDataLoader(
        dataset,
        sharding=sharding,
        num_batches=num_batches,
    )

    return DataLoaderImpl(data_config, data_loader)


class JaxProcessDistributedSampler(torch.utils.data.Sampler[int]):
    """Split dataset indices across JAX processes with drop-last semantics.

    Each process sees a disjoint strided slice of the (optionally shuffled) index
    permutation. The trailing remainder (`dataset_size % num_replicas`) is dropped so
    every process yields exactly the same number of indices -- required to keep
    multi-process training steps in lock-step -- and `__len__` reports that count.
    Shuffling (if enabled) is deterministic via the provided seed.
    """

    def __init__(
        self,
        dataset_size: int,
        *,
        num_replicas: int,
        rank: int,
        shuffle: bool,
        seed: int,
    ) -> None:
        self._dataset_size = max(0, dataset_size)
        self._num_replicas = max(1, num_replicas)
        self._rank = max(0, rank)
        self._shuffle = shuffle
        self._seed = seed

    def __iter__(self):
        if self._shuffle and self._dataset_size > 0:
            g = torch.Generator()
            g.manual_seed(self._seed)
            indices = torch.randperm(self._dataset_size, generator=g).tolist()
        else:
            indices = list(range(self._dataset_size))
        # Fix: the original comment claimed the remainder was dropped "for balance",
        # but `indices[rank::num]` kept it, giving ranks unequal shard lengths while
        # __len__ over-reported via ceiling division. Truncate so all ranks match.
        usable = (self._dataset_size // self._num_replicas) * self._num_replicas
        return iter(indices[:usable][self._rank :: self._num_replicas])

    def __len__(self) -> int:
        # Matches the truncated strided selection above.
        return self._dataset_size // self._num_replicas
+ """ + if len(dataset) < local_batch_size: + raise ValueError(f"Local batch size ({local_batch_size}) is larger than the dataset size ({len(dataset)}).") + + # Store sharding - None for PyTorch, JAX sharding for JAX + self._sharding = sharding + if sharding is None and framework == "jax": + # Use data parallel sharding by default for JAX only. + self._sharding = jax.sharding.NamedSharding( + jax.sharding.Mesh(jax.devices(), ("B",)), + jax.sharding.PartitionSpec("B"), + ) + self._num_batches = num_batches + + mp_context = None + if num_workers > 0: + mp_context = multiprocessing.get_context("spawn") + + generator = torch.Generator() + generator.manual_seed(seed) + self._data_loader = torch.utils.data.DataLoader( + typing.cast(torch.utils.data.Dataset, dataset), + batch_size=local_batch_size, + shuffle=(sampler is None and shuffle), # Don't shuffle if using sampler + sampler=sampler, + num_workers=num_workers, + multiprocessing_context=mp_context, + persistent_workers=num_workers > 0, + collate_fn=_collate_fn, + worker_init_fn=_worker_init_fn, + drop_last=True, + generator=generator, + pin_memory=False, + ) + + @property + def torch_loader(self) -> torch.utils.data.DataLoader: + return self._data_loader + + @profile + def __iter__(self): + num_items = 0 + while True: + data_iter = iter(self._data_loader) + while True: + if self._num_batches is not None and num_items >= self._num_batches: + return + try: + batch = next(data_iter) + except StopIteration: + break # We've exhausted the dataset. Create a new iterator and start over. 
+ num_items += 1 + # For JAX, convert to sharded arrays; for PyTorch, return torch tensors + if self._sharding is not None: + yield jax.tree.map(lambda x: jax.make_array_from_process_local_data(self._sharding, x), batch) + else: + yield jax.tree.map(torch.as_tensor, batch) + + +def _collate_fn(items): + """Collate the batch elements into batched numpy arrays.""" + # Make sure to convert to numpy arrays before stacking since some of the incoming elements + # may be JAX arrays. + return jax.tree.map(lambda *xs: np.stack([np.asarray(x) for x in xs], axis=0), *items) + + +def _worker_init_fn(worker_id: int) -> None: + """Tell JAX inside the worker process not to preallocate the GPU memory.""" + # NOTE: This is called after jax is imported inside the worker process. This + # means that this approach will not work for selecting the backend. + os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false" + os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" + + +class RLDSDataLoader: + """Shallow wrapper around the DROID data loader to make it compatible with openpi. + + All batching already happens in the DROID dataset, so we don't need to do anything here. + """ + + def __init__( + self, + dataset: DroidRldsDataset, + *, + sharding: jax.sharding.Sharding | None = None, + num_batches: int | None = None, + ): + self._dataset = dataset + self._num_batches = num_batches + + if jax.process_count() > 1: + raise NotImplementedError("Data loading with multiple processes is not supported.") + + if sharding is None: + # Use data parallel sharding by default. 
class RLDSDataLoader:
    """Thin adapter exposing the DROID RLDS dataset through the openpi loader interface.

    Batching already happens inside the dataset, so this wrapper only handles
    sharding and the optional batch budget.
    """

    def __init__(
        self,
        dataset: DroidRldsDataset,
        *,
        sharding: jax.sharding.Sharding | None = None,
        num_batches: int | None = None,
    ):
        if jax.process_count() > 1:
            raise NotImplementedError("Data loading with multiple processes is not supported.")

        self._dataset = dataset
        self._num_batches = num_batches
        if sharding is None:
            # Default: shard the batch dimension across all local devices.
            sharding = jax.sharding.NamedSharding(
                jax.sharding.Mesh(jax.devices(), ("B",)),
                jax.sharding.PartitionSpec("B"),
            )
        self._sharding = sharding

    def __iter__(self):
        produced = 0
        while True:
            source = iter(self._dataset)
            while True:
                # Check the budget *before* pulling the next batch so we never
                # consume an element we won't yield.
                if self._num_batches is not None and produced >= self._num_batches:
                    return
                try:
                    batch = next(source)
                except StopIteration:
                    break  # Dataset exhausted -- restart with a fresh iterator.
                produced += 1
                yield jax.tree.map(lambda x: jax.make_array_from_process_local_data(self._sharding, x), batch)


class DataLoaderImpl(DataLoader):
    """Pairs a concrete loader with its data config and unpacks batches into model types."""

    def __init__(self, data_config: _config.DataConfig, data_loader: TorchDataLoader | RLDSDataLoader):
        self._data_config = data_config
        self._data_loader = data_loader

    def data_config(self) -> _config.DataConfig:
        return self._data_config

    def __iter__(self):
        for batch in self._data_loader:
            yield _model.Observation.from_dict(batch), batch["actions"]
+""" + +from enum import Enum +from enum import auto +import json +import logging +from pathlib import Path + +import tqdm + +import openpi.shared.download as download + + +class DroidActionSpace(Enum): + """Action space for DROID dataset.""" + + JOINT_POSITION = auto() + JOINT_VELOCITY = auto() + + +class DroidRldsDataset: + def __init__( + self, + data_dir: str, + batch_size: int, + *, # Force keyword-only arguments + shuffle: bool = True, + action_chunk_size: int = 16, + # We default to joint position actions, since they allow policy evaluation in simulation. + action_space: DroidActionSpace = DroidActionSpace.JOINT_POSITION, + max_loaded_steps_per_episode: int = 100, + # Reduce this if you are running out of memory, but careful -- below ~100k shuffling is not sufficiently random. + shuffle_buffer_size: int = 250_000, + num_parallel_reads: int = -1, # -1 == tf.data.AUTOTUNE -- hack to not import tf at top level + num_parallel_calls: int = -1, # -1 == tf.data.AUTOTUNE -- hack to not import tf at top level + filter_dict_path=None, # Path to json file with indices to sample during training + ): + # Import tensorflow here to not make it mandatory in case RLDS data loader is not used. + import dlimp as dl + import tensorflow as tf + import tensorflow_datasets as tfds + + # Configure Tensorflow with *no GPU devices* (to prevent clobber with PyTorch / JAX) + tf.config.set_visible_devices([], "GPU") + + builder = tfds.builder("droid", data_dir=data_dir, version="1.0.1") + dataset = dl.DLataset.from_rlds(builder, split="train", shuffle=shuffle, num_parallel_reads=num_parallel_reads) + + # Filter out any unsuccessful trajectories -- we use the file name to check this + dataset = dataset.filter( + lambda traj: tf.strings.regex_full_match( + traj["traj_metadata"]["episode_metadata"]["file_path"][0], ".*success.*" + ) + ) + + # # Repeat dataset so we never run out of data. + dataset = dataset.repeat() + + # Load the filter dictionary if provided. 
+ # The filter dictionary is a JSON file that maps episode keys to ranges of frames to sample + # (e.g., + # { + # "": [[0, 100], [200, 300]] + # } + # means keep frames 0-99 and 200-299). + if filter_dict_path is not None: + cached_filter_dict_path = download.maybe_download(filter_dict_path) + with Path(cached_filter_dict_path).open("r") as f: + filter_dict = json.load(f) + + logging.info(f"Using filter dictionary with {len(filter_dict)} episodes") + + keys_tensor = [] + values_tensor = [] + + for episode_key, ranges in tqdm.tqdm(filter_dict.items(), desc="Creating idle filter hash table..."): + for start, end in ranges: + for t in range(start, end): + frame_key = f"{episode_key}--{t}" + keys_tensor.append(frame_key) + values_tensor.append(True) + self.filter_table = tf.lookup.StaticHashTable( + tf.lookup.KeyValueTensorInitializer(keys_tensor, values_tensor), default_value=False + ) + logging.info("Filter hash table initialized") + else: + self.filter_table = tf.lookup.StaticHashTable( + tf.lookup.KeyValueTensorInitializer([""], [True]), default_value=True + ) + + def restructure(traj): + """Reformat observation and action keys, sample language instruction.""" + # Important: we use joint *position* action space -- easier to simulate! + actions = tf.concat( + ( + ( + traj["action_dict"]["joint_position"] + if action_space == DroidActionSpace.JOINT_POSITION + else traj["action_dict"]["joint_velocity"] + ), + traj["action_dict"]["gripper_position"], + ), + axis=-1, + ) + # Randomly samples one of the two exterior images in DROID during training (we only train with one at a time). + # Note: the "left" refers to the left camera in the stereo pair, we only train on the left camera. 
+ exterior_img = tf.cond( + tf.random.uniform(shape=[]) > 0.5, + lambda: traj["observation"]["exterior_image_1_left"], + lambda: traj["observation"]["exterior_image_2_left"], + ) + wrist_img = traj["observation"]["wrist_image_left"] + # Randomly sample one of the three language instructions + instruction = tf.random.shuffle( + [traj["language_instruction"], traj["language_instruction_2"], traj["language_instruction_3"]] + )[0] + + traj_len = tf.shape(traj["action"])[0] + indices = tf.as_string(tf.range(traj_len)) + + # Data filtering: + # Compute a uniquely-identifying step ID by concatenating the recording folderpath, file path, + # and each step's time step index. This will index into the filter hash table, and if it returns true, + # then the frame passes the filter. + step_id = ( + traj["traj_metadata"]["episode_metadata"]["recording_folderpath"] + + "--" + + traj["traj_metadata"]["episode_metadata"]["file_path"] + + "--" + + indices + ) + passes_filter = self.filter_table.lookup(step_id) + + return { + "actions": actions, + "observation": { + "image": exterior_img, + "wrist_image": wrist_img, + "joint_position": traj["observation"]["joint_position"], + "gripper_position": traj["observation"]["gripper_position"], + }, + "prompt": instruction, + "step_id": step_id, + "passes_filter": passes_filter, + } + + dataset = dataset.traj_map(restructure, num_parallel_calls) + + def chunk_actions(traj): + """Splits episode into action chunks.""" + traj_len = tf.shape(traj["actions"])[0] + + # For each step in the trajectory, construct indices for the next n actions + action_chunk_indices = tf.broadcast_to( + tf.range(action_chunk_size)[None], + [traj_len, action_chunk_size], + ) + tf.broadcast_to( + tf.range(traj_len)[:, None], + [traj_len, action_chunk_size], + ) + + # Cap to length of the sequence --> final chunks will repeat the last action + # This makes sense, since we are using absolute joint + gripper position actions + action_chunk_indices = 
tf.minimum(action_chunk_indices, traj_len - 1) + + # Gather the actions for each chunk + traj["actions"] = tf.gather(traj["actions"], action_chunk_indices) + return traj + + dataset = dataset.traj_map(chunk_actions, num_parallel_calls) + + # Flatten: map from trajectory dataset to dataset of individual action chunks + dataset = dataset.flatten(num_parallel_calls=num_parallel_calls) + + # Filter data that doesn't pass the filter + def filter_from_dict(frame): + return frame["passes_filter"] + + dataset = dataset.filter(filter_from_dict) + + # Remove "passes_filter" key from output + def remove_passes_filter(frame): + frame.pop("passes_filter") + return frame + + dataset = dataset.map(remove_passes_filter) + + # Decode images: RLDS saves encoded images, only decode now for efficiency + def decode_images(traj): + traj["observation"]["image"] = tf.io.decode_image( + traj["observation"]["image"], expand_animations=False, dtype=tf.uint8 + ) + traj["observation"]["wrist_image"] = tf.io.decode_image( + traj["observation"]["wrist_image"], expand_animations=False, dtype=tf.uint8 + ) + return traj + + dataset = dataset.frame_map(decode_images, num_parallel_calls) + + # Shuffle, batch + dataset = dataset.shuffle(shuffle_buffer_size) + dataset = dataset.batch(batch_size) + # Note =>> Seems to reduce memory usage without affecting speed? + dataset = dataset.with_ram_budget(1) + + self.dataset = dataset + self.batch_size = batch_size + self.shuffle = shuffle + + def __iter__(self): + yield from self.dataset.as_numpy_iterator() + + def __len__(self): + # This is the approximate number of samples in DROID after filtering. + # Easier to hardcode than to iterate through the dataset and compute it. 
"""RoboArena baseline policy configs."""

from typing import TypeAlias

import openpi.models.model as _model
import openpi.models.pi0_config as pi0_config
import openpi.models.pi0_fast as pi0_fast
import openpi.models.tokenizer as _tokenizer
import openpi.policies.droid_policy as droid_policy
import openpi.transforms as _transforms

ModelType: TypeAlias = _model.ModelType


def get_roboarena_configs():
    """Return the list of RoboArena DROID baseline inference TrainConfigs.

    Each entry pairs a model variant (different action tokenizers / a flow
    policy) with the shared DROID data pipeline. The data config is identical
    across all entries except for the `model_type` forwarded to `DroidInputs`,
    so it is built by a single private helper instead of being repeated.
    """
    # Import here to avoid circular imports.
    from openpi.training.config import AssetsConfig
    from openpi.training.config import DataConfig
    from openpi.training.config import SimpleDataConfig
    from openpi.training.config import TrainConfig

    def _droid_data_config(model_type=None):
        """Shared DROID SimpleDataConfig; `model_type` (if given) is forwarded to DroidInputs."""

        def _data_transforms(model):
            kwargs = {"action_dim": model.action_dim}
            if model_type is not None:
                kwargs["model_type"] = model_type
            return _transforms.Group(
                inputs=[droid_policy.DroidInputs(**kwargs)],
                outputs=[droid_policy.DroidOutputs()],
            )

        return SimpleDataConfig(
            assets=AssetsConfig(asset_id="droid"),
            data_transforms=_data_transforms,
            base_config=DataConfig(
                prompt_from_task=True,
            ),
        )

    return [
        #
        # RoboArena DROID baseline inference configs.
        #
        TrainConfig(
            # Trained from PaliGemma, using RT-2 / OpenVLA style binning tokenizer.
            name="paligemma_binning_droid",
            model=pi0_fast.Pi0FASTConfig(
                action_dim=8,
                action_horizon=15,
                max_token_len=400,
                fast_model_tokenizer=_tokenizer.BinningTokenizer,
            ),
            data=_droid_data_config(ModelType.PI0_FAST),
        ),
        TrainConfig(
            # Trained from PaliGemma, using FAST tokenizer (using universal FAST+ tokenizer).
            name="paligemma_fast_droid",
            model=pi0_fast.Pi0FASTConfig(action_dim=8, action_horizon=15),
            data=_droid_data_config(ModelType.PI0_FAST),
        ),
        TrainConfig(
            # Trained from PaliGemma, using FAST tokenizer (tokenizer trained on DROID dataset).
            name="paligemma_fast_specialist_droid",
            model=pi0_fast.Pi0FASTConfig(
                action_dim=8,
                action_horizon=15,
                fast_model_tokenizer=_tokenizer.FASTTokenizer,
                fast_model_tokenizer_kwargs={"fast_tokenizer_path": "KarlP/fast_droid_specialist"},
            ),
            data=_droid_data_config(ModelType.PI0_FAST),
        ),
        TrainConfig(
            # Trained from PaliGemma, using FSQ tokenizer.
            name="paligemma_vq_droid",
            model=pi0_fast.Pi0FASTConfig(
                action_dim=8,
                action_horizon=15,
                fast_model_tokenizer=_tokenizer.FSQTokenizer,
                fast_model_tokenizer_kwargs={"fsq_tokenizer_path": "gs://openpi-assets/tokenizers/droid_fsq_tokenizer"},
            ),
            data=_droid_data_config(ModelType.PI0_FAST),
        ),
        TrainConfig(
            # pi0-style diffusion / flow VLA, trained on DROID from PaliGemma.
            name="paligemma_diffusion_droid",
            model=pi0_config.Pi0Config(action_horizon=10, action_dim=8),
            data=_droid_data_config(),
        ),
    ]
def detect_gripper_change_step(
    dataset,
    select_actions: Optional[list[str]] = None,
    gripper_dim: int = -1,
    threshold_method: str = "std_multiplier",
    threshold_multiplier: float = 2.0,
    min_threshold: float = 0.001,
    max_threshold: float = 1.0,
    plot_gripper_changes: bool = False,
):
    """Detect the steps around significant gripper state changes.

    Only works for the self-collected dataset format. For each action key, the
    per-step delta of the gripper dimension is thresholded; around every
    significant change a 4-wide window of step indices [idx-2, idx+1] is
    collected. The union of all windows (deduplicated, sorted, int32) is stored
    on the dataset in place as ``gripper_change_step_idx`` and the dataset is
    returned.

    Args:
        dataset: LeRobotDataset instance (needs `.meta.episodes` and `.hf_dataset`).
        select_actions: Action keys to process; defaults to ["action"].
            (Was a mutable default argument; now uses the None-sentinel idiom.)
        gripper_dim: Dimension index for the gripper in the action vector.
        threshold_method: 'std_multiplier', 'percentile', or 'absolute'.
        threshold_multiplier: Multiplier for the std-based / absolute threshold.
        min_threshold: Lower clamp for the threshold (avoids over-sensitivity).
        max_threshold: Upper clamp for the threshold (avoids missing big changes).
        plot_gripper_changes: Whether to plot a visualization of the detections.

    Raises:
        ValueError: On an unknown `threshold_method`.
    """
    if select_actions is None:
        select_actions = ["action"]

    episode_lengths = [ep_dict["length"] for ep_dict in dataset.meta.episodes.values()]
    cumulative_lengths = np.cumsum(episode_lengths)

    all_window_indices = set()  # Set gives automatic deduplication across action keys.

    for action_key in select_actions:
        action_values = dataset.hf_dataset[action_key]

        delta_action = np.diff(action_values, axis=0)

        # Handle episode boundaries: a delta computed across two episodes is
        # meaningless, so replace it with the previous in-episode delta (or 0).
        for end_idx in cumulative_lengths[:-1]:
            if end_idx - 1 < len(delta_action) and end_idx - 2 >= 0:
                delta_action[end_idx - 1] = delta_action[end_idx - 2]
            elif end_idx - 1 < len(delta_action):
                delta_action[end_idx - 1] = 0

        if delta_action.ndim == 1:
            delta_action = delta_action[:, np.newaxis]

        assert delta_action.ndim == 2

        # Extract gripper delta values.
        gripper_delta = delta_action[:, gripper_dim]

        # Calculate threshold based on statistical properties.
        if threshold_method == "std_multiplier":
            # Use standard deviation to filter out small tremors.
            std_val = np.std(gripper_delta)
            threshold = threshold_multiplier * std_val
        elif threshold_method == "percentile":
            # Percentile-based threshold. NOTE(review): hardcoded to the 85th
            # percentile; `threshold_multiplier` is ignored on this path.
            threshold = np.percentile(np.abs(gripper_delta), 85)
        elif threshold_method == "absolute":
            # Use the multiplier directly as an absolute threshold.
            threshold = threshold_multiplier
        else:
            raise ValueError(f"Unknown threshold_method: {threshold_method}")

        # Clamp threshold to reasonable bounds.
        threshold = np.clip(threshold, min_threshold, max_threshold)

        # Indices where the gripper change exceeds the threshold.
        significant_change_idx = np.where(np.abs(gripper_delta) > threshold)[0]

        cur_window_indices = set()
        # delta_action[i] is the change from step i to i+1; valid step indices
        # for the original sequence are [0, len(action_values) - 1].
        max_possible_idx = len(action_values) - 1
        for idx in significant_change_idx:
            # Sliding window of size 4 centered around idx: [idx-2, idx+1],
            # clipped to the valid step range.
            window = np.arange(max(0, idx - 2), min(max_possible_idx + 1, idx + 2))
            cur_window_indices.update(int(w) for w in window)
        all_window_indices.update(cur_window_indices)

        if plot_gripper_changes:
            num_episodes_to_plot = 5
            end_index_for_plot = cumulative_lengths[num_episodes_to_plot - 1] - 1
            delta_action_to_plot = delta_action[:end_index_for_plot]

            # Filter detected steps down to the plotted range.
            gripper_change_step_idx = np.array(sorted(cur_window_indices)).astype(np.int32)
            gripper_change_step_idx_to_plot = gripper_change_step_idx[gripper_change_step_idx < end_index_for_plot]

            # NOTE(review): plot_gripper_changes_in_subplots is neither defined
            # nor imported in this module — enabling plot_gripper_changes raises
            # NameError. Confirm where this helper lives.
            plot_gripper_changes_in_subplots(
                delta_action_to_plot,
                gripper_change_step_idx_to_plot,
                episode_lengths,
                num_episodes_to_plot,
                gripper_dim,
                f"{action_key}_gripper_change",
            )

    # Union of all windows, sorted, as int32.
    gripper_change_step_idx = np.array(sorted(all_window_indices)).astype(np.int32)

    print(f"Total unique gripper change steps: {len(gripper_change_step_idx)}, Total steps: {len(action_values)}")

    dataset.gripper_change_step_idx = gripper_change_step_idx

    return dataset
class Dataset(Protocol[T_co]):
    """Interface for a dataset with random access."""

    def __getitem__(self, index: SupportsIndex) -> T_co:
        raise NotImplementedError("Subclasses of Dataset should implement __getitem__.")

    def __len__(self) -> int:
        raise NotImplementedError("Subclasses of Dataset should implement __len__.")


class TransformedDataset(Dataset[T_co]):
    """Wraps a dataset and applies a composed transform pipeline to every retrieved sample."""

    def __init__(self, dataset: Dataset, transforms: Sequence[_transforms.DataTransformFn]):
        self._dataset = dataset
        # Compose the transform sequence once; applied per sample in __getitem__.
        self._transform = _transforms.compose(transforms)

    def __getitem__(self, index: SupportsIndex) -> T_co:
        return self._transform(self._dataset[index])

    def __len__(self) -> int:
        return len(self._dataset)


def transform_dataset(dataset: Dataset, data_config: _config.DataConfig) -> Dataset:
    """Transform the dataset by applying the data transforms."""
    # NOTE(review): the first assignment is dead code — norm_stats is
    # immediately overwritten on the next line.
    norm_stats = {}
    norm_stats = data_config.norm_stats

    return TransformedDataset(
        dataset,
        [
            *data_config.repack_transforms.inputs,
            *data_config.data_transforms.inputs,
            _transforms.Normalize(norm_stats, use_quantiles=data_config.use_quantile_norm),
            *data_config.model_transforms.inputs,
        ],
    )


class MixtureDataset(Dataset):
    """
    A composite dataset that combines multiple datasets, allowing for weighted sampling
    and gripper-change-based augmentation.

    All eligible samples from the constituent datasets are flattened into
    `flat_sample_map` (a list of (dataset_idx, sample_idx) pairs), and each sample
    gets a normalized sampling weight derived from per-dataset base weights and
    the `gripper_aug_ratio` heuristic.
    """

    def __init__(
        self,
        datasets: Sequence[Dataset],
        datasets_name: Sequence[str],
        datasets_meta: Sequence[LeRobotDatasetMetadata],
        datasets_weights: Optional[Dict[str, float]] = None,
        gripper_aug_ratio: float = 1.0,
        shuffle: bool = True,
    ):
        """
        Initializes the MixtureDataset.

        Args:
            datasets: The `Dataset` objects to combine (possibly TransformedDataset wrappers).
            datasets_name: A name per dataset, keys into `datasets_weights`.
            datasets_meta: Metadata per dataset; `info['total_episodes']` and
                `info['total_frames']` are read from it.
            datasets_weights: Base sampling weight per dataset name. If None,
                equal weights (1/num_datasets) are used.
            gripper_aug_ratio: Multiplier applied to the weight of samples whose
                step index was flagged by `detect_gripper_change_step` — useful
                for up-weighting rare gripper-change events.
            shuffle: If True, the flat sample map and weights are shuffled
                together after creation.

        Raises:
            ValueError: If the flat sample map and weight vector lengths disagree.
        """
        self.datasets = datasets
        self.datasets_name = datasets_name
        self.meta = datasets_meta
        # Extract total number of episodes and frames for each dataset from metadata.
        self.num_episodes = [meta.info['total_episodes'] for meta in datasets_meta]
        self.num_frames = [meta.info['total_frames'] for meta in datasets_meta]

        # Compute the flattened list of (dataset_idx, sample_idx) pairs.
        self._compute_len(False)
        # Assign normalized sampling weights to each sample in the flattened map.
        self._get_weights(datasets_weights, gripper_aug_ratio)

        # For training, ensure the sample map and weights are consistent.
        if len(self.flat_sample_map) != len(self.sample_weights):
            raise ValueError(
                f"Mismatch in flat sample map length ({len(self.flat_sample_map)}) "
                f"and sample weights length ({len(self.sample_weights)})."
            )
        if shuffle:
            # Shuffle both the sample map and weights in the same order for training.
            # This ensures random access to samples while maintaining their assigned probabilities.
            indices = np.random.permutation(len(self.flat_sample_map))
            self.flat_sample_map = [self.flat_sample_map[i] for i in indices]
            self.sample_weights = self.sample_weights[indices]

    def __len__(self) -> int:
        """Total number of samples in the mixture dataset (after flattening)."""
        return len(self.flat_sample_map)

    def __getitem__(self, index: SupportsIndex):
        """
        Retrieves a specific sample from one of the underlying datasets based on
        the flattened sample map.

        Args:
            index: Index into `flat_sample_map` (0 to `len(self) - 1`).

        Returns:
            The sample (as produced by the underlying dataset) at that position.
            NOTE(review): an earlier docstring claimed a (dataset_idx, sample)
            tuple was returned — the code returns only the sample.

        Raises:
            IndexError: If the provided index is out of bounds for the dataset.
        """
        if not (0 <= index < len(self.flat_sample_map)):
            raise IndexError(f"Index {index} is out of bounds for the dataset (size: {len(self.flat_sample_map)}).")

        # Retrieve the original dataset index and sample index from the flattened map.
        dataset_idx, sample_idx = self.flat_sample_map[index]
        return self.datasets[dataset_idx][sample_idx]

    def _compute_len(self, is_eval: bool = False):
        """
        Pre-computes `all_sample_indices`, a list of per-episode frame-index tensors
        sampled from each constituent dataset, then builds `flat_sample_map` from it.

        Args:
            is_eval: Forwarded to `_sample_indices` (currently unused there).
        """
        self.all_sample_indices: List[Union[List[List[torch.Tensor]], List[torch.Tensor], torch.Tensor]] = []

        for i, (ds, meta) in enumerate(zip(self.datasets, self.meta)):
            # Access the underlying LeRobotDataset or MultiLeRobotDataset, bypassing TransformedDataset wrapper.
            actual_ds = ds._dataset if isinstance(ds, TransformedDataset) else ds

            # NOTE(review): num_indices is always None here, so _sample_indices
            # keeps every frame of every episode.
            num_indices = None

            if isinstance(actual_ds, MultiLeRobotDataset):
                # For MultiLeRobotDataset, iterate through its sub-datasets to get indices.
                indices_list_for_multi_ds = []
                for sub_ds in actual_ds._datasets:
                    _from = sub_ds.episode_data_index["from"]
                    _to = sub_ds.episode_data_index["to"]
                    indices = self._sample_indices(
                        _from, _to, num_indices, is_eval=is_eval, dataset_name=self.datasets_name[i]
                    )
                    indices_list_for_multi_ds.append(indices)
                self.all_sample_indices.append(indices_list_for_multi_ds)
            elif isinstance(actual_ds, LeRobotDataset):
                # For a single LeRobotDataset.
                _from = actual_ds.episode_data_index["from"]
                _to = actual_ds.episode_data_index["to"]
                indices = self._sample_indices(
                    _from, _to, num_indices, is_eval=is_eval, dataset_name=self.datasets_name[i]
                )
                self.all_sample_indices.append(indices)
            else:
                raise TypeError(f"Unsupported dataset type: {type(actual_ds)}. "
                                "Expected `LeRobotDataset` or `MultiLeRobotDataset`.")

        # After collecting all sampled episode indices, flatten them into `flat_sample_map`.
        self.flat_sample_map = self._create_flat_sample_map()

    def _create_flat_sample_map(self) -> List[Tuple[int, int]]:
        """
        Flattens the nested `self.all_sample_indices` structure into a list of
        `(original_dataset_index, sample_index_within_original_dataset)` tuples
        used by `__getitem__`.
        """
        flat_map = []
        for dataset_idx, sample_group in enumerate(self.all_sample_indices):
            # Case 1: `MultiLeRobotDataset` where `sample_group` is `List[List[torch.Tensor]]`
            if isinstance(sample_group, list) and len(sample_group) > 0 and isinstance(sample_group[0], list):
                for sub_group in sample_group:  # Iterate through sub-datasets' index lists
                    for tensor_of_indices in sub_group:  # Iterate through tensors of indices for episodes
                        for i in range(tensor_of_indices.numel()):
                            flat_map.append((dataset_idx, tensor_of_indices[i].item()))
            # Case 2: `LeRobotDataset` where `sample_group` is `List[torch.Tensor]`
            elif isinstance(sample_group, list) and len(sample_group) > 0 and isinstance(sample_group[0], torch.Tensor):
                for tensor_of_indices in sample_group:
                    for i in range(tensor_of_indices.numel()):
                        flat_map.append((dataset_idx, tensor_of_indices[i].item()))
            # Case 3: A rare case where `sample_group` might be a single `torch.Tensor` directly
            elif isinstance(sample_group, torch.Tensor):
                for i in range(sample_group.numel()):
                    flat_map.append((dataset_idx, sample_group[i].item()))
        return flat_map

    def _sample_indices(
        self,
        start: List[int],
        end: List[int],
        num_frames: Optional[int],
        random_pad: bool = False,
        is_eval: bool = False,
        dataset_name: str = None,  # Kept for potential future dataset-specific sampling rules
    ) -> List[torch.Tensor]:
        """
        Samples frame indices per episode.

        Args:
            start: Starting frame index for each episode.
            end: Ending (exclusive) frame index for each episode.
            num_frames: Target number of frames per episode.
                NOTE(review): currently unused — `target_frames` is set to
                `frame_count`, so the `frame_count >= target_frames` branch is
                always taken and the `random_pad` path below is dead code.
            random_pad: If True (and fewer frames than target), pad shorter
                episodes with randomly duplicated indices. Dead code today.
            is_eval: Currently unused.
            dataset_name: For debugging / future per-dataset rules.

        Returns:
            A list of 1-D index tensors, one per episode.
        """
        all_indices_for_episodes = []
        for _start, _end in zip(start, end):
            frame_count = _end - _start  # Total frames available in this episode.
            target_frames = frame_count
            if frame_count >= target_frames:
                # Always true given target_frames == frame_count; linspace over
                # frame_count steps is equivalent to taking every frame.
                indices = torch.linspace(_start, _end - 1, steps=target_frames).long()
            else:
                # Unreachable with the current target_frames assignment (see NOTE above).
                if random_pad:
                    # Pad the existing frames with randomly chosen duplicates from the episode.
                    pad_size = target_frames - frame_count
                    indices = torch.arange(_start, _end)  # All available original indices
                    # Randomly sample `pad_size` indices from the existing ones.
                    pad_indices = indices[torch.randint(0, frame_count, (pad_size,))]
                    indices = torch.cat([indices, pad_indices])  # Combine original and padded indices
                    indices = indices[torch.randperm(target_frames)]  # Randomly permute to mix original and padded.
                else:
                    # If not padding, simply use all available frames.
                    indices = torch.arange(_start, _end)

            all_indices_for_episodes.append(indices)

        return all_indices_for_episodes

    def _get_weights(self, datasets_weights: Dict[str, float], aug_ratio: float = 1.0):
        """
        Assigns normalized sampling weights to each sample in the flattened map.

        Samples whose step index appears in the dataset's `gripper_change_step_idx`
        get `base_weight * aug_ratio`; all others get `base_weight`. Weights are
        then normalized to sum to 1 across the whole mixture.

        Args:
            datasets_weights: Base weight per dataset name (default 1.0 if a
                name is missing; uniform 1/num_datasets if None).
            aug_ratio: Multiplier for gripper-change samples.
        """
        self.sample_weights: List[float] = []
        self.datasets_weight_map: Dict[str, float] = {}

        if datasets_weights is None:
            num_datasets = len(self.datasets_name)
            datasets_weights = {name: 1.0 / num_datasets for name in self.datasets_name}

        for idx, ds_name in enumerate(self.datasets_name):
            # Unwrap TransformedDataset to reach the gripper change information.
            current_base_dataset = self.datasets[idx]._dataset if isinstance(self.datasets[idx], TransformedDataset) else self.datasets[idx]
            base_weight = datasets_weights.get(ds_name, 1.0)  # Get base weight for this dataset

            individual_weights_for_ds: List[float] = []

            # Logic to retrieve `gripper_change_step_idx` and assign weights.
            if isinstance(current_base_dataset, MultiLeRobotDataset):
                # For MultiLeRobotDataset, iterate through its sub-datasets.
                # NOTE(review): if a sub-dataset has no gripper_change_step_idx,
                # NO weights are appended for it here (the None fallback below
                # only runs in the LeRobotDataset branch) — that would trigger
                # the length-mismatch ValueError in __init__. Confirm intent.
                for idj, sub_ds in enumerate(current_base_dataset._datasets):
                    gripper_change_step_idx = getattr(sub_ds, 'gripper_change_step_idx', None)
                    if gripper_change_step_idx is not None:
                        sampled_indices_sub_ds = self.all_sample_indices[idx][idj]
                        for tensor_of_indices in sampled_indices_sub_ds:
                            for step_idx in tensor_of_indices.tolist():
                                # NOTE(review): `in` on a numpy array is a linear
                                # scan per sample — converting to a set once
                                # would make this O(1) per lookup.
                                if step_idx in gripper_change_step_idx:
                                    individual_weights_for_ds.append(base_weight * aug_ratio)
                                else:
                                    individual_weights_for_ds.append(base_weight)
            elif isinstance(current_base_dataset, LeRobotDataset):
                # For a single LeRobotDataset.
                gripper_change_step_idx = getattr(current_base_dataset, 'gripper_change_step_idx', None)
                if gripper_change_step_idx is not None:
                    sampled_indices_ds = self.all_sample_indices[idx]
                    for tensor_of_indices in sampled_indices_ds:
                        for step_idx in tensor_of_indices.tolist():
                            if step_idx in gripper_change_step_idx:
                                individual_weights_for_ds.append(base_weight * aug_ratio)
                            else:
                                individual_weights_for_ds.append(base_weight)
                if gripper_change_step_idx is None:
                    print(f"Warning: Gripper change detection not fully supported for dataset type {type(current_base_dataset)}. "
                          "Assigning uniform weights based on `base_weight` for this dataset.")
                    num_samples_for_ds_in_flat_map = sum(1 for map_ds_idx, _ in self.flat_sample_map if map_ds_idx == idx)
                    individual_weights_for_ds.extend([base_weight] * num_samples_for_ds_in_flat_map)

            # Accumulate individual weights for all samples and for the dataset's total.
            self.sample_weights.extend(individual_weights_for_ds)
            self.datasets_weight_map[ds_name] = self.datasets_weight_map.get(ds_name, 0.0) + sum(individual_weights_for_ds)

        # Final normalization of all individual sample weights across the entire mixture dataset.
        total_sum_of_all_individual_weights = sum(self.sample_weights)
        if total_sum_of_all_individual_weights > 0:
            self.sample_weights = np.array(self.sample_weights, dtype=np.float32)
            self.sample_weights = self.sample_weights / total_sum_of_all_individual_weights
        else:
            self.sample_weights = np.array([], dtype=np.float32)

        # Normalize the `datasets_weight_map` to reflect the effective proportion of each dataset
        # in the final sampling distribution.
        if total_sum_of_all_individual_weights > 0:
            for k in self.datasets_weight_map:
                self.datasets_weight_map[k] /= total_sum_of_all_individual_weights
        else:
            self.datasets_weight_map = {k: 0.0 for k in self.datasets_weight_map}  # All weights become zero.

    def __str__(self) -> str:
        """
        Returns a formatted, ANSI-colored summary of the effective sampling
        weights and dataset lengths.
        """
        # Define ANSI escape codes for colored and bold text.
        RESET = "\033[0m"
        BOLD = "\033[1m"
        CYAN = "\033[96m"
        YELLOW = "\033[93m"
        GREEN = "\033[92m"
        MAGENTA = "\033[95m"

        # Determine the maximum key length for consistent formatting.
        max_key_len = max(len(k) for k in self.datasets_weight_map.keys()) + 2 if self.datasets_weight_map else 20

        # Build the lines of the string representation.
        lines = [
            f"{BOLD}{MAGENTA}######################################### 👈 Dataset Weight Map: ########################################{RESET}"
        ]

        # Add individual dataset information: name, number of samples, and effective weight.
        # NOTE(review): indexing self.datasets by the dict's enumeration order
        # assumes datasets_weight_map insertion order matches self.datasets
        # order — true today because _get_weights inserts in dataset order,
        # but fragile if that changes.
        for idx, (name, weight) in enumerate(self.datasets_weight_map.items()):
            lines.append(f"{CYAN}{name:<{max_key_len}} : {len(self.datasets[idx]):>18.0f} ({weight*100:>.2f}%){RESET}")

        # Add a separator line.
        separator_length = len(lines[0]) - len(BOLD) - len(MAGENTA) - len(RESET) + 1
        lines.append("-" * separator_length)

        # Add total episodes summary.
        lines.append(f"{CYAN}{'Total Episodes':<{max_key_len}}{RESET} : {YELLOW}{sum(self.num_episodes):>18.0f}{RESET}")

        # Add the closing border, matching the length of the separator.
        lines.append(f"{BOLD}{MAGENTA}{'#' * separator_length}{RESET}")

        return "\n".join(lines)
def create_mixture_dataset(
    data_configs_list,
    action_horizon,
    model_config,
):
    """Build a MixtureDataset of transformed LeRobot datasets from a nested config list.

    Each dataset is optionally episode-subsampled (randomly, via np.random.choice)
    per its `data_ratio`, optionally annotated with gripper-change steps, and
    wrapped with the config's transform pipeline.

    NOTE(review): near-duplicate of `create_mixture_dataset_no_transform` and
    `create_mixture_dataset_calculate_norm_stats` — consider factoring the shared
    per-dataset construction into a helper. `model_config` is unused here.
    NOTE(review): gripper_aug_ratio is hardcoded to 10.0 in all three factories.
    """
    all_datasets = []
    all_datasets_name = []
    all_datasets_meta = []
    all_datasets_weight = {}

    for ds_configs in data_configs_list:
        for ds_config in ds_configs:
            repo_dir = ds_config.repo_dir
            task_id = ds_config.task_id
            subtask_id = ds_config.subtask_id
            root_path = f"{repo_dir}/{task_id}/{subtask_id}"

            dataset_meta = LeRobotDatasetMetadata(repo_id=root_path, root=root_path)
            episodes = list(dataset_meta.episodes_stats.keys())
            if ds_config.data_ratio < 1.0:
                # Random episode subsampling. NOTE(review): the sibling factories
                # below use a head slice (episodes[:sub_length]) instead — confirm
                # the inconsistency is intentional.
                sub_length = int(len(episodes) * ds_config.data_ratio) + 1
                logging.info(f"sub_length: {sub_length}")
                indices = np.random.choice(len(episodes), sub_length, replace=False)
                episodes = [episodes[i] for i in indices]
            print(f"downsample ratio: {ds_config.downsample_ratio}")
            dataset = LeRobotDataset(
                episodes=episodes,
                repo_id=root_path,
                root=root_path,
                # Action chunk timestamps at the (downsampled) control frequency.
                delta_timestamps={
                    key: [t / (dataset_meta.fps // ds_config.downsample_ratio) for t in range(action_horizon)] for key in ds_config.action_sequence_keys
                },
            )
            if ds_config.use_gripper_aug and ds_config.gripper_aug_config is not None:
                gripper_aug_config = ds_config.gripper_aug_config
                dataset = detect_gripper_change_step(
                    dataset,
                    select_actions=gripper_aug_config["gripper_action_keys"],
                    gripper_dim=gripper_aug_config["gripper_dim"],
                    threshold_method=gripper_aug_config["gripper_threshold_method"],
                    threshold_multiplier=gripper_aug_config["gripper_threshold_multiplier"],
                    min_threshold=gripper_aug_config["gripper_min_threshold"],
                    max_threshold=gripper_aug_config["gripper_max_threshold"],
                )

            # Apply the repack/data/normalize/model transform pipeline.
            dataset = transform_dataset(dataset, ds_config)
            dataset_name = root_path
            dataset_weight = ds_config.weight

            all_datasets.append(dataset)
            all_datasets_name.append(dataset_name)
            all_datasets_meta.append(dataset_meta)
            all_datasets_weight[dataset_name] = dataset_weight

    mixture_dataset = MixtureDataset(
        all_datasets,
        all_datasets_name,
        all_datasets_meta,
        all_datasets_weight,
        gripper_aug_ratio=10.0,
    )
    return mixture_dataset


def create_mixture_dataset_no_transform(
    data_configs_list,
    action_horizon,
    model_config
):
    """Same as `create_mixture_dataset` but WITHOUT the transform pipeline.

    Differences from `create_mixture_dataset`: episodes are subsampled with a
    head slice (not randomly), and `transform_dataset` is not applied.
    `model_config` is unused here.
    """
    all_datasets = []
    all_datasets_name = []
    all_datasets_meta = []
    all_datasets_weight = {}

    for ds_configs in data_configs_list:
        for ds_config in ds_configs:
            repo_dir = ds_config.repo_dir
            task_id = ds_config.task_id
            subtask_id = ds_config.subtask_id
            root_path = f"{repo_dir}/{task_id}/{subtask_id}"

            dataset_meta = LeRobotDatasetMetadata(repo_id=root_path, root=root_path)
            episodes = list(dataset_meta.episodes_stats.keys())
            if ds_config.data_ratio < 1.0:
                sub_length = int(len(episodes) * ds_config.data_ratio) + 1
                episodes = episodes[:sub_length]
            dataset = LeRobotDataset(
                episodes=episodes,
                repo_id=root_path,
                root=root_path,
                delta_timestamps={
                    key: [t / (dataset_meta.fps // ds_config.downsample_ratio) for t in range(action_horizon)] for key in ds_config.action_sequence_keys
                },
            )
            if ds_config.use_gripper_aug and ds_config.gripper_aug_config is not None:
                gripper_aug_config = ds_config.gripper_aug_config
                dataset = detect_gripper_change_step(
                    dataset,
                    select_actions=gripper_aug_config["gripper_action_keys"],
                    gripper_dim=gripper_aug_config["gripper_dim"],
                    threshold_method=gripper_aug_config["gripper_threshold_method"],
                    threshold_multiplier=gripper_aug_config["gripper_threshold_multiplier"],
                    min_threshold=gripper_aug_config["gripper_min_threshold"],
                    max_threshold=gripper_aug_config["gripper_max_threshold"],
                )

            dataset_name = root_path
            dataset_weight = ds_config.weight

            all_datasets.append(dataset)
            all_datasets_name.append(dataset_name)
            all_datasets_meta.append(dataset_meta)
            all_datasets_weight[dataset_name] = dataset_weight

    mixture_dataset = MixtureDataset(
        all_datasets,
        all_datasets_name,
        all_datasets_meta,
        all_datasets_weight,
        gripper_aug_ratio=10.0,
    )
    return mixture_dataset


def create_mixture_dataset_calculate_norm_stats(
    data_configs_list,
    action_horizon,
    model_config
):
    """Untransformed mixture used to compute normalization statistics.

    Differences from the other factories: `data_configs_list` is a FLAT list of
    configs here (not nested), and the dataset is built with `load_video=False`.
    NOTE(review): confirm the LeRobotDataset in use actually accepts a
    `load_video` kwarg — upstream lerobot versions differ.
    """
    all_datasets = []
    all_datasets_name = []
    all_datasets_meta = []
    all_datasets_weight = {}

    for ds_config in data_configs_list:
        repo_dir = ds_config.repo_dir
        task_id = ds_config.task_id
        subtask_id = ds_config.subtask_id
        root_path = f"{repo_dir}/{task_id}/{subtask_id}"

        dataset_meta = LeRobotDatasetMetadata(repo_id=root_path, root=root_path)
        episodes = list(dataset_meta.episodes_stats.keys())
        if ds_config.data_ratio < 1.0:
            sub_length = int(len(episodes) * ds_config.data_ratio) + 1
            episodes = episodes[:sub_length]
        dataset = LeRobotDataset(
            episodes=episodes,
            repo_id=root_path,
            root=root_path,
            delta_timestamps={
                key: [t / (dataset_meta.fps // ds_config.downsample_ratio) for t in range(action_horizon)] for key in ds_config.action_sequence_keys
            },
            # Skip video decoding: only numeric streams are needed for norm stats.
            load_video=False,

        )
        if ds_config.use_gripper_aug and ds_config.gripper_aug_config is not None:
            gripper_aug_config = ds_config.gripper_aug_config
            dataset = detect_gripper_change_step(
                dataset,
                select_actions=gripper_aug_config["gripper_action_keys"],
                gripper_dim=gripper_aug_config["gripper_dim"],
                threshold_method=gripper_aug_config["gripper_threshold_method"],
                threshold_multiplier=gripper_aug_config["gripper_threshold_multiplier"],
                min_threshold=gripper_aug_config["gripper_min_threshold"],
                max_threshold=gripper_aug_config["gripper_max_threshold"],
            )

        dataset_name = root_path
        dataset_weight = ds_config.weight

        all_datasets.append(dataset)
        all_datasets_name.append(dataset_name)
        all_datasets_meta.append(dataset_meta)
        all_datasets_weight[dataset_name] = dataset_weight

    mixture_dataset = MixtureDataset(
        all_datasets,
        all_datasets_name,
        all_datasets_meta,
        all_datasets_weight,
        gripper_aug_ratio=10.0,
    )
    return mixture_dataset
import dataclasses
from typing import Protocol, runtime_checkable

import jax.numpy as jnp
import optax

import openpi.shared.array_typing as at


@runtime_checkable
class LRScheduleConfig(Protocol):
    """Anything that can build an optax learning-rate schedule."""

    def create(self) -> optax.Schedule: ...


@dataclasses.dataclass(frozen=True)
class CosineDecaySchedule(LRScheduleConfig):
    """Linear warmup followed by cosine decay."""

    warmup_steps: int = 1_000
    peak_lr: float = 2.5e-5
    decay_steps: int = 30_000
    decay_lr: float = 2.5e-6

    def create(self) -> optax.Schedule:
        # Start just above zero so the step-0 learning rate is finite.
        initial_lr = self.peak_lr / (self.warmup_steps + 1)
        return optax.warmup_cosine_decay_schedule(
            init_value=initial_lr,
            peak_value=self.peak_lr,
            warmup_steps=self.warmup_steps,
            decay_steps=self.decay_steps,
            end_value=self.decay_lr,
        )


@dataclasses.dataclass(frozen=True)
class RsqrtDecaySchedule(LRScheduleConfig):
    """Linear warmup followed by inverse-square-root decay."""

    warmup_steps: int = 1_000
    peak_lr: float = 5e-5
    timescale: float = 10_000

    def create(self) -> optax.Schedule:
        warmup = optax.linear_schedule(
            init_value=self.peak_lr / (self.warmup_steps + 1),
            end_value=self.peak_lr,
            transition_steps=self.warmup_steps,
        )

        def decay(step):
            # lr = peak_lr / sqrt((timescale + step) / timescale)
            return self.peak_lr / jnp.sqrt((self.timescale + step) / self.timescale)

        return optax.join_schedules([warmup, decay], [self.warmup_steps])


@dataclasses.dataclass(frozen=True)
class WarmupConstantSchedule(LRScheduleConfig):
    """Linear warmup up to a constant learning rate."""

    warmup_steps: int = 2_000
    peak_lr: float = 5e-5

    def create(self) -> optax.Schedule:
        return optax.warmup_constant_schedule(
            init_value=self.peak_lr / (self.warmup_steps + 1),
            peak_value=self.peak_lr,
            warmup_steps=self.warmup_steps,
        )


@runtime_checkable
class OptimizerConfig(Protocol):
    """Anything that can build an optax gradient transformation."""

    def create(
        self,
        lr: optax.ScalarOrSchedule,
        weight_decay_mask: at.PyTree | None = None,
    ) -> optax.GradientTransformation: ...


@dataclasses.dataclass(frozen=True)
class AdamW(OptimizerConfig):
    """AdamW optimizer with global-norm gradient clipping."""

    b1: float = 0.9
    b2: float = 0.95
    eps: float = 1e-8
    # Changing this to 0 can cause out-of-memory errors for some reason, so we set it to a negligible value.
    weight_decay: float = 1e-10
    clip_gradient_norm: float = 1.0

    def create(
        self,
        lr: optax.ScalarOrSchedule,
        weight_decay_mask: at.PyTree | None = None,
    ) -> optax.GradientTransformation:
        adamw = optax.adamw(
            lr,
            b1=self.b1,
            b2=self.b2,
            eps=self.eps,
            weight_decay=self.weight_decay,
            mask=weight_decay_mask,
        )
        # Clip gradients first, then apply the optimizer update.
        return optax.chain(optax.clip_by_global_norm(self.clip_gradient_norm), adamw)


@dataclasses.dataclass(frozen=True)
class SGD(OptimizerConfig):
    """Plain SGD (optionally with momentum / Nesterov); weight decay is not supported."""

    lr: float = 5e-5
    momentum: float = 0.9
    nesterov: bool = False

    def create(
        self,
        lr: optax.ScalarOrSchedule,
        weight_decay_mask: at.PyTree | None = None,
    ) -> optax.GradientTransformation:
        assert weight_decay_mask is None, "Weight decay is not supported for SGD"
        return optax.sgd(lr, momentum=self.momentum, nesterov=self.nesterov)


def create_optimizer(
    optimizer: OptimizerConfig, lr_schedule: LRScheduleConfig, weight_decay_mask: at.PyTree | None = None
) -> optax.GradientTransformation:
    """Build the full gradient transformation from an optimizer config and an LR schedule config."""
    return optimizer.create(lr_schedule.create(), weight_decay_mask=weight_decay_mask)
+# In FSDP, we shard the data across both the batch and FSDP axes. +DATA_AXIS = (BATCH_AXIS, FSDP_AXIS) + + +class _MeshState: + active_mesh: jax.sharding.Mesh | None = None + + +def make_mesh(num_fsdp_devices: int) -> jax.sharding.Mesh: + if jax.device_count() % num_fsdp_devices != 0: + raise ValueError( + f"Number of devices {jax.device_count()} must be divisible by the number of FSDP devices {num_fsdp_devices}." + ) + mesh_shape = (jax.device_count() // num_fsdp_devices, num_fsdp_devices) + return jax.make_mesh(mesh_shape, (BATCH_AXIS, FSDP_AXIS)) + + +@contextlib.contextmanager +def set_mesh(mesh: jax.sharding.Mesh): + """Plumbing the mesh deep into the module tree is extremeley cumbersome; until the JAX team lands a better API, a + custom context manager like this one is the recommended way to maintain a reference to a global mesh. This is only used + in `activation_sharding_constraint` below.""" + if _MeshState.active_mesh is not None: + raise ValueError("Cannot nest set_mesh context managers.") + _MeshState.active_mesh = mesh + try: + yield + finally: + _MeshState.active_mesh = None + + +def activation_sharding_constraint(pytree): + if _MeshState.active_mesh is None: + return pytree + return jax.lax.with_sharding_constraint( + pytree, jax.sharding.NamedSharding(_MeshState.active_mesh, jax.sharding.PartitionSpec(DATA_AXIS)) + ) + + +def fsdp_sharding( + pytree, + mesh: jax.sharding.Mesh, + *, + min_size_mbytes: int = 4, # 4 MiB + log: bool = False, +): + """Apply FSDP sharding to a pytree of arrays based on the mesh shape. + + Args: + pytree: A pytree to be apply sharding specified by the mesh, note that only array types (eg. contains .shape attr) + will be considered for sharding. + mesh: The mesh being used for applying sharding on to pytree. + min_size_mbytes: The minimum size of the array in MiB to be considered for sharding, any array smaller than this + will be replicated. 
+ log: If true, will log the sharding decisions for arrays that are being considered for sharding. + + Returns: + The sharded pytree. + """ + min_size_bytes = min_size_mbytes * 2**20 + + def _shard_arr(kp, array: jax.ShapeDtypeStruct): + # if fsdp is not actually going to be used, replicate everything to avoid extraneous logging + if mesh.shape[FSDP_AXIS] == 1: + return jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec()) + # replicate scalar and vector arrays + if not hasattr(array, "shape"): + return jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec()) + if len(array.shape) < 2: + return jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec()) + # replicate small arrays + if (arr_size := np.prod(array.shape) * np.dtype(array.dtype).itemsize) < min_size_bytes: + return jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec()) + + # shard matrices and larger tensors along the largest axis that is divisible by the fsdp dimension + axes = np.argsort(array.shape)[::-1] + spec = [None] * len(axes) + for i in axes: + if array.shape[i] % mesh.shape[FSDP_AXIS] == 0: + if log: + logging.info( + f"Sharding {jax.tree_util.keystr(kp)} of shape {array.shape} ({arr_size / 2**20:.2f} MiB) along axis {i}" + ) + spec[i] = FSDP_AXIS + return jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec(*spec)) + + # replicate if no valid sharding was found + if log: + logging.warning( + f"Could not find a valid sharding for {jax.tree_util.keystr(kp)} of shape {array.shape} with mesh of shape {mesh.shape}" + ) + return jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec()) + + return jax.tree_util.tree_map_with_path(_shard_arr, pytree) diff --git a/policy/openpi-InternData-A1/src/openpi/training/utils.py b/policy/openpi-InternData-A1/src/openpi/training/utils.py new file mode 100644 index 0000000..fe7f94d --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/training/utils.py @@ -0,0 +1,38 @@ +from collections.abc import Callable +from 
from collections.abc import Callable
from typing import Any

from flax import nnx
from flax import struct
import jax
import optax

from openpi.models import model as _model
from openpi.shared import array_typing as at


@at.typecheck
@struct.dataclass
class TrainState:
    """Pytree container for everything the train step carries between iterations."""

    step: at.Int[at.ArrayLike, ""]  # current optimization step
    params: nnx.State  # trainable parameters
    model_def: nnx.GraphDef[_model.BaseModel]  # static model graph definition
    opt_state: optax.OptState  # optimizer state matching `params`
    tx: optax.GradientTransformation = struct.field(pytree_node=False)  # optimizer (static)

    ema_decay: float | None = struct.field(pytree_node=False)  # EMA coefficient; None disables EMA
    ema_params: nnx.State | None = None  # EMA copy of `params` when enabled


@at.typecheck
def tree_to_info(tree: at.PyTree, interp_func: Callable[[Any], str] = str) -> str:
    """Converts a PyTree into a human-readable string for logging.

    Optionally, `interp_func` can be provided to convert the leaf values to more
    meaningful strings (defaults to `str`).
    """
    leaves, _ = jax.tree_util.tree_flatten_with_path(tree)
    lines = [f"{jax.tree_util.keystr(path)}: {interp_func(value)}" for path, value in leaves]
    return "\n".join(lines)


@at.typecheck
def array_tree_to_info(tree: at.PyTree) -> str:
    """Converts a PyTree of arrays into a human-readable "path: shape@dtype" listing."""
    return tree_to_info(tree, lambda x: f"{x.shape}@{x.dtype}")
@runtime_checkable
class WeightLoader(Protocol):
    def load(self, params: "at.Params") -> "at.Params":
        """Loads the model weights.

        Args:
            params: Parameters of the model. This is a nested structure of array-like
                objects that represent the model's parameters.

        Returns:
            Loaded parameters. The structure must be identical to `params`. If
            returning a subset of the parameters, the loader must merge the loaded
            parameters with `params`.
        """


@dataclasses.dataclass(frozen=True)
class NoOpWeightLoader(WeightLoader):
    """Leaves the given parameters untouched."""

    def load(self, params: "at.Params") -> "at.Params":
        return params


@dataclasses.dataclass(frozen=True)
class CheckpointWeightLoader(WeightLoader):
    """Loads an entire set of weights from a checkpoint.

    Compatible with:
        trained checkpoints (e.g. "./checkpoints/<config>/<exp>/<step>/params")
        released checkpoints (e.g. "gs://openpi-assets/checkpoints/<model>/params")
    """

    params_path: str

    def load(self, params: "at.Params") -> "at.Params":
        # We are loading np.ndarray and relying on the training code to properly
        # convert and shard the params.
        ckpt_path = download.maybe_download(self.params_path)
        loaded_params = _model.restore_params(ckpt_path, restore_type=np.ndarray)
        # Add all missing LoRA weights from the reference params.
        return _merge_params(loaded_params, params, missing_regex=".*lora.*")


@dataclasses.dataclass(frozen=True)
class PaliGemmaWeightLoader(WeightLoader):
    """Loads weights from the official PaliGemma checkpoint.

    This will overwrite existing weights with similar names while keeping all extra
    weights intact. This allows us to support the action expert which is used by the
    Pi0 model.
    """

    params_path: str

    def load(self, params: "at.Params") -> "at.Params":
        with Path(self.params_path).open("rb") as f:
            flat_params = dict(np.load(f, allow_pickle=False))
        nested = flax.traverse_util.unflatten_dict(flat_params, sep="/")
        loaded_params = {"PaliGemma": nested["params"]}
        # Add all missing weights from the reference params.
        return _merge_params(loaded_params, params, missing_regex=".*")
+ + Args: + loaded_params: The parameters to merge. + params: The reference parameters. + missing_regex: A regex pattern for all missing keys that should be merged from the reference parameters. + + Returns: + A new dictionary with the merged parameters. + """ + flat_ref = flax.traverse_util.flatten_dict(params, sep="/") + flat_loaded = flax.traverse_util.flatten_dict(loaded_params, sep="/") + + # First, take all weights that are a subset of the reference weights. + result = {} + for k, v in flat_loaded.items(): + if k in flat_ref: + result[k] = v.astype(flat_ref[k].dtype) if v.dtype != flat_ref[k].dtype else v + + flat_loaded.clear() + + # Then, merge any missing weights as defined by the missing regex. + pattern = re.compile(missing_regex) + for k in {k for k in flat_ref if pattern.fullmatch(k)}: + if k not in result: + result[k] = flat_ref[k] + + return flax.traverse_util.unflatten_dict(result, sep="/") diff --git a/policy/openpi-InternData-A1/src/openpi/transforms.py b/policy/openpi-InternData-A1/src/openpi/transforms.py new file mode 100644 index 0000000..9f24b14 --- /dev/null +++ b/policy/openpi-InternData-A1/src/openpi/transforms.py @@ -0,0 +1,597 @@ +from collections.abc import Callable, Mapping, Sequence +import dataclasses +import re +from typing import Protocol, TypeAlias, TypeVar, runtime_checkable + +import flax.traverse_util as traverse_util +import jax +import numpy as np +from openpi_client import image_tools + +from openpi.models import tokenizer as _tokenizer +from openpi.shared import array_typing as at +from openpi.shared import normalize as _normalize + +from scipy.spatial.transform import Rotation as R +from pdb import set_trace +DataDict: TypeAlias = at.PyTree +NormStats: TypeAlias = _normalize.NormStats + + +T = TypeVar("T") +S = TypeVar("S") + + +@runtime_checkable +class DataTransformFn(Protocol): + def __call__(self, data: DataDict) -> DataDict: + """Apply transformation to the data. + + Args: + data: The data to apply the transform to. 
This is a possibly nested dictionary that contains + unbatched data elements. Each leaf is expected to be a numpy array. Using JAX arrays is allowed + but not recommended since it may result in extra GPU memory usage inside data loader worker + processes. + + Returns: + The transformed data. Could be the input `data` that was modified in place, or a new data structure. + """ + + +@dataclasses.dataclass(frozen=True) +class Group: + """A group of transforms.""" + + # Transforms that are applied to the model input data. + inputs: Sequence[DataTransformFn] = () + + # Transforms that are applied to the model output data. + outputs: Sequence[DataTransformFn] = () + + def push(self, *, inputs: Sequence[DataTransformFn] = (), outputs: Sequence[DataTransformFn] = ()) -> "Group": + """Append transforms to the group and return a new group. + + Args: + inputs: Appended to the *end* of the current input transforms. + outputs: Appended to the *beginning* of the current output transforms. + + Returns: + A new group with the appended transforms. + """ + return Group(inputs=(*self.inputs, *inputs), outputs=(*outputs, *self.outputs)) + + +@dataclasses.dataclass(frozen=True) +class CompositeTransform(DataTransformFn): + """A composite transform that applies a sequence of transforms in order.""" + + transforms: Sequence[DataTransformFn] + + def __call__(self, data: DataDict) -> DataDict: + for transform in self.transforms: + data = transform(data) + return data + + +def compose(transforms: Sequence[DataTransformFn]) -> DataTransformFn: + """Compose a sequence of transforms into a single transform.""" + return CompositeTransform(transforms) + + +@dataclasses.dataclass(frozen=True) +class RepackTransform(DataTransformFn): + """Repacks an input dictionary into a new dictionary. + + Repacking is defined using a dictionary where the keys are the new keys and the values + are the flattened paths to the old keys. We use '/' as the separator during flattening. 
+ + Example: + { + "images": { + "cam_high": "observation.images.top", + "cam_low": "observation.images.bottom", + }, + "state": "observation.state", + "actions": "action", + } + """ + + structure: at.PyTree[str] + + def __call__(self, data: DataDict) -> DataDict: + flat_item = flatten_dict(data) + return jax.tree.map(lambda k: flat_item[k], self.structure) + + +@dataclasses.dataclass(frozen=True) +class ReTransform(DataTransformFn): + """Repacks an input dictionary into a new dictionary. + + Repacking is defined using a dictionary where the keys are the new keys and the values + are the flattened paths to the old keys. We use '/' as the separator during flattening. + + Example: + { + "images": { + "cam_high": "observation.images.top", + "cam_low": "observation.images.bottom", + }, + "state": "observation.state", + "actions": "action", + } + """ + + structure: at.PyTree[str] + + def __call__(self, data: DataDict) -> DataDict: + flat_item = flatten_dict(data) + import pdb + pdb.set_trace() + + return jax.tree.map(lambda k: flat_item[k], self.structure) + + +@dataclasses.dataclass(frozen=True) +class InjectDefaultPrompt(DataTransformFn): + prompt: str | None + + def __call__(self, data: DataDict) -> DataDict: + if self.prompt is not None and "prompt" not in data: + data["prompt"] = np.asarray(self.prompt) + return data + + +@dataclasses.dataclass(frozen=True) +class Normalize(DataTransformFn): + norm_stats: at.PyTree[NormStats] | None + # If true, will use quantile normalization. Otherwise, normal z-score normalization will be used. + use_quantiles: bool = False + # If true, will raise an error if any of the keys in the norm stats are not present in the data. 
+ strict: bool = False + + def __post_init__(self): + if self.norm_stats is not None and self.use_quantiles: + _assert_quantile_stats(self.norm_stats) + + def __call__(self, data: DataDict) -> DataDict: + if self.norm_stats is None: + return data + + return apply_tree( + data, + self.norm_stats, + self._normalize_quantile if self.use_quantiles else self._normalize, + strict=self.strict, + ) + + def _normalize(self, x, stats: NormStats): + mean, std = stats.mean[..., : x.shape[-1]], stats.std[..., : x.shape[-1]] + return (x - mean) / (std + 1e-6) + + def _normalize_quantile(self, x, stats: NormStats): + assert stats.q01 is not None + assert stats.q99 is not None + q01, q99 = stats.q01[..., : x.shape[-1]], stats.q99[..., : x.shape[-1]] + return (x - q01) / (q99 - q01 + 1e-6) * 2.0 - 1.0 + + +@dataclasses.dataclass(frozen=True) +class Unnormalize(DataTransformFn): + norm_stats: at.PyTree[NormStats] | None + # If true, will use quantile normalization. Otherwise, normal z-score normalization will be used. + use_quantiles: bool = False + + def __post_init__(self): + if self.norm_stats is not None and self.use_quantiles: + _assert_quantile_stats(self.norm_stats) + + def __call__(self, data: DataDict) -> DataDict: + if self.norm_stats is None: + return data + + # Make sure that all the keys in the norm stats are present in the data. 
+ return apply_tree( + data, + self.norm_stats, + self._unnormalize_quantile if self.use_quantiles else self._unnormalize, + strict=True, + ) + + def _unnormalize(self, x, stats: NormStats): + mean = pad_to_dim(stats.mean, x.shape[-1], axis=-1, value=0.0) + std = pad_to_dim(stats.std, x.shape[-1], axis=-1, value=1.0) + return x * (std + 1e-6) + mean + + def _unnormalize_quantile(self, x, stats: NormStats): + assert stats.q01 is not None + assert stats.q99 is not None + q01, q99 = stats.q01, stats.q99 + if (dim := q01.shape[-1]) < x.shape[-1]: + return np.concatenate([(x[..., :dim] + 1.0) / 2.0 * (q99 - q01 + 1e-6) + q01, x[..., dim:]], axis=-1) + return (x + 1.0) / 2.0 * (q99 - q01 + 1e-6) + q01 + + +@dataclasses.dataclass(frozen=True) +class ResizeImages(DataTransformFn): + height: int + width: int + + def __call__(self, data: DataDict) -> DataDict: + data["image"] = {k: image_tools.resize_with_pad(v, self.height, self.width) for k, v in data["image"].items()} + return data + + +@dataclasses.dataclass(frozen=True) +class SubsampleActions(DataTransformFn): + stride: int + + def __call__(self, data: DataDict) -> DataDict: + data["actions"] = data["actions"][:: self.stride] + return data + + +@dataclasses.dataclass(frozen=True) +class DeltaActions(DataTransformFn): + """Repacks absolute actions into delta action space.""" + + # Boolean mask for the action dimensions to be repacked into delta action space. Length + # can be smaller than the actual number of dimensions. If None, this transform is a no-op. + # See `make_bool_mask` for more details. 
+ mask: Sequence[bool] | None + + def __call__(self, data: DataDict) -> DataDict: + if "actions" not in data or self.mask is None: + return data + + state, actions = data["state"], data["actions"] + mask = np.asarray(self.mask) + dims = mask.shape[-1] + actions[..., :dims] -= np.expand_dims(np.where(mask, state[..., :dims], 0), axis=-2) + data["actions"] = actions + + return data + +@dataclasses.dataclass(frozen=True) +class DeltaActionsPose(DataTransformFn): + """Repacks absolute actions into delta action space.""" + + # Boolean mask for the action dimensions to be repacked into delta action space. Length + # can be smaller than the actual number of dimensions. If None, this transform is a no-op. + # See `make_bool_mask` for more details. + mask: Sequence[bool] | None + + def __call__(self, data: DataDict) -> DataDict: + # set_trace() + if "actions" not in data or self.mask is None: + return data + + pose, actions = data["pose"], data["actions"] + mask = np.asarray(self.mask) + dims = mask.shape[-1] + act = actions[..., :dims] + st = pose[..., :dims] + pose_mask = mask[:6] + if np.any(pose_mask): + pose_action = act[..., :6] + pose_state = st[..., :6] + + if pose_action.ndim == 2: + rel_list = [] + for i in range(pose_action.shape[0]): + rel_list.append(relative_pose(pose_action[i], pose_state)) + rel_pose = np.stack(rel_list, axis=0) + else: + raise ValueError("pose_action must be dim 2") + + act[..., :6] = rel_pose + data["actions"][..., :dims] = act + del data["pose"] + return data + + +@dataclasses.dataclass(frozen=True) +class AbsoluteActions(DataTransformFn): + """Repacks delta actions into absolute action space.""" + + # Boolean mask for the action dimensions to be repacked into absolute action space. Length + # can be smaller than the actual number of dimensions. If None, this transform is a no-op. + # See `make_bool_mask` for more details. 
+ mask: Sequence[bool] | None + + def __call__(self, data: DataDict) -> DataDict: + if "actions" not in data or self.mask is None: + return data + + state, actions = data["state"], data["actions"] + mask = np.asarray(self.mask) + dims = mask.shape[-1] + actions[..., :dims] += np.expand_dims(np.where(mask, state[..., :dims], 0), axis=-2) + data["actions"] = actions + + return data + +@dataclasses.dataclass(frozen=True) +class AbsoluteActionsPose: + """Convert relative pose actions back into absolute pose actions.""" + + mask: Sequence[bool] | None + + def __call__(self, data): + if "actions" not in data or "pose" not in data or self.mask is None: + return data + + actions = data["actions"] # (T, D) + pose = data["pose"] # (D,) + mask = np.asarray(self.mask) + dims = mask.shape[-1] + + act = actions[..., :dims] + st = pose[..., :dims] + + pose_mask = mask[:6] + if np.any(pose_mask): + pose_action = act[..., :6] + pose_state = st[..., :6] + + abs_list = [] + for i in range(pose_action.shape[0]): + abs_list.append(absolute_pose(pose_action[i], pose_state)) + abs_pose = np.stack(abs_list, axis=0) + act[..., :6] = abs_pose + + data["actions"][..., :dims] = act + return data + + +@dataclasses.dataclass(frozen=True) +class TokenizePrompt(DataTransformFn): + tokenizer: _tokenizer.PaligemmaTokenizer + discrete_state_input: bool = False + + def __call__(self, data: DataDict) -> DataDict: + if (prompt := data.pop("prompt", None)) is None: + raise ValueError("Prompt is required") + + if self.discrete_state_input: + if (state := data.get("state", None)) is None: + raise ValueError("State is required.") + else: + state = None + + if not isinstance(prompt, str): + prompt = prompt.item() + + tokens, token_masks = self.tokenizer.tokenize(prompt, state) + return {**data, "tokenized_prompt": tokens, "tokenized_prompt_mask": token_masks} + + +@dataclasses.dataclass(frozen=True) +class TokenizeFASTInputs(DataTransformFn): + tokenizer: _tokenizer.FASTTokenizer + + def __call__(self, 
data: DataDict) -> DataDict: + if (prompt := data.pop("prompt", None)) is None: + raise ValueError("Prompt is required") + + if not isinstance(prompt, str): + prompt = prompt.item() + + state, actions = data["state"], data.get("actions") + tokens, token_mask, ar_mask, loss_mask = self.tokenizer.tokenize(prompt, state, actions) + return { + **data, + "tokenized_prompt": tokens, + "tokenized_prompt_mask": token_mask, + "token_ar_mask": ar_mask, + "token_loss_mask": loss_mask, + } + + +@dataclasses.dataclass(frozen=True) +class ExtractFASTActions(DataTransformFn): + tokenizer: _tokenizer.FASTTokenizer + action_horizon: int + action_dim: int + + def __call__(self, data: DataDict) -> DataDict: + if "actions" not in data: + return data + # Model outputs are saved in "actions", but for FAST models they represent tokens. + tokens = data.pop("actions") + actions = self.tokenizer.extract_actions(tokens.astype(np.int32), self.action_horizon, self.action_dim) + return { + **data, + "actions": actions, + } + + +@dataclasses.dataclass(frozen=True) +class PromptFromLeRobotTask(DataTransformFn): + """Extracts a prompt from the current LeRobot dataset task.""" + + # Contains the LeRobot dataset tasks (dataset.meta.tasks). 
+ tasks: dict[int, str] + + def __call__(self, data: DataDict) -> DataDict: + if "task_index" not in data: + raise ValueError('Cannot extract prompt without "task_index"') + + task_index = int(data["task_index"]) + if (prompt := self.tasks.get(task_index)) is None: + raise ValueError(f"{task_index=} not found in task mapping: {self.tasks}") + + return {**data, "prompt": prompt} + + +@dataclasses.dataclass(frozen=True) +class PadStatesAndActions(DataTransformFn): + """Zero-pads states and actions to the model action dimension.""" + + model_action_dim: int + + def __call__(self, data: DataDict) -> DataDict: + data["state"] = pad_to_dim(data["state"], self.model_action_dim, axis=-1) + if "actions" in data: + data["actions"] = pad_to_dim(data["actions"], self.model_action_dim, axis=-1) + return data + + +def flatten_dict(tree: at.PyTree) -> dict: + """Flatten a nested dictionary. Uses '/' as the separator.""" + return traverse_util.flatten_dict(tree, sep="/") + + +def unflatten_dict(tree: dict) -> at.PyTree: + """Unflatten a flattened dictionary. Assumes that '/' was used as a separator.""" + return traverse_util.unflatten_dict(tree, sep="/") + + +def transform_dict(patterns: Mapping[str, str | None], tree: at.PyTree) -> at.PyTree: + """Transform the structure of a nested dictionary using a set of patterns. + + The transformation is defined using the `patterns` dictionary. The keys are the + input keys that should be matched and the values are the new names inside the output + dictionary. If the value is None, the input key is removed. + + Both keys and values should represent flattened paths using '/' as the separator. + Keys can be regular expressions and values can include backreferences to the + matched groups (see `re.sub` for more details). Note that the regular expression + must match the entire key. + + The order inside the `patterns` dictionary is important. Only the first pattern that + matches the input key will be used. + + See unit tests for more examples. 
+ + Args: + patterns: A mapping from old keys to new keys. + tree: The nested dictionary to transform. + + Returns: + The transformed nested dictionary. + """ + data = flatten_dict(tree) + + # Compile the patterns. + compiled = {re.compile(k): v for k, v in patterns.items()} + + output = {} + for k in data: + for pattern, repl in compiled.items(): + if pattern.fullmatch(k): + new_k = pattern.sub(repl, k, count=1) if repl is not None else None + break + else: + # Use the original key if no match is found. + new_k = k + + if new_k is not None: + if new_k in output: + raise ValueError(f"Key '{new_k}' already exists in output") + output[new_k] = data[k] + + # Validate the output structure to make sure that it can be unflattened. + names = sorted(output) + for i in range(len(names) - 1): + name, next_name = names[i : i + 2] + if next_name.startswith(name + "/"): + raise ValueError(f"Leaf '{name}' aliases a node of '{next_name}'") + + return unflatten_dict(output) + + +def apply_tree( + tree: at.PyTree[T], selector: at.PyTree[S], fn: Callable[[T, S], T], *, strict: bool = False +) -> at.PyTree[T]: + tree = flatten_dict(tree) + selector = flatten_dict(selector) + + def transform(k: str, v: T) -> T: + if k in selector: + return fn(v, selector[k]) + return v + + if strict: + for k in selector: + if k not in tree: + raise ValueError(f"Selector key {k} not found in tree") + + return unflatten_dict({k: transform(k, v) for k, v in tree.items()}) + + +def pad_to_dim(x: np.ndarray, target_dim: int, axis: int = -1, value: float = 0.0) -> np.ndarray: + """Pad an array to the target dimension with zeros along the specified axis.""" + current_dim = x.shape[axis] + if current_dim < target_dim: + pad_width = [(0, 0)] * len(x.shape) + pad_width[axis] = (0, target_dim - current_dim) + return np.pad(x, pad_width, constant_values=value) + return x + + +def make_bool_mask(*dims: int) -> tuple[bool, ...]: + """Make a boolean mask for the given dimensions. 
def make_bool_mask(*dims: int) -> tuple[bool, ...]:
    """Make a boolean mask from run lengths: a positive entry yields that many True
    values, a negative entry that many False values.

    Example:
        make_bool_mask(2, -2, 2) == (True, True, False, False, True, True)
        make_bool_mask(2, 0, 2) == (True, True, True, True)

    Args:
        dims: The dimensions to make the mask for.

    Returns:
        A tuple of booleans.
    """
    mask: list[bool] = []
    for dim in dims:
        mask.extend([dim > 0] * abs(dim))
    return tuple(mask)


def _assert_quantile_stats(norm_stats: "at.PyTree[NormStats]") -> None:
    """Raise if any stats entry lacks the q01/q99 quantiles required by quantile norm."""
    for key, stats in flatten_dict(norm_stats).items():
        if stats.q01 is None or stats.q99 is None:
            raise ValueError(
                f"quantile stats must be provided if use_quantile_norm is True. Key {key} is missing q01 or q99."
            )


def pose6d_to_pose(pose6d, degrees=False):
    """Convert a 6D pose (xyz position + euler-xyz rotation) into a 4x4 transform.

    pose6d: (6,) -> returns (4, 4) homogeneous matrix.
    """
    transform = np.eye(4)
    transform[:3, :3] = R.from_euler("xyz", pose6d[3:], degrees=degrees).as_matrix()
    transform[:3, 3] = pose6d[:3]
    return transform


def pose_to_6d(pose, degrees=False):
    """Convert a 4x4 homogeneous transform into a 6D pose (xyz + euler-xyz).

    pose: (4, 4) -> returns (6,).
    """
    euler = R.from_matrix(pose[:3, :3]).as_euler("xyz", degrees=degrees)
    return np.concatenate([pose[:3, 3], euler], axis=0)


def relative_pose(pose_action, pose_state):
    """Express the 6D pose `pose_action` in the frame of the 6D pose `pose_state`."""
    action_mat = pose6d_to_pose(pose_action, degrees=False)
    state_mat = pose6d_to_pose(pose_state, degrees=False)
    return pose_to_6d(np.linalg.inv(state_mat) @ action_mat, degrees=False)


def absolute_pose(pose_delta, pose_state):
    """Inverse of `relative_pose`: compose a state-relative 6D pose back to absolute."""
    delta_mat = pose6d_to_pose(pose_delta, degrees=False)
    state_mat = pose6d_to_pose(pose_state, degrees=False)
    return pose_to_6d(state_mat @ delta_mat, degrees=False)
def test_repack_transform():
    # Leaves are looked up via '/'-separated flattened paths.
    transform = _transforms.RepackTransform(
        structure={
            "a": {"b": "b/c"},
            "d": "e/f",
        }
    )
    assert transform({"b": {"c": 1}, "e": {"f": 2}}) == {"a": {"b": 1}, "d": 2}


def test_delta_actions():
    item = {"state": np.array([1, 2, 3]), "actions": np.array([[3, 4, 5], [5, 6, 7]])}
    transformed = _transforms.DeltaActions(mask=[False, True])(item)
    # Only the second dimension becomes a delta; the state is untouched.
    assert np.all(transformed["state"] == np.array([1, 2, 3]))
    assert np.all(transformed["actions"] == np.array([[3, 2, 5], [5, 4, 7]]))


def test_delta_actions_noop():
    item = {"state": np.array([1, 2, 3]), "actions": np.array([[3, 4, 5], [5, 6, 7]])}
    # No-op when the mask is disabled.
    assert _transforms.DeltaActions(mask=None)(item) is item
    # No-op when there are no actions in the input.
    del item["actions"]
    assert _transforms.DeltaActions(mask=[True, False])(item) is item


def test_absolute_actions():
    item = {"state": np.array([1, 2, 3]), "actions": np.array([[3, 4, 5], [5, 6, 7]])}
    transformed = _transforms.AbsoluteActions(mask=[False, True])(item)
    # Only the second dimension gets the state added back; the state is untouched.
    assert np.all(transformed["state"] == np.array([1, 2, 3]))
    assert np.all(transformed["actions"] == np.array([[3, 6, 5], [5, 8, 7]]))
+ del item["actions"] + transform = _transforms.AbsoluteActions(mask=[True, False]) + assert transform(item) is item + + +def test_make_bool_mask(): + assert _transforms.make_bool_mask(2, -2, 2) == (True, True, False, False, True, True) + assert _transforms.make_bool_mask(2, 0, 2) == (True, True, True, True) + + +def test_tokenize_prompt(): + tokenizer = _tokenizer.PaligemmaTokenizer(max_len=12) + transform = _transforms.TokenizePrompt(tokenizer) + + data = transform({"prompt": "Hello, world!"}) + + tok_prompt, tok_mask = tokenizer.tokenize("Hello, world!") + assert np.allclose(tok_prompt, data["tokenized_prompt"]) + assert np.allclose(tok_mask, data["tokenized_prompt_mask"]) + + +def test_tokenize_no_prompt(): + transform = _transforms.TokenizePrompt(_tokenizer.PaligemmaTokenizer()) + + with pytest.raises(ValueError, match="Prompt is required"): + transform({}) + + +def test_transform_dict(): + # Rename and remove keys. + input = {"a": {"b": 1, "c": 2}} + output = _transforms.transform_dict({"a/b": "a/c", "a/c": None}, input) + assert output == {"a": {"c": 1}} + + # Raises and error since the renamed key conflicts with an existing key. + with pytest.raises(ValueError, match="Key 'a/c' already exists in output"): + _transforms.transform_dict({"a/b": "a/c"}, input) + + # Full match is required and so nothing will be removed. + input = {"a": {"b": 1, "c": 2}} + output = _transforms.transform_dict({"a": None}, input) + assert output == input + + # The regex matches the entire key and so the entire input will be removed. + input = {"a": {"b": 1, "c": 2}} + output = _transforms.transform_dict({"a.+": None}, input) + assert output == {} + + # Replace keys using backreferences. All leaves named 'c' are replaced with 'd'. 
+ input = {"a": {"b": 1, "c": 1}, "b": {"c": 2}} + output = _transforms.transform_dict({"(.+)/c": r"\1/d"}, input) + assert output == {"a": {"b": 1, "d": 1}, "b": {"d": 2}} + + +def test_extract_prompt_from_task(): + transform = _transforms.PromptFromLeRobotTask({1: "Hello, world!"}) + + data = transform({"task_index": 1}) + assert data["prompt"] == "Hello, world!" + + with pytest.raises(ValueError, match="task_index=2 not found in task mapping"): + transform({"task_index": 2}) diff --git a/policy/openpi-InternData-A1/stats/real/genie1/Put_the_pen_from_the_table_into_the_pen_holder/norm_stats.json b/policy/openpi-InternData-A1/stats/real/genie1/Put_the_pen_from_the_table_into_the_pen_holder/norm_stats.json new file mode 100644 index 0000000..5cbf65f --- /dev/null +++ b/policy/openpi-InternData-A1/stats/real/genie1/Put_the_pen_from_the_table_into_the_pen_holder/norm_stats.json @@ -0,0 +1,152 @@ +{ + "norm_stats": { + "state": { + "mean": [ + -1.0519208908081055, + 0.5982068181037903, + 0.2764829397201538, + -1.259472370147705, + 0.7162076830863953, + 1.466973900794983, + -0.18341407179832458, + 34.9547233581543, + 1.4191629886627197, + -0.8657249212265015, + -0.5017200708389282, + 0.7024257183074951, + -1.0055301189422607, + -1.4162752628326416, + 0.12899120151996613, + 66.8401107788086 + ], + "std": [ + 0.012867855839431286, + 0.007564420811831951, + 0.0013593174517154694, + 0.013939552940428257, + 0.008034415543079376, + 0.01571628637611866, + 0.00210195011459291, + 0.08414319902658463, + 0.26479974389076233, + 0.23905344307422638, + 0.24278143048286438, + 0.4556991159915924, + 0.44222500920295715, + 0.19030632078647614, + 0.539000391960144, + 35.91145706176758 + ], + "q01": [ + -1.0737202534675598, + 0.5837122011184692, + 0.27438307592868805, + -1.2833692299842834, + 0.7004758152961731, + 1.4395189809799194, + -0.18753521239459514, + 34.91999816894531, + 1.0293428937196731, + -1.2838728544175624, + -1.4010922977998854, + -0.002729081358760599, + 
-2.084379642021656, + -1.599301796913147, + -0.6068893134593963, + 34.90129645996094 + ], + "q99": [ + -1.027233887243271, + 0.6107559287548066, + 0.2792690585434437, + -1.2330448287963867, + 0.730002007484436, + 1.494816918182373, + -0.17982363909482957, + 34.99998399963379, + 2.1198367824316025, + -0.4155521912842989, + -0.2026851963669063, + 1.3580808508209883, + -0.17537792494595061, + -0.8717073945999145, + 1.6897496788501742, + 115.7632218132019 + ] + }, + "actions": { + "mean": [ + 0.000017017902791849338, + -0.000010326287338102702, + -1.8743894543149509e-6, + 0.000018071779777528718, + -0.000010835143257281743, + -0.000021580266547971405, + 2.6908783183898777e-6, + 0.0, + -0.0011350662680342793, + -0.0004963470855727792, + -0.000027940071959164925, + 0.0008035438950173557, + 0.0010923801455646753, + -0.001457883045077324, + -0.000493113708216697, + 0.41480934619903564 + ], + "std": [ + 0.00006368698086589575, + 0.00004052041913382709, + 0.000018129145246348344, + 0.00006776860391255468, + 0.000046643061068607494, + 0.00008730087574804202, + 0.00001405947932653362, + 0.0, + 0.12716317176818848, + 0.11198671162128448, + 0.1053534597158432, + 0.20787598192691803, + 0.19347502291202545, + 0.10215147584676743, + 0.21788786351680756, + 0.4885028600692749 + ], + "q01": [ + -0.000017584037780761727, + -0.00019209766387939453, + -0.00010478839278221131, + -0.00001752669811248776, + -0.00022706947326660157, + -0.0003840570449829101, + -0.000017502316832542416, + 0.0, + -0.35041321077346804, + -0.34078748130798336, + -0.3201787607192993, + -0.6406574100255966, + -0.5746962131023406, + -0.3734826826334, + -0.8102391283512116, + 0.0 + ], + "q99": [ + 0.00029650416374206543, + 0.000017421483993530253, + 0.000034872889518737776, + 0.00031393361091613774, + 0.00001738977432250979, + 0.000017396736145019543, + 0.00006977811157703398, + 0.0, + 0.45724553642272947, + 0.27194371986389165, + 0.31407657871246353, + 0.5496402114629746, + 0.6863849388122558, + 
0.32121847540140136, + 0.5995411795616152, + 0.9998 + ] + } + } +} \ No newline at end of file diff --git a/policy/openpi-InternData-A1/stats/sim b/policy/openpi-InternData-A1/stats/sim new file mode 120000 index 0000000..8a863f6 --- /dev/null +++ b/policy/openpi-InternData-A1/stats/sim @@ -0,0 +1 @@ +/home/internvla/Users/yangyuyin/openpi/opensource/sim \ No newline at end of file diff --git a/policy/openpi-InternData-A1/stats/sim2real/lift2/sort_the_rubbish/norm_stats.json b/policy/openpi-InternData-A1/stats/sim2real/lift2/sort_the_rubbish/norm_stats.json new file mode 100644 index 0000000..ff02f06 --- /dev/null +++ b/policy/openpi-InternData-A1/stats/sim2real/lift2/sort_the_rubbish/norm_stats.json @@ -0,0 +1,136 @@ +{ + "norm_stats": { + "state": { + "mean": [ + 0.07278198003768921, + 1.3749626874923706, + 1.0275450944900513, + -0.3175429403781891, + -0.031400855630636215, + 0.07450000941753387, + 0.0, + -0.062182702124118805, + 1.1001248359680176, + 0.826094925403595, + -0.27029743790626526, + 0.016484776511788368, + -0.07983719557523727, + 0.0 + ], + "std": [ + 0.2493966668844223, + 1.3816585540771484, + 1.0683505535125732, + 0.40367627143859863, + 0.19393578171730042, + 0.3874930441379547, + 0.0, + 0.2290799915790558, + 1.3349381685256958, + 1.0366727113723755, + 0.4106713831424713, + 0.1845966875553131, + 0.40781474113464355, + 0.0 + ], + "q01": [ + -0.502105547964573, + -0.10000448673963547, + -0.10001637041568756, + -1.3024763725280761, + -0.4883152103424072, + -1.183973541688919, + 0.0, + -0.5194591480016708, + -0.10002600401639938, + -0.10003460943698883, + -1.3394330985307694, + -0.46453482048511496, + -1.0444426147699355, + 0.0 + ], + "q99": [ + 0.5214315868020056, + 3.379565859563649, + 2.900210109904408, + 0.23918090438842765, + 0.4690858221054077, + 0.9367179515838626, + 0.0, + 0.49837417399883277, + 3.363055534747243, + 2.8945598488390445, + 0.23353095109462751, + 0.46957369351387035, + 1.2221768005371096, + 0.0 + ] + }, + "actions": { + "mean": [ 
+ -0.0003211508446838707, + 0.000524806440807879, + 0.0009100018069148064, + -0.0013831789838150144, + -0.001640278846025467, + 0.0013033079449087381, + 0.7398703694343567, + 0.001321959774941206, + -0.00611489312723279, + -0.0028704539872705936, + 0.00016590123414061964, + 0.001347821205854416, + -0.0006690842565149069, + 0.7868334650993347 + ], + "std": [ + 0.13259829580783844, + 0.46442505717277527, + 0.3669627010822296, + 0.1361500769853592, + 0.08669629693031311, + 0.18529267609119415, + 0.4387051463127136, + 0.11497953534126282, + 0.4416634142398834, + 0.34725451469421387, + 0.13296839594841003, + 0.07616619765758514, + 0.1774454414844513, + 0.40954434871673584 + ], + "q01": [ + -0.4333923296928406, + -2.2288144336700437, + -1.716401708173752, + -0.4429946930408477, + -0.3105847223997116, + -0.6305994799375534, + 0.0, + -0.4555371717453003, + -2.112543008327484, + -1.647935865688324, + -0.43935268249511705, + -0.2541945210218429, + -0.7101873461723329, + 0.0 + ], + "q99": [ + 0.5244881219625472, + 1.4103552433013915, + 0.9758066391468048, + 0.5010000600814819, + 0.31373253192901607, + 0.7424588066577913, + 0.9998, + 0.3881184175968171, + 1.3495609176158903, + 0.9334945859909056, + 0.5219774361133576, + 0.2944462936878205, + 0.6040878240108487, + 0.9998 + ] + } + } +} \ No newline at end of file diff --git a/policy/openpi-InternData-A1/uv.lock b/policy/openpi-InternData-A1/uv.lock new file mode 100644 index 0000000..11ee7ef --- /dev/null +++ b/policy/openpi-InternData-A1/uv.lock @@ -0,0 +1,5447 @@ +version = 1 +requires-python = ">=3.11" +resolution-markers = [ + "python_full_version >= '3.13' and sys_platform == 'darwin'", + "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version 
>= '3.13' and sys_platform == 'emscripten'", + "python_full_version == '3.12.*' and sys_platform == 'darwin'", + "python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'emscripten'", + "python_full_version < '3.12' and sys_platform == 'darwin'", + "python_full_version < '3.12' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version < '3.12' and sys_platform == 'emscripten'", +] + +[manifest] +members = [ + "openpi", + "openpi-client", +] +overrides = [ + { name = "ml-dtypes", specifier = "==0.4.1" }, + { name = "tensorstore", specifier = "==0.1.74" }, +] + +[[package]] +name = "absl-py" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/03/15/18693af986560a5c3cc0b84a8046b536ffb2cdb536e03cce897f2759e284/absl_py-2.3.0.tar.gz", hash = "sha256:d96fda5c884f1b22178852f30ffa85766d50b99e00775ea626c23304f582fc4f", size = 116400 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/04/9d75e1d3bb4ab8ec67ff10919476ccdee06c098bcfcf3a352da5f985171d/absl_py-2.3.0-py3-none-any.whl", hash = "sha256:9824a48b654a306168f63e0d97714665f8490b8d89ec7bf2efc24bf67cf579b3", size = 135657 }, +] + +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265 }, +] + +[[package]] +name = "aiohttp" +version = "3.12.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohappyeyeballs" }, + { name = "aiosignal" }, + { name = "attrs" }, + { name = "frozenlist" }, + { name = "multidict" }, + { name = "propcache" }, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/57/77/92b356837fad83cc5709afc0b6e21dce65a413293fed15e6999bafdf36b0/aiohttp-3.12.4.tar.gz", hash = "sha256:d8229b412121160740f5745583c786f3f494d2416fe5f76aabd815da6ab6b193", size = 7781788 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/5e/bd16acce20e07e01d7db8f9a5102714f90928f87ec9cb248db642893ebdf/aiohttp-3.12.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6cfe7a78ed06047420f7709b9ae438431ea2dc50a9c00960a4b996736f1a70a3", size = 702194 }, + { url = "https://files.pythonhosted.org/packages/65/1d/cc50b39ca7a24c28e5e79ec7c5a3682c84af76d814f2e1284e1aa473122c/aiohttp-3.12.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1188186a118a6793b1e510399f5deb2dcab9643af05fd5217f7f5b067b863671", size = 474473 }, + { url = "https://files.pythonhosted.org/packages/52/6b/bf1ff91cb6eda30964c29a7fbe2a294db00724ceab344696eeebfe4c9ccf/aiohttp-3.12.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d54362f38f532869553a38328931f5f150f0f4fdbee8e122da447663a86552c5", size = 462734 }, + { url = 
"https://files.pythonhosted.org/packages/7c/c3/846872117cc6db1db1b86d20119a3132b8519144d5e710c2e066d07cac86/aiohttp-3.12.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4299504448f37ea9803e6ec99295d7a84a66e674300daa51ca69cace8b7ae31a", size = 1732930 }, + { url = "https://files.pythonhosted.org/packages/d0/bd/df557ee83c3e36945499317b9f51dab642c17c779c939fe2df4c0307b85e/aiohttp-3.12.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:1972bac2ee5dc283ccee3d58501bba08599d58dad6dbbbf58da566dc1a3ac039", size = 1681599 }, + { url = "https://files.pythonhosted.org/packages/1b/b9/e043c06325300644fed7685f904323ecf937adc99971ac229ab97b0769d2/aiohttp-3.12.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a286d40eb51d2908130b4e64ca8ae1a1fdf20657ef564eea2556255d52e2147b", size = 1780391 }, + { url = "https://files.pythonhosted.org/packages/6c/98/a43da221916db0b9567914e41de5a7e008904b9301540614feab2a03ee45/aiohttp-3.12.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94650ff81e7370ceb79272914be8250558d595864cb0cc3e9c6932a16738e33b", size = 1819437 }, + { url = "https://files.pythonhosted.org/packages/bb/9d/e315bdfc2e8ba0382699e686330b588f135189c51df79689e6a843513eb0/aiohttp-3.12.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03a2ca7b7e9436ae933d89d41f21ef535f21dcdc883820544102ddda63b595c2", size = 1721898 }, + { url = "https://files.pythonhosted.org/packages/c1/a4/8250493ab4e540df5a3672e5d01c28ca71fd31b4a9afc217c9678ca350e3/aiohttp-3.12.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea47b02ec80408bed4d59b3b824b44514173e4ebd0bc04a901ffd12084142451", size = 1658974 }, + { url = "https://files.pythonhosted.org/packages/94/d3/06c8ba3afb270afa44ffb6cf3fb0a44502be347f0fc7fdce290a60760197/aiohttp-3.12.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:41a6ea58ed974e67d75b39536997d81288a04844d8162194d3947cbff52b093d", size = 1707245 }, + { url = "https://files.pythonhosted.org/packages/da/5c/d889d8edca8cdb6bb0ff9cfa58b3977320186050c8cfe2f4ceeee149b498/aiohttp-3.12.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:d563387ae8966b6668162698a66495c5d72ce864405a7dfc6cc9c4bc851a63ce", size = 1702405 }, + { url = "https://files.pythonhosted.org/packages/e9/db/809ac0c7fa7ddfad33ab888fe3c83aecbfc7f03e44f387a70c20a0a096b7/aiohttp-3.12.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b853c7f7664742d48c57f382ebae5c76efa7f323569c6d93866795092485deec", size = 1682593 }, + { url = "https://files.pythonhosted.org/packages/35/85/9e1f9c7f0b0f70dfae55932c1f080230f885f84137132efc639e98611347/aiohttp-3.12.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:5d74f5fadbab802c598b440b4aecfeadc99194535d87db5764b732a52a0527fb", size = 1776193 }, + { url = "https://files.pythonhosted.org/packages/83/12/b6b7b9c2d08c5346473878575195468a585041daa816ffbd97156c960ed0/aiohttp-3.12.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f5065674d38b4a738f38b344429e3688fdcccc9d2d5ec50ca03af5dbf91307e", size = 1796654 }, + { url = "https://files.pythonhosted.org/packages/b7/09/0500ae6b1174abc74ab1a7a36033ecffc11e46e47a23487d75fa00d04b46/aiohttp-3.12.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:567db7411a004acd82be2499c10a22e06d4acb51929ce353a62f02f61d005e1c", size = 1709713 }, + { url = "https://files.pythonhosted.org/packages/7b/55/8f5faa6e13c51609430081b42c39eb12006c9fb9111eeaedca0f3f574d3b/aiohttp-3.12.4-cp311-cp311-win32.whl", hash = "sha256:4bc000b0eee7c4b8fdc13349ab106c4ff15e6f6c1afffb04a8f5af96f1b89af3", size = 419713 }, + { url = "https://files.pythonhosted.org/packages/6a/a9/97e318bfb3fc7a0cffc9dee9f0ec77db5339207887f5f4ebe1a11ecd5f32/aiohttp-3.12.4-cp311-cp311-win_amd64.whl", hash = "sha256:44f1cb869916ba52b7876243b6bb7841430846b66b61933b8e96cfaf44515b78", size = 444103 }, + { url = 
"https://files.pythonhosted.org/packages/6c/9a/767c8f6520d0ad023d6b975f8fda71b506f64ad597bb7bd16fa5ac1562ca/aiohttp-3.12.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:7947933c67eb33f51076cabf99f9977260329759d66c4d779c6b8e35c71a96bf", size = 693297 }, + { url = "https://files.pythonhosted.org/packages/82/a1/21eddeee169306c974095183c8820a807c3f05dbefcd6b674a52d18e4090/aiohttp-3.12.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bb046723c90db9ecba67549ab5614707168ba7424742cfab40c198d8d75176e4", size = 467909 }, + { url = "https://files.pythonhosted.org/packages/0d/fc/17093fe2d7e4287218fb99b18a6106b0e1fad8a95f974066f8b5fefb0fbc/aiohttp-3.12.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5fe52157c5e160eac99bb3589c2f29186d233fc83f6f42315c828f7e115f87f5", size = 460750 }, + { url = "https://files.pythonhosted.org/packages/f8/4f/6ea71dd61725bdaa9437f1a9f032781c5d869046651ad43a93d769855298/aiohttp-3.12.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5bf2015822cf7177957b8573a5997c3a00b93cd2f40aa8f5155649014563bd8", size = 1707546 }, + { url = "https://files.pythonhosted.org/packages/cc/79/a91f52b0d4e4462ebf37b176164d0f26b065f80f7db1dfe9b44fd9e8f8ac/aiohttp-3.12.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:db28a058b837c2a8cbebd0fae78299a41691694e536bb2ad77377bd4978b8372", size = 1690196 }, + { url = "https://files.pythonhosted.org/packages/d5/e2/5682bfb2583b55f23d785084bf2237339ebebe73cc0734fa8848d33a270c/aiohttp-3.12.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac155f380e100825fe2ae59b5d4e297fea98d90f5b7df5b27a9096992d8672dd", size = 1745291 }, + { url = "https://files.pythonhosted.org/packages/90/1d/5016430fa2ed0d58ca6d6b0f4a1f929c353f72996c95ec33882cd18ed867/aiohttp-3.12.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2de98a1fa249d35f05a6a7525e5823260e8b0c252d72c9cf39d0f945c38da0c7", size = 1791444 
}, + { url = "https://files.pythonhosted.org/packages/2b/49/33fd3f82ff187b6d982633962afad24bb459ee1cd357399b7545c8e6ed98/aiohttp-3.12.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4c2de2077ee70b93015b4a74493964d891e730d238371c8d4b70413be36b0cf", size = 1710885 }, + { url = "https://files.pythonhosted.org/packages/d5/11/e895cb33fca34cec9aa375615ba0d4810a3be601962066444b07a90bc306/aiohttp-3.12.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:058199018d700883c86c473814fb0ecabb4e3ae39bafcbc77ed2c94199e5affb", size = 1626686 }, + { url = "https://files.pythonhosted.org/packages/b2/e9/3c98778dbda7cb4c94ddada97cb9ea6d7d5140b487a0444817f8b6a94697/aiohttp-3.12.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b6586aaccf46bc5ae05598fcd09a26fbc9186284eb2551d3262f31a8ec79a463", size = 1687746 }, + { url = "https://files.pythonhosted.org/packages/45/7b/fdb43d32ac2819e181e1339aae1bc7acb87e47452af64409181a2bce2426/aiohttp-3.12.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ededddd6fcc8f4403135609d7fb4bc1c1300464ff8fd57fb097b08cc136f18ea", size = 1709199 }, + { url = "https://files.pythonhosted.org/packages/bb/d9/b7a37bed158bd4aced1585b89082a8642e516f5b08637d7d15971f61ba31/aiohttp-3.12.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:158495f1d1858c07cc691624ccc92498410edfa57900452948f7eb6bc1be4c39", size = 1649853 }, + { url = "https://files.pythonhosted.org/packages/42/4f/7e4d1c52f6e15c59e2f3154d9431a029aab558735e94fec85602207fee8a/aiohttp-3.12.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:41c064200045c344850688b4d7723ebf163b92bfc7c216c29a938d1051385c1c", size = 1729413 }, + { url = "https://files.pythonhosted.org/packages/94/83/2987339271a4d8915370614d0bd6b26b7e50d905adf7398636a278ca059a/aiohttp-3.12.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0834ec8491451780a2a05b0f3a83675911bb0804273ceafcd282bff2548ed962", size = 1757386 }, + { url = 
"https://files.pythonhosted.org/packages/d2/27/3d0fc578531820d166e51024e86b8d35feaa828aa961909396f7cce7a191/aiohttp-3.12.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2a81e4ebbc8d9fb6748046577525ada0c5292606ced068ec9ab3aa6d653bf5d9", size = 1716999 }, + { url = "https://files.pythonhosted.org/packages/a9/87/1b5466145a55ebf6145eea5e58e5311653946e518e6e04d971acbae81b09/aiohttp-3.12.4-cp312-cp312-win32.whl", hash = "sha256:73cf6ed61849769dce058a6945d7c63da0798e409494c9ca3fddf5b526f7aee4", size = 414443 }, + { url = "https://files.pythonhosted.org/packages/70/0c/c11464953fff9c005e700e060b98436960d85bb60104af868bf5ebec6ace/aiohttp-3.12.4-cp312-cp312-win_amd64.whl", hash = "sha256:1e29de2afbe9c777ff8c58900e19654bf435069535a3a182a50256c8cd3eea17", size = 440544 }, + { url = "https://files.pythonhosted.org/packages/b3/c5/acc9a65cd92b263050dcc2986e2aee598fc6f3e0b251c9ce7138bf9f387c/aiohttp-3.12.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:789e9ddd591a3161a4e222942e10036d3fb4477464d9a454be2613966b0bce6b", size = 687716 }, + { url = "https://files.pythonhosted.org/packages/3b/8b/c36084efb762c8b388e35b564c5c87d287e4d24a77422f7570e36f8195f4/aiohttp-3.12.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8eb37972e6aebe4cab53b0008c4ca7cd412f3f01872f255763ac4bb0ce253d83", size = 465372 }, + { url = "https://files.pythonhosted.org/packages/d0/d5/c390226c7f0a2a0e4a7477fb293d311157092231fdb7ab79eb8ad325b3b0/aiohttp-3.12.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ca6af3e929de2c2d3272680437ee5b1e32fa4ac1fb9dfdcc06f5441542d06110", size = 457673 }, + { url = "https://files.pythonhosted.org/packages/bc/1a/fdf6ade28154d249b605a6e85f7eb424363618ebcb35f93a7f837fd1f9c9/aiohttp-3.12.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a9b8b482be5c81ceee91fecead2c82b7bec7cfb8b81c0389d6fa4cd82f3bb53", size = 1696485 }, + { url = 
"https://files.pythonhosted.org/packages/71/02/1670b62c82d6e19c77df235b96a56ec055eb40d63b6feff93146544d0224/aiohttp-3.12.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b3f9d7c7486f28cc0fd6bfe5b9accc4ecfe3d4f0471ec53e08aa610e5642dbf3", size = 1677750 }, + { url = "https://files.pythonhosted.org/packages/af/eb/75c9863328a9f1f7200ebadf0fefec3a50a2f31e9ccf489faf9c132b87ad/aiohttp-3.12.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e42986c6fc949926bcf0928b5440e6adf20b9a14c04dd9ea5e3ba9c7bbd4433a", size = 1729821 }, + { url = "https://files.pythonhosted.org/packages/8a/ac/75ef05d10aae033d9bc87d0eea35d904e505c0a7a5d7c7838d1d8b63e954/aiohttp-3.12.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:58dded319d52e63ea3c40dbae3f44c1264fa4bb692845b7ff8ce1ddc9319fce3", size = 1779191 }, + { url = "https://files.pythonhosted.org/packages/b3/5e/36e5957a073dddb69ed37e5ffa8581548d5d7b9d00daa4ba98fff6c85219/aiohttp-3.12.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1102668bf8c4b744528ef0b5bdaeeb17930832653d1ed9558ab59a0fae91dcf9", size = 1701521 }, + { url = "https://files.pythonhosted.org/packages/4e/98/16c3dc7c2534d5109f02da5c88e34e327d8ceddb9b976b4861d787461a59/aiohttp-3.12.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e46c5ad27747416ef0a914da2ad175d9066d8d011960f7b66c9b4f02ef7acfcc", size = 1615227 }, + { url = "https://files.pythonhosted.org/packages/74/cb/87eaf79aa41a6bc99c3dd1219caf190f282b5742647bf3abb7b66b7eb221/aiohttp-3.12.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cbcde696c4d4d07b616e10f942e183f90a86ff65e27a03c338067deb1204b148", size = 1668248 }, + { url = "https://files.pythonhosted.org/packages/d6/04/2ff57af92f76b0973652710bf9a539d66eb78b4cddace90fc39a5b04bdd7/aiohttp-3.12.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:002e027d4840cb187e5ba6889043e1e90ed114ef8e798133d51db834696a6de2", size = 1699915 }, + { url = "https://files.pythonhosted.org/packages/15/d6/0d9916e03cebd697b3c4fc48998733188e8b834368e727b46650a3a1b005/aiohttp-3.12.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cf12c660159897cebdd3ab377550b3563218286f33a57f56753018b1897796ae", size = 1642508 }, + { url = "https://files.pythonhosted.org/packages/83/b4/9cf887a3d2cf58828ac6a076d240171d6196dcf7d1edafcb005103f457fb/aiohttp-3.12.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c9e3db6a3c3e53e48b3324eb40e7c5da2a4c78cdcd3ac4e7d7945876dd421de1", size = 1718642 }, + { url = "https://files.pythonhosted.org/packages/e5/b0/266567f3c5232e211f1c9bea121a05d115a3f7761c7029ff4ee4f88e6fba/aiohttp-3.12.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e10365dcf61a7c5ed9287c4e20edc0d7a6cc09faf042d7dc570f16ed3291c680", size = 1752113 }, + { url = "https://files.pythonhosted.org/packages/61/f9/58b3ce002d1b0b3630ccd02ecbfc6932d00242eb40182e76a65ddbf6ec26/aiohttp-3.12.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c20421e165410bb632f64c5693b1f69e6911dbde197fa0dcd3a0c65d505f776b", size = 1701004 }, + { url = "https://files.pythonhosted.org/packages/ee/7c/c1a5e7704fef91f115bd399e47b9613cf11c8caec041a326e966f190c994/aiohttp-3.12.4-cp313-cp313-win32.whl", hash = "sha256:834a2f08eb800af07066af9f26eda4c2d6f7fe0737a3c0aef448f1ba8132fed9", size = 413468 }, + { url = "https://files.pythonhosted.org/packages/65/31/e252246332a12abf17f66c8f8360730a5a3a1dd354ca48ccfb90bbb122db/aiohttp-3.12.4-cp313-cp313-win_amd64.whl", hash = "sha256:4c78018c4e8118efac767d5d91c3565919c7e021762c4644198ec5b8d426a071", size = 439411 }, +] + +[[package]] +name = "aiosignal" +version = "1.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/ba/b5/6d55e80f6d8a08ce22b982eafa278d823b541c925f11ee774b0b9c43473d/aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54", size = 19424 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/6a/bc7e17a3e87a2985d3e8f4da4cd0f481060eb78fb08596c42be62c90a4d9/aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5", size = 7597 }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 }, +] + +[[package]] +name = "antlr4-python3-runtime" +version = "4.9.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3e/38/7859ff46355f76f8d19459005ca000b6e7012f2f1ca597746cbcd1fbfe5e/antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b", size = 117034 } + +[[package]] +name = "appnope" +version = "0.1.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/35/5d/752690df9ef5b76e169e68d6a129fa6d08a7100ca7f754c89495db3c6019/appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee", size = 4170 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/81/29/5ecc3a15d5a33e31b26c11426c45c501e439cb865d0bff96315d86443b78/appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c", size = 4321 }, +] + +[[package]] +name = "array-record" +version = "0.7.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py", marker = "sys_platform == 'linux'" }, + { name = "etils", extra = ["epath"], marker = "sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/9e/df7e365bb7516b90709964bd7ca851ad03276a3b33331939bed5cb6d9377/array_record-0.7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9426431053276e61c9c952393ff37c80825b4edc2fde32aee18b8dc0d653f19c", size = 2135376 }, + { url = "https://files.pythonhosted.org/packages/55/44/944dcbf3c398f0b4c6158d02f6fb70124353cd33bf11c66cdc6c80eb7381/array_record-0.7.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71437d5acf00d7120dfe7fbfa067efde61947e696ca232d2ebd89646903699e3", size = 2195745 }, + { url = "https://files.pythonhosted.org/packages/36/f5/df0e0f0c804807bc0c46d0f9ac8d64dd27bba1a1097e8a9173ed9d2ec07d/array_record-0.7.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e4a502d5e2b65c2d36d281b8d0a2686836e9213da600431f933beaa27702e68", size = 2135019 }, + { url = "https://files.pythonhosted.org/packages/c6/45/e563b02f3b6e312667ecdb908d69617895c368ee4c88a6934845dbc8b608/array_record-0.7.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4be715d0d8575e53b3493af6103d6852e93a535810034657a063da5b11a9fd94", size = 2195948 }, + { url = "https://files.pythonhosted.org/packages/28/e5/390c49785dd1d6589c9bb6a1713843f286908ca6b52ed7f4cf79da1567c9/array_record-0.7.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:67c7599dcd834467f89d945e9591dce2b2d3b538b3603258379814ae9f40e3a7", size = 2135039 }, + { url = "https://files.pythonhosted.org/packages/80/00/a1e085ff62a90658b989e004d3c3587f04955570d210d2035221a9c3468c/array_record-0.7.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0bb52fc17574fcd5c0e5d86becd6d4096fca7945a0e70e45d7c68dda80145c04", size = 2196006 }, +] + +[[package]] +name = "asttokens" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918 }, +] + +[[package]] +name = "astunparse" +version = "1.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, + { name = "wheel" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz", hash = "sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872", size = 18290 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2b/03/13dde6512ad7b4557eb792fbcf0c653af6076b81e5941d36ec61f7ce6028/astunparse-1.6.3-py2.py3-none-any.whl", hash = "sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8", size = 12732 }, +] + +[[package]] +name = "attrs" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = 
"sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815 }, +] + +[[package]] +name = "augmax" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "einops" }, + { name = "jax" }, + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/45/f0/0ab2080eb132cf9bb70ee96e80ff57be323b09aed563825058760404e383/augmax-0.4.1.tar.gz", hash = "sha256:d8e645203f535e243a3b16fb3634b10d4f168d1b9cfde4cda3892ab22bf31e8d", size = 11497 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/3e/1cc3a97f3adbca740310de33ff41fd141f7cd9b2b5baafdfbc3dd6526255/augmax-0.4.1-py3-none-any.whl", hash = "sha256:60f9711a4ffc08f27d1ff0783f7c51c01e6f78e20d4581d075ebf2d904ab2d14", size = 17299 }, +] + +[[package]] +name = "av" +version = "14.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/86/f6/0b473dab52dfdea05f28f3578b1c56b6c796ce85e76951bab7c4e38d5a74/av-14.4.0.tar.gz", hash = "sha256:3ecbf803a7fdf67229c0edada0830d6bfaea4d10bfb24f0c3f4e607cd1064b42", size = 3892203 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/8a/d57418b686ffd05fabd5a0a9cfa97e63b38c35d7101af00e87c51c8cc43c/av-14.4.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5b21d5586a88b9fce0ab78e26bd1c38f8642f8e2aad5b35e619f4d202217c701", size = 19965048 }, + { url = "https://files.pythonhosted.org/packages/f5/aa/3f878b0301efe587e9b07bb773dd6b47ef44ca09a3cffb4af50c08a170f3/av-14.4.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:cf8762d90b0f94a20c9f6e25a94f1757db5a256707964dfd0b1d4403e7a16835", size = 23750064 }, + { url = 
"https://files.pythonhosted.org/packages/9a/b4/6fe94a31f9ed3a927daa72df67c7151968587106f30f9f8fcd792b186633/av-14.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0ac9f08920c7bbe0795319689d901e27cb3d7870b9a0acae3f26fc9daa801a6", size = 33648775 }, + { url = "https://files.pythonhosted.org/packages/6c/f3/7f3130753521d779450c935aec3f4beefc8d4645471159f27b54e896470c/av-14.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a56d9ad2afdb638ec0404e962dc570960aae7e08ae331ad7ff70fbe99a6cf40e", size = 32216915 }, + { url = "https://files.pythonhosted.org/packages/f8/9a/8ffabfcafb42154b4b3a67d63f9b69e68fa8c34cb39ddd5cb813dd049ed4/av-14.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bed513cbcb3437d0ae47743edc1f5b4a113c0b66cdd4e1aafc533abf5b2fbf2", size = 35287279 }, + { url = "https://files.pythonhosted.org/packages/ad/11/7023ba0a2ca94a57aedf3114ab8cfcecb0819b50c30982a4c5be4d31df41/av-14.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d030c2d3647931e53d51f2f6e0fcf465263e7acf9ec6e4faa8dbfc77975318c3", size = 36294683 }, + { url = "https://files.pythonhosted.org/packages/3d/fa/b8ac9636bd5034e2b899354468bef9f4dadb067420a16d8a493a514b7817/av-14.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1cc21582a4f606271d8c2036ec7a6247df0831050306c55cf8a905701d0f0474", size = 34552391 }, + { url = "https://files.pythonhosted.org/packages/fb/29/0db48079c207d1cba7a2783896db5aec3816e17de55942262c244dffbc0f/av-14.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ce7c9cd452153d36f1b1478f904ed5f9ab191d76db873bdd3a597193290805d4", size = 37265250 }, + { url = "https://files.pythonhosted.org/packages/1c/55/715858c3feb7efa4d667ce83a829c8e6ee3862e297fb2b568da3f968639d/av-14.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd261e31cc6b43ca722f80656c39934199d8f2eb391e0147e704b6226acebc29", size = 27925845 }, + { url = 
"https://files.pythonhosted.org/packages/a6/75/b8641653780336c90ba89e5352cac0afa6256a86a150c7703c0b38851c6d/av-14.4.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:a53e682b239dd23b4e3bc9568cfb1168fc629ab01925fdb2e7556eb426339e94", size = 19954125 }, + { url = "https://files.pythonhosted.org/packages/99/e6/37fe6fa5853a48d54d749526365780a63a4bc530be6abf2115e3a21e292a/av-14.4.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:5aa0b901751a32703fa938d2155d56ce3faf3630e4a48d238b35d2f7e49e5395", size = 23751479 }, + { url = "https://files.pythonhosted.org/packages/f7/75/9a5f0e6bda5f513b62bafd1cff2b495441a8b07ab7fb7b8e62f0c0d1683f/av-14.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3b316fed3597675fe2aacfed34e25fc9d5bb0196dc8c0b014ae5ed4adda48de", size = 33801401 }, + { url = "https://files.pythonhosted.org/packages/6a/c9/e4df32a2ad1cb7f3a112d0ed610c5e43c89da80b63c60d60e3dc23793ec0/av-14.4.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a587b5c5014c3c0e16143a0f8d99874e46b5d0c50db6111aa0b54206b5687c81", size = 32364330 }, + { url = "https://files.pythonhosted.org/packages/ca/f0/64e7444a41817fde49a07d0239c033f7e9280bec4a4bb4784f5c79af95e6/av-14.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d53f75e8ac1ec8877a551c0db32a83c0aaeae719d05285281eaaba211bbc30", size = 35519508 }, + { url = "https://files.pythonhosted.org/packages/c2/a8/a370099daa9033a3b6f9b9bd815304b3d8396907a14d09845f27467ba138/av-14.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c8558cfde79dd8fc92d97c70e0f0fa8c94c7a66f68ae73afdf58598f0fe5e10d", size = 36448593 }, + { url = "https://files.pythonhosted.org/packages/27/bb/edb6ceff8fa7259cb6330c51dbfbc98dd1912bd6eb5f7bc05a4bb14a9d6e/av-14.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:455b6410dea0ab2d30234ffb28df7d62ca3cdf10708528e247bec3a4cdcced09", size = 34701485 }, + { url = 
"https://files.pythonhosted.org/packages/a7/8a/957da1f581aa1faa9a5dfa8b47ca955edb47f2b76b949950933b457bfa1d/av-14.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1661efbe9d975f927b8512d654704223d936f39016fad2ddab00aee7c40f412c", size = 37521981 }, + { url = "https://files.pythonhosted.org/packages/28/76/3f1cf0568592f100fd68eb40ed8c491ce95ca3c1378cc2d4c1f6d1bd295d/av-14.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:fbbeef1f421a3461086853d6464ad5526b56ffe8ccb0ab3fd0a1f121dfbf26ad", size = 27925944 }, + { url = "https://files.pythonhosted.org/packages/12/4c/b0205f77352312ff457ecdf31723dbf4403b7a03fc1659075d6d32f23ef7/av-14.4.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:3d2aea7c602b105363903e4017103bc4b60336e7aff80e1c22e8b4ec09fd125f", size = 19917341 }, + { url = "https://files.pythonhosted.org/packages/e1/c4/9e783bd7d47828e9c67f9c773c99de45c5ae01b3e942f1abf6cbaf530267/av-14.4.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:38c18f036aeb6dc9abf5e867d998c867f9ec93a5f722b60721fdffc123bbb2ae", size = 23715363 }, + { url = "https://files.pythonhosted.org/packages/b5/26/b2b406a676864d06b1c591205782d8527e7c99e5bc51a09862c3576e0087/av-14.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58c1e18c8be73b6eada2d9ec397852ec74ebe51938451bdf83644a807189d6c8", size = 33496968 }, + { url = "https://files.pythonhosted.org/packages/89/09/0a032bbe30c7049fca243ec8cf01f4be49dd6e7f7b9c3c7f0cc13f83c9d3/av-14.4.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4c32ff03a357feb030634f093089a73cb474b04efe7fbfba31f229cb2fab115", size = 32075498 }, + { url = "https://files.pythonhosted.org/packages/0b/1f/0fee20f74c1f48086366e59dbd37fa0684cd0f3c782a65cbb719d26c7acd/av-14.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af31d16ae25964a6a02e09cc132b9decd5ee493c5dcb21bcdf0d71b2d6adbd59", size = 35224910 }, + { url = 
"https://files.pythonhosted.org/packages/9e/19/1c4a201c75a2a431a85a43fd15d1fad55a28c22d596461d861c8d70f9b92/av-14.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e9fb297009e528f4851d25f3bb2781b2db18b59b10aed10240e947b77c582fb7", size = 36172918 }, + { url = "https://files.pythonhosted.org/packages/00/48/26b7e5d911c807f5f017a285362470ba16f44e8ea46f8b09ab5e348dd15b/av-14.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:573314cb9eafec2827dc98c416c965330dc7508193adbccd281700d8673b9f0a", size = 34414492 }, + { url = "https://files.pythonhosted.org/packages/6d/26/2f4badfa5b5b7b8f5f83d562b143a83ed940fa458eea4cad495ce95c9741/av-14.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f82ab27ee57c3b80eb50a5293222307dfdc02f810ea41119078cfc85ea3cf9a8", size = 37245826 }, + { url = "https://files.pythonhosted.org/packages/f4/02/88dbb6f5a05998b730d2e695b05060297af127ac4250efbe0739daa446d5/av-14.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f682003bbcaac620b52f68ff0e85830fff165dea53949e217483a615993ca20", size = 27898395 }, +] + +[[package]] +name = "beartype" +version = "0.19.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/e1/00515b97afa3993b4a314e4bc168fbde0917fd5845435cb6f16a19770746/beartype-0.19.0.tar.gz", hash = "sha256:de42dfc1ba5c3710fde6c3002e3bd2cad236ed4d2aabe876345ab0b4234a6573", size = 1294480 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/69/f6db6e4cb2fe2f887dead40b76caa91af4844cb647dd2c7223bb010aa416/beartype-0.19.0-py3-none-any.whl", hash = "sha256:33b2694eda0daf052eb2aff623ed9a8a586703bbf0a90bbc475a83bbf427f699", size = 1039760 }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.13.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/d8/e4/0c4c39e18fd76d6a628d4dd8da40543d136ce2d1752bd6eeeab0791f4d6b/beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195", size = 621067 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/cd/30110dc0ffcf3b131156077b90e9f60ed75711223f306da4db08eff8403b/beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b", size = 187285 }, +] + +[[package]] +name = "blinker" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/28/9b3f50ce0e048515135495f198351908d99540d69bfdc8c1d15b73dc55ce/blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf", size = 22460 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/10/cb/f2ad4230dc2eb1a74edf38f1a38b9b52277f75bef262d8908e60d957e13c/blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc", size = 8458 }, +] + +[[package]] +name = "cachetools" +version = "5.5.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/81/3747dad6b14fa2cf53fcf10548cf5aea6913e96fab41a3c198676f8948a5/cachetools-5.5.2.tar.gz", hash = "sha256:1a661caa9175d26759571b2e19580f9d6393969e5dfca11fdb1f947a23e640d4", size = 28380 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/72/76/20fa66124dbe6be5cafeb312ece67de6b61dd91a0247d1ea13db4ebb33c2/cachetools-5.5.2-py3-none-any.whl", hash = "sha256:d26a22bcc62eb95c3beabd9f1ee5e820d3d2704fe2967cbe350e20c8ffcd3f0a", size = 10080 }, +] + +[[package]] +name = "certifi" +version = "2025.4.26" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/e8/9e/c05b3920a3b7d20d3d3310465f50348e5b3694f4f88c6daf736eef3024c4/certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6", size = 160705 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4a/7e/3db2bd1b1f9e95f7cddca6d6e75e2f2bd9f51b1246e546d88addca0106bd/certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3", size = 159618 }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/f4/927e3a8899e52a27fa57a48607ff7dc91a9ebe97399b357b85a0c7892e00/cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", size = 182264 }, + { url = "https://files.pythonhosted.org/packages/6c/f5/6c3a8efe5f503175aaddcbea6ad0d2c96dad6f5abb205750d1b3df44ef29/cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", size = 178651 }, + { url = "https://files.pythonhosted.org/packages/94/dd/a3f0118e688d1b1a57553da23b16bdade96d2f9bcda4d32e7d2838047ff7/cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", size = 445259 }, + { url = "https://files.pythonhosted.org/packages/2e/ea/70ce63780f096e16ce8588efe039d3c4f91deb1dc01e9c73a287939c79a6/cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", size = 469200 }, + { url = "https://files.pythonhosted.org/packages/1c/a0/a4fa9f4f781bda074c3ddd57a572b060fa0df7655d2a4247bbe277200146/cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", size = 477235 }, + { url = "https://files.pythonhosted.org/packages/62/12/ce8710b5b8affbcdd5c6e367217c242524ad17a02fe5beec3ee339f69f85/cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", size = 459721 }, + { url = "https://files.pythonhosted.org/packages/ff/6b/d45873c5e0242196f042d555526f92aa9e0c32355a1be1ff8c27f077fd37/cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", size = 467242 }, + { url = "https://files.pythonhosted.org/packages/1a/52/d9a0e523a572fbccf2955f5abe883cfa8bcc570d7faeee06336fbd50c9fc/cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", size = 477999 }, + { url = "https://files.pythonhosted.org/packages/44/74/f2a2460684a1a2d00ca799ad880d54652841a780c4c97b87754f660c7603/cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", size = 454242 }, + { url = "https://files.pythonhosted.org/packages/f8/4a/34599cac7dfcd888ff54e801afe06a19c17787dfd94495ab0c8d35fe99fb/cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b", size = 478604 }, + { url = "https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = 
"sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727 }, + { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400 }, + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178 }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840 }, + { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803 }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850 }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729 }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256 }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424 }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568 }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736 }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989 }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802 }, + { url = 
"https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792 }, + { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893 }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810 }, + { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200 }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447 }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358 }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", 
size = 488469 }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 }, +] + +[[package]] +name = "cfgv" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/11/74/539e56497d9bd1d484fd863dd69cbbfa653cd2aa27abfe35653494d85e94/cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560", size = 7114 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c5/55/51844dd50c4fc7a33b653bfaba4c2456f06955289ca770a5dbd5fd267374/cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", size = 7249 }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/85/4c40d00dcc6284a1c1ad5de5e0996b06f39d8232f1031cd23c2f5c07ee86/charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2", size = 198794 }, + { url = 
"https://files.pythonhosted.org/packages/41/d9/7a6c0b9db952598e97e93cbdfcb91bacd89b9b88c7c983250a77c008703c/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645", size = 142846 }, + { url = "https://files.pythonhosted.org/packages/66/82/a37989cda2ace7e37f36c1a8ed16c58cf48965a79c2142713244bf945c89/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd", size = 153350 }, + { url = "https://files.pythonhosted.org/packages/df/68/a576b31b694d07b53807269d05ec3f6f1093e9545e8607121995ba7a8313/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8", size = 145657 }, + { url = "https://files.pythonhosted.org/packages/92/9b/ad67f03d74554bed3aefd56fe836e1623a50780f7c998d00ca128924a499/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f", size = 147260 }, + { url = "https://files.pythonhosted.org/packages/a6/e6/8aebae25e328160b20e31a7e9929b1578bbdc7f42e66f46595a432f8539e/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7", size = 149164 }, + { url = "https://files.pythonhosted.org/packages/8b/f2/b3c2f07dbcc248805f10e67a0262c93308cfa149a4cd3d1fe01f593e5fd2/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9", size = 144571 }, + { url = "https://files.pythonhosted.org/packages/60/5b/c3f3a94bc345bc211622ea59b4bed9ae63c00920e2e8f11824aa5708e8b7/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", 
hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544", size = 151952 }, + { url = "https://files.pythonhosted.org/packages/e2/4d/ff460c8b474122334c2fa394a3f99a04cf11c646da895f81402ae54f5c42/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82", size = 155959 }, + { url = "https://files.pythonhosted.org/packages/a2/2b/b964c6a2fda88611a1fe3d4c400d39c66a42d6c169c924818c848f922415/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0", size = 153030 }, + { url = "https://files.pythonhosted.org/packages/59/2e/d3b9811db26a5ebf444bc0fa4f4be5aa6d76fc6e1c0fd537b16c14e849b6/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5", size = 148015 }, + { url = "https://files.pythonhosted.org/packages/90/07/c5fd7c11eafd561bb51220d600a788f1c8d77c5eef37ee49454cc5c35575/charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a", size = 98106 }, + { url = "https://files.pythonhosted.org/packages/a8/05/5e33dbef7e2f773d672b6d79f10ec633d4a71cd96db6673625838a4fd532/charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28", size = 105402 }, + { url = "https://files.pythonhosted.org/packages/d7/a4/37f4d6035c89cac7930395a35cc0f1b872e652eaafb76a6075943754f095/charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7", size = 199936 }, + { url = "https://files.pythonhosted.org/packages/ee/8a/1a5e33b73e0d9287274f899d967907cd0bf9c343e651755d9307e0dbf2b3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3", size = 143790 }, + { url = "https://files.pythonhosted.org/packages/66/52/59521f1d8e6ab1482164fa21409c5ef44da3e9f653c13ba71becdd98dec3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a", size = 153924 }, + { url = "https://files.pythonhosted.org/packages/86/2d/fb55fdf41964ec782febbf33cb64be480a6b8f16ded2dbe8db27a405c09f/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214", size = 146626 }, + { url = "https://files.pythonhosted.org/packages/8c/73/6ede2ec59bce19b3edf4209d70004253ec5f4e319f9a2e3f2f15601ed5f7/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a", size = 148567 }, + { url = "https://files.pythonhosted.org/packages/09/14/957d03c6dc343c04904530b6bef4e5efae5ec7d7990a7cbb868e4595ee30/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd", size = 150957 }, + { url = "https://files.pythonhosted.org/packages/0d/c8/8174d0e5c10ccebdcb1b53cc959591c4c722a3ad92461a273e86b9f5a302/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981", size = 145408 }, + { url = "https://files.pythonhosted.org/packages/58/aa/8904b84bc8084ac19dc52feb4f5952c6df03ffb460a887b42615ee1382e8/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c", size = 153399 }, + { url = 
"https://files.pythonhosted.org/packages/c2/26/89ee1f0e264d201cb65cf054aca6038c03b1a0c6b4ae998070392a3ce605/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b", size = 156815 }, + { url = "https://files.pythonhosted.org/packages/fd/07/68e95b4b345bad3dbbd3a8681737b4338ff2c9df29856a6d6d23ac4c73cb/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d", size = 154537 }, + { url = "https://files.pythonhosted.org/packages/77/1a/5eefc0ce04affb98af07bc05f3bac9094513c0e23b0562d64af46a06aae4/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f", size = 149565 }, + { url = "https://files.pythonhosted.org/packages/37/a0/2410e5e6032a174c95e0806b1a6585eb21e12f445ebe239fac441995226a/charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c", size = 98357 }, + { url = "https://files.pythonhosted.org/packages/6c/4f/c02d5c493967af3eda9c771ad4d2bbc8df6f99ddbeb37ceea6e8716a32bc/charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e", size = 105776 }, + { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622 }, + { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435 }, + { url = 
"https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653 }, + { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231 }, + { url = "https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243 }, + { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442 }, + { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147 }, + { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057 }, + { url = "https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = 
"sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454 }, + { url = "https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174 }, + { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166 }, + { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", size = 98064 }, + { url = "https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641 }, + { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626 }, +] + +[[package]] +name = "chex" +version = "0.1.89" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "jax" }, + { name = "jaxlib" }, + { name = "numpy" }, + { name = "setuptools", marker = "python_full_version >= '3.12'" }, + { name = "toolz" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ca/ac/504a8019f7ef372fc6cc3999ec9e3d0fbb38e6992f55d845d5b928010c11/chex-0.1.89.tar.gz", hash = 
"sha256:78f856e6a0a8459edfcbb402c2c044d2b8102eac4b633838cbdfdcdb09c6c8e0", size = 90676 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/6c/309972937d931069816dc8b28193a650485bc35cca92c04c8c15c4bd181e/chex-0.1.89-py3-none-any.whl", hash = "sha256:145241c27d8944adb634fb7d472a460e1c1b643f561507d4031ad5156ef82dfa", size = 99908 }, +] + +[[package]] +name = "click" +version = "8.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215 }, +] + +[[package]] +name = "cloudpickle" +version = "3.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/39/069100b84d7418bc358d81669d5748efb14b9cceacd2f9c75f550424132f/cloudpickle-3.1.1.tar.gz", hash = "sha256:b216fa8ae4019d5482a8ac3c95d8f6346115d8835911fd4aefd1a445e4242c64", size = 22113 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/e8/64c37fadfc2816a7701fa8a6ed8d87327c7d54eacfbfb6edab14a2f2be75/cloudpickle-3.1.1-py3-none-any.whl", hash = "sha256:c8c5a44295039331ee9dad40ba100a9c7297b6f988e50e87ccdf3765a668350e", size = 20992 }, +] + +[[package]] +name = "cmake" +version = "4.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/7b/7ad900329f02b7f0fa7e22d4815d1fd63e2fb95d6236b423457385ed57f5/cmake-4.0.2.tar.gz", hash = "sha256:d6ce25b2cbebc073344d38b603ba223f8e633a07335f8056375f397a0f0027e5", size 
= 34516 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/1f/2e86eb03ab8a52525347dede45ef3752b4516c19cc87be8a6546cef28839/cmake-4.0.2-py3-none-macosx_10_10_universal2.whl", hash = "sha256:0e1ade8fc1527c678ff5b2ef732a9a52dad60481097438eb19e43eec8eb2fc9c", size = 48733653 }, + { url = "https://files.pythonhosted.org/packages/a7/9c/492a819ab79371987a709999b6bf5244db83a2bfb415ac79e10333475a17/cmake-4.0.2-py3-none-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2e62d1518e7983b4df9b793fe47897d5f2eaee3781addd8e1663264090eb4bf6", size = 27738741 }, + { url = "https://files.pythonhosted.org/packages/e2/1f/dfe5dfd20698c5fe466b133fdf6f8e0cf00c32cb4c5a774fafc1dbdfe422/cmake-4.0.2-py3-none-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:deee8aae77599c17e32e4c80288e463ed3f1ebed04e1a819118f510854a82d8e", size = 26980799 }, + { url = "https://files.pythonhosted.org/packages/31/f7/fc30d8bb7a0a99a28455de5c7285c24cc9c8f1109441dc9f59b671554d13/cmake-4.0.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0415add60972fb3650a73bcc742bae9e19e03dd29219d9d89e18e0a3c0cd1d1", size = 27255147 }, + { url = "https://files.pythonhosted.org/packages/57/a8/9a9c5d3af7e461d186613afeabfd2dabb6c9bab4fd45ae08d2c5e9f04116/cmake-4.0.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e77546cd96e6edd514ac675a6c1512314519dac6dd4c5b975e564a6f09b4ccbc", size = 29018562 }, + { url = "https://files.pythonhosted.org/packages/88/39/49de74010f4ba3eecb5f673ba841e6eea70b582bab4ce8816b8f75206297/cmake-4.0.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:166a0515a61183149be70df0def8097c6dc638484bcbb785340ae81cb5a94f50", size = 30869968 }, + { url = "https://files.pythonhosted.org/packages/38/16/dc1963516f81ab3c19248f810b8b9d054d61a20ea805fbdcabe0e0475cc8/cmake-4.0.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:86ade184b259b18ba53ff343d4d5f263ec59dfb7304633523ba0efacfd98f41a", size = 27026054 }, + { url = "https://files.pythonhosted.org/packages/50/fd/2f872a4618026a244494409262c41181e8fb3e44bd3a75ab47d396f59998/cmake-4.0.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d123ea46c0dffe057fcfeaf448f623d6f79211cdd2b32fe779a86833fd3f4d9", size = 27910624 }, + { url = "https://files.pythonhosted.org/packages/41/29/9cb17a4027612c74511a1a51c1be4a6ccf1a0faf9cd873b19aed1a621027/cmake-4.0.2-py3-none-manylinux_2_31_armv7l.whl", hash = "sha256:47806759aa5748c2b5f1e2a035ef887bbd293b12a2a9603e42673f698c0e1a63", size = 25154444 }, + { url = "https://files.pythonhosted.org/packages/cd/3a/49eff3783a99fc68f08c42eafdb0339cf0a8413c9cdec5661fffab1a7040/cmake-4.0.2-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:e96921b6abfb627913d02cec9f4736a760741804044ac0740d8eefdcb7c47b4b", size = 28025207 }, + { url = "https://files.pythonhosted.org/packages/57/e5/1aa9b9cbb8625e5bc5db5325990582415c6264ed76063053bab3e64d941b/cmake-4.0.2-py3-none-musllinux_1_1_i686.whl", hash = "sha256:eea2c303cf3f009ffc71135e4e0cf03c3ad6cd409543270dc0601de32b50d0c1", size = 31554257 }, + { url = "https://files.pythonhosted.org/packages/22/63/7aae6e25b4e33f718c622d07e238ce5976982f20726459b2abb1f196c378/cmake-4.0.2-py3-none-musllinux_1_1_ppc64le.whl", hash = "sha256:4a469718c87253e67c81e5518ba19dc789f87a0e9f73ecd5af0ca139933b671f", size = 32279962 }, + { url = "https://files.pythonhosted.org/packages/3f/0f/673ee9ed196a95c2941cf6df4390d8b8e8b44ca9d2bd9ed8684fa9b11d1d/cmake-4.0.2-py3-none-musllinux_1_1_s390x.whl", hash = "sha256:60c7ff7b5fa725bbc4067f3256e68b21454e97f6e646bae123c756553245c7f3", size = 28102064 }, + { url = "https://files.pythonhosted.org/packages/7c/74/251c776092cdd107d71cf156d2780d48620efda42d195355bceb42dff210/cmake-4.0.2-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:fc483ed8a31c22cb1b46c81017b0703b469360584d004ac0f5e346f04b75e3c8", size = 29637502 }, + { url = 
"https://files.pythonhosted.org/packages/26/85/1724465e3779f883731416db1c8f58a8f08cbe2151eea98a7577beb911ae/cmake-4.0.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:f8ea86bfd9925575d4a49b3d98ce352f07bbae4fdbb6d703bd26314ca7a3db0c", size = 33324254 }, + { url = "https://files.pythonhosted.org/packages/46/ba/f9c2e0cebd9f6276fa7cb896c4b0eb9386cca5dae22b9431d56993f09026/cmake-4.0.2-py3-none-win32.whl", hash = "sha256:dc4ff87bbdf6ccf6cdce1f98089f5669f70e4a6c4d30d315df8e79a8cdc1c581", size = 33705810 }, + { url = "https://files.pythonhosted.org/packages/16/1a/6504170f8cfadde043ed5dabadcca8af50545094428ed74c44c1eac3903f/cmake-4.0.2-py3-none-win_amd64.whl", hash = "sha256:61cddbaa7586b8e9a2718619fd8935811a8af45e102ed3acc506b575e3766266", size = 36669295 }, + { url = "https://files.pythonhosted.org/packages/59/1d/c1900d83286b54c89d7a430c99dc09384a20dc3d7ce993d44dc7bc649aee/cmake-4.0.2-py3-none-win_arm64.whl", hash = "sha256:bb666564334530a9305ce0e5d7137d558e53c2f1a8175b798047550fefe7bb87", size = 35684210 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "comm" +version = "0.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/a8/fb783cb0abe2b5fded9f55e5703015cdf1c9c85b3669087c538dd15a6a86/comm-0.2.2.tar.gz", hash = 
"sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e", size = 6210 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/75/49e5bfe642f71f272236b5b2d2691cf915a7283cc0ceda56357b61daa538/comm-0.2.2-py3-none-any.whl", hash = "sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3", size = 7180 }, +] + +[[package]] +name = "contourpy" +version = "1.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/54/eb9bfc647b19f2009dd5c7f5ec51c4e6ca831725f1aea7a993034f483147/contourpy-1.3.2.tar.gz", hash = "sha256:b6945942715a034c671b7fc54f9588126b0b8bf23db2696e3ca8328f3ff0ab54", size = 13466130 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/b9/ede788a0b56fc5b071639d06c33cb893f68b1178938f3425debebe2dab78/contourpy-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6a37a2fb93d4df3fc4c0e363ea4d16f83195fc09c891bc8ce072b9d084853445", size = 269636 }, + { url = "https://files.pythonhosted.org/packages/e6/75/3469f011d64b8bbfa04f709bfc23e1dd71be54d05b1b083be9f5b22750d1/contourpy-1.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b7cd50c38f500bbcc9b6a46643a40e0913673f869315d8e70de0438817cb7773", size = 254636 }, + { url = "https://files.pythonhosted.org/packages/8d/2f/95adb8dae08ce0ebca4fd8e7ad653159565d9739128b2d5977806656fcd2/contourpy-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6658ccc7251a4433eebd89ed2672c2ed96fba367fd25ca9512aa92a4b46c4f1", size = 313053 }, + { url = "https://files.pythonhosted.org/packages/c3/a6/8ccf97a50f31adfa36917707fe39c9a0cbc24b3bbb58185577f119736cc9/contourpy-1.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:70771a461aaeb335df14deb6c97439973d253ae70660ca085eec25241137ef43", size = 352985 }, + { url = 
"https://files.pythonhosted.org/packages/1d/b6/7925ab9b77386143f39d9c3243fdd101621b4532eb126743201160ffa7e6/contourpy-1.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65a887a6e8c4cd0897507d814b14c54a8c2e2aa4ac9f7686292f9769fcf9a6ab", size = 323750 }, + { url = "https://files.pythonhosted.org/packages/c2/f3/20c5d1ef4f4748e52d60771b8560cf00b69d5c6368b5c2e9311bcfa2a08b/contourpy-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3859783aefa2b8355697f16642695a5b9792e7a46ab86da1118a4a23a51a33d7", size = 326246 }, + { url = "https://files.pythonhosted.org/packages/8c/e5/9dae809e7e0b2d9d70c52b3d24cba134dd3dad979eb3e5e71f5df22ed1f5/contourpy-1.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:eab0f6db315fa4d70f1d8ab514e527f0366ec021ff853d7ed6a2d33605cf4b83", size = 1308728 }, + { url = "https://files.pythonhosted.org/packages/e2/4a/0058ba34aeea35c0b442ae61a4f4d4ca84d6df8f91309bc2d43bb8dd248f/contourpy-1.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d91a3ccc7fea94ca0acab82ceb77f396d50a1f67412efe4c526f5d20264e6ecd", size = 1375762 }, + { url = "https://files.pythonhosted.org/packages/09/33/7174bdfc8b7767ef2c08ed81244762d93d5c579336fc0b51ca57b33d1b80/contourpy-1.3.2-cp311-cp311-win32.whl", hash = "sha256:1c48188778d4d2f3d48e4643fb15d8608b1d01e4b4d6b0548d9b336c28fc9b6f", size = 178196 }, + { url = "https://files.pythonhosted.org/packages/5e/fe/4029038b4e1c4485cef18e480b0e2cd2d755448bb071eb9977caac80b77b/contourpy-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:5ebac872ba09cb8f2131c46b8739a7ff71de28a24c869bcad554477eb089a878", size = 222017 }, + { url = "https://files.pythonhosted.org/packages/34/f7/44785876384eff370c251d58fd65f6ad7f39adce4a093c934d4a67a7c6b6/contourpy-1.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4caf2bcd2969402bf77edc4cb6034c7dd7c0803213b3523f111eb7460a51b8d2", size = 271580 }, + { url = 
"https://files.pythonhosted.org/packages/93/3b/0004767622a9826ea3d95f0e9d98cd8729015768075d61f9fea8eeca42a8/contourpy-1.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:82199cb78276249796419fe36b7386bd8d2cc3f28b3bc19fe2454fe2e26c4c15", size = 255530 }, + { url = "https://files.pythonhosted.org/packages/e7/bb/7bd49e1f4fa805772d9fd130e0d375554ebc771ed7172f48dfcd4ca61549/contourpy-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:106fab697af11456fcba3e352ad50effe493a90f893fca6c2ca5c033820cea92", size = 307688 }, + { url = "https://files.pythonhosted.org/packages/fc/97/e1d5dbbfa170725ef78357a9a0edc996b09ae4af170927ba8ce977e60a5f/contourpy-1.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d14f12932a8d620e307f715857107b1d1845cc44fdb5da2bc8e850f5ceba9f87", size = 347331 }, + { url = "https://files.pythonhosted.org/packages/6f/66/e69e6e904f5ecf6901be3dd16e7e54d41b6ec6ae3405a535286d4418ffb4/contourpy-1.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:532fd26e715560721bb0d5fc7610fce279b3699b018600ab999d1be895b09415", size = 318963 }, + { url = "https://files.pythonhosted.org/packages/a8/32/b8a1c8965e4f72482ff2d1ac2cd670ce0b542f203c8e1d34e7c3e6925da7/contourpy-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b383144cf2d2c29f01a1e8170f50dacf0eac02d64139dcd709a8ac4eb3cfe", size = 323681 }, + { url = "https://files.pythonhosted.org/packages/30/c6/12a7e6811d08757c7162a541ca4c5c6a34c0f4e98ef2b338791093518e40/contourpy-1.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c49f73e61f1f774650a55d221803b101d966ca0c5a2d6d5e4320ec3997489441", size = 1308674 }, + { url = "https://files.pythonhosted.org/packages/2a/8a/bebe5a3f68b484d3a2b8ffaf84704b3e343ef1addea528132ef148e22b3b/contourpy-1.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3d80b2c0300583228ac98d0a927a1ba6a2ba6b8a742463c564f1d419ee5b211e", size = 1380480 }, + { url = 
"https://files.pythonhosted.org/packages/34/db/fcd325f19b5978fb509a7d55e06d99f5f856294c1991097534360b307cf1/contourpy-1.3.2-cp312-cp312-win32.whl", hash = "sha256:90df94c89a91b7362e1142cbee7568f86514412ab8a2c0d0fca72d7e91b62912", size = 178489 }, + { url = "https://files.pythonhosted.org/packages/01/c8/fadd0b92ffa7b5eb5949bf340a63a4a496a6930a6c37a7ba0f12acb076d6/contourpy-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:8c942a01d9163e2e5cfb05cb66110121b8d07ad438a17f9e766317bcb62abf73", size = 223042 }, + { url = "https://files.pythonhosted.org/packages/2e/61/5673f7e364b31e4e7ef6f61a4b5121c5f170f941895912f773d95270f3a2/contourpy-1.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:de39db2604ae755316cb5967728f4bea92685884b1e767b7c24e983ef5f771cb", size = 271630 }, + { url = "https://files.pythonhosted.org/packages/ff/66/a40badddd1223822c95798c55292844b7e871e50f6bfd9f158cb25e0bd39/contourpy-1.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3f9e896f447c5c8618f1edb2bafa9a4030f22a575ec418ad70611450720b5b08", size = 255670 }, + { url = "https://files.pythonhosted.org/packages/1e/c7/cf9fdee8200805c9bc3b148f49cb9482a4e3ea2719e772602a425c9b09f8/contourpy-1.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71e2bd4a1c4188f5c2b8d274da78faab884b59df20df63c34f74aa1813c4427c", size = 306694 }, + { url = "https://files.pythonhosted.org/packages/dd/e7/ccb9bec80e1ba121efbffad7f38021021cda5be87532ec16fd96533bb2e0/contourpy-1.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de425af81b6cea33101ae95ece1f696af39446db9682a0b56daaa48cfc29f38f", size = 345986 }, + { url = "https://files.pythonhosted.org/packages/dc/49/ca13bb2da90391fa4219fdb23b078d6065ada886658ac7818e5441448b78/contourpy-1.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:977e98a0e0480d3fe292246417239d2d45435904afd6d7332d8455981c408b85", size = 318060 }, + { url = 
"https://files.pythonhosted.org/packages/c8/65/5245ce8c548a8422236c13ffcdcdada6a2a812c361e9e0c70548bb40b661/contourpy-1.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:434f0adf84911c924519d2b08fc10491dd282b20bdd3fa8f60fd816ea0b48841", size = 322747 }, + { url = "https://files.pythonhosted.org/packages/72/30/669b8eb48e0a01c660ead3752a25b44fdb2e5ebc13a55782f639170772f9/contourpy-1.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c66c4906cdbc50e9cba65978823e6e00b45682eb09adbb78c9775b74eb222422", size = 1308895 }, + { url = "https://files.pythonhosted.org/packages/05/5a/b569f4250decee6e8d54498be7bdf29021a4c256e77fe8138c8319ef8eb3/contourpy-1.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8b7fc0cd78ba2f4695fd0a6ad81a19e7e3ab825c31b577f384aa9d7817dc3bef", size = 1379098 }, + { url = "https://files.pythonhosted.org/packages/19/ba/b227c3886d120e60e41b28740ac3617b2f2b971b9f601c835661194579f1/contourpy-1.3.2-cp313-cp313-win32.whl", hash = "sha256:15ce6ab60957ca74cff444fe66d9045c1fd3e92c8936894ebd1f3eef2fff075f", size = 178535 }, + { url = "https://files.pythonhosted.org/packages/12/6e/2fed56cd47ca739b43e892707ae9a13790a486a3173be063681ca67d2262/contourpy-1.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e1578f7eafce927b168752ed7e22646dad6cd9bca673c60bff55889fa236ebf9", size = 223096 }, + { url = "https://files.pythonhosted.org/packages/54/4c/e76fe2a03014a7c767d79ea35c86a747e9325537a8b7627e0e5b3ba266b4/contourpy-1.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0475b1f6604896bc7c53bb070e355e9321e1bc0d381735421a2d2068ec56531f", size = 285090 }, + { url = "https://files.pythonhosted.org/packages/7b/e2/5aba47debd55d668e00baf9651b721e7733975dc9fc27264a62b0dd26eb8/contourpy-1.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c85bb486e9be652314bb5b9e2e3b0d1b2e643d5eec4992c0fbe8ac71775da739", size = 268643 }, + { url = 
"https://files.pythonhosted.org/packages/a1/37/cd45f1f051fe6230f751cc5cdd2728bb3a203f5619510ef11e732109593c/contourpy-1.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:745b57db7758f3ffc05a10254edd3182a2a83402a89c00957a8e8a22f5582823", size = 310443 }, + { url = "https://files.pythonhosted.org/packages/8b/a2/36ea6140c306c9ff6dd38e3bcec80b3b018474ef4d17eb68ceecd26675f4/contourpy-1.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:970e9173dbd7eba9b4e01aab19215a48ee5dd3f43cef736eebde064a171f89a5", size = 349865 }, + { url = "https://files.pythonhosted.org/packages/95/b7/2fc76bc539693180488f7b6cc518da7acbbb9e3b931fd9280504128bf956/contourpy-1.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6c4639a9c22230276b7bffb6a850dfc8258a2521305e1faefe804d006b2e532", size = 321162 }, + { url = "https://files.pythonhosted.org/packages/f4/10/76d4f778458b0aa83f96e59d65ece72a060bacb20cfbee46cf6cd5ceba41/contourpy-1.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc829960f34ba36aad4302e78eabf3ef16a3a100863f0d4eeddf30e8a485a03b", size = 327355 }, + { url = "https://files.pythonhosted.org/packages/43/a3/10cf483ea683f9f8ab096c24bad3cce20e0d1dd9a4baa0e2093c1c962d9d/contourpy-1.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d32530b534e986374fc19eaa77fcb87e8a99e5431499949b828312bdcd20ac52", size = 1307935 }, + { url = "https://files.pythonhosted.org/packages/78/73/69dd9a024444489e22d86108e7b913f3528f56cfc312b5c5727a44188471/contourpy-1.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e298e7e70cf4eb179cc1077be1c725b5fd131ebc81181bf0c03525c8abc297fd", size = 1372168 }, + { url = "https://files.pythonhosted.org/packages/0f/1b/96d586ccf1b1a9d2004dd519b25fbf104a11589abfd05484ff12199cca21/contourpy-1.3.2-cp313-cp313t-win32.whl", hash = "sha256:d0e589ae0d55204991450bb5c23f571c64fe43adaa53f93fc902a84c96f52fe1", size = 189550 }, + { url = 
"https://files.pythonhosted.org/packages/b0/e6/6000d0094e8a5e32ad62591c8609e269febb6e4db83a1c75ff8868b42731/contourpy-1.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:78e9253c3de756b3f6a5174d024c4835acd59eb3f8e2ca13e775dbffe1558f69", size = 238214 }, + { url = "https://files.pythonhosted.org/packages/ff/c0/91f1215d0d9f9f343e4773ba6c9b89e8c0cc7a64a6263f21139da639d848/contourpy-1.3.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5f5964cdad279256c084b69c3f412b7801e15356b16efa9d78aa974041903da0", size = 266807 }, + { url = "https://files.pythonhosted.org/packages/d4/79/6be7e90c955c0487e7712660d6cead01fa17bff98e0ea275737cc2bc8e71/contourpy-1.3.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49b65a95d642d4efa8f64ba12558fcb83407e58a2dfba9d796d77b63ccfcaff5", size = 318729 }, + { url = "https://files.pythonhosted.org/packages/87/68/7f46fb537958e87427d98a4074bcde4b67a70b04900cfc5ce29bc2f556c1/contourpy-1.3.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8c5acb8dddb0752bf252e01a3035b21443158910ac16a3b0d20e7fed7d534ce5", size = 221791 }, +] + +[[package]] +name = "crc32c" +version = "2.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7f/4c/4e40cc26347ac8254d3f25b9f94710b8e8df24ee4dddc1ba41907a88a94d/crc32c-2.7.1.tar.gz", hash = "sha256:f91b144a21eef834d64178e01982bb9179c354b3e9e5f4c803b0e5096384968c", size = 45712 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/8e/2f37f46368bbfd50edfc11b96f0aa135699034b1b020966c70ebaff3463b/crc32c-2.7.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:19e03a50545a3ef400bd41667d5525f71030488629c57d819e2dd45064f16192", size = 49672 }, + { url = "https://files.pythonhosted.org/packages/ed/b8/e52f7c4b045b871c2984d70f37c31d4861b533a8082912dfd107a96cf7c1/crc32c-2.7.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8c03286b1e5ce9bed7090084f206aacd87c5146b4b10de56fe9e86cbbbf851cf", size = 37155 }, + { 
url = "https://files.pythonhosted.org/packages/25/ee/0cfa82a68736697f3c7e435ba658c2ef8c997f42b89f6ab4545efe1b2649/crc32c-2.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:80ebbf144a1a56a532b353e81fa0f3edca4f4baa1bf92b1dde2c663a32bb6a15", size = 35372 }, + { url = "https://files.pythonhosted.org/packages/aa/92/c878aaba81c431fcd93a059e9f6c90db397c585742793f0bf6e0c531cc67/crc32c-2.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96b794fd11945298fdd5eb1290a812efb497c14bc42592c5c992ca077458eeba", size = 54879 }, + { url = "https://files.pythonhosted.org/packages/5b/f5/ab828ab3907095e06b18918408748950a9f726ee2b37be1b0839fb925ee1/crc32c-2.7.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9df7194dd3c0efb5a21f5d70595b7a8b4fd9921fbbd597d6d8e7a11eca3e2d27", size = 52588 }, + { url = "https://files.pythonhosted.org/packages/6a/2b/9e29e9ac4c4213d60491db09487125db358cd9263490fbadbd55e48fbe03/crc32c-2.7.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d698eec444b18e296a104d0b9bb6c596c38bdcb79d24eba49604636e9d747305", size = 53674 }, + { url = "https://files.pythonhosted.org/packages/79/ed/df3c4c14bf1b29f5c9b52d51fb6793e39efcffd80b2941d994e8f7f5f688/crc32c-2.7.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e07cf10ef852d219d179333fd706d1c415626f1f05e60bd75acf0143a4d8b225", size = 54691 }, + { url = "https://files.pythonhosted.org/packages/0c/47/4917af3c9c1df2fff28bbfa6492673c9adeae5599dcc207bbe209847489c/crc32c-2.7.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d2a051f296e6e92e13efee3b41db388931cdb4a2800656cd1ed1d9fe4f13a086", size = 52896 }, + { url = "https://files.pythonhosted.org/packages/1b/6f/26fc3dda5835cda8f6cd9d856afe62bdeae428de4c34fea200b0888e8835/crc32c-2.7.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a1738259802978cdf428f74156175da6a5fdfb7256f647fdc0c9de1bc6cd7173", size = 
53554 }, + { url = "https://files.pythonhosted.org/packages/56/3e/6f39127f7027c75d130c0ba348d86a6150dff23761fbc6a5f71659f4521e/crc32c-2.7.1-cp311-cp311-win32.whl", hash = "sha256:f7786d219a1a1bf27d0aa1869821d11a6f8e90415cfffc1e37791690d4a848a1", size = 38370 }, + { url = "https://files.pythonhosted.org/packages/c9/fb/1587c2705a3a47a3d0067eecf9a6fec510761c96dec45c7b038fb5c8ff46/crc32c-2.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:887f6844bb3ad35f0778cd10793ad217f7123a5422e40041231b8c4c7329649d", size = 39795 }, + { url = "https://files.pythonhosted.org/packages/1d/02/998dc21333413ce63fe4c1ca70eafe61ca26afc7eb353f20cecdb77d614e/crc32c-2.7.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f7d1c4e761fe42bf856130daf8b2658df33fe0ced3c43dadafdfeaa42b57b950", size = 49568 }, + { url = "https://files.pythonhosted.org/packages/9c/3e/e3656bfa76e50ef87b7136fef2dbf3c46e225629432fc9184fdd7fd187ff/crc32c-2.7.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:73361c79a6e4605204457f19fda18b042a94508a52e53d10a4239da5fb0f6a34", size = 37019 }, + { url = "https://files.pythonhosted.org/packages/0b/7d/5ff9904046ad15a08772515db19df43107bf5e3901a89c36a577b5f40ba0/crc32c-2.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:afd778fc8ac0ed2ffbfb122a9aa6a0e409a8019b894a1799cda12c01534493e0", size = 35373 }, + { url = "https://files.pythonhosted.org/packages/4d/41/4aedc961893f26858ab89fc772d0eaba91f9870f19eaa933999dcacb94ec/crc32c-2.7.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56ef661b34e9f25991fface7f9ad85e81bbc1b3fe3b916fd58c893eabe2fa0b8", size = 54675 }, + { url = "https://files.pythonhosted.org/packages/d6/63/8cabf09b7e39b9fec8f7010646c8b33057fc8d67e6093b3cc15563d23533/crc32c-2.7.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:571aa4429444b5d7f588e4377663592145d2d25eb1635abb530f1281794fc7c9", size = 52386 }, + { url = 
"https://files.pythonhosted.org/packages/79/13/13576941bf7cf95026abae43d8427c812c0054408212bf8ed490eda846b0/crc32c-2.7.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c02a3bd67dea95cdb25844aaf44ca2e1b0c1fd70b287ad08c874a95ef4bb38db", size = 53495 }, + { url = "https://files.pythonhosted.org/packages/3d/b6/55ffb26d0517d2d6c6f430ce2ad36ae7647c995c5bfd7abce7f32bb2bad1/crc32c-2.7.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:99d17637c4867672cb8adeea007294e3c3df9d43964369516cfe2c1f47ce500a", size = 54456 }, + { url = "https://files.pythonhosted.org/packages/c2/1a/5562e54cb629ecc5543d3604dba86ddfc7c7b7bf31d64005b38a00d31d31/crc32c-2.7.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f4a400ac3c69a32e180d8753fd7ec7bccb80ade7ab0812855dce8a208e72495f", size = 52647 }, + { url = "https://files.pythonhosted.org/packages/48/ec/ce4138eaf356cd9aae60bbe931755e5e0151b3eca5f491fce6c01b97fd59/crc32c-2.7.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:588587772e55624dd9c7a906ec9e8773ae0b6ac5e270fc0bc84ee2758eba90d5", size = 53332 }, + { url = "https://files.pythonhosted.org/packages/5e/b5/144b42cd838a901175a916078781cb2c3c9f977151c9ba085aebd6d15b22/crc32c-2.7.1-cp312-cp312-win32.whl", hash = "sha256:9f14b60e5a14206e8173dd617fa0c4df35e098a305594082f930dae5488da428", size = 38371 }, + { url = "https://files.pythonhosted.org/packages/ae/c4/7929dcd5d9b57db0cce4fe6f6c191049380fc6d8c9b9f5581967f4ec018e/crc32c-2.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:7c810a246660a24dc818047dc5f89c7ce7b2814e1e08a8e99993f4103f7219e8", size = 39805 }, + { url = "https://files.pythonhosted.org/packages/bf/98/1a6d60d5b3b5edc8382777b64100343cb4aa6a7e172fae4a6cfcb8ebbbd9/crc32c-2.7.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:24949bffb06fc411cc18188d33357923cb935273642164d0bb37a5f375654169", size = 49567 }, + { url = 
"https://files.pythonhosted.org/packages/4f/56/0dd652d4e950e6348bbf16b964b3325e4ad8220470774128fc0b0dd069cb/crc32c-2.7.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2d5d326e7e118d4fa60187770d86b66af2fdfc63ce9eeb265f0d3e7d49bebe0b", size = 37018 }, + { url = "https://files.pythonhosted.org/packages/47/02/2bd65fdef10139b6a802d83a7f966b7750fe5ffb1042f7cbe5dbb6403869/crc32c-2.7.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ba110df60c64c8e2d77a9425b982a520ccdb7abe42f06604f4d98a45bb1fff62", size = 35374 }, + { url = "https://files.pythonhosted.org/packages/a9/0d/3e797d1ed92d357a6a4c5b41cea15a538b27a8fdf18c7863747eb50b73ad/crc32c-2.7.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c277f9d16a3283e064d54854af0976b72abaa89824955579b2b3f37444f89aae", size = 54641 }, + { url = "https://files.pythonhosted.org/packages/a7/d3/4ddeef755caaa75680c559562b6c71f5910fee4c4f3a2eb5ea8b57f0e48c/crc32c-2.7.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:881af0478a01331244e27197356929edbdeaef6a9f81b5c6bacfea18d2139289", size = 52338 }, + { url = "https://files.pythonhosted.org/packages/01/cf/32f019be5de9f6e180926a50ee5f08648e686c7d9a59f2c5d0806a77b1c7/crc32c-2.7.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:724d5ff4d29ff093a983ae656be3307093706d850ea2a233bf29fcacc335d945", size = 53447 }, + { url = "https://files.pythonhosted.org/packages/b2/8b/92f3f62f3bafe8f7ab4af7bfb7246dc683fd11ec0d6dfb73f91e09079f69/crc32c-2.7.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2416c4d88696ac322632555c0f81ab35e15f154bc96055da6cf110d642dbc10", size = 54484 }, + { url = "https://files.pythonhosted.org/packages/98/b2/113a50f8781f76af5ac65ffdb907e72bddbe974de8e02247f0d58bc48040/crc32c-2.7.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:60254251b88ec9b9795215f0f9ec015a6b5eef8b2c5fba1267c672d83c78fc02", size = 52703 }, 
+ { url = "https://files.pythonhosted.org/packages/b4/6c/309229e9acda8cf36a8ff4061d70b54d905f79b7037e16883ce6590a24ab/crc32c-2.7.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:edefc0e46f3c37372183f70338e5bdee42f6789b62fcd36ec53aa933e9dfbeaf", size = 53367 }, + { url = "https://files.pythonhosted.org/packages/b5/2a/6c6324d920396e1bd9f3efbe8753da071be0ca52bd22d6c82d446b8d6975/crc32c-2.7.1-cp313-cp313-win32.whl", hash = "sha256:813af8111218970fe2adb833c5e5239f091b9c9e76f03b4dd91aaba86e99b499", size = 38377 }, + { url = "https://files.pythonhosted.org/packages/db/a0/f01ccfab538db07ef3f6b4ede46357ff147a81dd4f3c59ca6a34c791a549/crc32c-2.7.1-cp313-cp313-win_amd64.whl", hash = "sha256:7d9ede7be8e4ec1c9e90aaf6884decbeef10e3473e6ddac032706d710cab5888", size = 39803 }, + { url = "https://files.pythonhosted.org/packages/1b/80/61dcae7568b33acfde70c9d651c7d891c0c578c39cc049107c1cf61f1367/crc32c-2.7.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:db9ac92294284b22521356715784b91cc9094eee42a5282ab281b872510d1831", size = 49386 }, + { url = "https://files.pythonhosted.org/packages/1e/f1/80f17c089799ab2b4c247443bdd101d6ceda30c46d7f193e16b5ca29c5a0/crc32c-2.7.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8fcd7f2f29a30dc92af64a9ee3d38bde0c82bd20ad939999427aac94bbd87373", size = 36937 }, + { url = "https://files.pythonhosted.org/packages/63/42/5fcfc71a3de493d920fd2590843762a2749981ea56b802b380e5df82309d/crc32c-2.7.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5c056ef043393085523e149276a7ce0cb534b872e04f3e20d74d9a94a75c0ad7", size = 35292 }, + { url = "https://files.pythonhosted.org/packages/03/de/fef962e898a953558fe1c55141644553e84ef4190693a31244c59a0856c7/crc32c-2.7.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03a92551a343702629af91f78d205801219692b6909f8fa126b830e332bfb0e0", size = 54223 }, + { url = 
"https://files.pythonhosted.org/packages/21/14/fceca1a6f45c0a1814fe8602a65657b75c27425162445925ba87438cad6b/crc32c-2.7.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb9424ec1a8ca54763155a703e763bcede82e6569fe94762614bb2de1412d4e1", size = 51588 }, + { url = "https://files.pythonhosted.org/packages/13/3b/13d40a7dfbf9ef05c84a0da45544ee72080dca4ce090679e5105689984bd/crc32c-2.7.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88732070f6175530db04e0bb36880ac45c33d49f8ac43fa0e50cfb1830049d23", size = 52678 }, + { url = "https://files.pythonhosted.org/packages/36/09/65ffc4fb9fa60ff6714eeb50a92284a4525e5943f0b040b572c0c76368c1/crc32c-2.7.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:57a20dfc27995f568f64775eea2bbb58ae269f1a1144561df5e4a4955f79db32", size = 53847 }, + { url = "https://files.pythonhosted.org/packages/24/71/938e926085b7288da052db7c84416f3ce25e71baf7ab5b63824c7bcb6f22/crc32c-2.7.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:f7186d098bfd2cff25eac6880b7c7ad80431b90610036131c1c7dd0eab42a332", size = 51860 }, + { url = "https://files.pythonhosted.org/packages/3c/d8/4526d5380189d6f2fa27256c204100f30214fe402f47cf6e9fb9a91ab890/crc32c-2.7.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:55a77e29a265418fa34bef15bd0f2c60afae5348988aaf35ed163b4bbf93cf37", size = 52508 }, + { url = "https://files.pythonhosted.org/packages/19/30/15f7e35176488b77e5b88751947d321d603fccac273099ace27c7b2d50a6/crc32c-2.7.1-cp313-cp313t-win32.whl", hash = "sha256:ae38a4b6aa361595d81cab441405fbee905c72273e80a1c010fb878ae77ac769", size = 38319 }, + { url = "https://files.pythonhosted.org/packages/19/c4/0b3eee04dac195f4730d102d7a9fbea894ae7a32ce075f84336df96a385d/crc32c-2.7.1-cp313-cp313t-win_amd64.whl", hash = "sha256:eee2a43b663feb6c79a6c1c6e5eae339c2b72cfac31ee54ec0209fa736cf7ee5", size = 39781 }, +] + +[[package]] +name = "cycler" 
+version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/95/a3dbbb5028f35eafb79008e7522a75244477d2838f38cbb722248dabc2a8/cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c", size = 7615 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321 }, +] + +[[package]] +name = "datasets" +version = "3.6.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dill" }, + { name = "filelock" }, + { name = "fsspec", extra = ["http"] }, + { name = "huggingface-hub" }, + { name = "multiprocess" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pandas" }, + { name = "pyarrow" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "tqdm" }, + { name = "xxhash" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1a/89/d3d6fef58a488f8569c82fd293ab7cbd4250244d67f425dcae64c63800ea/datasets-3.6.0.tar.gz", hash = "sha256:1b2bf43b19776e2787e181cfd329cb0ca1a358ea014780c3581e0f276375e041", size = 569336 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/34/a08b0ee99715eaba118cbe19a71f7b5e2425c2718ef96007c325944a1152/datasets-3.6.0-py3-none-any.whl", hash = "sha256:25000c4a2c0873a710df127d08a202a06eab7bf42441a6bc278b499c2f72cd1b", size = 491546 }, +] + +[[package]] +name = "debugpy" +version = "1.8.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bd/75/087fe07d40f490a78782ff3b0a30e3968936854105487decdb33446d4b0e/debugpy-1.8.14.tar.gz", hash = "sha256:7cd287184318416850aa8b60ac90105837bb1e59531898c07569d197d2ed5322", size = 1641444 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/67/e8/57fe0c86915671fd6a3d2d8746e40485fd55e8d9e682388fbb3a3d42b86f/debugpy-1.8.14-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:1b2ac8c13b2645e0b1eaf30e816404990fbdb168e193322be8f545e8c01644a9", size = 2175064 }, + { url = "https://files.pythonhosted.org/packages/3b/97/2b2fd1b1c9569c6764ccdb650a6f752e4ac31be465049563c9eb127a8487/debugpy-1.8.14-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf431c343a99384ac7eab2f763980724834f933a271e90496944195318c619e2", size = 3132359 }, + { url = "https://files.pythonhosted.org/packages/c0/ee/b825c87ed06256ee2a7ed8bab8fb3bb5851293bf9465409fdffc6261c426/debugpy-1.8.14-cp311-cp311-win32.whl", hash = "sha256:c99295c76161ad8d507b413cd33422d7c542889fbb73035889420ac1fad354f2", size = 5133269 }, + { url = "https://files.pythonhosted.org/packages/d5/a6/6c70cd15afa43d37839d60f324213843174c1d1e6bb616bd89f7c1341bac/debugpy-1.8.14-cp311-cp311-win_amd64.whl", hash = "sha256:7816acea4a46d7e4e50ad8d09d963a680ecc814ae31cdef3622eb05ccacf7b01", size = 5158156 }, + { url = "https://files.pythonhosted.org/packages/d9/2a/ac2df0eda4898f29c46eb6713a5148e6f8b2b389c8ec9e425a4a1d67bf07/debugpy-1.8.14-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:8899c17920d089cfa23e6005ad9f22582fd86f144b23acb9feeda59e84405b84", size = 2501268 }, + { url = "https://files.pythonhosted.org/packages/10/53/0a0cb5d79dd9f7039169f8bf94a144ad3efa52cc519940b3b7dde23bcb89/debugpy-1.8.14-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6bb5c0dcf80ad5dbc7b7d6eac484e2af34bdacdf81df09b6a3e62792b722826", size = 4221077 }, + { url = "https://files.pythonhosted.org/packages/f8/d5/84e01821f362327bf4828728aa31e907a2eca7c78cd7c6ec062780d249f8/debugpy-1.8.14-cp312-cp312-win32.whl", hash = "sha256:281d44d248a0e1791ad0eafdbbd2912ff0de9eec48022a5bfbc332957487ed3f", size = 5255127 }, + { url = 
"https://files.pythonhosted.org/packages/33/16/1ed929d812c758295cac7f9cf3dab5c73439c83d9091f2d91871e648093e/debugpy-1.8.14-cp312-cp312-win_amd64.whl", hash = "sha256:5aa56ef8538893e4502a7d79047fe39b1dae08d9ae257074c6464a7b290b806f", size = 5297249 }, + { url = "https://files.pythonhosted.org/packages/4d/e4/395c792b243f2367d84202dc33689aa3d910fb9826a7491ba20fc9e261f5/debugpy-1.8.14-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:329a15d0660ee09fec6786acdb6e0443d595f64f5d096fc3e3ccf09a4259033f", size = 2485676 }, + { url = "https://files.pythonhosted.org/packages/ba/f1/6f2ee3f991327ad9e4c2f8b82611a467052a0fb0e247390192580e89f7ff/debugpy-1.8.14-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f920c7f9af409d90f5fd26e313e119d908b0dd2952c2393cd3247a462331f15", size = 4217514 }, + { url = "https://files.pythonhosted.org/packages/79/28/b9d146f8f2dc535c236ee09ad3e5ac899adb39d7a19b49f03ac95d216beb/debugpy-1.8.14-cp313-cp313-win32.whl", hash = "sha256:3784ec6e8600c66cbdd4ca2726c72d8ca781e94bce2f396cc606d458146f8f4e", size = 5254756 }, + { url = "https://files.pythonhosted.org/packages/e0/62/a7b4a57013eac4ccaef6977966e6bec5c63906dd25a86e35f155952e29a1/debugpy-1.8.14-cp313-cp313-win_amd64.whl", hash = "sha256:684eaf43c95a3ec39a96f1f5195a7ff3d4144e4a18d69bb66beeb1a6de605d6e", size = 5297119 }, + { url = "https://files.pythonhosted.org/packages/97/1a/481f33c37ee3ac8040d3d51fc4c4e4e7e61cb08b8bc8971d6032acc2279f/debugpy-1.8.14-py2.py3-none-any.whl", hash = "sha256:5cd9a579d553b6cb9759a7908a41988ee6280b961f24f63336835d9418216a20", size = 5256230 }, +] + +[[package]] +name = "decorator" +version = "5.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711 } +wheels = [ 
+ { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190 }, +] + +[[package]] +name = "deepdiff" +version = "8.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "orderly-set" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0a/0f/9cd2624f7dcd755cbf1fa21fb7234541f19a1be96a56f387ec9053ebe220/deepdiff-8.5.0.tar.gz", hash = "sha256:a4dd3529fa8d4cd5b9cbb6e3ea9c95997eaa919ba37dac3966c1b8f872dc1cd1", size = 538517 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4a/3b/2e0797200c51531a6d8c97a8e4c9fa6fb56de7e6e2a15c1c067b6b10a0b0/deepdiff-8.5.0-py3-none-any.whl", hash = "sha256:d4599db637f36a1c285f5fdfc2cd8d38bde8d8be8636b65ab5e425b67c54df26", size = 85112 }, +] + +[[package]] +name = "diffusers" +version = "0.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "filelock" }, + { name = "huggingface-hub" }, + { name = "importlib-metadata" }, + { name = "numpy" }, + { name = "pillow" }, + { name = "regex" }, + { name = "requests" }, + { name = "safetensors" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/82/cc/1ef6bdc99d3864f6d1ee11bdbe3708b9d33ce35e7671557f641897480956/diffusers-0.33.1.tar.gz", hash = "sha256:fc7f140295d2ec82b1e7474b77bb7057fc0686c14eadc54ca0e52a66527e18a2", size = 2896103 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/7a/f08f610cea8a3395ad3b4f586db23bedb43c68db6c3261145a15e7b63126/diffusers-0.33.1-py3-none-any.whl", hash = "sha256:027469e74f289338eb24127409f8d60d840b1b7ce4b27ffcd3134fd3b8431567", size = 3554612 }, +] + +[[package]] +name = "dill" +version = "0.3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/17/4d/ac7ffa80c69ea1df30a8aa11b3578692a5118e7cd1aa157e3ef73b092d15/dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca", size = 184847 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/7a/cef76fd8438a42f96db64ddaa85280485a9c395e7df3db8158cfec1eee34/dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7", size = 116252 }, +] + +[[package]] +name = "distlib" +version = "0.3.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0d/dd/1bec4c5ddb504ca60fc29472f3d27e8d4da1257a854e1d96742f15c1d02d/distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403", size = 613923 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/a1/cf2472db20f7ce4a6be1253a81cfdf85ad9c7885ffbed7047fb72c24cf87/distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87", size = 468973 }, +] + +[[package]] +name = "dlimp" +version = "0.0.1" +source = { git = "https://github.com/kvablack/dlimp?rev=ad72ce3a9b414db2185bc0b38461d4101a65477a#ad72ce3a9b414db2185bc0b38461d4101a65477a" } +dependencies = [ + { name = "tensorflow" }, + { name = "tensorflow-datasets" }, +] + +[[package]] +name = "dm-control" +version = "1.0.14" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "dm-env" }, + { name = "dm-tree", version = "0.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.13'" }, + { name = "dm-tree", version = "0.1.9", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.13'" }, + { name = "glfw" }, + { name = "labmaze" }, + { name = "lxml" }, + { name = "mujoco" }, + { name = "numpy" }, + { name = "protobuf" }, + { name = "pyopengl" }, + { name = 
"pyparsing" }, + { name = "requests" }, + { name = "scipy" }, + { name = "setuptools" }, + { name = "tqdm" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/02/55f275ffbf63ce4cb5c2d5d0474aa47a706396be69919d880cb91aa00a55/dm_control-1.0.14.tar.gz", hash = "sha256:def1ece747b6f175c581150826b50f1a6134086dab34f8f3fd2d088ea035cf3d", size = 38993145 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2b/83/13d62168962d38ba70257d1fe13f8a4b0d259c9a17c44b73befda8461ef5/dm_control-1.0.14-py3-none-any.whl", hash = "sha256:883c63244a7ebf598700a97564ed19fffd3479ca79efd090aed881609cdb9fc6", size = 39291529 }, +] + +[[package]] +name = "dm-env" +version = "1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "dm-tree", version = "0.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.13'" }, + { name = "dm-tree", version = "0.1.9", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.13'" }, + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/62/c9/93e8d6239d5806508a2ee4b370e67c6069943ca149f59f533923737a99b7/dm-env-1.6.tar.gz", hash = "sha256:a436eb1c654c39e0c986a516cee218bea7140b510fceff63f97eb4fcff3d93de", size = 20187 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/7e/36d548040e61337bf9182637a589c44da407a47a923ee88aec7f0e89867c/dm_env-1.6-py3-none-any.whl", hash = "sha256:0eabb6759dd453b625e041032f7ae0c1e87d4eb61b6a96b9ca586483837abf29", size = 26339 }, +] + +[[package]] +name = "dm-tree" +version = "0.1.8" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and sys_platform == 'darwin'", + "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and 
sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version >= '3.13' and sys_platform == 'emscripten'", +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/6d/f1997aac42e0f550c1e952a0b920eaa0bfc4d27d0421499881b934b969fc/dm-tree-0.1.8.tar.gz", hash = "sha256:0fcaabbb14e7980377439e7140bd05552739ca5e515ecb3119f234acee4b9430", size = 35384 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/64/901b324804793743f0fdc9e47db893bf0ded9e074850fab2440af330fe83/dm_tree-0.1.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad16ceba90a56ec47cf45b21856d14962ac314787975ef786efb5e6e9ca75ec7", size = 167628 }, + { url = "https://files.pythonhosted.org/packages/b1/65/4f10a68dde5fa0c91043c9c899e9bc79b1657ba932d39a5f8525c0058e68/dm_tree-0.1.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:803bfc53b4659f447ac694dbd04235f94a73ef7c1fd1e0df7c84ac41e0bc963b", size = 115351 }, + { url = "https://files.pythonhosted.org/packages/08/e2/4c29cb9876456517f21979ddcbb6048f28a3b52c61aa9d14d42adafcdca4/dm_tree-0.1.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:378cc8ad93c5fe3590f405a309980721f021c790ca1bdf9b15bb1d59daec57f5", size = 110661 }, + { url = "https://files.pythonhosted.org/packages/fe/89/386332bbd7567c4ccc13aa2e58f733237503fc75fb389955d3b06b9fb967/dm_tree-0.1.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1607ce49aa42f010d1e5e616d92ce899d66835d4d8bea49679582435285515de", size = 146727 }, + { url = "https://files.pythonhosted.org/packages/a3/e7/b0c04ea5af82c19fd5984bfe980f4012601c4708634c7c51a952b17c93b2/dm_tree-0.1.8-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:343a4a4ebaa127451ff971254a4be4084eb4bdc0b2513c32b46f6f728fd03f9e", size = 174689 }, + { url = "https://files.pythonhosted.org/packages/13/0d/09a4ecb54c03db53d9eb5bbc81609d89de26e3762743f003282c1b48debb/dm_tree-0.1.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:fa42a605d099ee7d41ba2b5fb75e21423951fd26e5d50583a00471238fb3021d", size = 150338 }, + { url = "https://files.pythonhosted.org/packages/4a/27/c5e3580a952a07e5a1428ae952874796870dc8db789f3d774e886160a9f4/dm_tree-0.1.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b7764de0d855338abefc6e3ee9fe40d301668310aa3baea3f778ff051f4393", size = 152800 }, + { url = "https://files.pythonhosted.org/packages/e4/c1/522041457444b67125ac9527208bb3148f63d7dce0a86ffa589ec763a10e/dm_tree-0.1.8-cp311-cp311-win_amd64.whl", hash = "sha256:a5d819c38c03f0bb5b3b3703c60e4b170355a0fc6b5819325bf3d4ceb3ae7e80", size = 101336 }, + { url = "https://files.pythonhosted.org/packages/72/2c/e33dfc96f974ae3cba82c9836371c93fcb4d59d5a82ebb853861618a0b0b/dm_tree-0.1.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ea9e59e0451e7d29aece402d9f908f2e2a80922bcde2ebfd5dcb07750fcbfee8", size = 169495 }, + { url = "https://files.pythonhosted.org/packages/17/af/4030827253a5d50eb8da6f7189bc33d3c850c4109cf3414910e9af677cb7/dm_tree-0.1.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:94d3f0826311f45ee19b75f5b48c99466e4218a0489e81c0f0167bda50cacf22", size = 116525 }, + { url = "https://files.pythonhosted.org/packages/10/10/5f9eed00b1186921e447960443f03cda6374cba8cd5cf7aff2b42ecb8a0e/dm_tree-0.1.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:435227cf3c5dc63f4de054cf3d00183790bd9ead4c3623138c74dde7f67f521b", size = 111436 }, + { url = "https://files.pythonhosted.org/packages/4a/da/3d3d04f7a572f7649f48edc9402ff5836e2f90e18445ffde110fd6142889/dm_tree-0.1.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09964470f76a5201aff2e8f9b26842976de7889300676f927930f6285e256760", size = 146828 }, + { url = "https://files.pythonhosted.org/packages/c4/12/0a8c2152655ca39c1059c762ea1dc12784166c735126eb0ab929c518ef4e/dm_tree-0.1.8-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:75c5d528bb992981c20793b6b453e91560784215dffb8a5440ba999753c14ceb", size = 175054 }, + { url = "https://files.pythonhosted.org/packages/c9/d4/8cbb857612ca69763ee4f4f97c7b91659df1d373d62237cb9c772e55ae97/dm_tree-0.1.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a94aba18a35457a1b5cd716fd7b46c5dafdc4cf7869b4bae665b91c4682a8e", size = 152834 }, + { url = "https://files.pythonhosted.org/packages/ad/e3/96f5267fe5a47c882dce7f3d06b26ddd756681fc4fbedd55d51b78b08bca/dm_tree-0.1.8-cp312-cp312-win_amd64.whl", hash = "sha256:96a548a406a6fb15fe58f6a30a57ff2f2aafbf25f05afab00c8f5e5977b6c715", size = 101754 }, +] + +[[package]] +name = "dm-tree" +version = "0.1.9" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.12.*' and sys_platform == 'darwin'", + "python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'emscripten'", + "python_full_version < '3.12' and sys_platform == 'darwin'", + "python_full_version < '3.12' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version < '3.12' and sys_platform == 'emscripten'", +] +dependencies = [ + { name = "absl-py", marker = "python_full_version < '3.13'" }, + { name = "attrs", marker = "python_full_version < '3.13'" }, + { name = "numpy", marker = "python_full_version < '3.13'" }, + { name = "wrapt", marker = "python_full_version < '3.13'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/a6/83/ce29720ccf934c6cfa9b9c95ebbe96558386e66886626066632b5e44afed/dm_tree-0.1.9.tar.gz", hash = "sha256:a4c7db3d3935a5a2d5e4b383fc26c6b0cd6f78c6d4605d3e7b518800ecd5342b", size = 35623 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/b6/2d2de9f8901ccc5b6f34aea678e732816853015b9d756c86efcec189bf4b/dm_tree-0.1.9-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7d7d784afaeb4b67d87d858261aaf02503939ddc1f09c4cca70728f9892ab004", size = 173561 }, + { url = "https://files.pythonhosted.org/packages/3e/07/57459f32cf5683c25b596ab58f42a3305f91876c2f03d2fa6e9d0df75fcb/dm_tree-0.1.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e660d1779ddcbd1348410d08f67db4870d413a3ec4ba8b4b045bd5ce4bd8f35c", size = 146926 }, + { url = "https://files.pythonhosted.org/packages/e8/46/939fbf81177c7cb3b1e5ddebd696237b3be9520769cce882f064de497103/dm_tree-0.1.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:294dc1cecf87552a45cdd5ddb215e7f5295a5a47c46f1f0a0463c3dd02a527d7", size = 152851 }, + { url = "https://files.pythonhosted.org/packages/35/3e/a46933e0157b0ac87619a754ce1a796b2afc6386fca7c11f95c010f40745/dm_tree-0.1.9-cp311-cp311-win_amd64.whl", hash = "sha256:12f4cc6cd52a39aa38ff31577b6d79b6136a9a89273a876bf62335c9f65c27bf", size = 101522 }, + { url = "https://files.pythonhosted.org/packages/ee/02/61aa90ab695918b4389d75c99bf0ec3cd0abacf1cadbef4053626f23ce34/dm_tree-0.1.9-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a8d20eeab7fde77a3ed71f07716021eb0edfb4812a128eb381d108af3a310257", size = 175012 }, + { url = "https://files.pythonhosted.org/packages/81/10/120cd40556407879c1069941bd8b0d1a75754128c1a5bf0e27dbcf2a49fc/dm_tree-0.1.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80c43417814b1181d3367b335460bfdd30b79ee187a64220e11f6ddd093a4b15", size = 147204 }, + { url = 
"https://files.pythonhosted.org/packages/86/52/27607a275c12858b979b8e943d2bd3bd0f9028503bb7079d5830a8b3cac0/dm_tree-0.1.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2334cfe9d2ed4293f9f1c7aefba0657deaab9ea74b5fadd966f6d01d9b6b42d9", size = 153013 }, + { url = "https://files.pythonhosted.org/packages/ea/97/4f78412f73a9350bc8f934441bae5b68b102c8f4240a7f06b4114b51d6de/dm_tree-0.1.9-cp312-cp312-win_amd64.whl", hash = "sha256:9020a5ce256fcc83aa4bc190cc96dd66e87685db0a6e501b0c06aa492c2e38fc", size = 102022 }, + { url = "https://files.pythonhosted.org/packages/5f/13/823788cd0f7964cadcfa56d1e0f9e5e987ee73b5db6273bc00168f524f1a/dm_tree-0.1.9-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:cfa33c2e028155810ad1b4e11928707bf47489516763a86e79cab2954d23bf68", size = 175000 }, + { url = "https://files.pythonhosted.org/packages/37/6a/512abdf7f20acc6cd6fce77f7663014d129aa313b5953aa2603d58fdb0c9/dm_tree-0.1.9-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d05622d074353cf434049206e53c12147903a048c4bd7d77f2800d427413ad78", size = 147210 }, + { url = "https://files.pythonhosted.org/packages/e5/0a/f4d72ffb64ab3edc1fa66261f81ee3b4142ab14cd8aa1dfc7bbeca5ee4ba/dm_tree-0.1.9-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f68b0efad76703dd4648586c75618a48cdd671b68c3266fe980e323c15423607", size = 153043 }, + { url = "https://files.pythonhosted.org/packages/0d/ee/529ce999770b4d621a64af86c60cfee52f0cdd7294752105179ebf1c07c6/dm_tree-0.1.9-cp313-cp313-win_amd64.whl", hash = "sha256:e97c34fcb44941c36b7ee81dcdbceba0fbe728bddcc77e5837ab2eb665bcbff8", size = 102043 }, + { url = "https://files.pythonhosted.org/packages/ee/3c/5b40f8862390e9172e776cf610f3791c1af01f140a5698799fbe4a97206f/dm_tree-0.1.9-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b06e7a5da1c31a82521a60060573527e8d24b9920fdd20b2ec86f08412737598", size = 180821 }, + { url = 
"https://files.pythonhosted.org/packages/84/1d/3cdbeeb3f6937a47a26cee502bffeccc2e55b97dfcce8a1d1135ea1b5b47/dm_tree-0.1.9-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6893fcdc5cf1a4f459cfc383526d35d42e7c671ae565d7e429a2f2cb2cb93e89", size = 147282 }, + { url = "https://files.pythonhosted.org/packages/c5/37/15603079854394f16e3833a7b50696c1f3cbf30a2243a119f64f18a16f36/dm_tree-0.1.9-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1f5d1e96b3a7de22b25b13a5eb30f41f8cf9c02dd4479a24920de99e780903c", size = 153052 }, +] + +[[package]] +name = "docker-pycreds" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c5/e6/d1f6c00b7221e2d7c4b470132c931325c8b22c51ca62417e300f5ce16009/docker-pycreds-0.4.0.tar.gz", hash = "sha256:6ce3270bcaf404cc4c3e27e4b6c70d3521deae82fb508767870fdbf772d584d4", size = 8754 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f5/e8/f6bd1eee09314e7e6dee49cbe2c5e22314ccdb38db16c9fc72d2fa80d054/docker_pycreds-0.4.0-py2.py3-none-any.whl", hash = "sha256:7266112468627868005106ec19cd0d722702d2b7d5912a28e19b826c3d37af49", size = 8982 }, +] + +[[package]] +name = "docstring-parser" +version = "0.16" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/08/12/9c22a58c0b1e29271051222d8906257616da84135af9ed167c9e28f85cb3/docstring_parser-0.16.tar.gz", hash = "sha256:538beabd0af1e2db0146b6bd3caa526c35a34d61af9fd2887f3a8a27a739aa6e", size = 26565 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/7c/e9fcff7623954d86bdc17782036cbf715ecab1bec4847c008557affe1ca8/docstring_parser-0.16-py3-none-any.whl", hash = "sha256:bf0a1387354d3691d102edef7ec124f219ef639982d096e26e3b60aeffa90637", size = 36533 }, +] + +[[package]] +name = "donfig" +version = "0.8.1.post1" +source = { registry = "https://pypi.org/simple" 
} +dependencies = [ + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/25/71/80cc718ff6d7abfbabacb1f57aaa42e9c1552bfdd01e64ddd704e4a03638/donfig-0.8.1.post1.tar.gz", hash = "sha256:3bef3413a4c1c601b585e8d297256d0c1470ea012afa6e8461dc28bfb7c23f52", size = 19506 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/d5/c5db1ea3394c6e1732fb3286b3bd878b59507a8f77d32a2cebda7d7b7cd4/donfig-0.8.1.post1-py3-none-any.whl", hash = "sha256:2a3175ce74a06109ff9307d90a230f81215cbac9a751f4d1c6194644b8204f9d", size = 21592 }, +] + +[[package]] +name = "draccus" +version = "0.10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mergedeep" }, + { name = "pyyaml" }, + { name = "pyyaml-include" }, + { name = "toml" }, + { name = "typing-inspect" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4e/e2/f5012fda17ee5d1eaf3481b6ca3e11dffa5348e5e08ab745538fdc8041bb/draccus-0.10.0.tar.gz", hash = "sha256:8dd08304219becdcd66cd16058ba98e9c3e6b7bfe48ccb9579dae39f8d37ae19", size = 62243 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/9a/a83083b230d352ee5d205757b74006dbe084448ca45e3bc5ca99215b1e55/draccus-0.10.0-py3-none-any.whl", hash = "sha256:90243418ae0e9271c390a59cafb6acfd37001193696ed36fcc8525f791a83282", size = 71783 }, +] + +[[package]] +name = "einops" +version = "0.8.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e5/81/df4fbe24dff8ba3934af99044188e20a98ed441ad17a274539b74e82e126/einops-0.8.1.tar.gz", hash = "sha256:de5d960a7a761225532e0f1959e5315ebeafc0cd43394732f103ca44b9837e84", size = 54805 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/62/9773de14fe6c45c23649e98b83231fffd7b9892b6cf863251dc2afa73643/einops-0.8.1-py3-none-any.whl", hash = "sha256:919387eb55330f5757c6bea9165c5ff5cfe63a642682ea788a6d472576d81737", size = 64359 }, +] + +[[package]] +name = "equinox" +version = "0.12.2" 
+source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jax" }, + { name = "jaxtyping" }, + { name = "typing-extensions" }, + { name = "wadler-lindig" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4c/1c/da174caa2902cee108a542cfb801bd4366a5e44541b625d5a0984c9238e0/equinox-0.12.2.tar.gz", hash = "sha256:648e4206bbc53b228922e8f18cd3cffe543ddda1172c0002f8954e484bab0023", size = 139007 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/a7/5961a7cad10df1e165a8b9c4ba0661aaec9497861e53682effa1787d97aa/equinox-0.12.2-py3-none-any.whl", hash = "sha256:0d9c09c077e7895a5334930ddb9ecd7d39840c3ad252cf8262aa8ddc6bb8ae97", size = 177247 }, +] + +[[package]] +name = "etils" +version = "1.12.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/12/1cc11e88a0201280ff389bc4076df7c3432e39d9f22cba8b71aa263f67b8/etils-1.12.2.tar.gz", hash = "sha256:c6b9e1f0ce66d1bbf54f99201b08a60ba396d3446d9eb18d4bc39b26a2e1a5ee", size = 104711 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dd/71/40ee142e564b8a34a7ae9546e99e665e0001011a3254d5bbbe113d72ccba/etils-1.12.2-py3-none-any.whl", hash = "sha256:4600bec9de6cf5cb043a171e1856e38b5f273719cf3ecef90199f7091a6b3912", size = 167613 }, +] + +[package.optional-dependencies] +edc = [ + { name = "typing-extensions" }, +] +enp = [ + { name = "einops" }, + { name = "numpy" }, + { name = "typing-extensions" }, +] +epath = [ + { name = "fsspec" }, + { name = "importlib-resources" }, + { name = "typing-extensions" }, + { name = "zipp" }, +] +epy = [ + { name = "typing-extensions" }, +] +etree = [ + { name = "absl-py" }, + { name = "einops" }, + { name = "numpy" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] + +[[package]] +name = "evdev" +version = "1.9.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/63/fe/a17c106a1f4061ce83f04d14bcedcfb2c38c7793ea56bfb906a6fadae8cb/evdev-1.9.2.tar.gz", hash = "sha256:5d3278892ce1f92a74d6bf888cc8525d9f68af85dbe336c95d1c87fb8f423069", size = 33301 } + +[[package]] +name = "executing" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/50/a9d80c47ff289c611ff12e63f7c5d13942c65d68125160cefd768c73e6e4/executing-2.2.0.tar.gz", hash = "sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755", size = 978693 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702 }, +] + +[[package]] +name = "farama-notifications" +version = "0.0.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2e/2c/8384832b7a6b1fd6ba95bbdcae26e7137bb3eedc955c42fd5cdcc086cfbf/Farama-Notifications-0.0.4.tar.gz", hash = "sha256:13fceff2d14314cf80703c8266462ebf3733c7d165336eee998fc58e545efd18", size = 2131 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/2c/ffc08c54c05cdce6fbed2aeebc46348dbe180c6d2c541c7af7ba0aa5f5f8/Farama_Notifications-0.0.4-py3-none-any.whl", hash = "sha256:14de931035a41961f7c056361dc7f980762a143d05791ef5794a751a2caf05ae", size = 2511 }, +] + +[[package]] +name = "filelock" +version = "3.18.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/10/c23352565a6544bdc5353e0b15fc1c563352101f30e24bf500207a54df9a/filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2", size = 18075 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de", size = 16215 }, +] + +[[package]] +name = "flask" +version = "3.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "blinker" }, + { name = "click" }, + { name = "itsdangerous" }, + { name = "jinja2" }, + { name = "markupsafe" }, + { name = "werkzeug" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/de/e47735752347f4128bcf354e0da07ef311a78244eba9e3dc1d4a5ab21a98/flask-3.1.1.tar.gz", hash = "sha256:284c7b8f2f58cb737f0cf1c30fd7eaf0ccfcde196099d24ecede3fc2005aa59e", size = 753440 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/68/9d4508e893976286d2ead7f8f571314af6c2037af34853a30fd769c02e9d/flask-3.1.1-py3-none-any.whl", hash = "sha256:07aae2bb5eaf77993ef57e357491839f5fd9f4dc281593a81a9e4d79a24f295c", size = 103305 }, +] + +[[package]] +name = "flatbuffers" +version = "25.2.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/30/eb5dce7994fc71a2f685d98ec33cc660c0a5887db5610137e60d8cbc4489/flatbuffers-25.2.10.tar.gz", hash = "sha256:97e451377a41262f8d9bd4295cc836133415cc03d8cb966410a4af92eb00d26e", size = 22170 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b8/25/155f9f080d5e4bc0082edfda032ea2bc2b8fab3f4d25d46c1e9dd22a1a89/flatbuffers-25.2.10-py2.py3-none-any.whl", hash = "sha256:ebba5f4d5ea615af3f7fd70fc310636fbb2bbd1f566ac0a23d98dd412de50051", size = 30953 }, +] + +[[package]] +name = "flax" +version = "0.10.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jax" }, + { name = "msgpack" }, + { name = "numpy" }, + { name = "optax" }, + { name = "orbax-checkpoint" }, + { name = "pyyaml" }, + { name = "rich" }, + { name = "tensorstore" }, + { name = "typing-extensions" 
}, +] +sdist = { url = "https://files.pythonhosted.org/packages/ff/38/4a0203198ac9459832abd33246d4e4fe250528b928a1fcd14cd6559bfcb4/flax-0.10.2.tar.gz", hash = "sha256:6f831350026ad48182ba6588bb4dd72dc1084985d9aca923254cb3e4c78d75f3", size = 5082773 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/a2/daca2bc563e1fd53c33fbff1e33e84004639f7ad9e1a9a54370480a7780d/flax-0.10.2-py3-none-any.whl", hash = "sha256:5bc0954b98d1596e8984f8e1bb84105e6e1dd9eae311cee3a777d7a335470a76", size = 424180 }, +] + +[[package]] +name = "fonttools" +version = "4.58.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3e/7a/30c581aeaa86d94e7a29344bccefd2408870bf5b0e7640b6f4ffede61bd0/fonttools-4.58.1.tar.gz", hash = "sha256:cbc8868e0a29c3e22628dfa1432adf7a104d86d1bc661cecc3e9173070b6ab2d", size = 3519505 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/3f/9fecd69149b0eec5ca46ec58de83b2fd34d07204fe2c12c209255082507a/fonttools-4.58.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9966e14729669bcfbb56f83b747a2397c4d97c6d4798cb2e2adc28f9388fa008", size = 2754713 }, + { url = "https://files.pythonhosted.org/packages/c8/19/d04ea5f3ab2afa7799f2b1ebe1d57ff71b479f99f29b82bddc7197d50220/fonttools-4.58.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64cc1647bbe83dea57f5496ec878ad19ccdba7185b0dd34955d3e6f03dc789e6", size = 2316637 }, + { url = "https://files.pythonhosted.org/packages/5c/3f/375f59d756b17318336c050363849011e03ac82904538f39ebe8189835bc/fonttools-4.58.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:464f790ce681d08d1583df0735776aa9cb1999594bf336ddd0bf962c17b629ac", size = 4915730 }, + { url = "https://files.pythonhosted.org/packages/2f/90/069f859d6f6480503574cda21b84ceee98bf5f5fd1764f26674e828a2600/fonttools-4.58.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3c53c6a720ee70cc25746d511ba88c45c95ec510fd258026ed209b0b9e3ba92f", size = 4936194 }, + { url = "https://files.pythonhosted.org/packages/01/11/339973e588e1c27f20c578f845bdcf84376c5e42bd35fca05419fd8d1648/fonttools-4.58.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b6823a633bbce29cf3033508ebb54a433c473fb9833eff7f936bfdc5204fd98d", size = 4978982 }, + { url = "https://files.pythonhosted.org/packages/a7/aa/1c627532a69715f54b8d96ab3a7bc8628f6e89989e9275dfc067dc2d6d56/fonttools-4.58.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5701fe66a1408c1974d2f78c00f964f8aad17cccbc32bc041e1b81421f31f448", size = 5090087 }, + { url = "https://files.pythonhosted.org/packages/77/ce/cf7b624db35bce589ac1f2c98329ea91b28f0283d3b7e9e6126dfaeb5abd/fonttools-4.58.1-cp311-cp311-win32.whl", hash = "sha256:4cad2c74adf9ee31ae43be6b0b376fdb386d4d50c60979790e32c3548efec051", size = 2188923 }, + { url = "https://files.pythonhosted.org/packages/b9/22/c4f1f76eeb1b9353e9cc81451d0ae08acc3d3aa31b9ab8f3791a18af1f89/fonttools-4.58.1-cp311-cp311-win_amd64.whl", hash = "sha256:7ade12485abccb0f6b6a6e2a88c50e587ff0e201e48e0153dd9b2e0ed67a2f38", size = 2236853 }, + { url = "https://files.pythonhosted.org/packages/32/97/ed1078b1e138fbc0b4ee75878000d549a70c02d83bb4e557e416efc34140/fonttools-4.58.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f56085a65769dc0100822c814069327541db9c3c4f21e599c6138f9dbda75e96", size = 2740473 }, + { url = "https://files.pythonhosted.org/packages/28/35/53d49fb7d6b30128153d11628b976fda3ce8ae44234b5a81c4edb3023798/fonttools-4.58.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:19c65a88e522c9f1be0c05d73541de20feada99d23d06e9b5354023cc3e517b0", size = 2309936 }, + { url = "https://files.pythonhosted.org/packages/0c/db/8b63c1d673b2bf0cfed77500d47769dc4aa85453b5f0ef525db2cf952895/fonttools-4.58.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b01bb37006e97703300bfde7a73d1c7038574dd1df9d8d92ca99af151becf2ca", size 
= 4814671 }, + { url = "https://files.pythonhosted.org/packages/a6/13/0b96eeb148b77c521b8e94628c59d15e4fb0e76191c41f5616a656d6adb9/fonttools-4.58.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d629dea240f0fc826d8bb14566e95c663214eece21b5932c9228d3e8907f55aa", size = 4881493 }, + { url = "https://files.pythonhosted.org/packages/ac/b0/9f8aa60e8e5be91aba8dfaa3fa6b33fd950511686921cf27e97bf4154e3d/fonttools-4.58.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ef0b33ff35421a04a638e736823c2dee9d200cdd275cfdb43e875ca745150aae", size = 4874960 }, + { url = "https://files.pythonhosted.org/packages/b6/7e/83b409659eb4818f1283a8319f3570497718d6d3b70f4fca2ddf962e948e/fonttools-4.58.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4db9399ee633855c718fe8bea5eecbdc5bf3fdbed2648e50f67f8946b943ed1c", size = 5026677 }, + { url = "https://files.pythonhosted.org/packages/34/52/1eb69802d3b54e569158c97810195f317d350f56390b83c43e1c999551d8/fonttools-4.58.1-cp312-cp312-win32.whl", hash = "sha256:5cf04c4f73d36b30ea1cff091a7a9e65f8d5b08345b950f82679034e9f7573f4", size = 2176201 }, + { url = "https://files.pythonhosted.org/packages/6f/25/8dcfeb771de8d9cdffab2b957a05af4395d41ec9a198ec139d2326366a07/fonttools-4.58.1-cp312-cp312-win_amd64.whl", hash = "sha256:4a3841b59c67fa1f739542b05211609c453cec5d11d21f863dd2652d5a81ec9b", size = 2225519 }, + { url = "https://files.pythonhosted.org/packages/83/7a/7ed2e4e381f9b1f5122d33b7e626a40f646cacc1ef72d8806aacece9e580/fonttools-4.58.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:68379d1599fc59569956a97eb7b07e0413f76142ac8513fa24c9f2c03970543a", size = 2731231 }, + { url = "https://files.pythonhosted.org/packages/e7/28/74864dc9248e917cbe07c903e0ce1517c89d42e2fab6b0ce218387ef0e24/fonttools-4.58.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8631905657de4f9a7ae1e12186c1ed20ba4d6168c2d593b9e0bd2908061d341b", size = 2305224 }, + { url = 
"https://files.pythonhosted.org/packages/e7/f1/ced758896188c1632c5b034a0741457f305e087eb4fa762d86aa3c1ae422/fonttools-4.58.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2ecea7289061c2c71468723409a8dd6e70d1ecfce6bc7686e5a74b9ce9154fe", size = 4793934 }, + { url = "https://files.pythonhosted.org/packages/c1/46/8b46469c6edac393de1c380c7ec61922d5440f25605dfca7849e5ffff295/fonttools-4.58.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b8860f8cd48b345bd1df1d7be650f600f69ee971ffe338c5bd5bcb6bdb3b92c", size = 4863415 }, + { url = "https://files.pythonhosted.org/packages/12/1b/82aa678bb96af6663fe163d51493ffb8622948f4908c886cba6b67fbf6c5/fonttools-4.58.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7c9a0acdefcb8d7ccd7c59202056166c400e797047009ecb299b75ab950c2a9c", size = 4865025 }, + { url = "https://files.pythonhosted.org/packages/7d/26/b66ab2f2dc34b962caecd6fa72a036395b1bc9fb849f52856b1e1144cd63/fonttools-4.58.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1e1fac0be6be3e4309058e156948cb73196e5fd994268b89b5e3f5a26ee2b582", size = 5002698 }, + { url = "https://files.pythonhosted.org/packages/7b/56/cdddc63333ed77e810df56e5e7fb93659022d535a670335d8792be6d59fd/fonttools-4.58.1-cp313-cp313-win32.whl", hash = "sha256:aed7f93a9a072f0ce6fb46aad9474824ac6dd9c7c38a72f8295dd14f2215950f", size = 2174515 }, + { url = "https://files.pythonhosted.org/packages/ba/81/c7f395718e44cebe1010fcd7f1b91957d65d512d5f03114d2d6d00cae1c4/fonttools-4.58.1-cp313-cp313-win_amd64.whl", hash = "sha256:b27d69c97c20c9bca807f7ae7fc7df459eb62994859ff6a2a489e420634deac3", size = 2225290 }, + { url = "https://files.pythonhosted.org/packages/21/ff/995277586691c0cc314c28b24b4ec30610440fd7bf580072aed1409f95b0/fonttools-4.58.1-py3-none-any.whl", hash = "sha256:db88365d0962cd6f5bce54b190a4669aeed9c9941aa7bd60a5af084d8d9173d6", size = 1113429 }, +] + +[[package]] +name = "frozenlist" +version = 
"1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/f4/d744cba2da59b5c1d88823cf9e8a6c74e4659e2b27604ed973be2a0bf5ab/frozenlist-1.6.0.tar.gz", hash = "sha256:b99655c32c1c8e06d111e7f41c06c29a5318cb1835df23a45518e02a47c63b68", size = 42831 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/53/b5/bc883b5296ec902115c00be161da93bf661199c465ec4c483feec6ea4c32/frozenlist-1.6.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae8337990e7a45683548ffb2fee1af2f1ed08169284cd829cdd9a7fa7470530d", size = 160912 }, + { url = "https://files.pythonhosted.org/packages/6f/93/51b058b563d0704b39c56baa222828043aafcac17fd3734bec5dbeb619b1/frozenlist-1.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8c952f69dd524558694818a461855f35d36cc7f5c0adddce37e962c85d06eac0", size = 124315 }, + { url = "https://files.pythonhosted.org/packages/c9/e0/46cd35219428d350558b874d595e132d1c17a9471a1bd0d01d518a261e7c/frozenlist-1.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8f5fef13136c4e2dee91bfb9a44e236fff78fc2cd9f838eddfc470c3d7d90afe", size = 122230 }, + { url = "https://files.pythonhosted.org/packages/d1/0f/7ad2ce928ad06d6dd26a61812b959ded573d3e9d0ee6109d96c2be7172e9/frozenlist-1.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:716bbba09611b4663ecbb7cd022f640759af8259e12a6ca939c0a6acd49eedba", size = 314842 }, + { url = "https://files.pythonhosted.org/packages/34/76/98cbbd8a20a5c3359a2004ae5e5b216af84a150ccbad67c8f8f30fb2ea91/frozenlist-1.6.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7b8c4dc422c1a3ffc550b465090e53b0bf4839047f3e436a34172ac67c45d595", size = 304919 }, + { url = "https://files.pythonhosted.org/packages/9a/fa/258e771ce3a44348c05e6b01dffc2bc67603fba95761458c238cd09a2c77/frozenlist-1.6.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b11534872256e1666116f6587a1592ef395a98b54476addb5e8d352925cb5d4a", size = 324074 }, + { url = "https://files.pythonhosted.org/packages/d5/a4/047d861fd8c538210e12b208c0479912273f991356b6bdee7ea8356b07c9/frozenlist-1.6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c6eceb88aaf7221f75be6ab498dc622a151f5f88d536661af3ffc486245a626", size = 321292 }, + { url = "https://files.pythonhosted.org/packages/c0/25/cfec8af758b4525676cabd36efcaf7102c1348a776c0d1ad046b8a7cdc65/frozenlist-1.6.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62c828a5b195570eb4b37369fcbbd58e96c905768d53a44d13044355647838ff", size = 301569 }, + { url = "https://files.pythonhosted.org/packages/87/2f/0c819372fa9f0c07b153124bf58683b8d0ca7bb73ea5ccde9b9ef1745beb/frozenlist-1.6.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1c6bd2c6399920c9622362ce95a7d74e7f9af9bfec05fff91b8ce4b9647845a", size = 313625 }, + { url = "https://files.pythonhosted.org/packages/50/5f/f0cf8b0fdedffdb76b3745aa13d5dbe404d63493cc211ce8250f2025307f/frozenlist-1.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:49ba23817781e22fcbd45fd9ff2b9b8cdb7b16a42a4851ab8025cae7b22e96d0", size = 312523 }, + { url = "https://files.pythonhosted.org/packages/e1/6c/38c49108491272d3e84125bbabf2c2d0b304899b52f49f0539deb26ad18d/frozenlist-1.6.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:431ef6937ae0f853143e2ca67d6da76c083e8b1fe3df0e96f3802fd37626e606", size = 322657 }, + { url = "https://files.pythonhosted.org/packages/bd/4b/3bd3bad5be06a9d1b04b1c22be80b5fe65b502992d62fab4bdb25d9366ee/frozenlist-1.6.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9d124b38b3c299ca68433597ee26b7819209cb8a3a9ea761dfe9db3a04bba584", size = 303414 }, + { url = 
"https://files.pythonhosted.org/packages/5b/89/7e225a30bef6e85dbfe22622c24afe932e9444de3b40d58b1ea589a14ef8/frozenlist-1.6.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:118e97556306402e2b010da1ef21ea70cb6d6122e580da64c056b96f524fbd6a", size = 320321 }, + { url = "https://files.pythonhosted.org/packages/22/72/7e3acef4dd9e86366cb8f4d8f28e852c2b7e116927e9722b31a6f71ea4b0/frozenlist-1.6.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fb3b309f1d4086b5533cf7bbcf3f956f0ae6469664522f1bde4feed26fba60f1", size = 323975 }, + { url = "https://files.pythonhosted.org/packages/d8/85/e5da03d20507e13c66ce612c9792b76811b7a43e3320cce42d95b85ac755/frozenlist-1.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54dece0d21dce4fdb188a1ffc555926adf1d1c516e493c2914d7c370e454bc9e", size = 316553 }, + { url = "https://files.pythonhosted.org/packages/ac/8e/6c609cbd0580ae8a0661c408149f196aade7d325b1ae7adc930501b81acb/frozenlist-1.6.0-cp311-cp311-win32.whl", hash = "sha256:654e4ba1d0b2154ca2f096bed27461cf6160bc7f504a7f9a9ef447c293caf860", size = 115511 }, + { url = "https://files.pythonhosted.org/packages/f2/13/a84804cfde6de12d44ed48ecbf777ba62b12ff09e761f76cdd1ff9e14bb1/frozenlist-1.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:3e911391bffdb806001002c1f860787542f45916c3baf764264a52765d5a5603", size = 120863 }, + { url = "https://files.pythonhosted.org/packages/9c/8a/289b7d0de2fbac832ea80944d809759976f661557a38bb8e77db5d9f79b7/frozenlist-1.6.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:c5b9e42ace7d95bf41e19b87cec8f262c41d3510d8ad7514ab3862ea2197bfb1", size = 160193 }, + { url = "https://files.pythonhosted.org/packages/19/80/2fd17d322aec7f430549f0669f599997174f93ee17929ea5b92781ec902c/frozenlist-1.6.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ca9973735ce9f770d24d5484dcb42f68f135351c2fc81a7a9369e48cf2998a29", size = 123831 }, + { url = 
"https://files.pythonhosted.org/packages/99/06/f5812da431273f78c6543e0b2f7de67dfd65eb0a433978b2c9c63d2205e4/frozenlist-1.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6ac40ec76041c67b928ca8aaffba15c2b2ee3f5ae8d0cb0617b5e63ec119ca25", size = 121862 }, + { url = "https://files.pythonhosted.org/packages/d0/31/9e61c6b5fc493cf24d54881731204d27105234d09878be1a5983182cc4a5/frozenlist-1.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b7a8a3180dfb280eb044fdec562f9b461614c0ef21669aea6f1d3dac6ee576", size = 316361 }, + { url = "https://files.pythonhosted.org/packages/9d/55/22ca9362d4f0222324981470fd50192be200154d51509ee6eb9baa148e96/frozenlist-1.6.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c444d824e22da6c9291886d80c7d00c444981a72686e2b59d38b285617cb52c8", size = 307115 }, + { url = "https://files.pythonhosted.org/packages/ae/39/4fff42920a57794881e7bb3898dc7f5f539261711ea411b43bba3cde8b79/frozenlist-1.6.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb52c8166499a8150bfd38478248572c924c003cbb45fe3bcd348e5ac7c000f9", size = 322505 }, + { url = "https://files.pythonhosted.org/packages/55/f2/88c41f374c1e4cf0092a5459e5f3d6a1e17ed274c98087a76487783df90c/frozenlist-1.6.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b35298b2db9c2468106278537ee529719228950a5fdda686582f68f247d1dc6e", size = 322666 }, + { url = "https://files.pythonhosted.org/packages/75/51/034eeb75afdf3fd03997856195b500722c0b1a50716664cde64e28299c4b/frozenlist-1.6.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d108e2d070034f9d57210f22fefd22ea0d04609fc97c5f7f5a686b3471028590", size = 302119 }, + { url = 
"https://files.pythonhosted.org/packages/2b/a6/564ecde55ee633270a793999ef4fd1d2c2b32b5a7eec903b1012cb7c5143/frozenlist-1.6.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e1be9111cb6756868ac242b3c2bd1f09d9aea09846e4f5c23715e7afb647103", size = 316226 }, + { url = "https://files.pythonhosted.org/packages/f1/c8/6c0682c32377f402b8a6174fb16378b683cf6379ab4d2827c580892ab3c7/frozenlist-1.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:94bb451c664415f02f07eef4ece976a2c65dcbab9c2f1705b7031a3a75349d8c", size = 312788 }, + { url = "https://files.pythonhosted.org/packages/b6/b8/10fbec38f82c5d163ca1750bfff4ede69713badf236a016781cf1f10a0f0/frozenlist-1.6.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:d1a686d0b0949182b8faddea596f3fc11f44768d1f74d4cad70213b2e139d821", size = 325914 }, + { url = "https://files.pythonhosted.org/packages/62/ca/2bf4f3a1bd40cdedd301e6ecfdbb291080d5afc5f9ce350c0739f773d6b9/frozenlist-1.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ea8e59105d802c5a38bdbe7362822c522230b3faba2aa35c0fa1765239b7dd70", size = 305283 }, + { url = "https://files.pythonhosted.org/packages/09/64/20cc13ccf94abc2a1f482f74ad210703dc78a590d0b805af1c9aa67f76f9/frozenlist-1.6.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:abc4e880a9b920bc5020bf6a431a6bb40589d9bca3975c980495f63632e8382f", size = 319264 }, + { url = "https://files.pythonhosted.org/packages/20/ff/86c6a2bbe98cfc231519f5e6d712a0898488ceac804a917ce014f32e68f6/frozenlist-1.6.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9a79713adfe28830f27a3c62f6b5406c37376c892b05ae070906f07ae4487046", size = 326482 }, + { url = "https://files.pythonhosted.org/packages/2f/da/8e381f66367d79adca245d1d71527aac774e30e291d41ef161ce2d80c38e/frozenlist-1.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9a0318c2068e217a8f5e3b85e35899f5a19e97141a45bb925bb357cfe1daf770", size = 318248 }, + { url = 
"https://files.pythonhosted.org/packages/39/24/1a1976563fb476ab6f0fa9fefaac7616a4361dbe0461324f9fd7bf425dbe/frozenlist-1.6.0-cp312-cp312-win32.whl", hash = "sha256:853ac025092a24bb3bf09ae87f9127de9fe6e0c345614ac92536577cf956dfcc", size = 115161 }, + { url = "https://files.pythonhosted.org/packages/80/2e/fb4ed62a65f8cd66044706b1013f0010930d8cbb0729a2219561ea075434/frozenlist-1.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:2bdfe2d7e6c9281c6e55523acd6c2bf77963cb422fdc7d142fb0cb6621b66878", size = 120548 }, + { url = "https://files.pythonhosted.org/packages/6f/e5/04c7090c514d96ca00887932417f04343ab94904a56ab7f57861bf63652d/frozenlist-1.6.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1d7fb014fe0fbfee3efd6a94fc635aeaa68e5e1720fe9e57357f2e2c6e1a647e", size = 158182 }, + { url = "https://files.pythonhosted.org/packages/e9/8f/60d0555c61eec855783a6356268314d204137f5e0c53b59ae2fc28938c99/frozenlist-1.6.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01bcaa305a0fdad12745502bfd16a1c75b14558dabae226852f9159364573117", size = 122838 }, + { url = "https://files.pythonhosted.org/packages/5a/a7/d0ec890e3665b4b3b7c05dc80e477ed8dc2e2e77719368e78e2cd9fec9c8/frozenlist-1.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b314faa3051a6d45da196a2c495e922f987dc848e967d8cfeaee8a0328b1cd4", size = 120980 }, + { url = "https://files.pythonhosted.org/packages/cc/19/9b355a5e7a8eba903a008579964192c3e427444752f20b2144b10bb336df/frozenlist-1.6.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da62fecac21a3ee10463d153549d8db87549a5e77eefb8c91ac84bb42bb1e4e3", size = 305463 }, + { url = "https://files.pythonhosted.org/packages/9c/8d/5b4c758c2550131d66935ef2fa700ada2461c08866aef4229ae1554b93ca/frozenlist-1.6.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d1eb89bf3454e2132e046f9599fbcf0a4483ed43b40f545551a39316d0201cd1", size = 297985 }, + { url = 
"https://files.pythonhosted.org/packages/48/2c/537ec09e032b5865715726b2d1d9813e6589b571d34d01550c7aeaad7e53/frozenlist-1.6.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18689b40cb3936acd971f663ccb8e2589c45db5e2c5f07e0ec6207664029a9c", size = 311188 }, + { url = "https://files.pythonhosted.org/packages/31/2f/1aa74b33f74d54817055de9a4961eff798f066cdc6f67591905d4fc82a84/frozenlist-1.6.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e67ddb0749ed066b1a03fba812e2dcae791dd50e5da03be50b6a14d0c1a9ee45", size = 311874 }, + { url = "https://files.pythonhosted.org/packages/bf/f0/cfec18838f13ebf4b37cfebc8649db5ea71a1b25dacd691444a10729776c/frozenlist-1.6.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc5e64626e6682638d6e44398c9baf1d6ce6bc236d40b4b57255c9d3f9761f1f", size = 291897 }, + { url = "https://files.pythonhosted.org/packages/ea/a5/deb39325cbbea6cd0a46db8ccd76150ae2fcbe60d63243d9df4a0b8c3205/frozenlist-1.6.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:437cfd39564744ae32ad5929e55b18ebd88817f9180e4cc05e7d53b75f79ce85", size = 305799 }, + { url = "https://files.pythonhosted.org/packages/78/22/6ddec55c5243a59f605e4280f10cee8c95a449f81e40117163383829c241/frozenlist-1.6.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:62dd7df78e74d924952e2feb7357d826af8d2f307557a779d14ddf94d7311be8", size = 302804 }, + { url = "https://files.pythonhosted.org/packages/5d/b7/d9ca9bab87f28855063c4d202936800219e39db9e46f9fb004d521152623/frozenlist-1.6.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a66781d7e4cddcbbcfd64de3d41a61d6bdde370fc2e38623f30b2bd539e84a9f", size = 316404 }, + { url = "https://files.pythonhosted.org/packages/a6/3a/1255305db7874d0b9eddb4fe4a27469e1fb63720f1fc6d325a5118492d18/frozenlist-1.6.0-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:482fe06e9a3fffbcd41950f9d890034b4a54395c60b5e61fae875d37a699813f", size = 295572 }, + { url = "https://files.pythonhosted.org/packages/2a/f2/8d38eeee39a0e3a91b75867cc102159ecccf441deb6ddf67be96d3410b84/frozenlist-1.6.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e4f9373c500dfc02feea39f7a56e4f543e670212102cc2eeb51d3a99c7ffbde6", size = 307601 }, + { url = "https://files.pythonhosted.org/packages/38/04/80ec8e6b92f61ef085422d7b196822820404f940950dde5b2e367bede8bc/frozenlist-1.6.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e69bb81de06827147b7bfbaeb284d85219fa92d9f097e32cc73675f279d70188", size = 314232 }, + { url = "https://files.pythonhosted.org/packages/3a/58/93b41fb23e75f38f453ae92a2f987274c64637c450285577bd81c599b715/frozenlist-1.6.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7613d9977d2ab4a9141dde4a149f4357e4065949674c5649f920fec86ecb393e", size = 308187 }, + { url = "https://files.pythonhosted.org/packages/6a/a2/e64df5c5aa36ab3dee5a40d254f3e471bb0603c225f81664267281c46a2d/frozenlist-1.6.0-cp313-cp313-win32.whl", hash = "sha256:4def87ef6d90429f777c9d9de3961679abf938cb6b7b63d4a7eb8a268babfce4", size = 114772 }, + { url = "https://files.pythonhosted.org/packages/a0/77/fead27441e749b2d574bb73d693530d59d520d4b9e9679b8e3cb779d37f2/frozenlist-1.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:37a8a52c3dfff01515e9bbbee0e6063181362f9de3db2ccf9bc96189b557cbfd", size = 119847 }, + { url = "https://files.pythonhosted.org/packages/df/bd/cc6d934991c1e5d9cafda83dfdc52f987c7b28343686aef2e58a9cf89f20/frozenlist-1.6.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:46138f5a0773d064ff663d273b309b696293d7a7c00a0994c5c13a5078134b64", size = 174937 }, + { url = "https://files.pythonhosted.org/packages/f2/a2/daf945f335abdbfdd5993e9dc348ef4507436936ab3c26d7cfe72f4843bf/frozenlist-1.6.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:f88bc0a2b9c2a835cb888b32246c27cdab5740059fb3688852bf91e915399b91", size = 136029 }, + { url = 
"https://files.pythonhosted.org/packages/51/65/4c3145f237a31247c3429e1c94c384d053f69b52110a0d04bfc8afc55fb2/frozenlist-1.6.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:777704c1d7655b802c7850255639672e90e81ad6fa42b99ce5ed3fbf45e338dd", size = 134831 }, + { url = "https://files.pythonhosted.org/packages/77/38/03d316507d8dea84dfb99bdd515ea245628af964b2bf57759e3c9205cc5e/frozenlist-1.6.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85ef8d41764c7de0dcdaf64f733a27352248493a85a80661f3c678acd27e31f2", size = 392981 }, + { url = "https://files.pythonhosted.org/packages/37/02/46285ef9828f318ba400a51d5bb616ded38db8466836a9cfa39f3903260b/frozenlist-1.6.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:da5cb36623f2b846fb25009d9d9215322318ff1c63403075f812b3b2876c8506", size = 371999 }, + { url = "https://files.pythonhosted.org/packages/0d/64/1212fea37a112c3c5c05bfb5f0a81af4836ce349e69be75af93f99644da9/frozenlist-1.6.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cbb56587a16cf0fb8acd19e90ff9924979ac1431baea8681712716a8337577b0", size = 392200 }, + { url = "https://files.pythonhosted.org/packages/81/ce/9a6ea1763e3366e44a5208f76bf37c76c5da570772375e4d0be85180e588/frozenlist-1.6.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6154c3ba59cda3f954c6333025369e42c3acd0c6e8b6ce31eb5c5b8116c07e0", size = 390134 }, + { url = "https://files.pythonhosted.org/packages/bc/36/939738b0b495b2c6d0c39ba51563e453232813042a8d908b8f9544296c29/frozenlist-1.6.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e8246877afa3f1ae5c979fe85f567d220f86a50dc6c493b9b7d8191181ae01e", size = 365208 }, + { url = 
"https://files.pythonhosted.org/packages/b4/8b/939e62e93c63409949c25220d1ba8e88e3960f8ef6a8d9ede8f94b459d27/frozenlist-1.6.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b0f6cce16306d2e117cf9db71ab3a9e8878a28176aeaf0dbe35248d97b28d0c", size = 385548 }, + { url = "https://files.pythonhosted.org/packages/62/38/22d2873c90102e06a7c5a3a5b82ca47e393c6079413e8a75c72bff067fa8/frozenlist-1.6.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:1b8e8cd8032ba266f91136d7105706ad57770f3522eac4a111d77ac126a25a9b", size = 391123 }, + { url = "https://files.pythonhosted.org/packages/44/78/63aaaf533ee0701549500f6d819be092c6065cb5c577edb70c09df74d5d0/frozenlist-1.6.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:e2ada1d8515d3ea5378c018a5f6d14b4994d4036591a52ceaf1a1549dec8e1ad", size = 394199 }, + { url = "https://files.pythonhosted.org/packages/54/45/71a6b48981d429e8fbcc08454dc99c4c2639865a646d549812883e9c9dd3/frozenlist-1.6.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:cdb2c7f071e4026c19a3e32b93a09e59b12000751fc9b0b7758da899e657d215", size = 373854 }, + { url = "https://files.pythonhosted.org/packages/3f/f3/dbf2a5e11736ea81a66e37288bf9f881143a7822b288a992579ba1b4204d/frozenlist-1.6.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:03572933a1969a6d6ab509d509e5af82ef80d4a5d4e1e9f2e1cdd22c77a3f4d2", size = 395412 }, + { url = "https://files.pythonhosted.org/packages/b3/f1/c63166806b331f05104d8ea385c4acd511598568b1f3e4e8297ca54f2676/frozenlist-1.6.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:77effc978947548b676c54bbd6a08992759ea6f410d4987d69feea9cd0919911", size = 394936 }, + { url = "https://files.pythonhosted.org/packages/ef/ea/4f3e69e179a430473eaa1a75ff986526571215fefc6b9281cdc1f09a4eb8/frozenlist-1.6.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a2bda8be77660ad4089caf2223fdbd6db1858462c4b85b67fbfa22102021e497", size = 391459 }, + { url = 
"https://files.pythonhosted.org/packages/d3/c3/0fc2c97dea550df9afd072a37c1e95421652e3206bbeaa02378b24c2b480/frozenlist-1.6.0-cp313-cp313t-win32.whl", hash = "sha256:a4d96dc5bcdbd834ec6b0f91027817214216b5b30316494d2b1aebffb87c534f", size = 128797 }, + { url = "https://files.pythonhosted.org/packages/ae/f5/79c9320c5656b1965634fe4be9c82b12a3305bdbc58ad9cb941131107b20/frozenlist-1.6.0-cp313-cp313t-win_amd64.whl", hash = "sha256:e18036cb4caa17ea151fd5f3d70be9d354c99eb8cf817a3ccde8a7873b074348", size = 134709 }, + { url = "https://files.pythonhosted.org/packages/71/3e/b04a0adda73bd52b390d730071c0d577073d3d26740ee1bad25c3ad0f37b/frozenlist-1.6.0-py3-none-any.whl", hash = "sha256:535eec9987adb04701266b92745d6cdcef2e77669299359c3009c3404dd5d191", size = 12404 }, +] + +[[package]] +name = "fsspec" +version = "2025.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/34/f4/5721faf47b8c499e776bc34c6a8fc17efdf7fdef0b00f398128bc5dcb4ac/fsspec-2025.3.0.tar.gz", hash = "sha256:a935fd1ea872591f2b5148907d103488fc523295e6c64b835cfad8c3eca44972", size = 298491 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/56/53/eb690efa8513166adef3e0669afd31e95ffde69fb3c52ec2ac7223ed6018/fsspec-2025.3.0-py3-none-any.whl", hash = "sha256:efb87af3efa9103f94ca91a7f8cb7a4df91af9f74fc106c9c7ea0efd7277c1b3", size = 193615 }, +] + +[package.optional-dependencies] +gcs = [ + { name = "gcsfs" }, +] +http = [ + { name = "aiohttp" }, +] + +[[package]] +name = "gast" +version = "0.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3c/14/c566f5ca00c115db7725263408ff952b8ae6d6a4e792ef9c84e77d9af7a1/gast-0.6.0.tar.gz", hash = "sha256:88fc5300d32c7ac6ca7b515310862f71e6fdf2c029bbec7c66c0f5dd47b6b1fb", size = 27708 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/61/8001b38461d751cd1a0c3a6ae84346796a5758123f3ed97a1b121dfbf4f3/gast-0.6.0-py3-none-any.whl", hash 
= "sha256:52b182313f7330389f72b069ba00f174cfe2a06411099547288839c6cbafbd54", size = 21173 }, +] + +[[package]] +name = "gcsfs" +version = "2025.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "decorator" }, + { name = "fsspec" }, + { name = "google-auth" }, + { name = "google-auth-oauthlib" }, + { name = "google-cloud-storage" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/59/81/441e9f7f8b9b4cabb89ff19cd58da12cebb5e6ea2864920ae8862061fac0/gcsfs-2025.3.0.tar.gz", hash = "sha256:f68d7bc24bd4b944cd55a6963b9fd722c7bd5791f46c6aebacc380e648292c04", size = 81174 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/dd/874223310565a336820a70727b61e7dd23f7be6cb91006f2cbb634670142/gcsfs-2025.3.0-py2.py3-none-any.whl", hash = "sha256:afbc2b26a481de66519e9cce7762340ef4781ce01c6663af0d63eda10f6d2c9c", size = 36133 }, +] + +[[package]] +name = "gdown" +version = "5.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "filelock" }, + { name = "requests", extra = ["socks"] }, + { name = "tqdm" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/09/6a/37e6b70c5bda3161e40265861e63b64a86bfc6ca6a8f1c35328a675c84fd/gdown-5.2.0.tar.gz", hash = "sha256:2145165062d85520a3cd98b356c9ed522c5e7984d408535409fd46f94defc787", size = 284647 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/70/e07c381e6488a77094f04c85c9caf1c8008cdc30778f7019bc52e5285ef0/gdown-5.2.0-py3-none-any.whl", hash = "sha256:33083832d82b1101bdd0e9df3edd0fbc0e1c5f14c9d8c38d2a35bf1683b526d6", size = 18235 }, +] + +[[package]] +name = "gitdb" +version = "4.0.12" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "smmap" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/94/63b0fc47eb32792c7ba1fe1b694daec9a63620db1e313033d18140c2320a/gitdb-4.0.12.tar.gz", hash = 
"sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571", size = 394684 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/61/5c78b91c3143ed5c14207f463aecfc8f9dbb5092fb2869baf37c273b2705/gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf", size = 62794 }, +] + +[[package]] +name = "gitpython" +version = "3.1.44" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "gitdb" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/89/37df0b71473153574a5cdef8f242de422a0f5d26d7a9e231e6f169b4ad14/gitpython-3.1.44.tar.gz", hash = "sha256:c87e30b26253bf5418b01b0660f818967f3c503193838337fe5e573331249269", size = 214196 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/9a/4114a9057db2f1462d5c8f8390ab7383925fe1ac012eaa42402ad65c2963/GitPython-3.1.44-py3-none-any.whl", hash = "sha256:9e0e10cda9bed1ee64bc9a6de50e7e38a9c9943241cd7f585f6df3ed28011110", size = 207599 }, +] + +[[package]] +name = "glfw" +version = "2.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/38/97/a2d667c98b8474f6b8294042488c1bd488681fb3cb4c3b9cdac1a9114287/glfw-2.9.0.tar.gz", hash = "sha256:077111a150ff09bc302c5e4ae265a5eb6aeaff0c8b01f727f7fb34e3764bb8e2", size = 31453 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/21/71/13dd8a8d547809543d21de9438a3a76a8728fc7966d01ad9fb54599aebf5/glfw-2.9.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-macosx_10_6_intel.whl", hash = "sha256:183da99152f63469e9263146db2eb1b6cc4ee0c4082b280743e57bd1b0a3bd70", size = 105297 }, + { url = "https://files.pythonhosted.org/packages/f8/a2/45e6dceec1e0a0ffa8dd3c0ecf1e11d74639a55186243129160c6434d456/glfw-2.9.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-macosx_11_0_arm64.whl", hash = 
"sha256:aef5b555673b9555216e4cd7bc0bdbbb9983f66c620a85ba7310cfcfda5cd38c", size = 102146 }, + { url = "https://files.pythonhosted.org/packages/d2/72/b6261ed918e3747c6070fe80636c63a3c8f1c42ce122670315eeeada156f/glfw-2.9.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-manylinux2014_aarch64.whl", hash = "sha256:fcc430cb21984afba74945b7df38a5e1a02b36c0b4a2a2bab42b4a26d7cc51d6", size = 230002 }, + { url = "https://files.pythonhosted.org/packages/45/d6/7f95786332e8b798569b8e60db2ee081874cec2a62572b8ec55c309d85b7/glfw-2.9.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-manylinux2014_x86_64.whl", hash = "sha256:7f85b58546880466ac445fc564c5c831ca93c8a99795ab8eaf0a2d521af293d7", size = 241949 }, + { url = "https://files.pythonhosted.org/packages/a1/e6/093ab7874a74bba351e754f6e7748c031bd7276702135da6cbcd00e1f3e2/glfw-2.9.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-manylinux_2_28_aarch64.whl", hash = "sha256:2123716c8086b80b797e849a534fc6f21aebca300519e57c80618a65ca8135dc", size = 231016 }, + { url = "https://files.pythonhosted.org/packages/7f/ba/de3630757c7d7fc2086aaf3994926d6b869d31586e4d0c14f1666af31b93/glfw-2.9.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-manylinux_2_28_x86_64.whl", hash = "sha256:4e11271e49eb9bc53431ade022e284d5a59abeace81fe3b178db1bf3ccc0c449", size = 243489 }, + { url = "https://files.pythonhosted.org/packages/32/36/c3bada8503681806231d1705ea1802bac8febf69e4186b9f0f0b9e2e4f7e/glfw-2.9.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-win32.whl", hash = "sha256:8e4fbff88e4e953bb969b6813195d5de4641f886530cc8083897e56b00bf2c8e", size = 552655 }, + { url = 
"https://files.pythonhosted.org/packages/cb/70/7f2f052ca20c3b69892818f2ee1fea53b037ea9145ff75b944ed1dc4ff82/glfw-2.9.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-win_amd64.whl", hash = "sha256:9aa3ae51601601c53838315bd2a03efb1e6bebecd072b2f64ddbd0b2556d511a", size = 559441 }, +] + +[[package]] +name = "google-api-core" +version = "2.24.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-auth" }, + { name = "googleapis-common-protos" }, + { name = "proto-plus" }, + { name = "protobuf" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/09/5c/085bcb872556934bb119e5e09de54daa07873f6866b8f0303c49e72287f7/google_api_core-2.24.2.tar.gz", hash = "sha256:81718493daf06d96d6bc76a91c23874dbf2fac0adbbf542831b805ee6e974696", size = 163516 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/46/95/f472d85adab6e538da2025dfca9e976a0d125cc0af2301f190e77b76e51c/google_api_core-2.24.2-py3-none-any.whl", hash = "sha256:810a63ac95f3c441b7c0e43d344e372887f62ce9071ba972eacf32672e072de9", size = 160061 }, +] + +[[package]] +name = "google-auth" +version = "2.40.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/84/f67f53c505a6b2c5da05c988e2a5483f5ba9eee4b1841d2e3ff22f547cd5/google_auth-2.40.2.tar.gz", hash = "sha256:a33cde547a2134273226fa4b853883559947ebe9207521f7afc707efbf690f58", size = 280990 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/c7/e2d82e6702e2a9e2311c138f8e1100f21d08aed0231290872b229ae57a86/google_auth-2.40.2-py2.py3-none-any.whl", hash = "sha256:f7e568d42eedfded58734f6a60c58321896a621f7c116c411550a4b4a13da90b", size = 216102 }, +] + +[[package]] +name = "google-auth-oauthlib" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + 
{ name = "google-auth" }, + { name = "requests-oauthlib" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fb/87/e10bf24f7bcffc1421b84d6f9c3377c30ec305d082cd737ddaa6d8f77f7c/google_auth_oauthlib-1.2.2.tar.gz", hash = "sha256:11046fb8d3348b296302dd939ace8af0a724042e8029c1b872d87fabc9f41684", size = 20955 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/84/40ee070be95771acd2f4418981edb834979424565c3eec3cd88b6aa09d24/google_auth_oauthlib-1.2.2-py3-none-any.whl", hash = "sha256:fd619506f4b3908b5df17b65f39ca8d66ea56986e5472eb5978fd8f3786f00a2", size = 19072 }, +] + +[[package]] +name = "google-cloud-core" +version = "2.4.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-api-core" }, + { name = "google-auth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d6/b8/2b53838d2acd6ec6168fd284a990c76695e84c65deee79c9f3a4276f6b4f/google_cloud_core-2.4.3.tar.gz", hash = "sha256:1fab62d7102844b278fe6dead3af32408b1df3eb06f5c7e8634cbd40edc4da53", size = 35861 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/86/bda7241a8da2d28a754aad2ba0f6776e35b67e37c36ae0c45d49370f1014/google_cloud_core-2.4.3-py2.py3-none-any.whl", hash = "sha256:5130f9f4c14b4fafdff75c79448f9495cfade0d8775facf1b09c3bf67e027f6e", size = 29348 }, +] + +[[package]] +name = "google-cloud-storage" +version = "3.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-api-core" }, + { name = "google-auth" }, + { name = "google-cloud-core" }, + { name = "google-crc32c" }, + { name = "google-resumable-media" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/08/52143124415a889bbab60a8ecede1e31ea0e8d992ca078317886f26dc3be/google_cloud_storage-3.1.0.tar.gz", hash = "sha256:944273179897c7c8a07ee15f2e6466a02da0c7c4b9ecceac2a26017cb2972049", size = 7666527 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/13/b8/c99c965659f45efa73080477c49ffddf7b9aecb00806be8422560bb5b824/google_cloud_storage-3.1.0-py2.py3-none-any.whl", hash = "sha256:eaf36966b68660a9633f03b067e4a10ce09f1377cae3ff9f2c699f69a81c66c6", size = 174861 }, +] + +[[package]] +name = "google-crc32c" +version = "1.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/19/ae/87802e6d9f9d69adfaedfcfd599266bf386a54d0be058b532d04c794f76d/google_crc32c-1.7.1.tar.gz", hash = "sha256:2bff2305f98846f3e825dbeec9ee406f89da7962accdb29356e4eadc251bd472", size = 14495 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/94/220139ea87822b6fdfdab4fb9ba81b3fff7ea2c82e2af34adc726085bffc/google_crc32c-1.7.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:6fbab4b935989e2c3610371963ba1b86afb09537fd0c633049be82afe153ac06", size = 30468 }, + { url = "https://files.pythonhosted.org/packages/94/97/789b23bdeeb9d15dc2904660463ad539d0318286d7633fe2760c10ed0c1c/google_crc32c-1.7.1-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:ed66cbe1ed9cbaaad9392b5259b3eba4a9e565420d734e6238813c428c3336c9", size = 30313 }, + { url = "https://files.pythonhosted.org/packages/81/b8/976a2b843610c211e7ccb3e248996a61e87dbb2c09b1499847e295080aec/google_crc32c-1.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee6547b657621b6cbed3562ea7826c3e11cab01cd33b74e1f677690652883e77", size = 33048 }, + { url = "https://files.pythonhosted.org/packages/c9/16/a3842c2cf591093b111d4a5e2bfb478ac6692d02f1b386d2a33283a19dc9/google_crc32c-1.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d68e17bad8f7dd9a49181a1f5a8f4b251c6dbc8cc96fb79f1d321dfd57d66f53", size = 32669 }, + { url = "https://files.pythonhosted.org/packages/04/17/ed9aba495916fcf5fe4ecb2267ceb851fc5f273c4e4625ae453350cfd564/google_crc32c-1.7.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:6335de12921f06e1f774d0dd1fbea6bf610abe0887a1638f64d694013138be5d", size = 33476 }, + { url = "https://files.pythonhosted.org/packages/dd/b7/787e2453cf8639c94b3d06c9d61f512234a82e1d12d13d18584bd3049904/google_crc32c-1.7.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2d73a68a653c57281401871dd4aeebbb6af3191dcac751a76ce430df4d403194", size = 30470 }, + { url = "https://files.pythonhosted.org/packages/ed/b4/6042c2b0cbac3ec3a69bb4c49b28d2f517b7a0f4a0232603c42c58e22b44/google_crc32c-1.7.1-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:22beacf83baaf59f9d3ab2bbb4db0fb018da8e5aebdce07ef9f09fce8220285e", size = 30315 }, + { url = "https://files.pythonhosted.org/packages/29/ad/01e7a61a5d059bc57b702d9ff6a18b2585ad97f720bd0a0dbe215df1ab0e/google_crc32c-1.7.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19eafa0e4af11b0a4eb3974483d55d2d77ad1911e6cf6f832e1574f6781fd337", size = 33180 }, + { url = "https://files.pythonhosted.org/packages/3b/a5/7279055cf004561894ed3a7bfdf5bf90a53f28fadd01af7cd166e88ddf16/google_crc32c-1.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6d86616faaea68101195c6bdc40c494e4d76f41e07a37ffdef270879c15fb65", size = 32794 }, + { url = "https://files.pythonhosted.org/packages/0f/d6/77060dbd140c624e42ae3ece3df53b9d811000729a5c821b9fd671ceaac6/google_crc32c-1.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:b7491bdc0c7564fcf48c0179d2048ab2f7c7ba36b84ccd3a3e1c3f7a72d3bba6", size = 33477 }, + { url = "https://files.pythonhosted.org/packages/8b/72/b8d785e9184ba6297a8620c8a37cf6e39b81a8ca01bb0796d7cbb28b3386/google_crc32c-1.7.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:df8b38bdaf1629d62d51be8bdd04888f37c451564c2042d36e5812da9eff3c35", size = 30467 }, + { url = "https://files.pythonhosted.org/packages/34/25/5f18076968212067c4e8ea95bf3b69669f9fc698476e5f5eb97d5b37999f/google_crc32c-1.7.1-cp313-cp313-macosx_12_0_x86_64.whl", hash = 
"sha256:e42e20a83a29aa2709a0cf271c7f8aefaa23b7ab52e53b322585297bb94d4638", size = 30309 }, + { url = "https://files.pythonhosted.org/packages/92/83/9228fe65bf70e93e419f38bdf6c5ca5083fc6d32886ee79b450ceefd1dbd/google_crc32c-1.7.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:905a385140bf492ac300026717af339790921f411c0dfd9aa5a9e69a08ed32eb", size = 33133 }, + { url = "https://files.pythonhosted.org/packages/c3/ca/1ea2fd13ff9f8955b85e7956872fdb7050c4ace8a2306a6d177edb9cf7fe/google_crc32c-1.7.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b211ddaf20f7ebeec5c333448582c224a7c90a9d98826fbab82c0ddc11348e6", size = 32773 }, + { url = "https://files.pythonhosted.org/packages/89/32/a22a281806e3ef21b72db16f948cad22ec68e4bdd384139291e00ff82fe2/google_crc32c-1.7.1-cp313-cp313-win_amd64.whl", hash = "sha256:0f99eaa09a9a7e642a61e06742856eec8b19fc0037832e03f941fe7cf0c8e4db", size = 33475 }, + { url = "https://files.pythonhosted.org/packages/b8/c5/002975aff514e57fc084ba155697a049b3f9b52225ec3bc0f542871dd524/google_crc32c-1.7.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32d1da0d74ec5634a05f53ef7df18fc646666a25efaaca9fc7dcfd4caf1d98c3", size = 33243 }, + { url = "https://files.pythonhosted.org/packages/61/cb/c585282a03a0cea70fcaa1bf55d5d702d0f2351094d663ec3be1c6c67c52/google_crc32c-1.7.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e10554d4abc5238823112c2ad7e4560f96c7bf3820b202660373d769d9e6e4c9", size = 32870 }, + { url = "https://files.pythonhosted.org/packages/16/1b/1693372bf423ada422f80fd88260dbfd140754adb15cbc4d7e9a68b1cb8e/google_crc32c-1.7.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85fef7fae11494e747c9fd1359a527e5970fc9603c90764843caabd3a16a0a48", size = 28241 }, + { url = 
"https://files.pythonhosted.org/packages/fd/3c/2a19a60a473de48717b4efb19398c3f914795b64a96cf3fbe82588044f78/google_crc32c-1.7.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6efb97eb4369d52593ad6f75e7e10d053cf00c48983f7a973105bc70b0ac4d82", size = 28048 }, +] + +[[package]] +name = "google-pasta" +version = "0.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/35/4a/0bd53b36ff0323d10d5f24ebd67af2de10a1117f5cf4d7add90df92756f1/google-pasta-0.2.0.tar.gz", hash = "sha256:c9f2c8dfc8f96d0d5808299920721be30c9eec37f2389f28904f454565c8a16e", size = 40430 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/de/c648ef6835192e6e2cc03f40b19eeda4382c49b5bafb43d88b931c4c74ac/google_pasta-0.2.0-py3-none-any.whl", hash = "sha256:b32482794a366b5366a32c92a9a9201b107821889935a02b3e51f6b432ea84ed", size = 57471 }, +] + +[[package]] +name = "google-resumable-media" +version = "2.7.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-crc32c" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/5a/0efdc02665dca14e0837b62c8a1a93132c264bd02054a15abb2218afe0ae/google_resumable_media-2.7.2.tar.gz", hash = "sha256:5280aed4629f2b60b847b0d42f9857fd4935c11af266744df33d8074cae92fe0", size = 2163099 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/82/35/b8d3baf8c46695858cb9d8835a53baa1eeb9906ddaf2f728a5f5b640fd1e/google_resumable_media-2.7.2-py2.py3-none-any.whl", hash = "sha256:3ce7551e9fe6d99e9a126101d2536612bb73486721951e9562fee0f90c6ababa", size = 81251 }, +] + +[[package]] +name = "googleapis-common-protos" +version = "1.70.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/39/24/33db22342cf4a2ea27c9955e6713140fedd51e8b141b5ce5260897020f1a/googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257", size = 145903 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/f1/62a193f0227cf15a920390abe675f386dec35f7ae3ffe6da582d3ade42c7/googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8", size = 294530 }, +] + +[[package]] +name = "grpcio" +version = "1.73.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/7b/ca3f561aeecf0c846d15e1b38921a60dffffd5d4113931198fbf455334ee/grpcio-1.73.0.tar.gz", hash = "sha256:3af4c30918a7f0d39de500d11255f8d9da4f30e94a2033e70fe2a720e184bd8e", size = 12786424 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dd/31/9de81fd12f7b27e6af403531b7249d76f743d58e0654e624b3df26a43ce2/grpcio-1.73.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:51036f641f171eebe5fa7aaca5abbd6150f0c338dab3a58f9111354240fe36ec", size = 5363773 }, + { url = "https://files.pythonhosted.org/packages/32/9e/2cb78be357a7f1fc4942b81468ef3c7e5fd3df3ac010540459c10895a57b/grpcio-1.73.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:d12bbb88381ea00bdd92c55aff3da3391fd85bc902c41275c8447b86f036ce0f", size = 10621912 }, + { url = "https://files.pythonhosted.org/packages/59/2f/b43954811a2e218a2761c0813800773ac0ca187b94fd2b8494e8ef232dc8/grpcio-1.73.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:483c507c2328ed0e01bc1adb13d1eada05cc737ec301d8e5a8f4a90f387f1790", size = 5807985 }, + { url = "https://files.pythonhosted.org/packages/1b/bf/68e9f47e7ee349ffee712dcd907ee66826cf044f0dec7ab517421e56e857/grpcio-1.73.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c201a34aa960c962d0ce23fe5f423f97e9d4b518ad605eae6d0a82171809caaa", size = 6448218 }, + { url 
= "https://files.pythonhosted.org/packages/af/dd/38ae43dd58480d609350cf1411fdac5c2ebb243e2c770f6f7aa3773d5e29/grpcio-1.73.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:859f70c8e435e8e1fa060e04297c6818ffc81ca9ebd4940e180490958229a45a", size = 6044343 }, + { url = "https://files.pythonhosted.org/packages/93/44/b6770b55071adb86481f36dae87d332fcad883b7f560bba9a940394ba018/grpcio-1.73.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e2459a27c6886e7e687e4e407778425f3c6a971fa17a16420227bda39574d64b", size = 6135858 }, + { url = "https://files.pythonhosted.org/packages/d3/9f/63de49fcef436932fcf0ffb978101a95c83c177058dbfb56dbf30ab81659/grpcio-1.73.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e0084d4559ee3dbdcce9395e1bc90fdd0262529b32c417a39ecbc18da8074ac7", size = 6775806 }, + { url = "https://files.pythonhosted.org/packages/4d/67/c11f1953469162e958f09690ec3a9be3fdb29dea7f5661362a664f9d609a/grpcio-1.73.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef5fff73d5f724755693a464d444ee0a448c6cdfd3c1616a9223f736c622617d", size = 6308413 }, + { url = "https://files.pythonhosted.org/packages/ba/6a/9dd04426337db07f28bd51a986b7a038ba56912c81b5bb1083c17dd63404/grpcio-1.73.0-cp311-cp311-win32.whl", hash = "sha256:965a16b71a8eeef91fc4df1dc40dc39c344887249174053814f8a8e18449c4c3", size = 3678972 }, + { url = "https://files.pythonhosted.org/packages/04/8b/8c0a8a4fdc2e7977d325eafc587c9cf468039693ac23ad707153231d3cb2/grpcio-1.73.0-cp311-cp311-win_amd64.whl", hash = "sha256:b71a7b4483d1f753bbc11089ff0f6fa63b49c97a9cc20552cded3fcad466d23b", size = 4342967 }, + { url = "https://files.pythonhosted.org/packages/9d/4d/e938f3a0e51a47f2ce7e55f12f19f316e7074770d56a7c2765e782ec76bc/grpcio-1.73.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:fb9d7c27089d9ba3746f18d2109eb530ef2a37452d2ff50f5a6696cd39167d3b", size = 5334911 }, + { url = 
"https://files.pythonhosted.org/packages/13/56/f09c72c43aa8d6f15a71f2c63ebdfac9cf9314363dea2598dc501d8370db/grpcio-1.73.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:128ba2ebdac41e41554d492b82c34586a90ebd0766f8ebd72160c0e3a57b9155", size = 10601460 }, + { url = "https://files.pythonhosted.org/packages/20/e3/85496edc81e41b3c44ebefffc7bce133bb531120066877df0f910eabfa19/grpcio-1.73.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:068ecc415f79408d57a7f146f54cdf9f0acb4b301a52a9e563973dc981e82f3d", size = 5759191 }, + { url = "https://files.pythonhosted.org/packages/88/cc/fef74270a6d29f35ad744bfd8e6c05183f35074ff34c655a2c80f3b422b2/grpcio-1.73.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ddc1cfb2240f84d35d559ade18f69dcd4257dbaa5ba0de1a565d903aaab2968", size = 6409961 }, + { url = "https://files.pythonhosted.org/packages/b0/e6/13cfea15e3b8f79c4ae7b676cb21fab70978b0fde1e1d28bb0e073291290/grpcio-1.73.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e53007f70d9783f53b41b4cf38ed39a8e348011437e4c287eee7dd1d39d54b2f", size = 6003948 }, + { url = "https://files.pythonhosted.org/packages/c2/ed/b1a36dad4cc0dbf1f83f6d7b58825fefd5cc9ff3a5036e46091335649473/grpcio-1.73.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4dd8d8d092efede7d6f48d695ba2592046acd04ccf421436dd7ed52677a9ad29", size = 6103788 }, + { url = "https://files.pythonhosted.org/packages/e7/c8/d381433d3d46d10f6858126d2d2245ef329e30f3752ce4514c93b95ca6fc/grpcio-1.73.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:70176093d0a95b44d24baa9c034bb67bfe2b6b5f7ebc2836f4093c97010e17fd", size = 6749508 }, + { url = "https://files.pythonhosted.org/packages/87/0a/ff0c31dbd15e63b34320efafac647270aa88c31aa19ff01154a73dc7ce86/grpcio-1.73.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:085ebe876373ca095e24ced95c8f440495ed0b574c491f7f4f714ff794bbcd10", size = 6284342 }, + { url = 
"https://files.pythonhosted.org/packages/fd/73/f762430c0ba867403b9d6e463afe026bf019bd9206eee753785239719273/grpcio-1.73.0-cp312-cp312-win32.whl", hash = "sha256:cfc556c1d6aef02c727ec7d0016827a73bfe67193e47c546f7cadd3ee6bf1a60", size = 3669319 }, + { url = "https://files.pythonhosted.org/packages/10/8b/3411609376b2830449cf416f457ad9d2aacb7f562e1b90fdd8bdedf26d63/grpcio-1.73.0-cp312-cp312-win_amd64.whl", hash = "sha256:bbf45d59d090bf69f1e4e1594832aaf40aa84b31659af3c5e2c3f6a35202791a", size = 4335596 }, + { url = "https://files.pythonhosted.org/packages/60/da/6f3f7a78e5455c4cbe87c85063cc6da05d65d25264f9d4aed800ece46294/grpcio-1.73.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:da1d677018ef423202aca6d73a8d3b2cb245699eb7f50eb5f74cae15a8e1f724", size = 5335867 }, + { url = "https://files.pythonhosted.org/packages/53/14/7d1f2526b98b9658d7be0bb163fd78d681587de6709d8b0c74b4b481b013/grpcio-1.73.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:36bf93f6a657f37c131d9dd2c391b867abf1426a86727c3575393e9e11dadb0d", size = 10595587 }, + { url = "https://files.pythonhosted.org/packages/02/24/a293c398ae44e741da1ed4b29638edbb002258797b07a783f65506165b4c/grpcio-1.73.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:d84000367508ade791d90c2bafbd905574b5ced8056397027a77a215d601ba15", size = 5765793 }, + { url = "https://files.pythonhosted.org/packages/e1/24/d84dbd0b5bf36fb44922798d525a85cefa2ffee7b7110e61406e9750ed15/grpcio-1.73.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c98ba1d928a178ce33f3425ff823318040a2b7ef875d30a0073565e5ceb058d9", size = 6415494 }, + { url = "https://files.pythonhosted.org/packages/5e/85/c80dc65aed8e9dce3d54688864bac45331d9c7600985541f18bd5cb301d4/grpcio-1.73.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a73c72922dfd30b396a5f25bb3a4590195ee45ecde7ee068acb0892d2900cf07", size = 6007279 }, + { url = 
"https://files.pythonhosted.org/packages/37/fc/207c00a4c6fa303d26e2cbd62fbdb0582facdfd08f55500fd83bf6b0f8db/grpcio-1.73.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:10e8edc035724aba0346a432060fd192b42bd03675d083c01553cab071a28da5", size = 6105505 }, + { url = "https://files.pythonhosted.org/packages/72/35/8fe69af820667b87ebfcb24214e42a1d53da53cb39edd6b4f84f6b36da86/grpcio-1.73.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f5cdc332b503c33b1643b12ea933582c7b081957c8bc2ea4cc4bc58054a09288", size = 6753792 }, + { url = "https://files.pythonhosted.org/packages/e2/d8/738c77c1e821e350da4a048849f695ff88a02b291f8c69db23908867aea6/grpcio-1.73.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:07ad7c57233c2109e4ac999cb9c2710c3b8e3f491a73b058b0ce431f31ed8145", size = 6287593 }, + { url = "https://files.pythonhosted.org/packages/09/ec/8498eabc018fa39ae8efe5e47e3f4c1bc9ed6281056713871895dc998807/grpcio-1.73.0-cp313-cp313-win32.whl", hash = "sha256:0eb5df4f41ea10bda99a802b2a292d85be28958ede2a50f2beb8c7fc9a738419", size = 3668637 }, + { url = "https://files.pythonhosted.org/packages/d7/35/347db7d2e7674b621afd21b12022e7f48c7b0861b5577134b4e939536141/grpcio-1.73.0-cp313-cp313-win_amd64.whl", hash = "sha256:38cf518cc54cd0c47c9539cefa8888549fcc067db0b0c66a46535ca8032020c4", size = 4335872 }, +] + +[[package]] +name = "gym-aloha" +version = "0.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dm-control" }, + { name = "gymnasium" }, + { name = "imageio", extra = ["ffmpeg"] }, + { name = "mujoco" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/54/0d386001505d0e64cb52c4ec4f4ac29c2259a6dda7032f2854c8b2bac9c9/gym_aloha-0.1.1.tar.gz", hash = "sha256:614ae1cf116323e7b5ae2f0e9bd282c4f052aee15e839e5587ddce45995359bc", size = 443256 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/ac/8df1fe5462c068132688897a3f3d62fbede48c674026baecb1012c585cfc/gym_aloha-0.1.1-py3-none-any.whl", hash = 
"sha256:2698037246dbb106828f0bc229b61007b0a21d5967c72cc373f7bc1083203584", size = 446935 }, +] + +[[package]] +name = "gymnasium" +version = "0.29.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cloudpickle" }, + { name = "farama-notifications" }, + { name = "numpy" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0d/f8/5699ddb3e1c4f6d97b8930e573074849b921da8374fccd141f0f3a9bd713/gymnasium-0.29.1.tar.gz", hash = "sha256:1a532752efcb7590478b1cc7aa04f608eb7a2fdad5570cd217b66b6a35274bb1", size = 820485 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/4d/3cbfd81ed84db450dbe73a89afcd8bc405273918415649ac6683356afe92/gymnasium-0.29.1-py3-none-any.whl", hash = "sha256:61c3384b5575985bb7f85e43213bcb40f36fcdff388cae6bc229304c71f2843e", size = 953939 }, +] + +[[package]] +name = "h5py" +version = "3.13.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/03/2e/a22d6a8bfa6f8be33e7febd985680fba531562795f0a9077ed1eb047bfb0/h5py-3.13.0.tar.gz", hash = "sha256:1870e46518720023da85d0895a1960ff2ce398c5671eac3b1a41ec696b7105c3", size = 414876 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/2b/50b15fdefb577d073b49699e6ea6a0a77a3a1016c2b67e2149fc50124a10/h5py-3.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8a8e38ef4ceb969f832cc230c0cf808c613cc47e31e768fd7b1106c55afa1cb8", size = 3422922 }, + { url = "https://files.pythonhosted.org/packages/94/59/36d87a559cab9c59b59088d52e86008d27a9602ce3afc9d3b51823014bf3/h5py-3.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f35640e81b03c02a88b8bf99fb6a9d3023cc52f7c627694db2f379e0028f2868", size = 2921619 }, + { url = "https://files.pythonhosted.org/packages/37/ef/6f80b19682c0b0835bbee7b253bec9c16af9004f2fd6427b1dd858100273/h5py-3.13.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:337af114616f3656da0c83b68fcf53ecd9ce9989a700b0883a6e7c483c3235d4", size = 4259366 }, + { url = "https://files.pythonhosted.org/packages/03/71/c99f662d4832c8835453cf3476f95daa28372023bda4aa1fca9e97c24f09/h5py-3.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:782ff0ac39f455f21fd1c8ebc007328f65f43d56718a89327eec76677ebf238a", size = 4509058 }, + { url = "https://files.pythonhosted.org/packages/56/89/e3ff23e07131ff73a72a349be9639e4de84e163af89c1c218b939459a98a/h5py-3.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:22ffe2a25770a2d67213a1b94f58006c14dce06933a42d2aaa0318c5868d1508", size = 2966428 }, + { url = "https://files.pythonhosted.org/packages/d8/20/438f6366ba4ded80eadb38f8927f5e2cd6d2e087179552f20ae3dbcd5d5b/h5py-3.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:477c58307b6b9a2509c59c57811afb9f598aedede24a67da808262dfa0ee37b4", size = 3384442 }, + { url = "https://files.pythonhosted.org/packages/10/13/cc1cb7231399617d9951233eb12fddd396ff5d4f7f057ee5d2b1ca0ee7e7/h5py-3.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:57c4c74f627c616f02b7aec608a8c706fe08cb5b0ba7c08555a4eb1dde20805a", size = 2917567 }, + { url = "https://files.pythonhosted.org/packages/9e/d9/aed99e1c858dc698489f916eeb7c07513bc864885d28ab3689d572ba0ea0/h5py-3.13.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:357e6dc20b101a805ccfd0024731fbaf6e8718c18c09baf3b5e4e9d198d13fca", size = 4669544 }, + { url = "https://files.pythonhosted.org/packages/a7/da/3c137006ff5f0433f0fb076b1ebe4a7bf7b5ee1e8811b5486af98b500dd5/h5py-3.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6f13f9b5ce549448c01e4dfe08ea8d1772e6078799af2c1c8d09e941230a90d", size = 4932139 }, + { url = "https://files.pythonhosted.org/packages/25/61/d897952629cae131c19d4c41b2521e7dd6382f2d7177c87615c2e6dced1a/h5py-3.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:21daf38171753899b5905f3d82c99b0b1ec2cbbe282a037cad431feb620e62ec", 
size = 2954179 }, + { url = "https://files.pythonhosted.org/packages/60/43/f276f27921919a9144074320ce4ca40882fc67b3cfee81c3f5c7df083e97/h5py-3.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e520ec76de00943dd017c8ea3f354fa1d2f542eac994811943a8faedf2a7d5cb", size = 3358040 }, + { url = "https://files.pythonhosted.org/packages/1b/86/ad4a4cf781b08d4572be8bbdd8f108bb97b266a14835c640dc43dafc0729/h5py-3.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e79d8368cd9295045956bfb436656bea3f915beaa11d342e9f79f129f5178763", size = 2892766 }, + { url = "https://files.pythonhosted.org/packages/69/84/4c6367d6b58deaf0fa84999ec819e7578eee96cea6cbd613640d0625ed5e/h5py-3.13.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56dd172d862e850823c4af02dc4ddbc308f042b85472ffdaca67f1598dff4a57", size = 4664255 }, + { url = "https://files.pythonhosted.org/packages/fd/41/bc2df86b72965775f6d621e0ee269a5f3ac23e8f870abf519de9c7d93b4d/h5py-3.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be949b46b7388074c5acae017fbbe3e5ba303fd9daaa52157fdfef30bbdacadd", size = 4927580 }, + { url = "https://files.pythonhosted.org/packages/97/34/165b87ea55184770a0c1fcdb7e017199974ad2e271451fd045cfe35f3add/h5py-3.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:4f97ecde7ac6513b21cd95efdfc38dc6d19f96f6ca6f2a30550e94e551458e0a", size = 2940890 }, +] + +[[package]] +name = "hf-transfer" +version = "0.1.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1a/eb/8fc64f40388c29ce8ce3b2b180a089d4d6b25b1d0d232d016704cb852104/hf_transfer-0.1.9.tar.gz", hash = "sha256:035572865dab29d17e783fbf1e84cf1cb24f3fcf8f1b17db1cfc7fdf139f02bf", size = 25201 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/78/0dce00208f585fae675f40033ef9a30dedfa83665d5ac79f16beb4a0a6c2/hf_transfer-0.1.9-cp313-cp313t-macosx_10_12_x86_64.whl", hash = 
"sha256:6e94e8822da79573c9b6ae4d6b2f847c59a7a06c5327d7db20751b68538dc4f6", size = 1386084 }, + { url = "https://files.pythonhosted.org/packages/ea/2e/3d60b1a9e9f29a2152aa66c823bf5e399ae7be3fef310ff0de86779c5d2d/hf_transfer-0.1.9-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3ebc4ab9023414880c8b1d3c38174d1c9989eb5022d37e814fa91a3060123eb0", size = 1343558 }, + { url = "https://files.pythonhosted.org/packages/fb/38/130a5ac3747f104033591bcac1c961cb1faadfdc91704f59b09c0b465ff2/hf_transfer-0.1.9-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8674026f21ed369aa2a0a4b46000aca850fc44cd2b54af33a172ce5325b4fc82", size = 3726676 }, + { url = "https://files.pythonhosted.org/packages/15/a1/f4e27c5ad17aac616ae0849e2aede5aae31db8267a948c6b3eeb9fd96446/hf_transfer-0.1.9-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3a736dfbb2c84f5a2c975478ad200c0c8bfcb58a25a35db402678fb87ce17fa4", size = 3062920 }, + { url = "https://files.pythonhosted.org/packages/8d/0d/727abdfba39bc3f1132cfa4c970588c2c0bb0d82fe2d645cc10f4e2f8e0b/hf_transfer-0.1.9-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:504b8427fd785dd8546d53b9fafe6e436bd7a3adf76b9dce556507650a7b4567", size = 3578681 }, + { url = "https://files.pythonhosted.org/packages/50/d0/2b213eb1ea8b1252ccaf1a6c804d0aba03fea38aae4124df6a3acb70511a/hf_transfer-0.1.9-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c7fc1b85f4d0f76e452765d7648c9f4bfd0aedb9ced2ae1ebfece2d8cfaf8e2", size = 3398837 }, + { url = "https://files.pythonhosted.org/packages/8c/8a/79dbce9006e0bd6b74516f97451a7b7c64dbbb426df15d901dd438cfeee3/hf_transfer-0.1.9-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d991376f0eac70a60f0cbc95602aa708a6f7c8617f28b4945c1431d67b8e3c8", size = 3546986 }, + { url = 
"https://files.pythonhosted.org/packages/a9/f7/9ac239b6ee6fe0bad130325d987a93ea58c4118e50479f0786f1733b37e8/hf_transfer-0.1.9-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e6ac4eddcd99575ed3735ed911ddf9d1697e2bd13aa3f0ad7e3904dd4863842e", size = 4071715 }, + { url = "https://files.pythonhosted.org/packages/d8/a3/0ed697279f5eeb7a40f279bd783cf50e6d0b91f24120dcf66ef2cf8822b4/hf_transfer-0.1.9-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:57fd9880da1ee0f47250f735f791fab788f0aa1ee36afc49f761349869c8b4d9", size = 3388081 }, + { url = "https://files.pythonhosted.org/packages/dc/eb/47e477bdf1d784f31c7540db6cc8c354b777e51a186897a7abda34517f36/hf_transfer-0.1.9-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:5d561f0520f493c66b016d99ceabe69c23289aa90be38dd802d2aef279f15751", size = 3658654 }, + { url = "https://files.pythonhosted.org/packages/45/07/6661e43fbee09594a8a5e9bb778107d95fe38dac4c653982afe03d32bd4d/hf_transfer-0.1.9-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a5b366d34cd449fe9b20ef25941e6eef0460a2f74e7389f02e673e1f88ebd538", size = 3690551 }, + { url = "https://files.pythonhosted.org/packages/81/f5/461d2e5f307e5048289b1168d5c642ae3bb2504e88dff1a38b92ed990a21/hf_transfer-0.1.9-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:e66acf91df4a8b72f60223059df3003062a5ae111757187ed1a06750a30e911b", size = 1393046 }, + { url = "https://files.pythonhosted.org/packages/41/ba/8d9fd9f1083525edfcb389c93738c802f3559cb749324090d7109c8bf4c2/hf_transfer-0.1.9-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:8669dbcc7a3e2e8d61d42cd24da9c50d57770bd74b445c65123291ca842a7e7a", size = 1348126 }, + { url = "https://files.pythonhosted.org/packages/8e/a2/cd7885bc9959421065a6fae0fe67b6c55becdeda4e69b873e52976f9a9f0/hf_transfer-0.1.9-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8fd0167c4407a3bc4cdd0307e65ada2294ec04f1813d8a69a5243e379b22e9d8", size = 3728604 }, + { url = 
"https://files.pythonhosted.org/packages/f6/2e/a072cf196edfeda3310c9a5ade0a0fdd785e6154b3ce24fc738c818da2a7/hf_transfer-0.1.9-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ee8b10afedcb75f71091bcc197c526a6ebf5c58bbbadb34fdeee6160f55f619f", size = 3064995 }, + { url = "https://files.pythonhosted.org/packages/c2/84/aec9ef4c0fab93c1ea2b1badff38c78b4b2f86f0555b26d2051dbc920cde/hf_transfer-0.1.9-cp38-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5828057e313de59300dd1abb489444bc452efe3f479d3c55b31a8f680936ba42", size = 3580908 }, + { url = "https://files.pythonhosted.org/packages/29/63/b560d39651a56603d64f1a0212d0472a44cbd965db2fa62b99d99cb981bf/hf_transfer-0.1.9-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc6bd19e1cc177c66bdef15ef8636ad3bde79d5a4f608c158021153b4573509d", size = 3400839 }, + { url = "https://files.pythonhosted.org/packages/d6/d8/f87ea6f42456254b48915970ed98e993110521e9263472840174d32c880d/hf_transfer-0.1.9-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdca9bfb89e6f8f281890cc61a8aff2d3cecaff7e1a4d275574d96ca70098557", size = 3552664 }, + { url = "https://files.pythonhosted.org/packages/d6/56/1267c39b65fc8f4e2113b36297320f102718bf5799b544a6cbe22013aa1d/hf_transfer-0.1.9-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:89a23f58b7b7effbc047b8ca286f131b17728c99a9f972723323003ffd1bb916", size = 4073732 }, + { url = "https://files.pythonhosted.org/packages/82/1a/9c748befbe3decf7cb415e34f8a0c3789a0a9c55910dea73d581e48c0ce5/hf_transfer-0.1.9-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:dc7fff1345980d6c0ebb92c811d24afa4b98b3e07ed070c8e38cc91fd80478c5", size = 3390096 }, + { url = "https://files.pythonhosted.org/packages/72/85/4c03da147b6b4b7cb12e074d3d44eee28604a387ed0eaf7eaaead5069c57/hf_transfer-0.1.9-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:1a6bd16c667ebe89a069ca163060127a794fa3a3525292c900b8c8cc47985b0d", size = 3664743 }, + { url = 
"https://files.pythonhosted.org/packages/e7/6e/e597b04f753f1b09e6893075d53a82a30c13855cbaa791402695b01e369f/hf_transfer-0.1.9-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d2fde99d502093ade3ab1b53f80da18480e9902aa960dab7f74fb1b9e5bc5746", size = 3695243 }, + { url = "https://files.pythonhosted.org/packages/09/89/d4e234727a26b2546c8fb70a276cd924260d60135f2165bf8b9ed67bb9a4/hf_transfer-0.1.9-cp38-abi3-win32.whl", hash = "sha256:435cc3cdc8524ce57b074032b8fd76eed70a4224d2091232fa6a8cef8fd6803e", size = 1086605 }, + { url = "https://files.pythonhosted.org/packages/a1/14/f1e15b851d1c2af5b0b1a82bf8eb10bda2da62d98180220ba6fd8879bb5b/hf_transfer-0.1.9-cp38-abi3-win_amd64.whl", hash = "sha256:16f208fc678911c37e11aa7b586bc66a37d02e636208f18b6bc53d29b5df40ad", size = 1160240 }, +] + +[[package]] +name = "hf-xet" +version = "1.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/be/58f20728a5b445f8b064e74f0618897b3439f5ef90934da1916b9dfac76f/hf_xet-1.1.2.tar.gz", hash = "sha256:3712d6d4819d3976a1c18e36db9f503e296283f9363af818f50703506ed63da3", size = 467009 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/ae/f1a63f75d9886f18a80220ba31a1c7b9c4752f03aae452f358f538c6a991/hf_xet-1.1.2-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:dfd1873fd648488c70735cb60f7728512bca0e459e61fcd107069143cd798469", size = 2642559 }, + { url = "https://files.pythonhosted.org/packages/50/ab/d2c83ae18f1015d926defd5bfbe94c62d15e93f900e6a192e318ee947105/hf_xet-1.1.2-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:29b584983b2d977c44157d9241dcf0fd50acde0b7bff8897fe4386912330090d", size = 2541360 }, + { url = "https://files.pythonhosted.org/packages/9f/a7/693dc9f34f979e30a378125e2150a0b2d8d166e6d83ce3950eeb81e560aa/hf_xet-1.1.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b29ac84298147fe9164cc55ad994ba47399f90b5d045b0b803b99cf5f06d8ec", size = 5183081 }, + { url = 
"https://files.pythonhosted.org/packages/3d/23/c48607883f692a36c0a7735f47f98bad32dbe459a32d1568c0f21576985d/hf_xet-1.1.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d921ba32615676e436a0d15e162331abc9ed43d440916b1d836dc27ce1546173", size = 5356100 }, + { url = "https://files.pythonhosted.org/packages/eb/5b/b2316c7f1076da0582b52ea228f68bea95e243c388440d1dc80297c9d813/hf_xet-1.1.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d9b03c34e13c44893ab6e8fea18ee8d2a6878c15328dd3aabedbdd83ee9f2ed3", size = 5647688 }, + { url = "https://files.pythonhosted.org/packages/2c/98/e6995f0fa579929da7795c961f403f4ee84af36c625963f52741d56f242c/hf_xet-1.1.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:01b18608955b3d826307d37da8bd38b28a46cd2d9908b3a3655d1363274f941a", size = 5322627 }, + { url = "https://files.pythonhosted.org/packages/59/40/8f1d5a44a64d8bf9e3c19576e789f716af54875b46daae65426714e75db1/hf_xet-1.1.2-cp37-abi3-win_amd64.whl", hash = "sha256:3562902c81299b09f3582ddfb324400c6a901a2f3bc854f83556495755f4954c", size = 2739542 }, +] + +[[package]] +name = "huggingface-hub" +version = "0.32.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "filelock" }, + { name = "fsspec" }, + { name = "hf-xet", marker = "platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" }, + { name = "packaging" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/59/74/c4961b31e0f142a032ea24f477c3a7524dfabfd8126398a968b3cc6bf804/huggingface_hub-0.32.3.tar.gz", hash = "sha256:752c889ebf3a63cbd39803f6d87ccc135a463bbcb36abfa2faff0ccbf1cec087", size = 424525 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/dc/4f4d8080cbce7a38c1d0f1ba4932f9134480b9761af8ef4c65d49254b2bd/huggingface_hub-0.32.3-py3-none-any.whl", hash = 
"sha256:e46f7ea7fe2b5e5f67cc4e37eb201140091946a314d7c2b134a9673dadd80b6a", size = 512094 }, +] + +[package.optional-dependencies] +cli = [ + { name = "inquirerpy" }, +] +hf-transfer = [ + { name = "hf-transfer" }, +] + +[[package]] +name = "humanize" +version = "4.12.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/22/d1/bbc4d251187a43f69844f7fd8941426549bbe4723e8ff0a7441796b0789f/humanize-4.12.3.tar.gz", hash = "sha256:8430be3a615106fdfceb0b2c1b41c4c98c6b0fc5cc59663a5539b111dd325fb0", size = 80514 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/1e/62a2ec3104394a2975a2629eec89276ede9dbe717092f6966fcf963e1bf0/humanize-4.12.3-py3-none-any.whl", hash = "sha256:2cbf6370af06568fa6d2da77c86edb7886f3160ecd19ee1ffef07979efc597f6", size = 128487 }, +] + +[[package]] +name = "identify" +version = "2.6.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/88/d193a27416618628a5eea64e3223acd800b40749a96ffb322a9b55a49ed1/identify-2.6.12.tar.gz", hash = "sha256:d8de45749f1efb108badef65ee8386f0f7bb19a7f26185f74de6367bffbaf0e6", size = 99254 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/cd/18f8da995b658420625f7ef13f037be53ae04ec5ad33f9b718240dcfd48c/identify-2.6.12-py2.py3-none-any.whl", hash = "sha256:ad9672d5a72e0d2ff7c5c8809b62dfa60458626352fb0eb7b55e69bdc45334a2", size = 99145 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = 
"sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "imageio" +version = "2.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "pillow" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0c/47/57e897fb7094afb2d26e8b2e4af9a45c7cf1a405acdeeca001fdf2c98501/imageio-2.37.0.tar.gz", hash = "sha256:71b57b3669666272c818497aebba2b4c5f20d5b37c81720e5e1a56d59c492996", size = 389963 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/bd/b394387b598ed84d8d0fa90611a90bee0adc2021820ad5729f7ced74a8e2/imageio-2.37.0-py3-none-any.whl", hash = "sha256:11efa15b87bc7871b61590326b2d635439acc321cf7f8ce996f812543ce10eed", size = 315796 }, +] + +[package.optional-dependencies] +ffmpeg = [ + { name = "imageio-ffmpeg" }, + { name = "psutil" }, +] + +[[package]] +name = "imageio-ffmpeg" +version = "0.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/44/bd/c3343c721f2a1b0c9fc71c1aebf1966a3b7f08c2eea8ed5437a2865611d6/imageio_ffmpeg-0.6.0.tar.gz", hash = "sha256:e2556bed8e005564a9f925bb7afa4002d82770d6b08825078b7697ab88ba1755", size = 25210 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/58/87ef68ac83f4c7690961bce288fd8e382bc5f1513860fc7f90a9c1c1c6bf/imageio_ffmpeg-0.6.0-py3-none-macosx_10_9_intel.macosx_10_9_x86_64.whl", hash = "sha256:9d2baaf867088508d4a3458e61eeb30e945c4ad8016025545f66c4b5aaef0a61", size = 24932969 }, + { url = "https://files.pythonhosted.org/packages/40/5c/f3d8a657d362cc93b81aab8feda487317da5b5d31c0e1fdfd5e986e55d17/imageio_ffmpeg-0.6.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b1ae3173414b5fc5f538a726c4e48ea97edc0d2cdc11f103afee655c463fa742", size = 21113891 }, + { url = "https://files.pythonhosted.org/packages/33/e7/1925bfbc563c39c1d2e82501d8372734a5c725e53ac3b31b4c2d081e895b/imageio_ffmpeg-0.6.0-py3-none-manylinux2014_aarch64.whl", 
hash = "sha256:1d47bebd83d2c5fc770720d211855f208af8a596c82d17730aa51e815cdee6dc", size = 25632706 }, + { url = "https://files.pythonhosted.org/packages/a0/2d/43c8522a2038e9d0e7dbdf3a61195ecc31ca576fb1527a528c877e87d973/imageio_ffmpeg-0.6.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c7e46fcec401dd990405049d2e2f475e2b397779df2519b544b8aab515195282", size = 29498237 }, + { url = "https://files.pythonhosted.org/packages/a0/13/59da54728351883c3c1d9fca1710ab8eee82c7beba585df8f25ca925f08f/imageio_ffmpeg-0.6.0-py3-none-win32.whl", hash = "sha256:196faa79366b4a82f95c0f4053191d2013f4714a715780f0ad2a68ff37483cc2", size = 19652251 }, + { url = "https://files.pythonhosted.org/packages/2c/c6/fa760e12a2483469e2bf5058c5faff664acf66cadb4df2ad6205b016a73d/imageio_ffmpeg-0.6.0-py3-none-win_amd64.whl", hash = "sha256:02fa47c83703c37df6bfe4896aab339013f62bf02c5ebf2dce6da56af04ffc0a", size = 31246824 }, +] + +[[package]] +name = "immutabledict" +version = "4.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/c5/4240186fbabc58fba41bbe17c5f0cd37ffd4c0b85a5029ab104f946df175/immutabledict-4.2.1.tar.gz", hash = "sha256:d91017248981c72eb66c8ff9834e99c2f53562346f23e7f51e7a5ebcf66a3bcc", size = 6228 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/59/56/25ca7b848164b7d93dbd5fc97dd7751700c93e324fe854afbeb562ee2f98/immutabledict-4.2.1-py3-none-any.whl", hash = "sha256:c56a26ced38c236f79e74af3ccce53772827cef5c3bce7cab33ff2060f756373", size = 4700 }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656 }, +] + +[[package]] +name = "importlib-resources" +version = "6.5.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/8c/f834fbf984f691b4f7ff60f50b514cc3de5cc08abfc3295564dd89c5e2e7/importlib_resources-6.5.2.tar.gz", hash = "sha256:185f87adef5bcc288449d98fb4fba07cea78bc036455dd44c5fc4a2fe78fed2c", size = 44693 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/ed/1f1afb2e9e7f38a545d628f864d562a5ae64fe6f7a10e28ffb9b185b4e89/importlib_resources-6.5.2-py3-none-any.whl", hash = "sha256:789cfdc3ed28c78b67a06acb8126751ced69a3d5f79c095a98298cd8a760ccec", size = 37461 }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050 }, +] + +[[package]] +name = "inquirerpy" +version = "0.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pfzy" }, + { name = "prompt-toolkit" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/64/73/7570847b9da026e07053da3bbe2ac7ea6cde6bb2cbd3c7a5a950fa0ae40b/InquirerPy-0.3.4.tar.gz", hash = "sha256:89d2ada0111f337483cb41ae31073108b2ec1e618a49d7110b0d7ade89fc197e", size = 44431 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/ce/ff/3b59672c47c6284e8005b42e84ceba13864aa0f39f067c973d1af02f5d91/InquirerPy-0.3.4-py3-none-any.whl", hash = "sha256:c65fdfbac1fa00e3ee4fb10679f4d3ed7a012abf4833910e63c295827fe2a7d4", size = 67677 }, +] + +[[package]] +name = "ipykernel" +version = "6.29.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "appnope", marker = "sys_platform == 'darwin'" }, + { name = "comm" }, + { name = "debugpy" }, + { name = "ipython" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "matplotlib-inline" }, + { name = "nest-asyncio" }, + { name = "packaging" }, + { name = "psutil" }, + { name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/5c/67594cb0c7055dc50814b21731c22a601101ea3b1b50a9a1b090e11f5d0f/ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215", size = 163367 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/5c/368ae6c01c7628438358e6d337c19b05425727fbb221d2a3c4303c372f42/ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5", size = 117173 }, +] + +[[package]] +name = "ipython" +version = "9.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "decorator" }, + { name = "ipython-pygments-lexers" }, + { name = "jedi" }, + { name = "matplotlib-inline" }, + { name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" }, + { name = "prompt-toolkit" }, + { name = "pygments" }, + { name = "stack-data" }, + { name = "traitlets" }, + { name = "typing-extensions", marker = "python_full_version < '3.12'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9d/02/63a84444a7409b3c0acd1de9ffe524660e0e5d82ee473e78b45e5bfb64a4/ipython-9.2.0.tar.gz", hash 
= "sha256:62a9373dbc12f28f9feaf4700d052195bf89806279fc8ca11f3f54017d04751b", size = 4424394 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/ce/5e897ee51b7d26ab4e47e5105e7368d40ce6cfae2367acdf3165396d50be/ipython-9.2.0-py3-none-any.whl", hash = "sha256:fef5e33c4a1ae0759e0bba5917c9db4eb8c53fee917b6a526bd973e1ca5159f6", size = 604277 }, +] + +[[package]] +name = "ipython-pygments-lexers" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ef/4c/5dd1d8af08107f88c7f741ead7a40854b8ac24ddf9ae850afbcf698aa552/ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81", size = 8393 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074 }, +] + +[[package]] +name = "ipywidgets" +version = "8.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "comm" }, + { name = "ipython" }, + { name = "jupyterlab-widgets" }, + { name = "traitlets" }, + { name = "widgetsnbextension" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3e/48/d3dbac45c2814cb73812f98dd6b38bbcc957a4e7bb31d6ea9c03bf94ed87/ipywidgets-8.1.7.tar.gz", hash = "sha256:15f1ac050b9ccbefd45dccfbb2ef6bed0029d8278682d569d71b8dd96bee0376", size = 116721 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/58/6a/9166369a2f092bd286d24e6307de555d63616e8ddb373ebad2b5635ca4cd/ipywidgets-8.1.7-py3-none-any.whl", hash = "sha256:764f2602d25471c213919b8a1997df04bef869251db4ca8efba1b76b1bd9f7bb", size = 139806 }, +] + +[[package]] +name = "itsdangerous" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/9c/cb/8ac0172223afbccb63986cc25049b154ecfb5e85932587206f42317be31d/itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173", size = 54410 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/96/92447566d16df59b2a776c0fb82dbc4d9e07cd95062562af01e408583fc4/itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef", size = 16234 }, +] + +[[package]] +name = "jax" +version = "0.5.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jaxlib" }, + { name = "ml-dtypes" }, + { name = "numpy" }, + { name = "opt-einsum" }, + { name = "scipy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/13/e5/dabb73ab10330e9535aba14fc668b04a46fcd8e78f06567c4f4f1adce340/jax-0.5.3.tar.gz", hash = "sha256:f17fcb0fd61dc289394af6ce4de2dada2312f2689bb0d73642c6f026a95fbb2c", size = 2072748 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/bb/fdc6513a9aada13fd21e9860e2adee5f6eea2b4f0a145b219288875acb26/jax-0.5.3-py3-none-any.whl", hash = "sha256:1483dc237b4f47e41755d69429e8c3c138736716147cd43bb2b99b259d4e3c41", size = 2406371 }, +] + +[package.optional-dependencies] +cuda12 = [ + { name = "jax-cuda12-plugin", extra = ["with-cuda"] }, + { name = "jaxlib" }, +] + +[[package]] +name = "jax-cuda12-pjrt" +version = "0.5.3" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/1f/016875cb4dd320fe0801b4a1bf132dd7ff9793d844aea659fe370c93d1b6/jax_cuda12_pjrt-0.5.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:04ee111eaf5fc2692978ad4a5c84d5925e42eb05c1701849ba3a53f6515400cc", size = 90705751 }, + { url = "https://files.pythonhosted.org/packages/58/c4/a603473feae00cd1b20ba3829413da53fd48977af052491ea7dab16fa618/jax_cuda12_pjrt-0.5.3-py3-none-manylinux2014_x86_64.whl", hash = 
"sha256:c5378306568ba0c81b230a779dd3194c9dd10339ab6360ae80928108d37e7f75", size = 104655464 }, +] + +[[package]] +name = "jax-cuda12-plugin" +version = "0.5.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jax-cuda12-pjrt" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/8e/dd1f84222d680d4f50c05823d6dd6812f9550b8fd710d8f287829dcca4ea/jax_cuda12_plugin-0.5.3-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:298d2d768f1029b74a0b1d01270e549349d2c37dc07658796542cda967eb7bd3", size = 16696091 }, + { url = "https://files.pythonhosted.org/packages/bf/15/740d34283f041e1f28452eace1b25afc7cf65117e2011d3208330aa156f1/jax_cuda12_plugin-0.5.3-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:aaa704a5ef547595d022db1c1e4878a0677116412a9360c115d67ff4b64e1596", size = 16699554 }, + { url = "https://files.pythonhosted.org/packages/eb/b3/8e35a75362dbd4ad000ed50fa07ec2dfae512c03be35d33d7eb4e0d84fbc/jax_cuda12_plugin-0.5.3-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:c2517a7c2186f8708894696e26cf96ebd60b7879ceca398b2c46abb28d2c96c8", size = 16691718 }, + { url = "https://files.pythonhosted.org/packages/ee/8b/1b00720b693d29bf41491a099fb81fc9118f73e54696b507428e691bad0e/jax_cuda12_plugin-0.5.3-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:2030cf1208ce4ea70ee56cac61ddd239f9798695fc39bb7739c50a25d6e9da44", size = 16696110 }, + { url = "https://files.pythonhosted.org/packages/fa/38/d5debf1cc41722494d6f595eb42e9a4428d511a01a6d465e5ca6f7a198b7/jax_cuda12_plugin-0.5.3-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:21fec1b56c98783ea0569b747a56751f1f9ff2187b48acc11c700d3bfc5e1a31", size = 16690773 }, + { url = "https://files.pythonhosted.org/packages/34/a2/ffa883b05b8dedf98e513517ab92a79c69ce57233481b6a40c27c2fdcdc9/jax_cuda12_plugin-0.5.3-cp313-cp313-manylinux2014_x86_64.whl", hash = "sha256:1862595b2b6d815679d11e0e889e523185ee54a46d46e022689f70fc4554dd91", size = 16696010 }, + { url = 
"https://files.pythonhosted.org/packages/93/14/13d77e20bb41ce3fac17a0f047954f378ad8f0ef36c1d652a3e804232454/jax_cuda12_plugin-0.5.3-cp313-cp313t-manylinux2014_aarch64.whl", hash = "sha256:6d43677f22f3be9544a205216cd6dac591335b1d9bbbed018cd17dbb1f3f4def", size = 16812249 }, + { url = "https://files.pythonhosted.org/packages/43/7a/6badc42730609cc906a070ff1b39555b58b09ea0240b6115c2ce6fcf4973/jax_cuda12_plugin-0.5.3-cp313-cp313t-manylinux2014_x86_64.whl", hash = "sha256:5bb9ea0e68d72d44e57e4cb6a58a1a729fe3fe32e964f71e398d8a25c2103b19", size = 16902210 }, +] + +[package.optional-dependencies] +with-cuda = [ + { name = "nvidia-cublas-cu12", version = "12.6.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-cublas-cu12", version = "12.9.0.13", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" }, + { name = "nvidia-cuda-cupti-cu12", version = "12.6.80", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-cuda-cupti-cu12", version = "12.9.19", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" }, + { name = "nvidia-cuda-nvcc-cu12" }, + { name = "nvidia-cuda-runtime-cu12", version = "12.6.77", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-cuda-runtime-cu12", version = "12.9.37", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" 
}, + { name = "nvidia-cudnn-cu12", version = "9.5.1.17", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-cudnn-cu12", version = "9.10.1.4", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" }, + { name = "nvidia-cufft-cu12", version = "11.3.0.4", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-cufft-cu12", version = "11.4.0.6", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" }, + { name = "nvidia-cusolver-cu12", version = "11.7.1.2", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-cusolver-cu12", version = "11.7.4.40", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" }, + { name = "nvidia-cusparse-cu12", version = "12.5.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-cusparse-cu12", version = "12.5.9.5", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" }, + { name = "nvidia-nccl-cu12", version = "2.26.2", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and 
sys_platform != 'linux')" }, + { name = "nvidia-nccl-cu12", version = "2.26.5", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" }, + { name = "nvidia-nvjitlink-cu12", version = "12.6.85", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-nvjitlink-cu12", version = "12.9.41", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" }, +] + +[[package]] +name = "jaxlib" +version = "0.5.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ml-dtypes" }, + { name = "numpy" }, + { name = "scipy" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/f2/d9397f264141f2289e229b2faf3b3ddb6397b014a09abe234367814f9697/jaxlib-0.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b62bd8b29e5a4f9bfaa57c8daf6e04820b2c994f448f3dec602d64255545e9f2", size = 63696815 }, + { url = "https://files.pythonhosted.org/packages/e8/91/04bf391a21ccfb299b9952f91d5c082e5f9877221e5d98592875af4a50e4/jaxlib-0.5.3-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:a4666f81d72c060ed3e581ded116a9caa9b0a70a148a54cb12a1d3afca3624b5", size = 95770114 }, + { url = "https://files.pythonhosted.org/packages/67/de/50debb40944baa5ba459604578f8c721be9f38c78ef9e8902895566e6a66/jaxlib-0.5.3-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:29e1530fc81833216f1e28b578d0c59697654f72ee31c7a44ed7753baf5ac466", size = 105119259 }, + { url = "https://files.pythonhosted.org/packages/20/91/d73c842d1e5cc6b914bb521006d668fbfda4c53cd4424ce9c3a097f6c071/jaxlib-0.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:8eb54e38d789557579f900ea3d70f104a440f8555a9681ed45f4a122dcbfd92e", size = 65765739 }, + { url = 
"https://files.pythonhosted.org/packages/d5/a5/646af791ccf75641b4df84fb6cb6e3914b0df87ec5fa5f82397fd5dc30ee/jaxlib-0.5.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d394dbde4a1c6bd67501cfb29d3819a10b900cb534cc0fc603319f7092f24cfa", size = 63711839 }, + { url = "https://files.pythonhosted.org/packages/53/8c/cbd861e40f0efe7923962ade21919fddcea43fae2794634833e800009b14/jaxlib-0.5.3-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:bddf6360377aa1c792e47fd87f307c342e331e5ff3582f940b1bca00f6b4bc73", size = 95764647 }, + { url = "https://files.pythonhosted.org/packages/3e/03/bace4acec295febca9329b3d2dd927b8ac74841e620e0d675f76109b805b/jaxlib-0.5.3-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:5a5e88ab1cd6fdf78d69abe3544e8f09cce200dd339bb85fbe3c2ea67f2a5e68", size = 105132789 }, + { url = "https://files.pythonhosted.org/packages/79/f8/34568ec75f53d55b68649b6e1d6befd976fb9646e607954477264f5379ce/jaxlib-0.5.3-cp312-cp312-win_amd64.whl", hash = "sha256:520665929649f29f7d948d4070dbaf3e032a4c1f7c11f2863eac73320fcee784", size = 65789714 }, + { url = "https://files.pythonhosted.org/packages/b4/d0/ed6007cd17dc0f37f950f89e785092d9f0541f3fa6021d029657955206b5/jaxlib-0.5.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:31321c25282a06a6dfc940507bc14d0a0ac838d8ced6c07aa00a7fae34ce7b3f", size = 63710483 }, + { url = "https://files.pythonhosted.org/packages/36/8f/cafdf24170084de897ffe2a030241c2ba72d12eede85b940a81a94cab156/jaxlib-0.5.3-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:e904b92dedfbc7e545725a8d7676987030ae9c069001d94701bc109c6dab4100", size = 95765995 }, + { url = "https://files.pythonhosted.org/packages/86/c7/fc0755ebd999c7c66ac4203d99f958d5ffc0a34eb270f57932ca0213bb54/jaxlib-0.5.3-cp313-cp313-manylinux2014_x86_64.whl", hash = "sha256:bb7593cb7fffcb13963f22fa5229ed960b8fb4ae5ec3b0820048cbd67f1e8e31", size = 105130796 }, + { url = 
"https://files.pythonhosted.org/packages/83/98/e32da21a490dc408d172ba246d6c47428482fe50d771c3f813e5fc063781/jaxlib-0.5.3-cp313-cp313-win_amd64.whl", hash = "sha256:8019f73a10b1290f988dd3768c684f3a8a147239091c3b790ce7e47e3bbc00bd", size = 65792205 }, + { url = "https://files.pythonhosted.org/packages/88/c6/0d69ed0d408c811959a471563afa99baecacdc56ed1799002e309520b565/jaxlib-0.5.3-cp313-cp313t-manylinux2014_x86_64.whl", hash = "sha256:4c9a9d4cda091a3ef068ace8379fff9e98eea2fc51dbdd7c3386144a1bdf715d", size = 105318736 }, +] + +[[package]] +name = "jaxtyping" +version = "0.2.36" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a6/cc/76e38d7d24e590d1a819c9b203b537e5c6416e1c1aebc8c25f598a00d474/jaxtyping-0.2.36.tar.gz", hash = "sha256:781ac44a3cf8982063d7ee48b5008ccfad7b13793bf878eb3058d5319aa08f0f", size = 45171 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/99/c83c6a97f4382caf1c9bfeeeca935d3eb1f479f711665aeadf4408048107/jaxtyping-0.2.36-py3-none-any.whl", hash = "sha256:b19bcbd4009df8734602203402483a4066ad2eb3382904432e370588e9c9707d", size = 55823 }, +] + +[[package]] +name = "jedi" +version = "0.19.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "parso" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278 }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899 }, +] + +[[package]] +name = "jsonlines" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/35/87/bcda8e46c88d0e34cad2f09ee2d0c7f5957bccdb9791b0b934ec84d84be4/jsonlines-4.0.0.tar.gz", hash = "sha256:0c6d2c09117550c089995247f605ae4cf77dd1533041d366351f6f298822ea74", size = 11359 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f8/62/d9ba6323b9202dd2fe166beab8a86d29465c41a0288cbe229fac60c1ab8d/jsonlines-4.0.0-py3-none-any.whl", hash = "sha256:185b334ff2ca5a91362993f42e83588a360cf95ce4b71a73548502bda52a7c55", size = 8701 }, +] + +[[package]] +name = "jupyter-client" +version = "8.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-core" }, + { name = "python-dateutil" }, + { name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/22/bf9f12fdaeae18019a468b68952a60fe6dbab5d67cd2a103cac7659b41ca/jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419", size = 342019 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/85/b0394e0b6fcccd2c1eeefc230978a6f8cb0c5df1e4cd3e7625735a0d7d1e/jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f", size = 106105 }, +] + +[[package]] +name = "jupyter-core" +version = "5.8.1" 
+source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "platformdirs" }, + { name = "pywin32", marker = "platform_python_implementation != 'PyPy' and sys_platform == 'win32'" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/1b/72906d554acfeb588332eaaa6f61577705e9ec752ddb486f302dafa292d9/jupyter_core-5.8.1.tar.gz", hash = "sha256:0a5f9706f70e64786b75acba995988915ebd4601c8a52e534a40b51c95f59941", size = 88923 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/57/6bffd4b20b88da3800c5d691e0337761576ee688eb01299eae865689d2df/jupyter_core-5.8.1-py3-none-any.whl", hash = "sha256:c28d268fc90fb53f1338ded2eb410704c5449a358406e8a948b75706e24863d0", size = 28880 }, +] + +[[package]] +name = "jupyterlab-widgets" +version = "3.0.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b9/7d/160595ca88ee87ac6ba95d82177d29ec60aaa63821d3077babb22ce031a5/jupyterlab_widgets-3.0.15.tar.gz", hash = "sha256:2920888a0c2922351a9202817957a68c07d99673504d6cd37345299e971bb08b", size = 213149 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/6a/ca128561b22b60bd5a0c4ea26649e68c8556b82bc70a0c396eebc977fe86/jupyterlab_widgets-3.0.15-py3-none-any.whl", hash = "sha256:d59023d7d7ef71400d51e6fee9a88867f6e65e10a4201605d2d7f3e8f012a31c", size = 216571 }, +] + +[[package]] +name = "keras" +version = "2.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/03/80072f4ee46e3c77e95b06d684fadf90a67759e4e9f1d86a563e0965c71a/keras-2.15.0.tar.gz", hash = "sha256:81871d298c064dc4ac6b58440fdae67bfcf47c8d7ad28580fab401834c06a575", size = 1252015 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/a7/0d4490de967a67f68a538cc9cdb259bff971c4b5787f7765dc7c8f118f71/keras-2.15.0-py3-none-any.whl", hash = "sha256:2dcc6d2e30cf9c951064b63c1f4c404b966c59caf09e01f3549138ec8ee0dd1f", size = 
1710438 }, +] + +[[package]] +name = "kiwisolver" +version = "1.4.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/82/59/7c91426a8ac292e1cdd53a63b6d9439abd573c875c3f92c146767dd33faf/kiwisolver-1.4.8.tar.gz", hash = "sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e", size = 97538 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/ed/c913ee28936c371418cb167b128066ffb20bbf37771eecc2c97edf8a6e4c/kiwisolver-1.4.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a4d3601908c560bdf880f07d94f31d734afd1bb71e96585cace0e38ef44c6d84", size = 124635 }, + { url = "https://files.pythonhosted.org/packages/4c/45/4a7f896f7467aaf5f56ef093d1f329346f3b594e77c6a3c327b2d415f521/kiwisolver-1.4.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:856b269c4d28a5c0d5e6c1955ec36ebfd1651ac00e1ce0afa3e28da95293b561", size = 66717 }, + { url = "https://files.pythonhosted.org/packages/5f/b4/c12b3ac0852a3a68f94598d4c8d569f55361beef6159dce4e7b624160da2/kiwisolver-1.4.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c2b9a96e0f326205af81a15718a9073328df1173a2619a68553decb7097fd5d7", size = 65413 }, + { url = "https://files.pythonhosted.org/packages/a9/98/1df4089b1ed23d83d410adfdc5947245c753bddfbe06541c4aae330e9e70/kiwisolver-1.4.8-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5020c83e8553f770cb3b5fc13faac40f17e0b205bd237aebd21d53d733adb03", size = 1343994 }, + { url = "https://files.pythonhosted.org/packages/8d/bf/b4b169b050c8421a7c53ea1ea74e4ef9c335ee9013216c558a047f162d20/kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dace81d28c787956bfbfbbfd72fdcef014f37d9b48830829e488fdb32b49d954", size = 1434804 }, + { url = 
"https://files.pythonhosted.org/packages/66/5a/e13bd341fbcf73325ea60fdc8af752addf75c5079867af2e04cc41f34434/kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11e1022b524bd48ae56c9b4f9296bce77e15a2e42a502cceba602f804b32bb79", size = 1450690 }, + { url = "https://files.pythonhosted.org/packages/9b/4f/5955dcb376ba4a830384cc6fab7d7547bd6759fe75a09564910e9e3bb8ea/kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b9b4d2892fefc886f30301cdd80debd8bb01ecdf165a449eb6e78f79f0fabd6", size = 1376839 }, + { url = "https://files.pythonhosted.org/packages/3a/97/5edbed69a9d0caa2e4aa616ae7df8127e10f6586940aa683a496c2c280b9/kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a96c0e790ee875d65e340ab383700e2b4891677b7fcd30a699146f9384a2bb0", size = 1435109 }, + { url = "https://files.pythonhosted.org/packages/13/fc/e756382cb64e556af6c1809a1bbb22c141bbc2445049f2da06b420fe52bf/kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23454ff084b07ac54ca8be535f4174170c1094a4cff78fbae4f73a4bcc0d4dab", size = 2245269 }, + { url = "https://files.pythonhosted.org/packages/76/15/e59e45829d7f41c776d138245cabae6515cb4eb44b418f6d4109c478b481/kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:87b287251ad6488e95b4f0b4a79a6d04d3ea35fde6340eb38fbd1ca9cd35bbbc", size = 2393468 }, + { url = "https://files.pythonhosted.org/packages/e9/39/483558c2a913ab8384d6e4b66a932406f87c95a6080112433da5ed668559/kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b21dbe165081142b1232a240fc6383fd32cdd877ca6cc89eab93e5f5883e1c25", size = 2355394 }, + { url = "https://files.pythonhosted.org/packages/01/aa/efad1fbca6570a161d29224f14b082960c7e08268a133fe5dc0f6906820e/kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:768cade2c2df13db52475bd28d3a3fac8c9eff04b0e9e2fda0f3760f20b3f7fc", size = 2490901 }, + { url = 
"https://files.pythonhosted.org/packages/c9/4f/15988966ba46bcd5ab9d0c8296914436720dd67fca689ae1a75b4ec1c72f/kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d47cfb2650f0e103d4bf68b0b5804c68da97272c84bb12850d877a95c056bd67", size = 2312306 }, + { url = "https://files.pythonhosted.org/packages/2d/27/bdf1c769c83f74d98cbc34483a972f221440703054894a37d174fba8aa68/kiwisolver-1.4.8-cp311-cp311-win_amd64.whl", hash = "sha256:ed33ca2002a779a2e20eeb06aea7721b6e47f2d4b8a8ece979d8ba9e2a167e34", size = 71966 }, + { url = "https://files.pythonhosted.org/packages/4a/c9/9642ea855604aeb2968a8e145fc662edf61db7632ad2e4fb92424be6b6c0/kiwisolver-1.4.8-cp311-cp311-win_arm64.whl", hash = "sha256:16523b40aab60426ffdebe33ac374457cf62863e330a90a0383639ce14bf44b2", size = 65311 }, + { url = "https://files.pythonhosted.org/packages/fc/aa/cea685c4ab647f349c3bc92d2daf7ae34c8e8cf405a6dcd3a497f58a2ac3/kiwisolver-1.4.8-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d6af5e8815fd02997cb6ad9bbed0ee1e60014438ee1a5c2444c96f87b8843502", size = 124152 }, + { url = "https://files.pythonhosted.org/packages/c5/0b/8db6d2e2452d60d5ebc4ce4b204feeb16176a851fd42462f66ade6808084/kiwisolver-1.4.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bade438f86e21d91e0cf5dd7c0ed00cda0f77c8c1616bd83f9fc157fa6760d31", size = 66555 }, + { url = "https://files.pythonhosted.org/packages/60/26/d6a0db6785dd35d3ba5bf2b2df0aedc5af089962c6eb2cbf67a15b81369e/kiwisolver-1.4.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b83dc6769ddbc57613280118fb4ce3cd08899cc3369f7d0e0fab518a7cf37fdb", size = 65067 }, + { url = "https://files.pythonhosted.org/packages/c9/ed/1d97f7e3561e09757a196231edccc1bcf59d55ddccefa2afc9c615abd8e0/kiwisolver-1.4.8-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:111793b232842991be367ed828076b03d96202c19221b5ebab421ce8bcad016f", size = 1378443 }, + { url = 
"https://files.pythonhosted.org/packages/29/61/39d30b99954e6b46f760e6289c12fede2ab96a254c443639052d1b573fbc/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:257af1622860e51b1a9d0ce387bf5c2c4f36a90594cb9514f55b074bcc787cfc", size = 1472728 }, + { url = "https://files.pythonhosted.org/packages/0c/3e/804163b932f7603ef256e4a715e5843a9600802bb23a68b4e08c8c0ff61d/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69b5637c3f316cab1ec1c9a12b8c5f4750a4c4b71af9157645bf32830e39c03a", size = 1478388 }, + { url = "https://files.pythonhosted.org/packages/8a/9e/60eaa75169a154700be74f875a4d9961b11ba048bef315fbe89cb6999056/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:782bb86f245ec18009890e7cb8d13a5ef54dcf2ebe18ed65f795e635a96a1c6a", size = 1413849 }, + { url = "https://files.pythonhosted.org/packages/bc/b3/9458adb9472e61a998c8c4d95cfdfec91c73c53a375b30b1428310f923e4/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc978a80a0db3a66d25767b03688f1147a69e6237175c0f4ffffaaedf744055a", size = 1475533 }, + { url = "https://files.pythonhosted.org/packages/e4/7a/0a42d9571e35798de80aef4bb43a9b672aa7f8e58643d7bd1950398ffb0a/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:36dbbfd34838500a31f52c9786990d00150860e46cd5041386f217101350f0d3", size = 2268898 }, + { url = "https://files.pythonhosted.org/packages/d9/07/1255dc8d80271400126ed8db35a1795b1a2c098ac3a72645075d06fe5c5d/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:eaa973f1e05131de5ff3569bbba7f5fd07ea0595d3870ed4a526d486fe57fa1b", size = 2425605 }, + { url = "https://files.pythonhosted.org/packages/84/df/5a3b4cf13780ef6f6942df67b138b03b7e79e9f1f08f57c49957d5867f6e/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a66f60f8d0c87ab7f59b6fb80e642ebb29fec354a4dfad687ca4092ae69d04f4", size = 2375801 }, + 
{ url = "https://files.pythonhosted.org/packages/8f/10/2348d068e8b0f635c8c86892788dac7a6b5c0cb12356620ab575775aad89/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858416b7fb777a53f0c59ca08190ce24e9abbd3cffa18886a5781b8e3e26f65d", size = 2520077 }, + { url = "https://files.pythonhosted.org/packages/32/d8/014b89fee5d4dce157d814303b0fce4d31385a2af4c41fed194b173b81ac/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:085940635c62697391baafaaeabdf3dd7a6c3643577dde337f4d66eba021b2b8", size = 2338410 }, + { url = "https://files.pythonhosted.org/packages/bd/72/dfff0cc97f2a0776e1c9eb5bef1ddfd45f46246c6533b0191887a427bca5/kiwisolver-1.4.8-cp312-cp312-win_amd64.whl", hash = "sha256:01c3d31902c7db5fb6182832713d3b4122ad9317c2c5877d0539227d96bb2e50", size = 71853 }, + { url = "https://files.pythonhosted.org/packages/dc/85/220d13d914485c0948a00f0b9eb419efaf6da81b7d72e88ce2391f7aed8d/kiwisolver-1.4.8-cp312-cp312-win_arm64.whl", hash = "sha256:a3c44cb68861de93f0c4a8175fbaa691f0aa22550c331fefef02b618a9dcb476", size = 65424 }, + { url = "https://files.pythonhosted.org/packages/79/b3/e62464a652f4f8cd9006e13d07abad844a47df1e6537f73ddfbf1bc997ec/kiwisolver-1.4.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1c8ceb754339793c24aee1c9fb2485b5b1f5bb1c2c214ff13368431e51fc9a09", size = 124156 }, + { url = "https://files.pythonhosted.org/packages/8d/2d/f13d06998b546a2ad4f48607a146e045bbe48030774de29f90bdc573df15/kiwisolver-1.4.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a62808ac74b5e55a04a408cda6156f986cefbcf0ada13572696b507cc92fa1", size = 66555 }, + { url = "https://files.pythonhosted.org/packages/59/e3/b8bd14b0a54998a9fd1e8da591c60998dc003618cb19a3f94cb233ec1511/kiwisolver-1.4.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68269e60ee4929893aad82666821aaacbd455284124817af45c11e50a4b42e3c", size = 65071 }, + { url = 
"https://files.pythonhosted.org/packages/f0/1c/6c86f6d85ffe4d0ce04228d976f00674f1df5dc893bf2dd4f1928748f187/kiwisolver-1.4.8-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d142fba9c464bc3bbfeff15c96eab0e7310343d6aefb62a79d51421fcc5f1b", size = 1378053 }, + { url = "https://files.pythonhosted.org/packages/4e/b9/1c6e9f6dcb103ac5cf87cb695845f5fa71379021500153566d8a8a9fc291/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc373e0eef45b59197de815b1b28ef89ae3955e7722cc9710fb91cd77b7f47", size = 1472278 }, + { url = "https://files.pythonhosted.org/packages/ee/81/aca1eb176de671f8bda479b11acdc42c132b61a2ac861c883907dde6debb/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77e6f57a20b9bd4e1e2cedda4d0b986ebd0216236f0106e55c28aea3d3d69b16", size = 1478139 }, + { url = "https://files.pythonhosted.org/packages/49/f4/e081522473671c97b2687d380e9e4c26f748a86363ce5af48b4a28e48d06/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08e77738ed7538f036cd1170cbed942ef749137b1311fa2bbe2a7fda2f6bf3cc", size = 1413517 }, + { url = "https://files.pythonhosted.org/packages/8f/e9/6a7d025d8da8c4931522922cd706105aa32b3291d1add8c5427cdcd66e63/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5ce1e481a74b44dd5e92ff03ea0cb371ae7a0268318e202be06c8f04f4f1246", size = 1474952 }, + { url = "https://files.pythonhosted.org/packages/82/13/13fa685ae167bee5d94b415991c4fc7bb0a1b6ebea6e753a87044b209678/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fc2ace710ba7c1dfd1a3b42530b62b9ceed115f19a1656adefce7b1782a37794", size = 2269132 }, + { url = "https://files.pythonhosted.org/packages/ef/92/bb7c9395489b99a6cb41d502d3686bac692586db2045adc19e45ee64ed23/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:3452046c37c7692bd52b0e752b87954ef86ee2224e624ef7ce6cb21e8c41cc1b", size = 2425997 }, + { url = "https://files.pythonhosted.org/packages/ed/12/87f0e9271e2b63d35d0d8524954145837dd1a6c15b62a2d8c1ebe0f182b4/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7e9a60b50fe8b2ec6f448fe8d81b07e40141bfced7f896309df271a0b92f80f3", size = 2376060 }, + { url = "https://files.pythonhosted.org/packages/02/6e/c8af39288edbce8bf0fa35dee427b082758a4b71e9c91ef18fa667782138/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:918139571133f366e8362fa4a297aeba86c7816b7ecf0bc79168080e2bd79957", size = 2520471 }, + { url = "https://files.pythonhosted.org/packages/13/78/df381bc7b26e535c91469f77f16adcd073beb3e2dd25042efd064af82323/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e063ef9f89885a1d68dd8b2e18f5ead48653176d10a0e324e3b0030e3a69adeb", size = 2338793 }, + { url = "https://files.pythonhosted.org/packages/d0/dc/c1abe38c37c071d0fc71c9a474fd0b9ede05d42f5a458d584619cfd2371a/kiwisolver-1.4.8-cp313-cp313-win_amd64.whl", hash = "sha256:a17b7c4f5b2c51bb68ed379defd608a03954a1845dfed7cc0117f1cc8a9b7fd2", size = 71855 }, + { url = "https://files.pythonhosted.org/packages/a0/b6/21529d595b126ac298fdd90b705d87d4c5693de60023e0efcb4f387ed99e/kiwisolver-1.4.8-cp313-cp313-win_arm64.whl", hash = "sha256:3cd3bc628b25f74aedc6d374d5babf0166a92ff1317f46267f12d2ed54bc1d30", size = 65430 }, + { url = "https://files.pythonhosted.org/packages/34/bd/b89380b7298e3af9b39f49334e3e2a4af0e04819789f04b43d560516c0c8/kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:370fd2df41660ed4e26b8c9d6bbcad668fbe2560462cba151a721d49e5b6628c", size = 126294 }, + { url = "https://files.pythonhosted.org/packages/83/41/5857dc72e5e4148eaac5aa76e0703e594e4465f8ab7ec0fc60e3a9bb8fea/kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:84a2f830d42707de1d191b9490ac186bf7997a9495d4e9072210a1296345f7dc", size = 67736 }, + { url = 
"https://files.pythonhosted.org/packages/e1/d1/be059b8db56ac270489fb0b3297fd1e53d195ba76e9bbb30e5401fa6b759/kiwisolver-1.4.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7a3ad337add5148cf51ce0b55642dc551c0b9d6248458a757f98796ca7348712", size = 66194 }, + { url = "https://files.pythonhosted.org/packages/e1/83/4b73975f149819eb7dcf9299ed467eba068ecb16439a98990dcb12e63fdd/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7506488470f41169b86d8c9aeff587293f530a23a23a49d6bc64dab66bedc71e", size = 1465942 }, + { url = "https://files.pythonhosted.org/packages/c7/2c/30a5cdde5102958e602c07466bce058b9d7cb48734aa7a4327261ac8e002/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f0121b07b356a22fb0414cec4666bbe36fd6d0d759db3d37228f496ed67c880", size = 1595341 }, + { url = "https://files.pythonhosted.org/packages/ff/9b/1e71db1c000385aa069704f5990574b8244cce854ecd83119c19e83c9586/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6d6bd87df62c27d4185de7c511c6248040afae67028a8a22012b010bc7ad062", size = 1598455 }, + { url = "https://files.pythonhosted.org/packages/85/92/c8fec52ddf06231b31cbb779af77e99b8253cd96bd135250b9498144c78b/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:291331973c64bb9cce50bbe871fb2e675c4331dab4f31abe89f175ad7679a4d7", size = 1522138 }, + { url = "https://files.pythonhosted.org/packages/0b/51/9eb7e2cd07a15d8bdd976f6190c0164f92ce1904e5c0c79198c4972926b7/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:893f5525bb92d3d735878ec00f781b2de998333659507d29ea4466208df37bed", size = 1582857 }, + { url = "https://files.pythonhosted.org/packages/0f/95/c5a00387a5405e68ba32cc64af65ce881a39b98d73cc394b24143bebc5b8/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:b47a465040146981dc9db8647981b8cb96366fbc8d452b031e4f8fdffec3f26d", size = 2293129 }, + { url = "https://files.pythonhosted.org/packages/44/83/eeb7af7d706b8347548313fa3a3a15931f404533cc54fe01f39e830dd231/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:99cea8b9dd34ff80c521aef46a1dddb0dcc0283cf18bde6d756f1e6f31772165", size = 2421538 }, + { url = "https://files.pythonhosted.org/packages/05/f9/27e94c1b3eb29e6933b6986ffc5fa1177d2cd1f0c8efc5f02c91c9ac61de/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:151dffc4865e5fe6dafce5480fab84f950d14566c480c08a53c663a0020504b6", size = 2390661 }, + { url = "https://files.pythonhosted.org/packages/d9/d4/3c9735faa36ac591a4afcc2980d2691000506050b7a7e80bcfe44048daa7/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:577facaa411c10421314598b50413aa1ebcf5126f704f1e5d72d7e4e9f020d90", size = 2546710 }, + { url = "https://files.pythonhosted.org/packages/4c/fa/be89a49c640930180657482a74970cdcf6f7072c8d2471e1babe17a222dc/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:be4816dc51c8a471749d664161b434912eee82f2ea66bd7628bd14583a833e85", size = 2349213 }, +] + +[[package]] +name = "labmaze" +version = "1.0.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "numpy" }, + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/93/0a/139c4ae896b9413bd4ca69c62b08ee98dcfc78a9cbfdb7cadd0dce2ad31d/labmaze-1.0.6.tar.gz", hash = "sha256:2e8de7094042a77d6972f1965cf5c9e8f971f1b34d225752f343190a825ebe73", size = 4670455 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/3c/cdc95db2aa8cd80c193b7b30b9a9be071897c4f0b558d5fc007b1adf74c3/labmaze-1.0.6-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a0c2cb9dec971814ea9c5d7150af15fa3964482131fa969e0afb94bd224348af", size = 4815406 }, + { url = 
"https://files.pythonhosted.org/packages/75/46/eb96e23ccddd40f403cea3f9f5d15eae7759317a1762b761692541edd6d9/labmaze-1.0.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2c6ba9538d819543f4be448d36b4926a3881e53646a2b331ebb5a1f353047d05", size = 4806777 }, + { url = "https://files.pythonhosted.org/packages/0d/7e/787e0d3c17e29a46484158460e21fcf5cd7a076c81b2ec31807f2753ea43/labmaze-1.0.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70635d1cdb0147a02efb6b3f607a52cdc51723bc3dcc42717a0d4ef55fa0a987", size = 4871563 }, + { url = "https://files.pythonhosted.org/packages/a7/ce/be3952d7036b009f6dd004b6f5dfe97bbff79572ef0cf56a734aaead030f/labmaze-1.0.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff472793238bd9b6dabea8094594d6074ad3c111455de3afcae72f6c40c6817e", size = 4875913 }, + { url = "https://files.pythonhosted.org/packages/50/a5/8c9f9be038401a31f9f87bd44f28c8edff63c0c3f1168ca882e351215761/labmaze-1.0.6-cp311-cp311-win_amd64.whl", hash = "sha256:2317e65e12fa3d1abecda7e0488dab15456cee8a2e717a586bfc8f02a91579e7", size = 4813089 }, + { url = "https://files.pythonhosted.org/packages/cf/12/670a6e6beeeb166aa911fe861c1a16f62a9f3cfc7b54ea4b114cc23d0380/labmaze-1.0.6-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e36b6fadcd78f22057b597c1c77823e806a0987b3bdfbf850e14b6b5b502075e", size = 4814941 }, + { url = "https://files.pythonhosted.org/packages/e5/3a/47a3f83736e0b70f78b22d53e0a3230160a61e8ba6267003f25d2b24b832/labmaze-1.0.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d1a4f8de29c2c3d7f14163759b69cd3f237093b85334c983619c1db5403a223b", size = 4807545 }, + { url = "https://files.pythonhosted.org/packages/ad/95/2ca4dd1efff4456f44baf4c4a980cfea6f6fb8729912a760ec9bf912876b/labmaze-1.0.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a394f8bb857fcaa2884b809d63e750841c2662a106cfe8c045f2112d201ac7d5", size = 4873133 }, + { url = 
"https://files.pythonhosted.org/packages/f9/9c/1c928d0f5a20e4b9544d564e43ecda785f09a29ecbaa37f4e70989d0d4bd/labmaze-1.0.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d17abb69d4dfc56183afb5c317e8b2eaca0587abb3aabd2326efd3143c81f4e", size = 4875122 }, + { url = "https://files.pythonhosted.org/packages/5b/0f/13f0d54305e66c14c90512f3682f713273ec9aa94d107be7947157b37a74/labmaze-1.0.6-cp312-cp312-win_amd64.whl", hash = "sha256:5af997598cc46b1929d1c5a1febc32fd56c75874fe481a2a5982c65cee8450c9", size = 4811813 }, +] + +[[package]] +name = "lerobot" +version = "0.1.0" +source = { git = "https://github.com/huggingface/lerobot?rev=0cf864870cf29f4738d3ade893e6fd13fbd7cdb5#0cf864870cf29f4738d3ade893e6fd13fbd7cdb5" } +dependencies = [ + { name = "av" }, + { name = "cmake" }, + { name = "datasets" }, + { name = "deepdiff" }, + { name = "diffusers" }, + { name = "draccus" }, + { name = "einops" }, + { name = "flask" }, + { name = "gdown" }, + { name = "gymnasium" }, + { name = "h5py" }, + { name = "huggingface-hub", extra = ["cli", "hf-transfer"], marker = "python_full_version < '4.0'" }, + { name = "imageio", extra = ["ffmpeg"] }, + { name = "jsonlines" }, + { name = "numba" }, + { name = "omegaconf" }, + { name = "opencv-python-headless" }, + { name = "packaging" }, + { name = "pymunk" }, + { name = "pynput" }, + { name = "pyzmq" }, + { name = "rerun-sdk" }, + { name = "termcolor" }, + { name = "torch" }, + { name = "torchcodec", marker = "(platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and sys_platform == 'linux') or (platform_machine != 'x86_64' and sys_platform == 'darwin') or (sys_platform != 'darwin' and sys_platform != 'linux' and sys_platform != 'win32')" }, + { name = "torchvision" }, + { name = "wandb" }, + { name = "zarr" }, +] + +[[package]] +name = "libclang" +version = "18.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/6e/5c/ca35e19a4f142adffa27e3d652196b7362fa612243e2b916845d801454fc/libclang-18.1.1.tar.gz", hash = "sha256:a1214966d08d73d971287fc3ead8dfaf82eb07fb197680d8b3859dbbbbf78250", size = 39612 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/49/f5e3e7e1419872b69f6f5e82ba56e33955a74bd537d8a1f5f1eff2f3668a/libclang-18.1.1-1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:0b2e143f0fac830156feb56f9231ff8338c20aecfe72b4ffe96f19e5a1dbb69a", size = 25836045 }, + { url = "https://files.pythonhosted.org/packages/e2/e5/fc61bbded91a8830ccce94c5294ecd6e88e496cc85f6704bf350c0634b70/libclang-18.1.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:6f14c3f194704e5d09769108f03185fce7acaf1d1ae4bbb2f30a72c2400cb7c5", size = 26502641 }, + { url = "https://files.pythonhosted.org/packages/db/ed/1df62b44db2583375f6a8a5e2ca5432bbdc3edb477942b9b7c848c720055/libclang-18.1.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:83ce5045d101b669ac38e6da8e58765f12da2d3aafb3b9b98d88b286a60964d8", size = 26420207 }, + { url = "https://files.pythonhosted.org/packages/1d/fc/716c1e62e512ef1c160e7984a73a5fc7df45166f2ff3f254e71c58076f7c/libclang-18.1.1-py2.py3-none-manylinux2010_x86_64.whl", hash = "sha256:c533091d8a3bbf7460a00cb6c1a71da93bffe148f172c7d03b1c31fbf8aa2a0b", size = 24515943 }, + { url = "https://files.pythonhosted.org/packages/3c/3d/f0ac1150280d8d20d059608cf2d5ff61b7c3b7f7bcf9c0f425ab92df769a/libclang-18.1.1-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:54dda940a4a0491a9d1532bf071ea3ef26e6dbaf03b5000ed94dd7174e8f9592", size = 23784972 }, + { url = "https://files.pythonhosted.org/packages/fe/2f/d920822c2b1ce9326a4c78c0c2b4aa3fde610c7ee9f631b600acb5376c26/libclang-18.1.1-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:cf4a99b05376513717ab5d82a0db832c56ccea4fd61a69dbb7bccf2dfb207dbe", size = 20259606 }, + { url = 
"https://files.pythonhosted.org/packages/2d/c2/de1db8c6d413597076a4259cea409b83459b2db997c003578affdd32bf66/libclang-18.1.1-py2.py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:69f8eb8f65c279e765ffd28aaa7e9e364c776c17618af8bff22a8df58677ff4f", size = 24921494 }, + { url = "https://files.pythonhosted.org/packages/0b/2d/3f480b1e1d31eb3d6de5e3ef641954e5c67430d5ac93b7fa7e07589576c7/libclang-18.1.1-py2.py3-none-win_amd64.whl", hash = "sha256:4dd2d3b82fab35e2bf9ca717d7b63ac990a3519c7e312f19fa8e86dcc712f7fb", size = 26415083 }, + { url = "https://files.pythonhosted.org/packages/71/cf/e01dc4cc79779cd82d77888a88ae2fa424d93b445ad4f6c02bfc18335b70/libclang-18.1.1-py2.py3-none-win_arm64.whl", hash = "sha256:3f0e1f49f04d3cd198985fea0511576b0aee16f9ff0e0f0cad7f9c57ec3c20e8", size = 22361112 }, +] + +[[package]] +name = "llvmlite" +version = "0.44.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/89/6a/95a3d3610d5c75293d5dbbb2a76480d5d4eeba641557b69fe90af6c5b84e/llvmlite-0.44.0.tar.gz", hash = "sha256:07667d66a5d150abed9157ab6c0b9393c9356f229784a4385c02f99e94fc94d4", size = 171880 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/e2/86b245397052386595ad726f9742e5223d7aea999b18c518a50e96c3aca4/llvmlite-0.44.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:eed7d5f29136bda63b6d7804c279e2b72e08c952b7c5df61f45db408e0ee52f3", size = 28132305 }, + { url = "https://files.pythonhosted.org/packages/ff/ec/506902dc6870249fbe2466d9cf66d531265d0f3a1157213c8f986250c033/llvmlite-0.44.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ace564d9fa44bb91eb6e6d8e7754977783c68e90a471ea7ce913bff30bd62427", size = 26201090 }, + { url = "https://files.pythonhosted.org/packages/99/fe/d030f1849ebb1f394bb3f7adad5e729b634fb100515594aca25c354ffc62/llvmlite-0.44.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5d22c3bfc842668168a786af4205ec8e3ad29fb1bc03fd11fd48460d0df64c1", size = 42361858 
}, + { url = "https://files.pythonhosted.org/packages/d7/7a/ce6174664b9077fc673d172e4c888cb0b128e707e306bc33fff8c2035f0d/llvmlite-0.44.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f01a394e9c9b7b1d4e63c327b096d10f6f0ed149ef53d38a09b3749dcf8c9610", size = 41184200 }, + { url = "https://files.pythonhosted.org/packages/5f/c6/258801143975a6d09a373f2641237992496e15567b907a4d401839d671b8/llvmlite-0.44.0-cp311-cp311-win_amd64.whl", hash = "sha256:d8489634d43c20cd0ad71330dde1d5bc7b9966937a263ff1ec1cebb90dc50955", size = 30331193 }, + { url = "https://files.pythonhosted.org/packages/15/86/e3c3195b92e6e492458f16d233e58a1a812aa2bfbef9bdd0fbafcec85c60/llvmlite-0.44.0-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:1d671a56acf725bf1b531d5ef76b86660a5ab8ef19bb6a46064a705c6ca80aad", size = 28132297 }, + { url = "https://files.pythonhosted.org/packages/d6/53/373b6b8be67b9221d12b24125fd0ec56b1078b660eeae266ec388a6ac9a0/llvmlite-0.44.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f79a728e0435493611c9f405168682bb75ffd1fbe6fc360733b850c80a026db", size = 26201105 }, + { url = "https://files.pythonhosted.org/packages/cb/da/8341fd3056419441286c8e26bf436923021005ece0bff5f41906476ae514/llvmlite-0.44.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0143a5ef336da14deaa8ec26c5449ad5b6a2b564df82fcef4be040b9cacfea9", size = 42361901 }, + { url = "https://files.pythonhosted.org/packages/53/ad/d79349dc07b8a395a99153d7ce8b01d6fcdc9f8231355a5df55ded649b61/llvmlite-0.44.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d752f89e31b66db6f8da06df8b39f9b91e78c5feea1bf9e8c1fba1d1c24c065d", size = 41184247 }, + { url = "https://files.pythonhosted.org/packages/e2/3b/a9a17366af80127bd09decbe2a54d8974b6d8b274b39bf47fbaedeec6307/llvmlite-0.44.0-cp312-cp312-win_amd64.whl", hash = "sha256:eae7e2d4ca8f88f89d315b48c6b741dcb925d6a1042da694aa16ab3dd4cbd3a1", size = 30332380 }, + { url = 
"https://files.pythonhosted.org/packages/89/24/4c0ca705a717514c2092b18476e7a12c74d34d875e05e4d742618ebbf449/llvmlite-0.44.0-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:319bddd44e5f71ae2689859b7203080716448a3cd1128fb144fe5c055219d516", size = 28132306 }, + { url = "https://files.pythonhosted.org/packages/01/cf/1dd5a60ba6aee7122ab9243fd614abcf22f36b0437cbbe1ccf1e3391461c/llvmlite-0.44.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c58867118bad04a0bb22a2e0068c693719658105e40009ffe95c7000fcde88e", size = 26201090 }, + { url = "https://files.pythonhosted.org/packages/d2/1b/656f5a357de7135a3777bd735cc7c9b8f23b4d37465505bd0eaf4be9befe/llvmlite-0.44.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46224058b13c96af1365290bdfebe9a6264ae62fb79b2b55693deed11657a8bf", size = 42361904 }, + { url = "https://files.pythonhosted.org/packages/d8/e1/12c5f20cb9168fb3464a34310411d5ad86e4163c8ff2d14a2b57e5cc6bac/llvmlite-0.44.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aa0097052c32bf721a4efc03bd109d335dfa57d9bffb3d4c24cc680711b8b4fc", size = 41184245 }, + { url = "https://files.pythonhosted.org/packages/d0/81/e66fc86539293282fd9cb7c9417438e897f369e79ffb62e1ae5e5154d4dd/llvmlite-0.44.0-cp313-cp313-win_amd64.whl", hash = "sha256:2fb7c4f2fb86cbae6dca3db9ab203eeea0e22d73b99bc2341cdf9de93612e930", size = 30331193 }, +] + +[[package]] +name = "lxml" +version = "5.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/76/3d/14e82fc7c8fb1b7761f7e748fd47e2ec8276d137b6acfe5a4bb73853e08f/lxml-5.4.0.tar.gz", hash = "sha256:d12832e1dbea4be280b22fd0ea7c9b87f0d8fc51ba06e92dc62d52f804f78ebd", size = 3679479 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/2d/67693cc8a605a12e5975380d7ff83020dcc759351b5a066e1cced04f797b/lxml-5.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:98a3912194c079ef37e716ed228ae0dcb960992100461b704aea4e93af6b0bb9", size = 8083240 }, + { url = "https://files.pythonhosted.org/packages/73/53/b5a05ab300a808b72e848efd152fe9c022c0181b0a70b8bca1199f1bed26/lxml-5.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ea0252b51d296a75f6118ed0d8696888e7403408ad42345d7dfd0d1e93309a7", size = 4387685 }, + { url = "https://files.pythonhosted.org/packages/d8/cb/1a3879c5f512bdcd32995c301886fe082b2edd83c87d41b6d42d89b4ea4d/lxml-5.4.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b92b69441d1bd39f4940f9eadfa417a25862242ca2c396b406f9272ef09cdcaa", size = 4991164 }, + { url = "https://files.pythonhosted.org/packages/f9/94/bbc66e42559f9d04857071e3b3d0c9abd88579367fd2588a4042f641f57e/lxml-5.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20e16c08254b9b6466526bc1828d9370ee6c0d60a4b64836bc3ac2917d1e16df", size = 4746206 }, + { url = "https://files.pythonhosted.org/packages/66/95/34b0679bee435da2d7cae895731700e519a8dfcab499c21662ebe671603e/lxml-5.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7605c1c32c3d6e8c990dd28a0970a3cbbf1429d5b92279e37fda05fb0c92190e", size = 5342144 }, + { url = "https://files.pythonhosted.org/packages/e0/5d/abfcc6ab2fa0be72b2ba938abdae1f7cad4c632f8d552683ea295d55adfb/lxml-5.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ecf4c4b83f1ab3d5a7ace10bafcb6f11df6156857a3c418244cef41ca9fa3e44", size = 4825124 }, + { url = "https://files.pythonhosted.org/packages/5a/78/6bd33186c8863b36e084f294fc0a5e5eefe77af95f0663ef33809cc1c8aa/lxml-5.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cef4feae82709eed352cd7e97ae062ef6ae9c7b5dbe3663f104cd2c0e8d94ba", size = 4876520 }, + { url = 
"https://files.pythonhosted.org/packages/3b/74/4d7ad4839bd0fc64e3d12da74fc9a193febb0fae0ba6ebd5149d4c23176a/lxml-5.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:df53330a3bff250f10472ce96a9af28628ff1f4efc51ccba351a8820bca2a8ba", size = 4765016 }, + { url = "https://files.pythonhosted.org/packages/24/0d/0a98ed1f2471911dadfc541003ac6dd6879fc87b15e1143743ca20f3e973/lxml-5.4.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:aefe1a7cb852fa61150fcb21a8c8fcea7b58c4cb11fbe59c97a0a4b31cae3c8c", size = 5362884 }, + { url = "https://files.pythonhosted.org/packages/48/de/d4f7e4c39740a6610f0f6959052b547478107967362e8424e1163ec37ae8/lxml-5.4.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ef5a7178fcc73b7d8c07229e89f8eb45b2908a9238eb90dcfc46571ccf0383b8", size = 4902690 }, + { url = "https://files.pythonhosted.org/packages/07/8c/61763abd242af84f355ca4ef1ee096d3c1b7514819564cce70fd18c22e9a/lxml-5.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d2ed1b3cb9ff1c10e6e8b00941bb2e5bb568b307bfc6b17dffbbe8be5eecba86", size = 4944418 }, + { url = "https://files.pythonhosted.org/packages/f9/c5/6d7e3b63e7e282619193961a570c0a4c8a57fe820f07ca3fe2f6bd86608a/lxml-5.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:72ac9762a9f8ce74c9eed4a4e74306f2f18613a6b71fa065495a67ac227b3056", size = 4827092 }, + { url = "https://files.pythonhosted.org/packages/71/4a/e60a306df54680b103348545706a98a7514a42c8b4fbfdcaa608567bb065/lxml-5.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f5cb182f6396706dc6cc1896dd02b1c889d644c081b0cdec38747573db88a7d7", size = 5418231 }, + { url = "https://files.pythonhosted.org/packages/27/f2/9754aacd6016c930875854f08ac4b192a47fe19565f776a64004aa167521/lxml-5.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:3a3178b4873df8ef9457a4875703488eb1622632a9cee6d76464b60e90adbfcd", size = 5261798 }, + { url = 
"https://files.pythonhosted.org/packages/38/a2/0c49ec6941428b1bd4f280650d7b11a0f91ace9db7de32eb7aa23bcb39ff/lxml-5.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e094ec83694b59d263802ed03a8384594fcce477ce484b0cbcd0008a211ca751", size = 4988195 }, + { url = "https://files.pythonhosted.org/packages/7a/75/87a3963a08eafc46a86c1131c6e28a4de103ba30b5ae903114177352a3d7/lxml-5.4.0-cp311-cp311-win32.whl", hash = "sha256:4329422de653cdb2b72afa39b0aa04252fca9071550044904b2e7036d9d97fe4", size = 3474243 }, + { url = "https://files.pythonhosted.org/packages/fa/f9/1f0964c4f6c2be861c50db380c554fb8befbea98c6404744ce243a3c87ef/lxml-5.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd3be6481ef54b8cfd0e1e953323b7aa9d9789b94842d0e5b142ef4bb7999539", size = 3815197 }, + { url = "https://files.pythonhosted.org/packages/f8/4c/d101ace719ca6a4ec043eb516fcfcb1b396a9fccc4fcd9ef593df34ba0d5/lxml-5.4.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b5aff6f3e818e6bdbbb38e5967520f174b18f539c2b9de867b1e7fde6f8d95a4", size = 8127392 }, + { url = "https://files.pythonhosted.org/packages/11/84/beddae0cec4dd9ddf46abf156f0af451c13019a0fa25d7445b655ba5ccb7/lxml-5.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942a5d73f739ad7c452bf739a62a0f83e2578afd6b8e5406308731f4ce78b16d", size = 4415103 }, + { url = "https://files.pythonhosted.org/packages/d0/25/d0d93a4e763f0462cccd2b8a665bf1e4343dd788c76dcfefa289d46a38a9/lxml-5.4.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:460508a4b07364d6abf53acaa0a90b6d370fafde5693ef37602566613a9b0779", size = 5024224 }, + { url = "https://files.pythonhosted.org/packages/31/ce/1df18fb8f7946e7f3388af378b1f34fcf253b94b9feedb2cec5969da8012/lxml-5.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:529024ab3a505fed78fe3cc5ddc079464e709f6c892733e3f5842007cec8ac6e", size = 4769913 }, + { url = 
"https://files.pythonhosted.org/packages/4e/62/f4a6c60ae7c40d43657f552f3045df05118636be1165b906d3423790447f/lxml-5.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ca56ebc2c474e8f3d5761debfd9283b8b18c76c4fc0967b74aeafba1f5647f9", size = 5290441 }, + { url = "https://files.pythonhosted.org/packages/9e/aa/04f00009e1e3a77838c7fc948f161b5d2d5de1136b2b81c712a263829ea4/lxml-5.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a81e1196f0a5b4167a8dafe3a66aa67c4addac1b22dc47947abd5d5c7a3f24b5", size = 4820165 }, + { url = "https://files.pythonhosted.org/packages/c9/1f/e0b2f61fa2404bf0f1fdf1898377e5bd1b74cc9b2cf2c6ba8509b8f27990/lxml-5.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00b8686694423ddae324cf614e1b9659c2edb754de617703c3d29ff568448df5", size = 4932580 }, + { url = "https://files.pythonhosted.org/packages/24/a2/8263f351b4ffe0ed3e32ea7b7830f845c795349034f912f490180d88a877/lxml-5.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:c5681160758d3f6ac5b4fea370495c48aac0989d6a0f01bb9a72ad8ef5ab75c4", size = 4759493 }, + { url = "https://files.pythonhosted.org/packages/05/00/41db052f279995c0e35c79d0f0fc9f8122d5b5e9630139c592a0b58c71b4/lxml-5.4.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:2dc191e60425ad70e75a68c9fd90ab284df64d9cd410ba8d2b641c0c45bc006e", size = 5324679 }, + { url = "https://files.pythonhosted.org/packages/1d/be/ee99e6314cdef4587617d3b3b745f9356d9b7dd12a9663c5f3b5734b64ba/lxml-5.4.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:67f779374c6b9753ae0a0195a892a1c234ce8416e4448fe1e9f34746482070a7", size = 4890691 }, + { url = "https://files.pythonhosted.org/packages/ad/36/239820114bf1d71f38f12208b9c58dec033cbcf80101cde006b9bde5cffd/lxml-5.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:79d5bfa9c1b455336f52343130b2067164040604e41f6dc4d8313867ed540079", size = 4955075 }, + { url = 
"https://files.pythonhosted.org/packages/d4/e1/1b795cc0b174efc9e13dbd078a9ff79a58728a033142bc6d70a1ee8fc34d/lxml-5.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3d3c30ba1c9b48c68489dc1829a6eede9873f52edca1dda900066542528d6b20", size = 4838680 }, + { url = "https://files.pythonhosted.org/packages/72/48/3c198455ca108cec5ae3662ae8acd7fd99476812fd712bb17f1b39a0b589/lxml-5.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:1af80c6316ae68aded77e91cd9d80648f7dd40406cef73df841aa3c36f6907c8", size = 5391253 }, + { url = "https://files.pythonhosted.org/packages/d6/10/5bf51858971c51ec96cfc13e800a9951f3fd501686f4c18d7d84fe2d6352/lxml-5.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4d885698f5019abe0de3d352caf9466d5de2baded00a06ef3f1216c1a58ae78f", size = 5261651 }, + { url = "https://files.pythonhosted.org/packages/2b/11/06710dd809205377da380546f91d2ac94bad9ff735a72b64ec029f706c85/lxml-5.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea53d51859b6c64e7c51d522c03cc2c48b9b5d6172126854cc7f01aa11f52bc", size = 5024315 }, + { url = "https://files.pythonhosted.org/packages/f5/b0/15b6217834b5e3a59ebf7f53125e08e318030e8cc0d7310355e6edac98ef/lxml-5.4.0-cp312-cp312-win32.whl", hash = "sha256:d90b729fd2732df28130c064aac9bb8aff14ba20baa4aee7bd0795ff1187545f", size = 3486149 }, + { url = "https://files.pythonhosted.org/packages/91/1e/05ddcb57ad2f3069101611bd5f5084157d90861a2ef460bf42f45cced944/lxml-5.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1dc4ca99e89c335a7ed47d38964abcb36c5910790f9bd106f2a8fa2ee0b909d2", size = 3817095 }, + { url = "https://files.pythonhosted.org/packages/87/cb/2ba1e9dd953415f58548506fa5549a7f373ae55e80c61c9041b7fd09a38a/lxml-5.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:773e27b62920199c6197130632c18fb7ead3257fce1ffb7d286912e56ddb79e0", size = 8110086 }, + { url = 
"https://files.pythonhosted.org/packages/b5/3e/6602a4dca3ae344e8609914d6ab22e52ce42e3e1638c10967568c5c1450d/lxml-5.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ce9c671845de9699904b1e9df95acfe8dfc183f2310f163cdaa91a3535af95de", size = 4404613 }, + { url = "https://files.pythonhosted.org/packages/4c/72/bf00988477d3bb452bef9436e45aeea82bb40cdfb4684b83c967c53909c7/lxml-5.4.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9454b8d8200ec99a224df8854786262b1bd6461f4280064c807303c642c05e76", size = 5012008 }, + { url = "https://files.pythonhosted.org/packages/92/1f/93e42d93e9e7a44b2d3354c462cd784dbaaf350f7976b5d7c3f85d68d1b1/lxml-5.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cccd007d5c95279e529c146d095f1d39ac05139de26c098166c4beb9374b0f4d", size = 4760915 }, + { url = "https://files.pythonhosted.org/packages/45/0b/363009390d0b461cf9976a499e83b68f792e4c32ecef092f3f9ef9c4ba54/lxml-5.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0fce1294a0497edb034cb416ad3e77ecc89b313cff7adbee5334e4dc0d11f422", size = 5283890 }, + { url = "https://files.pythonhosted.org/packages/19/dc/6056c332f9378ab476c88e301e6549a0454dbee8f0ae16847414f0eccb74/lxml-5.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:24974f774f3a78ac12b95e3a20ef0931795ff04dbb16db81a90c37f589819551", size = 4812644 }, + { url = "https://files.pythonhosted.org/packages/ee/8a/f8c66bbb23ecb9048a46a5ef9b495fd23f7543df642dabeebcb2eeb66592/lxml-5.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:497cab4d8254c2a90bf988f162ace2ddbfdd806fce3bda3f581b9d24c852e03c", size = 4921817 }, + { url = "https://files.pythonhosted.org/packages/04/57/2e537083c3f381f83d05d9b176f0d838a9e8961f7ed8ddce3f0217179ce3/lxml-5.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:e794f698ae4c5084414efea0f5cc9f4ac562ec02d66e1484ff822ef97c2cadff", 
size = 4753916 }, + { url = "https://files.pythonhosted.org/packages/d8/80/ea8c4072109a350848f1157ce83ccd9439601274035cd045ac31f47f3417/lxml-5.4.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:2c62891b1ea3094bb12097822b3d44b93fc6c325f2043c4d2736a8ff09e65f60", size = 5289274 }, + { url = "https://files.pythonhosted.org/packages/b3/47/c4be287c48cdc304483457878a3f22999098b9a95f455e3c4bda7ec7fc72/lxml-5.4.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:142accb3e4d1edae4b392bd165a9abdee8a3c432a2cca193df995bc3886249c8", size = 4874757 }, + { url = "https://files.pythonhosted.org/packages/2f/04/6ef935dc74e729932e39478e44d8cfe6a83550552eaa072b7c05f6f22488/lxml-5.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1a42b3a19346e5601d1b8296ff6ef3d76038058f311902edd574461e9c036982", size = 4947028 }, + { url = "https://files.pythonhosted.org/packages/cb/f9/c33fc8daa373ef8a7daddb53175289024512b6619bc9de36d77dca3df44b/lxml-5.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4291d3c409a17febf817259cb37bc62cb7eb398bcc95c1356947e2871911ae61", size = 4834487 }, + { url = "https://files.pythonhosted.org/packages/8d/30/fc92bb595bcb878311e01b418b57d13900f84c2b94f6eca9e5073ea756e6/lxml-5.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4f5322cf38fe0e21c2d73901abf68e6329dc02a4994e483adbcf92b568a09a54", size = 5381688 }, + { url = "https://files.pythonhosted.org/packages/43/d1/3ba7bd978ce28bba8e3da2c2e9d5ae3f8f521ad3f0ca6ea4788d086ba00d/lxml-5.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0be91891bdb06ebe65122aa6bf3fc94489960cf7e03033c6f83a90863b23c58b", size = 5242043 }, + { url = "https://files.pythonhosted.org/packages/ee/cd/95fa2201041a610c4d08ddaf31d43b98ecc4b1d74b1e7245b1abdab443cb/lxml-5.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:15a665ad90054a3d4f397bc40f73948d48e36e4c09f9bcffc7d90c87410e478a", size = 5021569 }, + { url = 
"https://files.pythonhosted.org/packages/2d/a6/31da006fead660b9512d08d23d31e93ad3477dd47cc42e3285f143443176/lxml-5.4.0-cp313-cp313-win32.whl", hash = "sha256:d5663bc1b471c79f5c833cffbc9b87d7bf13f87e055a5c86c363ccd2348d7e82", size = 3485270 }, + { url = "https://files.pythonhosted.org/packages/fc/14/c115516c62a7d2499781d2d3d7215218c0731b2c940753bf9f9b7b73924d/lxml-5.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:bcb7a1096b4b6b24ce1ac24d4942ad98f983cd3810f9711bcd0293f43a9d8b9f", size = 3814606 }, +] + +[[package]] +name = "markdown" +version = "3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2f/15/222b423b0b88689c266d9eac4e61396fe2cc53464459d6a37618ac863b24/markdown-3.8.tar.gz", hash = "sha256:7df81e63f0df5c4b24b7d156eb81e4690595239b7d70937d0409f1b0de319c6f", size = 360906 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/3f/afe76f8e2246ffbc867440cbcf90525264df0e658f8a5ca1f872b3f6192a/markdown-3.8-py3-none-any.whl", hash = "sha256:794a929b79c5af141ef5ab0f2f642d0f7b1872981250230e72682346f7cc90dc", size = 106210 }, +] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 }, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353 }, + { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392 }, + { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984 }, + { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120 }, + { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032 }, + { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057 }, + { url = 
"https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359 }, + { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306 }, + { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094 }, + { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521 }, + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274 }, + { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348 }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149 }, + { url = 
"https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118 }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993 }, + { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178 }, + { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319 }, + { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352 }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097 }, + { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601 }, + { url = 
"https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, + { url 
= "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, + { url = 
"https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, + { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, +] + +[[package]] +name = "matplotlib" +version = "3.10.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "contourpy" }, + { name = "cycler" }, + { name = "fonttools" }, + { name = "kiwisolver" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "pyparsing" }, + { name = 
"python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/26/91/d49359a21893183ed2a5b6c76bec40e0b1dcbf8ca148f864d134897cfc75/matplotlib-3.10.3.tar.gz", hash = "sha256:2f82d2c5bb7ae93aaaa4cd42aca65d76ce6376f83304fa3a630b569aca274df0", size = 34799811 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f5/bd/af9f655456f60fe1d575f54fb14704ee299b16e999704817a7645dfce6b0/matplotlib-3.10.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:0ef061f74cd488586f552d0c336b2f078d43bc00dc473d2c3e7bfee2272f3fa8", size = 8178873 }, + { url = "https://files.pythonhosted.org/packages/c2/86/e1c86690610661cd716eda5f9d0b35eaf606ae6c9b6736687cfc8f2d0cd8/matplotlib-3.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d96985d14dc5f4a736bbea4b9de9afaa735f8a0fc2ca75be2fa9e96b2097369d", size = 8052205 }, + { url = "https://files.pythonhosted.org/packages/54/51/a9f8e49af3883dacddb2da1af5fca1f7468677f1188936452dd9aaaeb9ed/matplotlib-3.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c5f0283da91e9522bdba4d6583ed9d5521566f63729ffb68334f86d0bb98049", size = 8465823 }, + { url = "https://files.pythonhosted.org/packages/e7/e3/c82963a3b86d6e6d5874cbeaa390166458a7f1961bab9feb14d3d1a10f02/matplotlib-3.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdfa07c0ec58035242bc8b2c8aae37037c9a886370eef6850703d7583e19964b", size = 8606464 }, + { url = "https://files.pythonhosted.org/packages/0e/34/24da1027e7fcdd9e82da3194c470143c551852757a4b473a09a012f5b945/matplotlib-3.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c0b9849a17bce080a16ebcb80a7b714b5677d0ec32161a2cc0a8e5a6030ae220", size = 9413103 }, + { url = "https://files.pythonhosted.org/packages/a6/da/948a017c3ea13fd4a97afad5fdebe2f5bbc4d28c0654510ce6fd6b06b7bd/matplotlib-3.10.3-cp311-cp311-win_amd64.whl", hash = "sha256:eef6ed6c03717083bc6d69c2d7ee8624205c29a8e6ea5a31cd3492ecdbaee1e1", size = 8065492 }, + { url = 
"https://files.pythonhosted.org/packages/eb/43/6b80eb47d1071f234ef0c96ca370c2ca621f91c12045f1401b5c9b28a639/matplotlib-3.10.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0ab1affc11d1f495ab9e6362b8174a25afc19c081ba5b0775ef00533a4236eea", size = 8179689 }, + { url = "https://files.pythonhosted.org/packages/0f/70/d61a591958325c357204870b5e7b164f93f2a8cca1dc6ce940f563909a13/matplotlib-3.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2a818d8bdcafa7ed2eed74487fdb071c09c1ae24152d403952adad11fa3c65b4", size = 8050466 }, + { url = "https://files.pythonhosted.org/packages/e7/75/70c9d2306203148cc7902a961240c5927dd8728afedf35e6a77e105a2985/matplotlib-3.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748ebc3470c253e770b17d8b0557f0aa85cf8c63fd52f1a61af5b27ec0b7ffee", size = 8456252 }, + { url = "https://files.pythonhosted.org/packages/c4/91/ba0ae1ff4b3f30972ad01cd4a8029e70a0ec3b8ea5be04764b128b66f763/matplotlib-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed70453fd99733293ace1aec568255bc51c6361cb0da94fa5ebf0649fdb2150a", size = 8601321 }, + { url = "https://files.pythonhosted.org/packages/d2/88/d636041eb54a84b889e11872d91f7cbf036b3b0e194a70fa064eb8b04f7a/matplotlib-3.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dbed9917b44070e55640bd13419de83b4c918e52d97561544814ba463811cbc7", size = 9406972 }, + { url = "https://files.pythonhosted.org/packages/b1/79/0d1c165eac44405a86478082e225fce87874f7198300bbebc55faaf6d28d/matplotlib-3.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:cf37d8c6ef1a48829443e8ba5227b44236d7fcaf7647caa3178a4ff9f7a5be05", size = 8067954 }, + { url = "https://files.pythonhosted.org/packages/3b/c1/23cfb566a74c696a3b338d8955c549900d18fe2b898b6e94d682ca21e7c2/matplotlib-3.10.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9f2efccc8dcf2b86fc4ee849eea5dcaecedd0773b30f47980dc0cbeabf26ec84", size = 8180318 }, + { url = 
"https://files.pythonhosted.org/packages/6c/0c/02f1c3b66b30da9ee343c343acbb6251bef5b01d34fad732446eaadcd108/matplotlib-3.10.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3ddbba06a6c126e3301c3d272a99dcbe7f6c24c14024e80307ff03791a5f294e", size = 8051132 }, + { url = "https://files.pythonhosted.org/packages/b4/ab/8db1a5ac9b3a7352fb914133001dae889f9fcecb3146541be46bed41339c/matplotlib-3.10.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748302b33ae9326995b238f606e9ed840bf5886ebafcb233775d946aa8107a15", size = 8457633 }, + { url = "https://files.pythonhosted.org/packages/f5/64/41c4367bcaecbc03ef0d2a3ecee58a7065d0a36ae1aa817fe573a2da66d4/matplotlib-3.10.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a80fcccbef63302c0efd78042ea3c2436104c5b1a4d3ae20f864593696364ac7", size = 8601031 }, + { url = "https://files.pythonhosted.org/packages/12/6f/6cc79e9e5ab89d13ed64da28898e40fe5b105a9ab9c98f83abd24e46d7d7/matplotlib-3.10.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:55e46cbfe1f8586adb34f7587c3e4f7dedc59d5226719faf6cb54fc24f2fd52d", size = 9406988 }, + { url = "https://files.pythonhosted.org/packages/b1/0f/eed564407bd4d935ffabf561ed31099ed609e19287409a27b6d336848653/matplotlib-3.10.3-cp313-cp313-win_amd64.whl", hash = "sha256:151d89cb8d33cb23345cd12490c76fd5d18a56581a16d950b48c6ff19bb2ab93", size = 8068034 }, + { url = "https://files.pythonhosted.org/packages/3e/e5/2f14791ff69b12b09e9975e1d116d9578ac684460860ce542c2588cb7a1c/matplotlib-3.10.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c26dd9834e74d164d06433dc7be5d75a1e9890b926b3e57e74fa446e1a62c3e2", size = 8218223 }, + { url = "https://files.pythonhosted.org/packages/5c/08/30a94afd828b6e02d0a52cae4a29d6e9ccfcf4c8b56cc28b021d3588873e/matplotlib-3.10.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:24853dad5b8c84c8c2390fc31ce4858b6df504156893292ce8092d190ef8151d", size = 8094985 }, + { url = 
"https://files.pythonhosted.org/packages/89/44/f3bc6b53066c889d7a1a3ea8094c13af6a667c5ca6220ec60ecceec2dabe/matplotlib-3.10.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68f7878214d369d7d4215e2a9075fef743be38fa401d32e6020bab2dfabaa566", size = 8483109 }, + { url = "https://files.pythonhosted.org/packages/ba/c7/473bc559beec08ebee9f86ca77a844b65747e1a6c2691e8c92e40b9f42a8/matplotlib-3.10.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6929fc618cb6db9cb75086f73b3219bbb25920cb24cee2ea7a12b04971a4158", size = 8618082 }, + { url = "https://files.pythonhosted.org/packages/d8/e9/6ce8edd264c8819e37bbed8172e0ccdc7107fe86999b76ab5752276357a4/matplotlib-3.10.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6c7818292a5cc372a2dc4c795e5c356942eb8350b98ef913f7fda51fe175ac5d", size = 9413699 }, + { url = "https://files.pythonhosted.org/packages/1b/92/9a45c91089c3cf690b5badd4be81e392ff086ccca8a1d4e3a08463d8a966/matplotlib-3.10.3-cp313-cp313t-win_amd64.whl", hash = "sha256:4f23ffe95c5667ef8a2b56eea9b53db7f43910fa4a2d5472ae0f72b64deab4d5", size = 8139044 }, +] + +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899 }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 }, +] + +[[package]] +name = "mergedeep" +version = "1.3.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3a/41/580bb4006e3ed0361b8151a01d324fb03f420815446c7def45d02f74c270/mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8", size = 4661 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/19/04f9b178c2d8a15b076c8b5140708fa6ffc5601fb6f1e975537072df5b2a/mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307", size = 6354 }, +] + +[[package]] +name = "ml-collections" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "pyyaml" }, + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/31/f9/74689ff3e3ff6e4ec8616887cb00c9c66bca7e6243fd328358ea3665d547/ml_collections-1.0.0.tar.gz", hash = "sha256:00b11a1a339dd6c2d9b7f0daab47ab17e10e29ca1b2a656058605e2b7210897f", size = 61151 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5b/3c/2663b8b41a6f7dae1f1058cc75d9b1d09cf58e6482cb562976d4babe483c/ml_collections-1.0.0-py3-none-any.whl", hash = "sha256:17dbca4d83aba64f56b4b96e59637026d99d9e922569118b8a7f2e0ca6d203a6", size = 76451 }, +] + +[[package]] +name = "ml-dtypes" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/fd/15/76f86faa0902836cc133939732f7611ace68cf54148487a99c539c272dc8/ml_dtypes-0.4.1.tar.gz", hash = "sha256:fad5f2de464fd09127e49b7fd1252b9006fb43d2edc1ff112d390c324af5ca7a", size = 692594 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/76/9835c8609c29f2214359e88f29255fc4aad4ea0f613fb48aa8815ceda1b6/ml_dtypes-0.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2d55b588116a7085d6e074cf0cdb1d6fa3875c059dddc4d2c94a4cc81c23e975", size = 397973 }, + { url = "https://files.pythonhosted.org/packages/7e/99/e68c56fac5de973007a10254b6e17a0362393724f40f66d5e4033f4962c2/ml_dtypes-0.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e138a9b7a48079c900ea969341a5754019a1ad17ae27ee330f7ebf43f23877f9", size = 2185134 }, + { url = "https://files.pythonhosted.org/packages/28/bc/6a2344338ea7b61cd7b46fb24ec459360a5a0903b57c55b156c1e46c644a/ml_dtypes-0.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74c6cfb5cf78535b103fde9ea3ded8e9f16f75bc07789054edc7776abfb3d752", size = 2163661 }, + { url = "https://files.pythonhosted.org/packages/e8/d3/ddfd9878b223b3aa9a930c6100a99afca5cfab7ea703662e00323acb7568/ml_dtypes-0.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:274cc7193dd73b35fb26bef6c5d40ae3eb258359ee71cd82f6e96a8c948bdaa6", size = 126727 }, + { url = "https://files.pythonhosted.org/packages/ba/1a/99e924f12e4b62139fbac87419698c65f956d58de0dbfa7c028fa5b096aa/ml_dtypes-0.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:827d3ca2097085cf0355f8fdf092b888890bb1b1455f52801a2d7756f056f54b", size = 405077 }, + { url = "https://files.pythonhosted.org/packages/8f/8c/7b610bd500617854c8cc6ed7c8cfb9d48d6a5c21a1437a36a4b9bc8a3598/ml_dtypes-0.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:772426b08a6172a891274d581ce58ea2789cc8abc1c002a27223f314aaf894e7", size = 2181554 }, + { url = 
"https://files.pythonhosted.org/packages/c7/c6/f89620cecc0581dc1839e218c4315171312e46c62a62da6ace204bda91c0/ml_dtypes-0.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:126e7d679b8676d1a958f2651949fbfa182832c3cd08020d8facd94e4114f3e9", size = 2160488 }, + { url = "https://files.pythonhosted.org/packages/ae/11/a742d3c31b2cc8557a48efdde53427fd5f9caa2fa3c9c27d826e78a66f51/ml_dtypes-0.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:df0fb650d5c582a9e72bb5bd96cfebb2cdb889d89daff621c8fbc60295eba66c", size = 127462 }, +] + +[[package]] +name = "mpmath" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198 }, +] + +[[package]] +name = "msgpack" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cb/d0/7555686ae7ff5731205df1012ede15dd9d927f6227ea151e901c7406af4f/msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e", size = 167260 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/5e/a4c7154ba65d93be91f2f1e55f90e76c5f91ccadc7efc4341e6f04c8647f/msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7", size = 150803 }, + { url = "https://files.pythonhosted.org/packages/60/c2/687684164698f1d51c41778c838d854965dd284a4b9d3a44beba9265c931/msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa", size = 84343 }, + { url = "https://files.pythonhosted.org/packages/42/ae/d3adea9bb4a1342763556078b5765e666f8fdf242e00f3f6657380920972/msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701", size = 81408 }, + { url = "https://files.pythonhosted.org/packages/dc/17/6313325a6ff40ce9c3207293aee3ba50104aed6c2c1559d20d09e5c1ff54/msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6", size = 396096 }, + { url = "https://files.pythonhosted.org/packages/a8/a1/ad7b84b91ab5a324e707f4c9761633e357820b011a01e34ce658c1dda7cc/msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59", size = 403671 }, + { url = "https://files.pythonhosted.org/packages/bb/0b/fd5b7c0b308bbf1831df0ca04ec76fe2f5bf6319833646b0a4bd5e9dc76d/msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0", size = 387414 }, + { url = "https://files.pythonhosted.org/packages/f0/03/ff8233b7c6e9929a1f5da3c7860eccd847e2523ca2de0d8ef4878d354cfa/msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e", size = 383759 }, + { url = "https://files.pythonhosted.org/packages/1f/1b/eb82e1fed5a16dddd9bc75f0854b6e2fe86c0259c4353666d7fab37d39f4/msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6", size = 394405 }, + { url = "https://files.pythonhosted.org/packages/90/2e/962c6004e373d54ecf33d695fb1402f99b51832631e37c49273cc564ffc5/msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5", size = 396041 }, + { url = "https://files.pythonhosted.org/packages/f8/20/6e03342f629474414860c48aeffcc2f7f50ddaf351d95f20c3f1c67399a8/msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88", size = 68538 }, + { url = "https://files.pythonhosted.org/packages/aa/c4/5a582fc9a87991a3e6f6800e9bb2f3c82972912235eb9539954f3e9997c7/msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788", size = 74871 }, + { url = "https://files.pythonhosted.org/packages/e1/d6/716b7ca1dbde63290d2973d22bbef1b5032ca634c3ff4384a958ec3f093a/msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d", size = 152421 }, + { url = "https://files.pythonhosted.org/packages/70/da/5312b067f6773429cec2f8f08b021c06af416bba340c912c2ec778539ed6/msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2", size = 85277 }, + { url = "https://files.pythonhosted.org/packages/28/51/da7f3ae4462e8bb98af0d5bdf2707f1b8c65a0d4f496e46b6afb06cbc286/msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420", size = 82222 }, + { url = "https://files.pythonhosted.org/packages/33/af/dc95c4b2a49cff17ce47611ca9ba218198806cad7796c0b01d1e332c86bb/msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2", size = 392971 }, + { url = "https://files.pythonhosted.org/packages/f1/54/65af8de681fa8255402c80eda2a501ba467921d5a7a028c9c22a2c2eedb5/msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39", size = 401403 }, + { 
url = "https://files.pythonhosted.org/packages/97/8c/e333690777bd33919ab7024269dc3c41c76ef5137b211d776fbb404bfead/msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f", size = 385356 }, + { url = "https://files.pythonhosted.org/packages/57/52/406795ba478dc1c890559dd4e89280fa86506608a28ccf3a72fbf45df9f5/msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247", size = 383028 }, + { url = "https://files.pythonhosted.org/packages/e7/69/053b6549bf90a3acadcd8232eae03e2fefc87f066a5b9fbb37e2e608859f/msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c", size = 391100 }, + { url = "https://files.pythonhosted.org/packages/23/f0/d4101d4da054f04274995ddc4086c2715d9b93111eb9ed49686c0f7ccc8a/msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b", size = 394254 }, + { url = "https://files.pythonhosted.org/packages/1c/12/cf07458f35d0d775ff3a2dc5559fa2e1fcd06c46f1ef510e594ebefdca01/msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b", size = 69085 }, + { url = "https://files.pythonhosted.org/packages/73/80/2708a4641f7d553a63bc934a3eb7214806b5b39d200133ca7f7afb0a53e8/msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f", size = 75347 }, + { url = "https://files.pythonhosted.org/packages/c8/b0/380f5f639543a4ac413e969109978feb1f3c66e931068f91ab6ab0f8be00/msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf", size = 151142 }, + { url = 
"https://files.pythonhosted.org/packages/c8/ee/be57e9702400a6cb2606883d55b05784fada898dfc7fd12608ab1fdb054e/msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330", size = 84523 }, + { url = "https://files.pythonhosted.org/packages/7e/3a/2919f63acca3c119565449681ad08a2f84b2171ddfcff1dba6959db2cceb/msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734", size = 81556 }, + { url = "https://files.pythonhosted.org/packages/7c/43/a11113d9e5c1498c145a8925768ea2d5fce7cbab15c99cda655aa09947ed/msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e", size = 392105 }, + { url = "https://files.pythonhosted.org/packages/2d/7b/2c1d74ca6c94f70a1add74a8393a0138172207dc5de6fc6269483519d048/msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca", size = 399979 }, + { url = "https://files.pythonhosted.org/packages/82/8c/cf64ae518c7b8efc763ca1f1348a96f0e37150061e777a8ea5430b413a74/msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915", size = 383816 }, + { url = "https://files.pythonhosted.org/packages/69/86/a847ef7a0f5ef3fa94ae20f52a4cacf596a4e4a010197fbcc27744eb9a83/msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d", size = 380973 }, + { url = "https://files.pythonhosted.org/packages/aa/90/c74cf6e1126faa93185d3b830ee97246ecc4fe12cf9d2d31318ee4246994/msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434", size = 387435 }, + { url = 
"https://files.pythonhosted.org/packages/7a/40/631c238f1f338eb09f4acb0f34ab5862c4e9d7eda11c1b685471a4c5ea37/msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c", size = 399082 }, + { url = "https://files.pythonhosted.org/packages/e9/1b/fa8a952be252a1555ed39f97c06778e3aeb9123aa4cccc0fd2acd0b4e315/msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc", size = 69037 }, + { url = "https://files.pythonhosted.org/packages/b6/bc/8bd826dd03e022153bfa1766dcdec4976d6c818865ed54223d71f07862b3/msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f", size = 75140 }, +] + +[[package]] +name = "mujoco" +version = "2.3.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "glfw" }, + { name = "numpy" }, + { name = "pyopengl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/51/57/083bcb22c6b1d6ad06ac2e0d751b4113f8fcd1ed4adaf369bf4365db703c/mujoco-2.3.7.tar.gz", hash = "sha256:422041f1ce37c6d151fbced1048df626837e94fe3cd9f813585907046336a7d0", size = 593293 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/58/92/e9ff86733133ea97aeb5ba3babfb8bcbdf3d0b6e580f55d1261d6c2d2809/mujoco-2.3.7-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:779520216f72a8e370e3f0cdd71b45c3b7384c63331a3189194c930a3e7cff5c", size = 4418190 }, + { url = "https://files.pythonhosted.org/packages/4e/bc/0af8bd535e7c80b081f1b9ea5426b0592a7122443215e0e1f5228081620f/mujoco-2.3.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9d4018053879016282d27ab7a91e292c72d44efb5a88553feacfe5b843dde103", size = 4360833 }, + { url = "https://files.pythonhosted.org/packages/3e/23/f609446dde9bb1cf30ea2cfd7765c9a658675e7910e522a09497fbf3b096/mujoco-2.3.7-cp311-cp311-macosx_11_0_x86_64.whl", hash = 
"sha256:3149b16b8122ee62642474bfd2871064e8edc40235471cf5d84be3569afc0312", size = 4418137 }, + { url = "https://files.pythonhosted.org/packages/63/4e/62739d9d96a05331a1d39133b567bb7beea793a2112f6d312f6d1f74578c/mujoco-2.3.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c08660a8d52ef3efde76095f0991e807703a950c1e882d2bcd984b9a846626f7", size = 4297111 }, + { url = "https://files.pythonhosted.org/packages/be/c8/183dee0066e64da88b50df6a72e96dc662ae1bc2c422a2d35605ff19e154/mujoco-2.3.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:426af8965f8636d94a0f75740c3024a62b3e585020ee817ef5208ec844a1ad94", size = 4588037 }, + { url = "https://files.pythonhosted.org/packages/5e/24/498a36bba5a08fbd975155691e723d55bf25de64704bab845178a3bc8e55/mujoco-2.3.7-cp311-cp311-win_amd64.whl", hash = "sha256:215415a8e98a4b50625beae859079d5e0810b2039e50420f0ba81763c34abb59", size = 3353524 }, +] + +[[package]] +name = "multidict" +version = "6.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/2f/a3470242707058fe856fe59241eee5635d79087100b7042a867368863a27/multidict-6.4.4.tar.gz", hash = "sha256:69ee9e6ba214b5245031b76233dd95408a0fd57fdb019ddcc1ead4790932a8e8", size = 90183 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/19/1b/4c6e638195851524a63972c5773c7737bea7e47b1ba402186a37773acee2/multidict-6.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4f5f29794ac0e73d2a06ac03fd18870adc0135a9d384f4a306a951188ed02f95", size = 65515 }, + { url = "https://files.pythonhosted.org/packages/25/d5/10e6bca9a44b8af3c7f920743e5fc0c2bcf8c11bf7a295d4cfe00b08fb46/multidict-6.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c04157266344158ebd57b7120d9b0b35812285d26d0e78193e17ef57bfe2979a", size = 38609 }, + { url = 
"https://files.pythonhosted.org/packages/26/b4/91fead447ccff56247edc7f0535fbf140733ae25187a33621771ee598a18/multidict-6.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bb61ffd3ab8310d93427e460f565322c44ef12769f51f77277b4abad7b6f7223", size = 37871 }, + { url = "https://files.pythonhosted.org/packages/3b/37/cbc977cae59277e99d15bbda84cc53b5e0c4929ffd91d958347200a42ad0/multidict-6.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e0ba18a9afd495f17c351d08ebbc4284e9c9f7971d715f196b79636a4d0de44", size = 226661 }, + { url = "https://files.pythonhosted.org/packages/15/cd/7e0b57fbd4dc2fc105169c4ecce5be1a63970f23bb4ec8c721b67e11953d/multidict-6.4.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9faf1b1dcaadf9f900d23a0e6d6c8eadd6a95795a0e57fcca73acce0eb912065", size = 223422 }, + { url = "https://files.pythonhosted.org/packages/f1/01/1de268da121bac9f93242e30cd3286f6a819e5f0b8896511162d6ed4bf8d/multidict-6.4.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a4d1cb1327c6082c4fce4e2a438483390964c02213bc6b8d782cf782c9b1471f", size = 235447 }, + { url = "https://files.pythonhosted.org/packages/d2/8c/8b9a5e4aaaf4f2de14e86181a3a3d7b105077f668b6a06f043ec794f684c/multidict-6.4.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:941f1bec2f5dbd51feeb40aea654c2747f811ab01bdd3422a48a4e4576b7d76a", size = 231455 }, + { url = "https://files.pythonhosted.org/packages/35/db/e1817dcbaa10b319c412769cf999b1016890849245d38905b73e9c286862/multidict-6.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5f8a146184da7ea12910a4cec51ef85e44f6268467fb489c3caf0cd512f29c2", size = 223666 }, + { url = "https://files.pythonhosted.org/packages/4a/e1/66e8579290ade8a00e0126b3d9a93029033ffd84f0e697d457ed1814d0fc/multidict-6.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:232b7237e57ec3c09be97206bfb83a0aa1c5d7d377faa019c68a210fa35831f1", size = 217392 }, + { url = "https://files.pythonhosted.org/packages/7b/6f/f8639326069c24a48c7747c2a5485d37847e142a3f741ff3340c88060a9a/multidict-6.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:55ae0721c1513e5e3210bca4fc98456b980b0c2c016679d3d723119b6b202c42", size = 228969 }, + { url = "https://files.pythonhosted.org/packages/d2/c3/3d58182f76b960eeade51c89fcdce450f93379340457a328e132e2f8f9ed/multidict-6.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:51d662c072579f63137919d7bb8fc250655ce79f00c82ecf11cab678f335062e", size = 217433 }, + { url = "https://files.pythonhosted.org/packages/e1/4b/f31a562906f3bd375f3d0e83ce314e4a660c01b16c2923e8229b53fba5d7/multidict-6.4.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0e05c39962baa0bb19a6b210e9b1422c35c093b651d64246b6c2e1a7e242d9fd", size = 225418 }, + { url = "https://files.pythonhosted.org/packages/99/89/78bb95c89c496d64b5798434a3deee21996114d4d2c28dd65850bf3a691e/multidict-6.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d5b1cc3ab8c31d9ebf0faa6e3540fb91257590da330ffe6d2393d4208e638925", size = 235042 }, + { url = "https://files.pythonhosted.org/packages/74/91/8780a6e5885a8770442a8f80db86a0887c4becca0e5a2282ba2cae702bc4/multidict-6.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:93ec84488a384cd7b8a29c2c7f467137d8a73f6fe38bb810ecf29d1ade011a7c", size = 230280 }, + { url = "https://files.pythonhosted.org/packages/68/c1/fcf69cabd542eb6f4b892469e033567ee6991d361d77abdc55e3a0f48349/multidict-6.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b308402608493638763abc95f9dc0030bbd6ac6aff784512e8ac3da73a88af08", size = 223322 }, + { url = "https://files.pythonhosted.org/packages/b8/85/5b80bf4b83d8141bd763e1d99142a9cdfd0db83f0739b4797172a4508014/multidict-6.4.4-cp311-cp311-win32.whl", hash = "sha256:343892a27d1a04d6ae455ecece12904d242d299ada01633d94c4f431d68a8c49", size = 35070 }, + { url = 
"https://files.pythonhosted.org/packages/09/66/0bed198ffd590ab86e001f7fa46b740d58cf8ff98c2f254e4a36bf8861ad/multidict-6.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:73484a94f55359780c0f458bbd3c39cb9cf9c182552177d2136e828269dee529", size = 38667 }, + { url = "https://files.pythonhosted.org/packages/d2/b5/5675377da23d60875fe7dae6be841787755878e315e2f517235f22f59e18/multidict-6.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:dc388f75a1c00000824bf28b7633e40854f4127ede80512b44c3cfeeea1839a2", size = 64293 }, + { url = "https://files.pythonhosted.org/packages/34/a7/be384a482754bb8c95d2bbe91717bf7ccce6dc38c18569997a11f95aa554/multidict-6.4.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:98af87593a666f739d9dba5d0ae86e01b0e1a9cfcd2e30d2d361fbbbd1a9162d", size = 38096 }, + { url = "https://files.pythonhosted.org/packages/66/6d/d59854bb4352306145bdfd1704d210731c1bb2c890bfee31fb7bbc1c4c7f/multidict-6.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aff4cafea2d120327d55eadd6b7f1136a8e5a0ecf6fb3b6863e8aca32cd8e50a", size = 37214 }, + { url = "https://files.pythonhosted.org/packages/99/e0/c29d9d462d7cfc5fc8f9bf24f9c6843b40e953c0b55e04eba2ad2cf54fba/multidict-6.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:169c4ba7858176b797fe551d6e99040c531c775d2d57b31bcf4de6d7a669847f", size = 224686 }, + { url = "https://files.pythonhosted.org/packages/dc/4a/da99398d7fd8210d9de068f9a1b5f96dfaf67d51e3f2521f17cba4ee1012/multidict-6.4.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b9eb4c59c54421a32b3273d4239865cb14ead53a606db066d7130ac80cc8ec93", size = 231061 }, + { url = "https://files.pythonhosted.org/packages/21/f5/ac11add39a0f447ac89353e6ca46666847051103649831c08a2800a14455/multidict-6.4.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7cf3bd54c56aa16fdb40028d545eaa8d051402b61533c21e84046e05513d5780", size = 232412 }, + { url = 
"https://files.pythonhosted.org/packages/d9/11/4b551e2110cded705a3c13a1d4b6a11f73891eb5a1c449f1b2b6259e58a6/multidict-6.4.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f682c42003c7264134bfe886376299db4cc0c6cd06a3295b41b347044bcb5482", size = 231563 }, + { url = "https://files.pythonhosted.org/packages/4c/02/751530c19e78fe73b24c3da66618eda0aa0d7f6e7aa512e46483de6be210/multidict-6.4.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a920f9cf2abdf6e493c519492d892c362007f113c94da4c239ae88429835bad1", size = 223811 }, + { url = "https://files.pythonhosted.org/packages/c7/cb/2be8a214643056289e51ca356026c7b2ce7225373e7a1f8c8715efee8988/multidict-6.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:530d86827a2df6504526106b4c104ba19044594f8722d3e87714e847c74a0275", size = 216524 }, + { url = "https://files.pythonhosted.org/packages/19/f3/6d5011ec375c09081f5250af58de85f172bfcaafebff286d8089243c4bd4/multidict-6.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ecde56ea2439b96ed8a8d826b50c57364612ddac0438c39e473fafad7ae1c23b", size = 229012 }, + { url = "https://files.pythonhosted.org/packages/67/9c/ca510785df5cf0eaf5b2a8132d7d04c1ce058dcf2c16233e596ce37a7f8e/multidict-6.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:dc8c9736d8574b560634775ac0def6bdc1661fc63fa27ffdfc7264c565bcb4f2", size = 226765 }, + { url = "https://files.pythonhosted.org/packages/36/c8/ca86019994e92a0f11e642bda31265854e6ea7b235642f0477e8c2e25c1f/multidict-6.4.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7f3d3b3c34867579ea47cbd6c1f2ce23fbfd20a273b6f9e3177e256584f1eacc", size = 222888 }, + { url = "https://files.pythonhosted.org/packages/c6/67/bc25a8e8bd522935379066950ec4e2277f9b236162a73548a2576d4b9587/multidict-6.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:87a728af265e08f96b6318ebe3c0f68b9335131f461efab2fc64cc84a44aa6ed", size = 234041 }, + { url = 
"https://files.pythonhosted.org/packages/f1/a0/70c4c2d12857fccbe607b334b7ee28b6b5326c322ca8f73ee54e70d76484/multidict-6.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9f193eeda1857f8e8d3079a4abd258f42ef4a4bc87388452ed1e1c4d2b0c8740", size = 231046 }, + { url = "https://files.pythonhosted.org/packages/c1/0f/52954601d02d39742aab01d6b92f53c1dd38b2392248154c50797b4df7f1/multidict-6.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:be06e73c06415199200e9a2324a11252a3d62030319919cde5e6950ffeccf72e", size = 227106 }, + { url = "https://files.pythonhosted.org/packages/af/24/679d83ec4379402d28721790dce818e5d6b9f94ce1323a556fb17fa9996c/multidict-6.4.4-cp312-cp312-win32.whl", hash = "sha256:622f26ea6a7e19b7c48dd9228071f571b2fbbd57a8cd71c061e848f281550e6b", size = 35351 }, + { url = "https://files.pythonhosted.org/packages/52/ef/40d98bc5f986f61565f9b345f102409534e29da86a6454eb6b7c00225a13/multidict-6.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:5e2bcda30d5009996ff439e02a9f2b5c3d64a20151d34898c000a6281faa3781", size = 38791 }, + { url = "https://files.pythonhosted.org/packages/df/2a/e166d2ffbf4b10131b2d5b0e458f7cee7d986661caceae0de8753042d4b2/multidict-6.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:82ffabefc8d84c2742ad19c37f02cde5ec2a1ee172d19944d380f920a340e4b9", size = 64123 }, + { url = "https://files.pythonhosted.org/packages/8c/96/e200e379ae5b6f95cbae472e0199ea98913f03d8c9a709f42612a432932c/multidict-6.4.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6a2f58a66fe2c22615ad26156354005391e26a2f3721c3621504cd87c1ea87bf", size = 38049 }, + { url = "https://files.pythonhosted.org/packages/75/fb/47afd17b83f6a8c7fa863c6d23ac5ba6a0e6145ed8a6bcc8da20b2b2c1d2/multidict-6.4.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5883d6ee0fd9d8a48e9174df47540b7545909841ac82354c7ae4cbe9952603bd", size = 37078 }, + { url = 
"https://files.pythonhosted.org/packages/fa/70/1af3143000eddfb19fd5ca5e78393985ed988ac493bb859800fe0914041f/multidict-6.4.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9abcf56a9511653fa1d052bfc55fbe53dbee8f34e68bd6a5a038731b0ca42d15", size = 224097 }, + { url = "https://files.pythonhosted.org/packages/b1/39/d570c62b53d4fba844e0378ffbcd02ac25ca423d3235047013ba2f6f60f8/multidict-6.4.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6ed5ae5605d4ad5a049fad2a28bb7193400700ce2f4ae484ab702d1e3749c3f9", size = 230768 }, + { url = "https://files.pythonhosted.org/packages/fd/f8/ed88f2c4d06f752b015933055eb291d9bc184936903752c66f68fb3c95a7/multidict-6.4.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbfcb60396f9bcfa63e017a180c3105b8c123a63e9d1428a36544e7d37ca9e20", size = 231331 }, + { url = "https://files.pythonhosted.org/packages/9c/6f/8e07cffa32f483ab887b0d56bbd8747ac2c1acd00dc0af6fcf265f4a121e/multidict-6.4.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0f1987787f5f1e2076b59692352ab29a955b09ccc433c1f6b8e8e18666f608b", size = 230169 }, + { url = "https://files.pythonhosted.org/packages/e6/2b/5dcf173be15e42f330110875a2668ddfc208afc4229097312212dc9c1236/multidict-6.4.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0121ccce8c812047d8d43d691a1ad7641f72c4f730474878a5aeae1b8ead8c", size = 222947 }, + { url = "https://files.pythonhosted.org/packages/39/75/4ddcbcebe5ebcd6faa770b629260d15840a5fc07ce8ad295a32e14993726/multidict-6.4.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83ec4967114295b8afd120a8eec579920c882831a3e4c3331d591a8e5bfbbc0f", size = 215761 }, + { url = "https://files.pythonhosted.org/packages/6a/c9/55e998ae45ff15c5608e384206aa71a11e1b7f48b64d166db400b14a3433/multidict-6.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:995f985e2e268deaf17867801b859a282e0448633f1310e3704b30616d269d69", size = 227605 }, + { url = "https://files.pythonhosted.org/packages/04/49/c2404eac74497503c77071bd2e6f88c7e94092b8a07601536b8dbe99be50/multidict-6.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:d832c608f94b9f92a0ec8b7e949be7792a642b6e535fcf32f3e28fab69eeb046", size = 226144 }, + { url = "https://files.pythonhosted.org/packages/62/c5/0cd0c3c6f18864c40846aa2252cd69d308699cb163e1c0d989ca301684da/multidict-6.4.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d21c1212171cf7da703c5b0b7a0e85be23b720818aef502ad187d627316d5645", size = 221100 }, + { url = "https://files.pythonhosted.org/packages/71/7b/f2f3887bea71739a046d601ef10e689528d4f911d84da873b6be9194ffea/multidict-6.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:cbebaa076aaecad3d4bb4c008ecc73b09274c952cf6a1b78ccfd689e51f5a5b0", size = 232731 }, + { url = "https://files.pythonhosted.org/packages/e5/b3/d9de808349df97fa75ec1372758701b5800ebad3c46ae377ad63058fbcc6/multidict-6.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:c93a6fb06cc8e5d3628b2b5fda215a5db01e8f08fc15fadd65662d9b857acbe4", size = 229637 }, + { url = "https://files.pythonhosted.org/packages/5e/57/13207c16b615eb4f1745b44806a96026ef8e1b694008a58226c2d8f5f0a5/multidict-6.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8cd8f81f1310182362fb0c7898145ea9c9b08a71081c5963b40ee3e3cac589b1", size = 225594 }, + { url = "https://files.pythonhosted.org/packages/3a/e4/d23bec2f70221604f5565000632c305fc8f25ba953e8ce2d8a18842b9841/multidict-6.4.4-cp313-cp313-win32.whl", hash = "sha256:3e9f1cd61a0ab857154205fb0b1f3d3ace88d27ebd1409ab7af5096e409614cd", size = 35359 }, + { url = "https://files.pythonhosted.org/packages/a7/7a/cfe1a47632be861b627f46f642c1d031704cc1c0f5c0efbde2ad44aa34bd/multidict-6.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:8ffb40b74400e4455785c2fa37eba434269149ec525fc8329858c862e4b35373", size = 38903 }, + { url = 
"https://files.pythonhosted.org/packages/68/7b/15c259b0ab49938a0a1c8f3188572802704a779ddb294edc1b2a72252e7c/multidict-6.4.4-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:6a602151dbf177be2450ef38966f4be3467d41a86c6a845070d12e17c858a156", size = 68895 }, + { url = "https://files.pythonhosted.org/packages/f1/7d/168b5b822bccd88142e0a3ce985858fea612404edd228698f5af691020c9/multidict-6.4.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0d2b9712211b860d123815a80b859075d86a4d54787e247d7fbee9db6832cf1c", size = 40183 }, + { url = "https://files.pythonhosted.org/packages/e0/b7/d4b8d98eb850ef28a4922ba508c31d90715fd9b9da3801a30cea2967130b/multidict-6.4.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d2fa86af59f8fc1972e121ade052145f6da22758f6996a197d69bb52f8204e7e", size = 39592 }, + { url = "https://files.pythonhosted.org/packages/18/28/a554678898a19583548e742080cf55d169733baf57efc48c2f0273a08583/multidict-6.4.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50855d03e9e4d66eab6947ba688ffb714616f985838077bc4b490e769e48da51", size = 226071 }, + { url = "https://files.pythonhosted.org/packages/ee/dc/7ba6c789d05c310e294f85329efac1bf5b450338d2542498db1491a264df/multidict-6.4.4-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:5bce06b83be23225be1905dcdb6b789064fae92499fbc458f59a8c0e68718601", size = 222597 }, + { url = "https://files.pythonhosted.org/packages/24/4f/34eadbbf401b03768dba439be0fb94b0d187facae9142821a3d5599ccb3b/multidict-6.4.4-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66ed0731f8e5dfd8369a883b6e564aca085fb9289aacabd9decd70568b9a30de", size = 228253 }, + { url = "https://files.pythonhosted.org/packages/c0/e6/493225a3cdb0d8d80d43a94503fc313536a07dae54a3f030d279e629a2bc/multidict-6.4.4-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:329ae97fc2f56f44d91bc47fe0972b1f52d21c4b7a2ac97040da02577e2daca2", size = 
226146 }, + { url = "https://files.pythonhosted.org/packages/2f/70/e411a7254dc3bff6f7e6e004303b1b0591358e9f0b7c08639941e0de8bd6/multidict-6.4.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c27e5dcf520923d6474d98b96749e6805f7677e93aaaf62656005b8643f907ab", size = 220585 }, + { url = "https://files.pythonhosted.org/packages/08/8f/beb3ae7406a619100d2b1fb0022c3bb55a8225ab53c5663648ba50dfcd56/multidict-6.4.4-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:058cc59b9e9b143cc56715e59e22941a5d868c322242278d28123a5d09cdf6b0", size = 212080 }, + { url = "https://files.pythonhosted.org/packages/9c/ec/355124e9d3d01cf8edb072fd14947220f357e1c5bc79c88dff89297e9342/multidict-6.4.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:69133376bc9a03f8c47343d33f91f74a99c339e8b58cea90433d8e24bb298031", size = 226558 }, + { url = "https://files.pythonhosted.org/packages/fd/22/d2b95cbebbc2ada3be3812ea9287dcc9712d7f1a012fad041770afddb2ad/multidict-6.4.4-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:d6b15c55721b1b115c5ba178c77104123745b1417527ad9641a4c5e2047450f0", size = 212168 }, + { url = "https://files.pythonhosted.org/packages/4d/c5/62bfc0b2f9ce88326dbe7179f9824a939c6c7775b23b95de777267b9725c/multidict-6.4.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a887b77f51d3d41e6e1a63cf3bc7ddf24de5939d9ff69441387dfefa58ac2e26", size = 217970 }, + { url = "https://files.pythonhosted.org/packages/79/74/977cea1aadc43ff1c75d23bd5bc4768a8fac98c14e5878d6ee8d6bab743c/multidict-6.4.4-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:632a3bf8f1787f7ef7d3c2f68a7bde5be2f702906f8b5842ad6da9d974d0aab3", size = 226980 }, + { url = "https://files.pythonhosted.org/packages/48/fc/cc4a1a2049df2eb84006607dc428ff237af38e0fcecfdb8a29ca47b1566c/multidict-6.4.4-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:a145c550900deb7540973c5cdb183b0d24bed6b80bf7bddf33ed8f569082535e", size = 220641 }, + { 
url = "https://files.pythonhosted.org/packages/3b/6a/a7444d113ab918701988d4abdde373dbdfd2def7bd647207e2bf645c7eac/multidict-6.4.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:cc5d83c6619ca5c9672cb78b39ed8542f1975a803dee2cda114ff73cbb076edd", size = 221728 }, + { url = "https://files.pythonhosted.org/packages/2b/b0/fdf4c73ad1c55e0f4dbbf2aa59dd37037334091f9a4961646d2b7ac91a86/multidict-6.4.4-cp313-cp313t-win32.whl", hash = "sha256:3312f63261b9df49be9d57aaa6abf53a6ad96d93b24f9cc16cf979956355ce6e", size = 41913 }, + { url = "https://files.pythonhosted.org/packages/8e/92/27989ecca97e542c0d01d05a98a5ae12198a243a9ee12563a0313291511f/multidict-6.4.4-cp313-cp313t-win_amd64.whl", hash = "sha256:ba852168d814b2c73333073e1c7116d9395bea69575a01b0b3c89d2d5a87c8fb", size = 46112 }, + { url = "https://files.pythonhosted.org/packages/84/5d/e17845bb0fa76334477d5de38654d27946d5b5d3695443987a094a71b440/multidict-6.4.4-py3-none-any.whl", hash = "sha256:bd4557071b561a8b3b6075c3ce93cf9bfb6182cb241805c3d66ced3b75eff4ac", size = 10481 }, +] + +[[package]] +name = "multiprocess" +version = "0.70.16" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dill" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b5/ae/04f39c5d0d0def03247c2893d6f2b83c136bf3320a2154d7b8858f2ba72d/multiprocess-0.70.16.tar.gz", hash = "sha256:161af703d4652a0e1410be6abccecde4a7ddffd19341be0a7011b94aeb171ac1", size = 1772603 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/f7/7ec7fddc92e50714ea3745631f79bd9c96424cb2702632521028e57d3a36/multiprocess-0.70.16-py310-none-any.whl", hash = "sha256:c4a9944c67bd49f823687463660a2d6daae94c289adff97e0f9d696ba6371d02", size = 134824 }, + { url = "https://files.pythonhosted.org/packages/50/15/b56e50e8debaf439f44befec5b2af11db85f6e0f344c3113ae0be0593a91/multiprocess-0.70.16-py311-none-any.whl", hash = "sha256:af4cabb0dac72abfb1e794fa7855c325fd2b55a10a44628a3c1ad3311c04127a", size = 143519 }, + { url = 
"https://files.pythonhosted.org/packages/0a/7d/a988f258104dcd2ccf1ed40fdc97e26c4ac351eeaf81d76e266c52d84e2f/multiprocess-0.70.16-py312-none-any.whl", hash = "sha256:fc0544c531920dde3b00c29863377f87e1632601092ea2daca74e4beb40faa2e", size = 146741 }, + { url = "https://files.pythonhosted.org/packages/ea/89/38df130f2c799090c978b366cfdf5b96d08de5b29a4a293df7f7429fa50b/multiprocess-0.70.16-py38-none-any.whl", hash = "sha256:a71d82033454891091a226dfc319d0cfa8019a4e888ef9ca910372a446de4435", size = 132628 }, + { url = "https://files.pythonhosted.org/packages/da/d9/f7f9379981e39b8c2511c9e0326d212accacb82f12fbfdc1aa2ce2a7b2b6/multiprocess-0.70.16-py39-none-any.whl", hash = "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3", size = 133351 }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963 }, +] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/83/f8/51569ac65d696c8ecbee95938f89d4abf00f47d58d48f6fbabfe8f0baefe/nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe", size = 7418 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = 
"sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195 }, +] + +[[package]] +name = "networkx" +version = "3.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/4f/ccdb8ad3a38e583f214547fd2f7ff1fc160c43a75af88e6aec213404b96a/networkx-3.5.tar.gz", hash = "sha256:d4c6f9cf81f52d69230866796b82afbccdec3db7ae4fbd1b65ea750feed50037", size = 2471065 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/8d/776adee7bbf76365fdd7f2552710282c79a4ead5d2a46408c9043a2b70ba/networkx-3.5-py3-none-any.whl", hash = "sha256:0030d386a9a06dee3565298b4a734b68589749a544acbb6c412dc9e2489ec6ec", size = 2034406 }, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314 }, +] + +[[package]] +name = "numba" +version = "0.61.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "llvmlite" }, + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1c/a0/e21f57604304aa03ebb8e098429222722ad99176a4f979d34af1d1ee80da/numba-0.61.2.tar.gz", hash = "sha256:8750ee147940a6637b80ecf7f95062185ad8726c8c28a2295b8ec1160a196f7d", size = 2820615 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/97/c99d1056aed767503c228f7099dc11c402906b42a4757fec2819329abb98/numba-0.61.2-cp311-cp311-macosx_10_14_x86_64.whl", hash = 
"sha256:efd3db391df53aaa5cfbee189b6c910a5b471488749fd6606c3f33fc984c2ae2", size = 2775825 }, + { url = "https://files.pythonhosted.org/packages/95/9e/63c549f37136e892f006260c3e2613d09d5120672378191f2dc387ba65a2/numba-0.61.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:49c980e4171948ffebf6b9a2520ea81feed113c1f4890747ba7f59e74be84b1b", size = 2778695 }, + { url = "https://files.pythonhosted.org/packages/97/c8/8740616c8436c86c1b9a62e72cb891177d2c34c2d24ddcde4c390371bf4c/numba-0.61.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3945615cd73c2c7eba2a85ccc9c1730c21cd3958bfcf5a44302abae0fb07bb60", size = 3829227 }, + { url = "https://files.pythonhosted.org/packages/fc/06/66e99ae06507c31d15ff3ecd1f108f2f59e18b6e08662cd5f8a5853fbd18/numba-0.61.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbfdf4eca202cebade0b7d43896978e146f39398909a42941c9303f82f403a18", size = 3523422 }, + { url = "https://files.pythonhosted.org/packages/0f/a4/2b309a6a9f6d4d8cfba583401c7c2f9ff887adb5d54d8e2e130274c0973f/numba-0.61.2-cp311-cp311-win_amd64.whl", hash = "sha256:76bcec9f46259cedf888041b9886e257ae101c6268261b19fda8cfbc52bec9d1", size = 2831505 }, + { url = "https://files.pythonhosted.org/packages/b4/a0/c6b7b9c615cfa3b98c4c63f4316e3f6b3bbe2387740277006551784218cd/numba-0.61.2-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:34fba9406078bac7ab052efbf0d13939426c753ad72946baaa5bf9ae0ebb8dd2", size = 2776626 }, + { url = "https://files.pythonhosted.org/packages/92/4a/fe4e3c2ecad72d88f5f8cd04e7f7cff49e718398a2fac02d2947480a00ca/numba-0.61.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4ddce10009bc097b080fc96876d14c051cc0c7679e99de3e0af59014dab7dfe8", size = 2779287 }, + { url = "https://files.pythonhosted.org/packages/9a/2d/e518df036feab381c23a624dac47f8445ac55686ec7f11083655eb707da3/numba-0.61.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b1bb509d01f23d70325d3a5a0e237cbc9544dd50e50588bc581ba860c213546", size = 
3885928 }, + { url = "https://files.pythonhosted.org/packages/10/0f/23cced68ead67b75d77cfcca3df4991d1855c897ee0ff3fe25a56ed82108/numba-0.61.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:48a53a3de8f8793526cbe330f2a39fe9a6638efcbf11bd63f3d2f9757ae345cd", size = 3577115 }, + { url = "https://files.pythonhosted.org/packages/68/1d/ddb3e704c5a8fb90142bf9dc195c27db02a08a99f037395503bfbc1d14b3/numba-0.61.2-cp312-cp312-win_amd64.whl", hash = "sha256:97cf4f12c728cf77c9c1d7c23707e4d8fb4632b46275f8f3397de33e5877af18", size = 2831929 }, + { url = "https://files.pythonhosted.org/packages/0b/f3/0fe4c1b1f2569e8a18ad90c159298d862f96c3964392a20d74fc628aee44/numba-0.61.2-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:3a10a8fc9afac40b1eac55717cece1b8b1ac0b946f5065c89e00bde646b5b154", size = 2771785 }, + { url = "https://files.pythonhosted.org/packages/e9/71/91b277d712e46bd5059f8a5866862ed1116091a7cb03bd2704ba8ebe015f/numba-0.61.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7d3bcada3c9afba3bed413fba45845f2fb9cd0d2b27dd58a1be90257e293d140", size = 2773289 }, + { url = "https://files.pythonhosted.org/packages/0d/e0/5ea04e7ad2c39288c0f0f9e8d47638ad70f28e275d092733b5817cf243c9/numba-0.61.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bdbca73ad81fa196bd53dc12e3aaf1564ae036e0c125f237c7644fe64a4928ab", size = 3893918 }, + { url = "https://files.pythonhosted.org/packages/17/58/064f4dcb7d7e9412f16ecf80ed753f92297e39f399c905389688cf950b81/numba-0.61.2-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:5f154aaea625fb32cfbe3b80c5456d514d416fcdf79733dd69c0df3a11348e9e", size = 3584056 }, + { url = "https://files.pythonhosted.org/packages/af/a4/6d3a0f2d3989e62a18749e1e9913d5fa4910bbb3e3311a035baea6caf26d/numba-0.61.2-cp313-cp313-win_amd64.whl", hash = "sha256:59321215e2e0ac5fa928a8020ab00b8e57cda8a97384963ac0dfa4d4e6aa54e7", size = 2831846 }, +] + +[[package]] +name = "numcodecs" +version = "0.16.1" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/00/35/49da850ce5371da3930d099da364a73ce9ae4fc64075e521674b48f4804d/numcodecs-0.16.1.tar.gz", hash = "sha256:c47f20d656454568c6b4697ce02081e6bbb512f198738c6a56fafe8029c97fb1", size = 6268134 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6c/82/8d6ca1166dc9b020f383073c1c604e004f0495d243647a83e5d5fff2b7ad/numcodecs-0.16.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:5348a25aefbce37ea7c00c3363d36176155233c95597e5905a932e9620df960d", size = 1623980 }, + { url = "https://files.pythonhosted.org/packages/aa/4e/11258b7945c6cd3579f16228c803a13291d16ef7ef46f9551008090b6763/numcodecs-0.16.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2058b0a985470809c720d2457758b61e6c9495a49d5f20dfac9b5ebabd8848eb", size = 1153826 }, + { url = "https://files.pythonhosted.org/packages/a1/24/4099ccb29754fc1d2e55dbd9b540f58a24cab6e844dc996e37812c3fb79d/numcodecs-0.16.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b216b6d7bc207b85d41fddbc25b09fd00d76e265454db6e3fb09d5da0216397", size = 8263684 }, + { url = "https://files.pythonhosted.org/packages/04/e3/816a82b984dd7fb7a0afadd16842260ccfee23cc5edbda48a92649ee161b/numcodecs-0.16.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2308d56c4f84a5b942f8668b4adedd3d9cdd6a22e6e6e20768ec356c77050f38", size = 8788927 }, + { url = "https://files.pythonhosted.org/packages/6f/54/dbea8b17928670412db0efb20efc087b30c2a67b84b1605fa8a136e482af/numcodecs-0.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:acd8d68b4b815e62cb91e6064a53dac51ee99849350784ee16dd52cdbb4bc70f", size = 790259 }, + { url = "https://files.pythonhosted.org/packages/b7/ee/e2a903c88fed347dc74c70bbd7a8dab9aa22bb0dac68c5bc6393c2e9373b/numcodecs-0.16.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:1abe0651ecb6f207656ebfc802effa55c4ae3136cf172c295a067749a2699122", size = 1663434 }, + { url = "https://files.pythonhosted.org/packages/f2/f0/37819d4f6896b1ac43a164ffd3ab99d7cbf63bf63cb375fef97aedaef4f0/numcodecs-0.16.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:abb39b7102d0816c8563669cdddca40392d34d0cbf31e3e996706b244586a458", size = 1150402 }, + { url = "https://files.pythonhosted.org/packages/60/3c/5059a29750305b80b7428b1e6695878dea9ea3b537d7fba57875e4bbc2c7/numcodecs-0.16.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3359a951f8b23317f12736a7ad1e7375ec3d735465f92049c76d032ebca4c40", size = 8237455 }, + { url = "https://files.pythonhosted.org/packages/1b/f5/515f98d659ab0cbe3738da153eddae22186fd38f05a808511e10f04cf679/numcodecs-0.16.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82cc70592ec18060786b1bfa0da23afd2a7807d7975d766e626954d6628ec609", size = 8770711 }, + { url = "https://files.pythonhosted.org/packages/a2/3a/9fc6104f888af11bad804ebd32dffe0bcb83337f4525b4fe5b379942fefd/numcodecs-0.16.1-cp312-cp312-win_amd64.whl", hash = "sha256:4b48ddc8a7d132b7808bc53eb2705342de5c1e39289d725f988bd143c0fd86df", size = 788701 }, + { url = "https://files.pythonhosted.org/packages/5e/1e/73ffb1074f03d52cb1c4f4deaba26a2008ca45262f3622ed26dbec7a7362/numcodecs-0.16.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ad8ee940315f59188accfc3f2d39726a4ca0d76b49bf8d0018e121f01c49028", size = 1659453 }, + { url = "https://files.pythonhosted.org/packages/42/72/5affb1ce92b7a6becee17921de7c6b521a48fa61fc3d36d9f1eea2cf83f5/numcodecs-0.16.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:179ca7bf3525a0f7379df7767d87dd495253de44597cb7e511198b28b09da633", size = 1143932 }, + { url = "https://files.pythonhosted.org/packages/e3/f1/b092679d84c67c6ed62e4df5781d89bbb089f24a0df4187cbab9db51cf6b/numcodecs-0.16.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6e2babbb50bf348ae982818d5560af330eab0dcd925fb0e49509785ad57d11db", size = 8187716 }, + { url = "https://files.pythonhosted.org/packages/a8/e8/86e7741adb43261aff409b53c53c8bac2797bfca055d64dd65dc731d5141/numcodecs-0.16.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4b29d8d3284b72bfad4fb83d672a17f497ae86ee1ef8087bac7222b620d3d91", size = 8728650 }, + { url = "https://files.pythonhosted.org/packages/21/03/87c5c217232aa3515d350728c6dcefca252fa582246100ef68a51fbda456/numcodecs-0.16.1-cp313-cp313-win_amd64.whl", hash = "sha256:06489635f43e1a959aea73cb830d78cf3adb07ac5f34daccb92091e4d9ac6b07", size = 785553 }, +] + +[package.optional-dependencies] +crc32c = [ + { name = "crc32c" }, +] + +[[package]] +name = "numpy" +version = "1.26.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/65/6e/09db70a523a96d25e115e71cc56a6f9031e7b8cd166c1ac8438307c14058/numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010", size = 15786129 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/57/baae43d14fe163fa0e4c47f307b6b2511ab8d7d30177c491960504252053/numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71", size = 20630554 }, + { url = "https://files.pythonhosted.org/packages/1a/2e/151484f49fd03944c4a3ad9c418ed193cfd02724e138ac8a9505d056c582/numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef", size = 13997127 }, + { url = "https://files.pythonhosted.org/packages/79/ae/7e5b85136806f9dadf4878bf73cf223fe5c2636818ba3ab1c585d0403164/numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e", size = 14222994 }, + { url = 
"https://files.pythonhosted.org/packages/3a/d0/edc009c27b406c4f9cbc79274d6e46d634d139075492ad055e3d68445925/numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5", size = 18252005 }, + { url = "https://files.pythonhosted.org/packages/09/bf/2b1aaf8f525f2923ff6cfcf134ae5e750e279ac65ebf386c75a0cf6da06a/numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a", size = 13885297 }, + { url = "https://files.pythonhosted.org/packages/df/a0/4e0f14d847cfc2a633a1c8621d00724f3206cfeddeb66d35698c4e2cf3d2/numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a", size = 18093567 }, + { url = "https://files.pythonhosted.org/packages/d2/b7/a734c733286e10a7f1a8ad1ae8c90f2d33bf604a96548e0a4a3a6739b468/numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20", size = 5968812 }, + { url = "https://files.pythonhosted.org/packages/3f/6b/5610004206cf7f8e7ad91c5a85a8c71b2f2f8051a0c0c4d5916b76d6cbb2/numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2", size = 15811913 }, + { url = "https://files.pythonhosted.org/packages/95/12/8f2020a8e8b8383ac0177dc9570aad031a3beb12e38847f7129bacd96228/numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218", size = 20335901 }, + { url = "https://files.pythonhosted.org/packages/75/5b/ca6c8bd14007e5ca171c7c03102d17b4f4e0ceb53957e8c44343a9546dcc/numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b", size = 13685868 }, + { url = 
"https://files.pythonhosted.org/packages/79/f8/97f10e6755e2a7d027ca783f63044d5b1bc1ae7acb12afe6a9b4286eac17/numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b", size = 13925109 }, + { url = "https://files.pythonhosted.org/packages/0f/50/de23fde84e45f5c4fda2488c759b69990fd4512387a8632860f3ac9cd225/numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed", size = 17950613 }, + { url = "https://files.pythonhosted.org/packages/4c/0c/9c603826b6465e82591e05ca230dfc13376da512b25ccd0894709b054ed0/numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a", size = 13572172 }, + { url = "https://files.pythonhosted.org/packages/76/8c/2ba3902e1a0fc1c74962ea9bb33a534bb05984ad7ff9515bf8d07527cadd/numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0", size = 17786643 }, + { url = "https://files.pythonhosted.org/packages/28/4a/46d9e65106879492374999e76eb85f87b15328e06bd1550668f79f7b18c6/numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110", size = 5677803 }, + { url = "https://files.pythonhosted.org/packages/16/2e/86f24451c2d530c88daf997cb8d6ac622c1d40d19f5a031ed68a4b73a374/numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818", size = 15517754 }, +] + +[[package]] +name = "numpydantic" +version = "1.6.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c1/af/5e4ecfdfbb35b9119f42d12466970f24c02e93577c4b1d5d230b5b7cabdf/numpydantic-1.6.9.tar.gz", hash = 
"sha256:bb2c563e76894abffb06cf0e991d6cb0aa42e2b39d40426ebb0699011d18ec0d", size = 77899 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/56/2a/46f1e3059b3bd899ab1335ae3a42f7cbff9a5a9ae9294cb1d7a3eb04a9ce/numpydantic-1.6.9-py3-none-any.whl", hash = "sha256:149ed4b7dfec907fb1e7c0874fd7d41bc95734c22764124d22c7c27aa8f059fd", size = 85598 }, +] + +[[package]] +name = "nvidia-cublas-cu12" +version = "12.6.4.1" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version >= '3.13' and sys_platform == 'emscripten'", + "python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'emscripten'", + "python_full_version < '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version < '3.12' and sys_platform == 'emscripten'", +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/eb/ff4b8c503fa1f1796679dce648854d58751982426e4e4b37d6fce49d259c/nvidia_cublas_cu12-12.6.4.1-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:08ed2686e9875d01b58e3cb379c6896df8e76c75e0d4a7f7dace3d7b6d9ef8eb", size = 393138322 }, + { url = "https://files.pythonhosted.org/packages/97/0d/f1f0cadbf69d5b9ef2e4f744c9466cb0a850741d08350736dfdb4aa89569/nvidia_cublas_cu12-12.6.4.1-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:235f728d6e2a409eddf1df58d5b0921cf80cfa9e72b9f2775ccb7b4a87984668", size = 390794615 }, + { url = 
"https://files.pythonhosted.org/packages/84/f7/985e9bdbe3e0ac9298fcc8cfa51a392862a46a0ffaccbbd56939b62a9c83/nvidia_cublas_cu12-12.6.4.1-py3-none-win_amd64.whl", hash = "sha256:9e4fa264f4d8a4eb0cdbd34beadc029f453b3bafae02401e999cf3d5a5af75f8", size = 434535301 }, +] + +[[package]] +name = "nvidia-cublas-cu12" +version = "12.9.0.13" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and sys_platform == 'darwin'", + "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'darwin'", + "python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform == 'darwin'", + "python_full_version < '3.12' and platform_machine == 'aarch64' and sys_platform == 'linux'", +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/bb/03d1ad7162859beb0078645a39230a469603e5110175beb377821fdd1b1f/nvidia_cublas_cu12-12.9.0.13-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:752ba5830d4cad93ba49dfe9a5c724cfd864c23073bc5139f56b4d8b44cb82ee", size = 574394588 }, + { url = "https://files.pythonhosted.org/packages/0d/77/503d17375b1ddbf455a9b4fada9b389b218b905d9160b04288049f11cb9d/nvidia_cublas_cu12-12.9.0.13-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:1a2ca56324acaccb0c9ca8fb40647f04a8c4defd076c6230182805324204765c", size = 580744785 }, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.6.80" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version >= '3.13' and sys_platform == 'emscripten'", + "python_full_version == '3.12.*' and platform_machine != 'aarch64' and 
sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'emscripten'", + "python_full_version < '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version < '3.12' and sys_platform == 'emscripten'", +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/8b/2f6230cb715646c3a9425636e513227ce5c93c4d65823a734f4bb86d43c3/nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:166ee35a3ff1587f2490364f90eeeb8da06cd867bd5b701bf7f9a02b78bc63fc", size = 8236764 }, + { url = "https://files.pythonhosted.org/packages/25/0f/acb326ac8fd26e13c799e0b4f3b2751543e1834f04d62e729485872198d4/nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_aarch64.whl", hash = "sha256:358b4a1d35370353d52e12f0a7d1769fc01ff74a191689d3870b2123156184c4", size = 8236756 }, + { url = "https://files.pythonhosted.org/packages/49/60/7b6497946d74bcf1de852a21824d63baad12cd417db4195fc1bfe59db953/nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6768bad6cab4f19e8292125e5f1ac8aa7d1718704012a0e3272a6f61c4bce132", size = 8917980 }, + { url = "https://files.pythonhosted.org/packages/a5/24/120ee57b218d9952c379d1e026c4479c9ece9997a4fb46303611ee48f038/nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a3eff6cdfcc6a4c35db968a06fcadb061cbc7d6dde548609a941ff8701b98b73", size = 8917972 }, + { url = "https://files.pythonhosted.org/packages/1c/81/7796f096afaf726796b1b648f3bc80cafc61fe7f77f44a483c89e6c5ef34/nvidia_cuda_cupti_cu12-12.6.80-py3-none-win_amd64.whl", hash = "sha256:bbe6ae76e83ce5251b56e8c8e61a964f757175682bbad058b170b136266ab00a", size = 5724175 }, +] + 
+[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.9.19" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and sys_platform == 'darwin'", + "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'darwin'", + "python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform == 'darwin'", + "python_full_version < '3.12' and platform_machine == 'aarch64' and sys_platform == 'linux'", +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/cd/8f09d533c709034db94ce3b5a994e0ca333cbfe10e7980612db806e5c86f/nvidia_cuda_cupti_cu12-12.9.19-py3-none-manylinux_2_25_aarch64.whl", hash = "sha256:811ec3a3d7013c72b0a490c9ba48cfc67603a5ffb16a3364aa0c1e12e2d2114f", size = 10077564 }, + { url = "https://files.pythonhosted.org/packages/f1/84/1ae6d8cbbd876249e4732f487bdc6296d7c8277512e919bed2815f1d9ee5/nvidia_cuda_cupti_cu12-12.9.19-py3-none-manylinux_2_25_x86_64.whl", hash = "sha256:300a5087166edc80693ce4291b44dbd03cbf3bbabfeee3456bfb79b355456ad1", size = 10815583 }, +] + +[[package]] +name = "nvidia-cuda-nvcc-cu12" +version = "12.9.41" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/58/cebcea534569058ea5e0dbc1eef8d7ceccc647759cfd63e522eba92a0bf5/nvidia_cuda_nvcc_cu12-12.9.41-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:1a4edb53162f87519c1dd4fe948bfb6b80c272530a9bbb5ebd6833527abc3233", size = 40550305 }, + { url = "https://files.pythonhosted.org/packages/c6/ba/62ea941712209bc0883b4139b375d58a7181eeecc8d01d54a965f75fd0cd/nvidia_cuda_nvcc_cu12-12.9.41-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1170cdef5b1908ee2330a84ae8ac3e1d4e24747bdcb2ad4f030f9240eac580d4", size = 39421413 }, + { url = 
"https://files.pythonhosted.org/packages/ac/2b/882078da26c27062b86fdfd46ce52f8936a5fbe3b8e68f490b032063e19e/nvidia_cuda_nvcc_cu12-12.9.41-py3-none-win_amd64.whl", hash = "sha256:51dfb1b94f34282ab65b843b10d62610886b1de8ff33055efa7055d9e801e5b6", size = 34669918 }, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.6.77" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/75/2e/46030320b5a80661e88039f59060d1790298b4718944a65a7f2aeda3d9e9/nvidia_cuda_nvrtc_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:35b0cc6ee3a9636d5409133e79273ce1f3fd087abb0532d2d2e8fff1fe9efc53", size = 23650380 }, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.6.77" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version >= '3.13' and sys_platform == 'emscripten'", + "python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'emscripten'", + "python_full_version < '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version < '3.12' and sys_platform == 'emscripten'", +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/ea/590b2ac00d772a8abd1c387a92b46486d2679ca6622fd25c18ff76265663/nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:6116fad3e049e04791c0256a9778c16237837c08b27ed8c8401e2e45de8d60cd", size = 908052 }, + { url = "https://files.pythonhosted.org/packages/b7/3d/159023799677126e20c8fd580cca09eeb28d5c5a624adc7f793b9aa8bbfa/nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d461264ecb429c84c8879a7153499ddc7b19b5f8d84c204307491989a365588e", size = 908040 }, + { url = "https://files.pythonhosted.org/packages/e1/23/e717c5ac26d26cf39a27fbc076240fad2e3b817e5889d671b67f4f9f49c5/nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ba3b56a4f896141e25e19ab287cd71e52a6a0f4b29d0d31609f60e3b4d5219b7", size = 897690 }, + { url = "https://files.pythonhosted.org/packages/f0/62/65c05e161eeddbafeca24dc461f47de550d9fa8a7e04eb213e32b55cfd99/nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a84d15d5e1da416dd4774cb42edf5e954a3e60cc945698dc1d5be02321c44dc8", size = 897678 }, + { url = "https://files.pythonhosted.org/packages/fa/76/4c80fa138333cc975743fd0687a745fccb30d167f906f13c1c7f9a85e5ea/nvidia_cuda_runtime_cu12-12.6.77-py3-none-win_amd64.whl", hash = "sha256:86c58044c824bf3c173c49a2dbc7a6c8b53cb4e4dca50068be0bf64e9dab3f7f", size = 891773 }, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.9.37" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and sys_platform == 'darwin'", + "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'darwin'", + "python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform == 'darwin'", + "python_full_version < '3.12' and platform_machine == 'aarch64' and sys_platform == 'linux'", +] +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/64/fc/0efcb40754e694eafd7356f005e909706f61888c6896752db5c5430dd10c/nvidia_cuda_runtime_cu12-12.9.37-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4f981cb23568cb26063cfacd4291bf65f5fbd75ac9abe98ba846aa212ecd59c4", size = 3511941 }, + { url = "https://files.pythonhosted.org/packages/ee/60/73efa0b5c147a1bc2f92ae24afec40dec9ea02b48fdc1591b647562bcbcd/nvidia_cuda_runtime_cu12-12.9.37-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:60a3ee8b71b2bf8af594e67d2e8e3b0228713cd99be77209a662f1f35c08f105", size = 3494319 }, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "9.5.1.17" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version >= '3.13' and sys_platform == 'emscripten'", + "python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'emscripten'", + "python_full_version < '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version < '3.12' and sys_platform == 'emscripten'", +] +dependencies = [ + { name = "nvidia-cublas-cu12", version = "12.6.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, +] +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/99/93/a201a12d3ec1caa8c6ac34c1c2f9eeb696b886f0c36ff23c638b46603bd0/nvidia_cudnn_cu12-9.5.1.17-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:9fd4584468533c61873e5fda8ca41bac3a38bcb2d12350830c69b0a96a7e4def", size = 570523509 }, + { url = "https://files.pythonhosted.org/packages/2a/78/4535c9c7f859a64781e43c969a3a7e84c54634e319a996d43ef32ce46f83/nvidia_cudnn_cu12-9.5.1.17-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:30ac3869f6db17d170e0e556dd6cc5eee02647abc31ca856634d5a40f82c15b2", size = 570988386 }, + { url = "https://files.pythonhosted.org/packages/b6/b2/3f60d15f037fa5419d9d7f788b100ef33ea913ae5315c87ca6d6fa606c35/nvidia_cudnn_cu12-9.5.1.17-py3-none-win_amd64.whl", hash = "sha256:d7af0f8a4f3b4b9dbb3122f2ef553b45694ed9c384d5a75bab197b8eefb79ab8", size = 565440743 }, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "9.10.1.4" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and sys_platform == 'darwin'", + "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'darwin'", + "python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform == 'darwin'", + "python_full_version < '3.12' and platform_machine == 'aarch64' and sys_platform == 'linux'", +] +dependencies = [ + { name = "nvidia-cublas-cu12", version = "12.9.0.13", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/75/b9/0d76c9c94b2c078ef20a6100b532335d3c2416ae0d3b2e68c36170912a64/nvidia_cudnn_cu12-9.10.1.4-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:5b68945b89195c9eee91d812c375e9db784265b95635d173d695bb85e920e0d3", size = 705022940 }, + { url = 
"https://files.pythonhosted.org/packages/18/ec/79464a7371a028d1f443b8516b55cb2f70bb91bd3b2f2a831d707c003ccf/nvidia_cudnn_cu12-9.10.1.4-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:df73c4dab84df2c54f0a40e6427cde26e8d80feeffef02d749ee42d7da3c8204", size = 706752133 }, +] + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.3.0.4" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version >= '3.13' and sys_platform == 'emscripten'", + "python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'emscripten'", + "python_full_version < '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version < '3.12' and sys_platform == 'emscripten'", +] +dependencies = [ + { name = "nvidia-nvjitlink-cu12", version = "12.6.85", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/37/c50d2b2f2c07e146776389e3080f4faf70bcc4fa6e19d65bb54ca174ebc3/nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d16079550df460376455cba121db6564089176d9bac9e4f360493ca4741b22a6", size = 200164144 }, + { url = 
"https://files.pythonhosted.org/packages/ce/f5/188566814b7339e893f8d210d3a5332352b1409815908dad6a363dcceac1/nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8510990de9f96c803a051822618d42bf6cb8f069ff3f48d93a8486efdacb48fb", size = 200164135 }, + { url = "https://files.pythonhosted.org/packages/8f/16/73727675941ab8e6ffd86ca3a4b7b47065edcca7a997920b831f8147c99d/nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ccba62eb9cef5559abd5e0d54ceed2d9934030f51163df018532142a8ec533e5", size = 200221632 }, + { url = "https://files.pythonhosted.org/packages/60/de/99ec247a07ea40c969d904fc14f3a356b3e2a704121675b75c366b694ee1/nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_x86_64.whl", hash = "sha256:768160ac89f6f7b459bee747e8d175dbf53619cfe74b2a5636264163138013ca", size = 200221622 }, + { url = "https://files.pythonhosted.org/packages/b4/38/36fd800cec8f6e89b7c1576edaaf8076e69ec631644cdbc1b5f2e2b5a9df/nvidia_cufft_cu12-11.3.0.4-py3-none-win_amd64.whl", hash = "sha256:6048ebddfb90d09d2707efb1fd78d4e3a77cb3ae4dc60e19aab6be0ece2ae464", size = 199356881 }, +] + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.4.0.6" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and sys_platform == 'darwin'", + "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'darwin'", + "python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform == 'darwin'", + "python_full_version < '3.12' and platform_machine == 'aarch64' and sys_platform == 'linux'", +] +dependencies = [ + { name = "nvidia-nvjitlink-cu12", version = "12.9.41", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" }, +] 
+wheels = [ + { url = "https://files.pythonhosted.org/packages/62/e7/707a484520d970fc958afc3a1051fbf98ea1b692bdd85b18c6a77e881f2f/nvidia_cufft_cu12-11.4.0.6-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4836f5a8fb3d94dc8d7daa8fcec8414a3ffed28ab588ce3bdacc319fdfd95f02", size = 200850753 }, + { url = "https://files.pythonhosted.org/packages/7a/11/a03064927d6304414246dfa5f3e53f1af2dc872212e6916c65ca250048d0/nvidia_cufft_cu12-11.4.0.6-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0edd879bf82d587f24f9e282735fbfbf101335c085f30d7613b032f3325e8abb", size = 200908163 }, +] + +[[package]] +name = "nvidia-cufile-cu12" +version = "1.11.1.6" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/66/cc9876340ac68ae71b15c743ddb13f8b30d5244af344ec8322b449e35426/nvidia_cufile_cu12-1.11.1.6-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc23469d1c7e52ce6c1d55253273d32c565dd22068647f3aa59b3c6b005bf159", size = 1142103 }, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.7.77" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/1b/44a01c4e70933637c93e6e1a8063d1e998b50213a6b65ac5a9169c47e98e/nvidia_curand_cu12-10.3.7.77-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a42cd1344297f70b9e39a1e4f467a4e1c10f1da54ff7a85c12197f6c652c8bdf", size = 56279010 }, + { url = "https://files.pythonhosted.org/packages/4a/aa/2c7ff0b5ee02eaef890c0ce7d4f74bc30901871c5e45dee1ae6d0083cd80/nvidia_curand_cu12-10.3.7.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:99f1a32f1ac2bd134897fc7a203f779303261268a65762a623bf30cc9fe79117", size = 56279000 }, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.7.1.2" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and platform_machine != 'aarch64' and 
sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version >= '3.13' and sys_platform == 'emscripten'", + "python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'emscripten'", + "python_full_version < '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version < '3.12' and sys_platform == 'emscripten'", +] +dependencies = [ + { name = "nvidia-cublas-cu12", version = "12.6.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-cusparse-cu12", version = "12.5.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, + { name = "nvidia-nvjitlink-cu12", version = "12.6.85", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/17/dbe1aa865e4fdc7b6d4d0dd308fdd5aaab60f939abfc0ea1954eac4fb113/nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0ce237ef60acde1efc457335a2ddadfd7610b892d94efee7b776c64bb1cac9e0", size = 157833628 }, + { url = 
"https://files.pythonhosted.org/packages/f0/6e/c2cf12c9ff8b872e92b4a5740701e51ff17689c4d726fca91875b07f655d/nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e9e49843a7707e42022babb9bcfa33c29857a93b88020c4e4434656a655b698c", size = 158229790 }, + { url = "https://files.pythonhosted.org/packages/9f/81/baba53585da791d043c10084cf9553e074548408e04ae884cfe9193bd484/nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6cf28f17f64107a0c4d7802be5ff5537b2130bfc112f25d5a30df227058ca0e6", size = 158229780 }, + { url = "https://files.pythonhosted.org/packages/7c/5f/07d0ba3b7f19be5a5ec32a8679fc9384cfd9fc6c869825e93be9f28d6690/nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:dbbe4fc38ec1289c7e5230e16248365e375c3673c9c8bac5796e2e20db07f56e", size = 157833630 }, + { url = "https://files.pythonhosted.org/packages/d4/53/fff50a0808df7113d77e3bbc7c2b7eaed6f57d5eb80fbe93ead2aea1e09a/nvidia_cusolver_cu12-11.7.1.2-py3-none-win_amd64.whl", hash = "sha256:6813f9d8073f555444a8705f3ab0296d3e1cb37a16d694c5fc8b862a0d8706d7", size = 149287877 }, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.7.4.40" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and sys_platform == 'darwin'", + "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'darwin'", + "python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform == 'darwin'", + "python_full_version < '3.12' and platform_machine == 'aarch64' and sys_platform == 'linux'", +] +dependencies = [ + { name = "nvidia-cublas-cu12", version = "12.9.0.13", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 
'darwin'" }, + { name = "nvidia-cusparse-cu12", version = "12.5.9.5", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" }, + { name = "nvidia-nvjitlink-cu12", version = "12.9.41", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/00/83e34680d78d4ea88010bbdd6fdeaf79b210087e355da57f329262456ec6/nvidia_cusolver_cu12-11.7.4.40-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:75e64d827e391a4e0c75832c7c35ca24fdcadcd048b3de18dcc639a783187a7f", size = 331520918 }, + { url = "https://files.pythonhosted.org/packages/da/a6/85683980fee8c1c3b6be7e02cf60210de950d6b34f1b97ffa4b58989ff03/nvidia_cusolver_cu12-11.7.4.40-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:7fb531b95d46f71ca9c296d49a59212ded0a8349560e1664e1b3ce4b35ec0bd7", size = 331810924 }, +] + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.5.4.2" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version >= '3.13' and sys_platform == 'emscripten'", + "python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'emscripten'", + "python_full_version < '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version 
< '3.12' and sys_platform == 'emscripten'", +] +dependencies = [ + { name = "nvidia-nvjitlink-cu12", version = "12.6.85", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/eb/6681efd0aa7df96b4f8067b3ce7246833dd36830bb4cec8896182773db7d/nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d25b62fb18751758fe3c93a4a08eff08effedfe4edf1c6bb5afd0890fe88f887", size = 216451147 }, + { url = "https://files.pythonhosted.org/packages/d3/56/3af21e43014eb40134dea004e8d0f1ef19d9596a39e4d497d5a7de01669f/nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7aa32fa5470cf754f72d1116c7cbc300b4e638d3ae5304cfa4a638a5b87161b1", size = 216451135 }, + { url = "https://files.pythonhosted.org/packages/06/1e/b8b7c2f4099a37b96af5c9bb158632ea9e5d9d27d7391d7eb8fc45236674/nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7556d9eca156e18184b94947ade0fba5bb47d69cec46bf8660fd2c71a4b48b73", size = 216561367 }, + { url = "https://files.pythonhosted.org/packages/43/ac/64c4316ba163e8217a99680c7605f779accffc6a4bcd0c778c12948d3707/nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:23749a6571191a215cb74d1cdbff4a86e7b19f1200c071b3fcf844a5bea23a2f", size = 216561357 }, + { url = "https://files.pythonhosted.org/packages/45/ef/876ad8e4260e1128e6d4aac803d9d51baf3791ebdb4a9b8d9b8db032b4b0/nvidia_cusparse_cu12-12.5.4.2-py3-none-win_amd64.whl", hash = "sha256:4acb8c08855a26d737398cba8fb6f8f5045d93f82612b4cfd84645a2332ccf20", size = 213712630 }, +] + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.5.9.5" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and sys_platform == 'darwin'", + 
"python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'darwin'", + "python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform == 'darwin'", + "python_full_version < '3.12' and platform_machine == 'aarch64' and sys_platform == 'linux'", +] +dependencies = [ + { name = "nvidia-nvjitlink-cu12", version = "12.9.41", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/a8/527f641cf3094d5ab550f820c7cfa71d81f472523bb289e6962a6aa79b45/nvidia_cusparse_cu12-12.5.9.5-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:248d302dba92860b85bc81aa43b7ed5726f1d63466c73d55bf764945514ddd94", size = 366343938 }, + { url = "https://files.pythonhosted.org/packages/a0/b3/8482b29fb64488db98eeb740a858afb5b94c1186ecc6ce719d734b819a86/nvidia_cusparse_cu12-12.5.9.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b2f816d34a7bceab4cb00f0eaadb6a99fc9c285204da6401b42b952e4d61b77b", size = 366449081 }, +] + +[[package]] +name = "nvidia-cusparselt-cu12" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/9a/72ef35b399b0e183bc2e8f6f558036922d453c4d8237dab26c666a04244b/nvidia_cusparselt_cu12-0.6.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:e5c8a26c36445dd2e6812f1177978a24e2d37cacce7e090f297a688d1ec44f46", size = 156785796 }, +] + +[[package]] +name = "nvidia-ml-py" +version = "12.575.51" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d2/4d/6f017814ed5ac28e08e1b8a62e3a258957da27582c89b7f8f8b15ac3d2e7/nvidia_ml_py-12.575.51.tar.gz", hash = 
"sha256:6490e93fea99eb4e966327ae18c6eec6256194c921f23459c8767aee28c54581", size = 46597 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/24/552ebea28f0570b9e65e62b50287a273804c9f997cc1c2dcd4e2d64b9e7d/nvidia_ml_py-12.575.51-py3-none-any.whl", hash = "sha256:eb8641800d98ce40a22f479873f34b482e214a7e80349c63be51c3919845446e", size = 47547 }, +] + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.26.2" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version >= '3.13' and sys_platform == 'emscripten'", + "python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'emscripten'", + "python_full_version < '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version < '3.12' and sys_platform == 'emscripten'", +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/5b/ca2f213f637305633814ae8c36b153220e40a07ea001966dcd87391f3acb/nvidia_nccl_cu12-2.26.2-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5c196e95e832ad30fbbb50381eb3cbd1fadd5675e587a548563993609af19522", size = 291671495 }, + { url = "https://files.pythonhosted.org/packages/67/ca/f42388aed0fddd64ade7493dbba36e1f534d4e6fdbdd355c6a90030ae028/nvidia_nccl_cu12-2.26.2-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:694cf3879a206553cc9d7dbda76b13efaf610fdb70a50cba303de1b0d1530ac6", size = 201319755 }, +] + 
+[[package]] +name = "nvidia-nccl-cu12" +version = "2.26.5" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and sys_platform == 'darwin'", + "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'darwin'", + "python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform == 'darwin'", + "python_full_version < '3.12' and platform_machine == 'aarch64' and sys_platform == 'linux'", +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/55/66/ed9d28946ead0fe1322df2f4fc6ea042340c0fe73b79a1419dc1fdbdd211/nvidia_nccl_cu12-2.26.5-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:adb1bf4adcc5a47f597738a0700da6aef61f8ea4251b375540ae138c7d239588", size = 318058262 }, + { url = "https://files.pythonhosted.org/packages/48/fb/ec4ac065d9b0d56f72eaf1d9b0df601e33da28197b32ca351dc05b342611/nvidia_nccl_cu12-2.26.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ea5ed3e053c735f16809bee7111deac62ac35b10128a8c102960a0462ce16cbe", size = 318069637 }, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.6.85" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version >= '3.13' and sys_platform == 'emscripten'", + "python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'emscripten'", + "python_full_version < 
'3.12' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux'", + "python_full_version < '3.12' and sys_platform == 'emscripten'", +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/9d/d7/c5383e47c7e9bf1c99d5bd2a8c935af2b6d705ad831a7ec5c97db4d82f4f/nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:eedc36df9e88b682efe4309aa16b5b4e78c2407eac59e8c10a6a47535164369a", size = 19744971 }, + { url = "https://files.pythonhosted.org/packages/31/db/dc71113d441f208cdfe7ae10d4983884e13f464a6252450693365e166dcf/nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf4eaa7d4b6b543ffd69d6abfb11efdeb2db48270d94dfd3a452c24150829e41", size = 19270338 }, + { url = "https://files.pythonhosted.org/packages/89/76/93c1467b1387387440a4d25102d86b7794535449b689f8e2dc22c1c8ff7f/nvidia_nvjitlink_cu12-12.6.85-py3-none-win_amd64.whl", hash = "sha256:e61120e52ed675747825cdd16febc6a0730537451d867ee58bee3853b1b13d1c", size = 161908572 }, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.9.41" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13' and sys_platform == 'darwin'", + "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'darwin'", + "python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform == 'darwin'", + "python_full_version < '3.12' and platform_machine == 'aarch64' and sys_platform == 'linux'", +] +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/86/69/01de7245968084eb73e50512daa72b36e27dfd2994bf268e6a205eff5093/nvidia_nvjitlink_cu12-12.9.41-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:c3a2cd87cecf3f0ca5e5df97115ede3a81efec1d4b7e2ec89d13f66834042930", size = 39748650 }, + { url = "https://files.pythonhosted.org/packages/2d/0a/9970b6e178a02aff42362ca2f75b9a8423690075dd8ceb068e28ff6e4435/nvidia_nvjitlink_cu12-12.9.41-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:631270891e78de08ebc669bb9ba4418b7899da9efb927fcf6fdff85c9507f54f", size = 39516557 }, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.6.77" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/56/9a/fff8376f8e3d084cd1530e1ef7b879bb7d6d265620c95c1b322725c694f4/nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b90bed3df379fa79afbd21be8e04a0314336b8ae16768b58f2d34cb1d04cd7d2", size = 89276 }, + { url = "https://files.pythonhosted.org/packages/9e/4e/0d0c945463719429b7bd21dece907ad0bde437a2ff12b9b12fee94722ab0/nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6574241a3ec5fdc9334353ab8c479fe75841dbe8f4532a8fc97ce63503330ba1", size = 89265 }, +] + +[[package]] +name = "oauthlib" +version = "3.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/fa/fbf4001037904031639e6bfbfc02badfc7e12f137a8afa254df6c4c8a670/oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918", size = 177352 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/80/cab10959dc1faead58dc8384a781dfbf93cb4d33d50988f7a69f1b7c9bbe/oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca", size = 151688 }, +] + +[[package]] +name = "omegaconf" +version = "2.3.0" +source = { 
registry = "https://pypi.org/simple" } +dependencies = [ + { name = "antlr4-python3-runtime" }, + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/09/48/6388f1bb9da707110532cb70ec4d2822858ddfb44f1cdf1233c20a80ea4b/omegaconf-2.3.0.tar.gz", hash = "sha256:d5d4b6d29955cc50ad50c46dc269bcd92c6e00f5f90d23ab5fee7bfca4ba4cc7", size = 3298120 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/94/1843518e420fa3ed6919835845df698c7e27e183cb997394e4a670973a65/omegaconf-2.3.0-py3-none-any.whl", hash = "sha256:7b4df175cdb08ba400f45cae3bdcae7ba8365db4d165fc65fd04b050ab63b46b", size = 79500 }, +] + +[[package]] +name = "opencv-python" +version = "4.11.0.86" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/17/06/68c27a523103dad5837dc5b87e71285280c4f098c60e4fe8a8db6486ab09/opencv-python-4.11.0.86.tar.gz", hash = "sha256:03d60ccae62304860d232272e4a4fda93c39d595780cb40b161b310244b736a4", size = 95171956 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/4d/53b30a2a3ac1f75f65a59eb29cf2ee7207ce64867db47036ad61743d5a23/opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_arm64.whl", hash = "sha256:432f67c223f1dc2824f5e73cdfcd9db0efc8710647d4e813012195dc9122a52a", size = 37326322 }, + { url = "https://files.pythonhosted.org/packages/3b/84/0a67490741867eacdfa37bc18df96e08a9d579583b419010d7f3da8ff503/opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_x86_64.whl", hash = "sha256:9d05ef13d23fe97f575153558653e2d6e87103995d54e6a35db3f282fe1f9c66", size = 56723197 }, + { url = "https://files.pythonhosted.org/packages/f3/bd/29c126788da65c1fb2b5fb621b7fed0ed5f9122aa22a0868c5e2c15c6d23/opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b92ae2c8852208817e6776ba1ea0d6b1e0a1b5431e971a2a0ddd2a8cc398202", size = 42230439 }, + { url = 
"https://files.pythonhosted.org/packages/2c/8b/90eb44a40476fa0e71e05a0283947cfd74a5d36121a11d926ad6f3193cc4/opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b02611523803495003bd87362db3e1d2a0454a6a63025dc6658a9830570aa0d", size = 62986597 }, + { url = "https://files.pythonhosted.org/packages/fb/d7/1d5941a9dde095468b288d989ff6539dd69cd429dbf1b9e839013d21b6f0/opencv_python-4.11.0.86-cp37-abi3-win32.whl", hash = "sha256:810549cb2a4aedaa84ad9a1c92fbfdfc14090e2749cedf2c1589ad8359aa169b", size = 29384337 }, + { url = "https://files.pythonhosted.org/packages/a4/7d/f1c30a92854540bf789e9cd5dde7ef49bbe63f855b85a2e6b3db8135c591/opencv_python-4.11.0.86-cp37-abi3-win_amd64.whl", hash = "sha256:085ad9b77c18853ea66283e98affefe2de8cc4c1f43eda4c100cf9b2721142ec", size = 39488044 }, +] + +[[package]] +name = "opencv-python-headless" +version = "4.11.0.86" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/36/2f/5b2b3ba52c864848885ba988f24b7f105052f68da9ab0e693cc7c25b0b30/opencv-python-headless-4.11.0.86.tar.gz", hash = "sha256:996eb282ca4b43ec6a3972414de0e2331f5d9cda2b41091a49739c19fb843798", size = 95177929 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/53/2c50afa0b1e05ecdb4603818e85f7d174e683d874ef63a6abe3ac92220c8/opencv_python_headless-4.11.0.86-cp37-abi3-macosx_13_0_arm64.whl", hash = "sha256:48128188ade4a7e517237c8e1e11a9cdf5c282761473383e77beb875bb1e61ca", size = 37326460 }, + { url = "https://files.pythonhosted.org/packages/3b/43/68555327df94bb9b59a1fd645f63fafb0762515344d2046698762fc19d58/opencv_python_headless-4.11.0.86-cp37-abi3-macosx_13_0_x86_64.whl", hash = "sha256:a66c1b286a9de872c343ee7c3553b084244299714ebb50fbdcd76f07ebbe6c81", size = 56723330 }, + { url = 
"https://files.pythonhosted.org/packages/45/be/1438ce43ebe65317344a87e4b150865c5585f4c0db880a34cdae5ac46881/opencv_python_headless-4.11.0.86-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6efabcaa9df731f29e5ea9051776715b1bdd1845d7c9530065c7951d2a2899eb", size = 29487060 }, + { url = "https://files.pythonhosted.org/packages/dd/5c/c139a7876099916879609372bfa513b7f1257f7f1a908b0bdc1c2328241b/opencv_python_headless-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e0a27c19dd1f40ddff94976cfe43066fbbe9dfbb2ec1907d66c19caef42a57b", size = 49969856 }, + { url = "https://files.pythonhosted.org/packages/95/dd/ed1191c9dc91abcc9f752b499b7928aacabf10567bb2c2535944d848af18/opencv_python_headless-4.11.0.86-cp37-abi3-win32.whl", hash = "sha256:f447d8acbb0b6f2808da71fddd29c1cdd448d2bc98f72d9bb78a7a898fc9621b", size = 29324425 }, + { url = "https://files.pythonhosted.org/packages/86/8a/69176a64335aed183529207ba8bc3d329c2999d852b4f3818027203f50e6/opencv_python_headless-4.11.0.86-cp37-abi3-win_amd64.whl", hash = "sha256:6c304df9caa7a6a5710b91709dd4786bf20a74d57672b3c31f7033cc638174ca", size = 39402386 }, +] + +[[package]] +name = "openpi" +version = "0.1.0" +source = { editable = "." 
} +dependencies = [ + { name = "augmax" }, + { name = "beartype" }, + { name = "dm-tree", version = "0.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.13'" }, + { name = "dm-tree", version = "0.1.9", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.13'" }, + { name = "einops" }, + { name = "equinox" }, + { name = "filelock" }, + { name = "flatbuffers" }, + { name = "flax" }, + { name = "fsspec", extra = ["gcs"] }, + { name = "gym-aloha" }, + { name = "imageio" }, + { name = "jax", extra = ["cuda12"] }, + { name = "jaxtyping" }, + { name = "lerobot" }, + { name = "ml-collections" }, + { name = "numpy" }, + { name = "numpydantic" }, + { name = "opencv-python" }, + { name = "openpi-client" }, + { name = "orbax-checkpoint" }, + { name = "pillow" }, + { name = "polars" }, + { name = "rich" }, + { name = "sentencepiece" }, + { name = "torch" }, + { name = "tqdm-loggable" }, + { name = "transformers" }, + { name = "treescope" }, + { name = "typing-extensions" }, + { name = "tyro" }, + { name = "wandb" }, +] + +[package.dev-dependencies] +dev = [ + { name = "ipykernel" }, + { name = "ipywidgets" }, + { name = "matplotlib" }, + { name = "pre-commit" }, + { name = "pynvml" }, + { name = "pytest" }, + { name = "ruff" }, +] +rlds = [ + { name = "dlimp" }, + { name = "tensorflow-cpu" }, + { name = "tensorflow-datasets" }, +] + +[package.metadata] +requires-dist = [ + { name = "augmax", specifier = ">=0.3.4" }, + { name = "beartype", specifier = "==0.19.0" }, + { name = "dm-tree", specifier = ">=0.1.8" }, + { name = "einops", specifier = ">=0.8.0" }, + { name = "equinox", specifier = ">=0.11.8" }, + { name = "filelock", specifier = ">=3.16.1" }, + { name = "flatbuffers", specifier = ">=24.3.25" }, + { name = "flax", specifier = "==0.10.2" }, + { name = "fsspec", extras = ["gcs"], specifier = ">=2024.6.0" }, + { name = "gym-aloha", specifier = ">=0.1.1" }, + { name = "imageio", specifier = 
">=2.36.1" }, + { name = "jax", extras = ["cuda12"], specifier = "==0.5.3" }, + { name = "jaxtyping", specifier = "==0.2.36" }, + { name = "lerobot", git = "https://github.com/huggingface/lerobot?rev=0cf864870cf29f4738d3ade893e6fd13fbd7cdb5" }, + { name = "ml-collections", specifier = "==1.0.0" }, + { name = "numpy", specifier = ">=1.22.4,<2.0.0" }, + { name = "numpydantic", specifier = ">=1.6.6" }, + { name = "opencv-python", specifier = ">=4.10.0.84" }, + { name = "openpi-client", editable = "packages/openpi-client" }, + { name = "orbax-checkpoint", specifier = "==0.11.13" }, + { name = "pillow", specifier = ">=11.0.0" }, + { name = "polars", specifier = ">=1.30.0" }, + { name = "rich", specifier = ">=14.0.0" }, + { name = "sentencepiece", specifier = ">=0.2.0" }, + { name = "torch", specifier = "==2.7.1" }, + { name = "tqdm-loggable", specifier = ">=0.2" }, + { name = "transformers", specifier = "==4.53.2" }, + { name = "treescope", specifier = ">=0.1.7" }, + { name = "typing-extensions", specifier = ">=4.12.2" }, + { name = "tyro", specifier = ">=0.9.5" }, + { name = "wandb", specifier = ">=0.19.1" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "ipykernel", specifier = ">=6.29.5" }, + { name = "ipywidgets", specifier = ">=8.1.5" }, + { name = "matplotlib", specifier = ">=3.10.0" }, + { name = "pre-commit", specifier = ">=4.0.1" }, + { name = "pynvml", specifier = ">=12.0.0" }, + { name = "pytest", specifier = ">=8.3.4" }, + { name = "ruff", specifier = ">=0.8.6" }, +] +rlds = [ + { name = "dlimp", git = "https://github.com/kvablack/dlimp?rev=ad72ce3a9b414db2185bc0b38461d4101a65477a" }, + { name = "tensorflow-cpu", specifier = "==2.15.0" }, + { name = "tensorflow-datasets", specifier = "==4.9.9" }, +] + +[[package]] +name = "openpi-client" +version = "0.1.0" +source = { editable = "packages/openpi-client" } +dependencies = [ + { name = "dm-tree", version = "0.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= 
'3.13'" }, + { name = "dm-tree", version = "0.1.9", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.13'" }, + { name = "msgpack" }, + { name = "numpy" }, + { name = "pillow" }, + { name = "tree" }, + { name = "websockets" }, +] + +[package.dev-dependencies] +dev = [ + { name = "pytest" }, +] + +[package.metadata] +requires-dist = [ + { name = "dm-tree", specifier = ">=0.1.8" }, + { name = "msgpack", specifier = ">=1.0.5" }, + { name = "numpy", specifier = ">=1.22.4,<2.0.0" }, + { name = "pillow", specifier = ">=9.0.0" }, + { name = "tree", specifier = ">=0.2.4" }, + { name = "websockets", specifier = ">=11.0" }, +] + +[package.metadata.requires-dev] +dev = [{ name = "pytest", specifier = ">=8.3.4" }] + +[[package]] +name = "opt-einsum" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/b9/2ac072041e899a52f20cf9510850ff58295003aa75525e58343591b0cbfb/opt_einsum-3.4.0.tar.gz", hash = "sha256:96ca72f1b886d148241348783498194c577fa30a8faac108586b14f1ba4473ac", size = 63004 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl", hash = "sha256:69bb92469f86a1565195ece4ac0323943e83477171b91d24c35afe028a90d7cd", size = 71932 }, +] + +[[package]] +name = "optax" +version = "0.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "chex" }, + { name = "etils", extra = ["epy"] }, + { name = "jax" }, + { name = "jaxlib" }, + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/af/b5/f88a0d851547b2e6b2c7e7e6509ad66236b3e7019f1f095bb03dbaa61fa1/optax-0.2.4.tar.gz", hash = "sha256:4e05d3d5307e6dde4c319187ae36e6cd3a0c035d4ed25e9e992449a304f47336", size = 229717 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/5c/24/28d0bb21600a78e46754947333ec9a297044af884d360092eb8561575fe9/optax-0.2.4-py3-none-any.whl", hash = "sha256:db35c04e50b52596662efb002334de08c2a0a74971e4da33f467e84fac08886a", size = 319212 }, +] + +[[package]] +name = "orbax-checkpoint" +version = "0.11.13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "etils", extra = ["epath", "epy"] }, + { name = "humanize" }, + { name = "jax" }, + { name = "msgpack" }, + { name = "nest-asyncio" }, + { name = "numpy" }, + { name = "protobuf" }, + { name = "pyyaml" }, + { name = "simplejson" }, + { name = "tensorstore" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/cb/e122160888cb922caabfd67582d402e6202fc7383c64f2e05a81727cef6a/orbax_checkpoint-0.11.13.tar.gz", hash = "sha256:6ce6f4458d0755a7ae556d4da3b2e3a943d4a830aeec2f98881643f1997e11bc", size = 316151 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/57/700709ca012b8595230dd2a004fbe284a57e6838f966d58c956d4529a2db/orbax_checkpoint-0.11.13-py3-none-any.whl", hash = "sha256:096eb6f475857d7aa73235989cdfe5d34c425628d24be881686dfbc3b566f495", size = 442700 }, +] + +[[package]] +name = "orderly-set" +version = "5.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/03/4a/38030da31c13dcd5a531490006e63a0954083fb115113be9393179738e25/orderly_set-5.4.1.tar.gz", hash = "sha256:a1fb5a4fdc5e234e9e8d8e5c1bbdbc4540f4dfe50d12bf17c8bc5dbf1c9c878d", size = 20943 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/bc/e0dfb4db9210d92b44e49d6e61ba5caefbd411958357fa9d7ff489eeb835/orderly_set-5.4.1-py3-none-any.whl", hash = "sha256:b5e21d21680bd9ef456885db800c5cb4f76a03879880c0175e1b077fb166fd83", size = 12339 }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469 }, +] + +[[package]] +name = "pandas" +version = "2.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "python-dateutil" }, + { name = "pytz" }, + { name = "tzdata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9c/d6/9f8431bacc2e19dca897724cd097b1bb224a6ad5433784a44b587c7c13af/pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667", size = 4399213 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/44/d9502bf0ed197ba9bf1103c9867d5904ddcaf869e52329787fc54ed70cc8/pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039", size = 12602222 }, + { url = "https://files.pythonhosted.org/packages/52/11/9eac327a38834f162b8250aab32a6781339c69afe7574368fffe46387edf/pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd", size = 11321274 }, + { url = "https://files.pythonhosted.org/packages/45/fb/c4beeb084718598ba19aa9f5abbc8aed8b42f90930da861fcb1acdb54c3a/pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698", size = 15579836 }, + { url = "https://files.pythonhosted.org/packages/cd/5f/4dba1d39bb9c38d574a9a22548c540177f78ea47b32f99c0ff2ec499fac5/pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc", size = 13058505 }, + { url = "https://files.pythonhosted.org/packages/b9/57/708135b90391995361636634df1f1130d03ba456e95bcf576fada459115a/pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3", size = 16744420 }, + { url = "https://files.pythonhosted.org/packages/86/4a/03ed6b7ee323cf30404265c284cee9c65c56a212e0a08d9ee06984ba2240/pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32", size = 14440457 }, + { url = "https://files.pythonhosted.org/packages/ed/8c/87ddf1fcb55d11f9f847e3c69bb1c6f8e46e2f40ab1a2d2abadb2401b007/pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5", size = 11617166 }, + { url = "https://files.pythonhosted.org/packages/17/a3/fb2734118db0af37ea7433f57f722c0a56687e14b14690edff0cdb4b7e58/pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9", size = 12529893 }, + { url = "https://files.pythonhosted.org/packages/e1/0c/ad295fd74bfac85358fd579e271cded3ac969de81f62dd0142c426b9da91/pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4", size = 11363475 }, + { url = "https://files.pythonhosted.org/packages/c6/2a/4bba3f03f7d07207481fed47f5b35f556c7441acddc368ec43d6643c5777/pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3", size = 15188645 }, + { url = "https://files.pythonhosted.org/packages/38/f8/d8fddee9ed0d0c0f4a2132c1dfcf0e3e53265055da8df952a53e7eaf178c/pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319", size = 12739445 }, + { url = "https://files.pythonhosted.org/packages/20/e8/45a05d9c39d2cea61ab175dbe6a2de1d05b679e8de2011da4ee190d7e748/pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8", size = 16359235 }, + { url = "https://files.pythonhosted.org/packages/1d/99/617d07a6a5e429ff90c90da64d428516605a1ec7d7bea494235e1c3882de/pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a", size = 14056756 }, + { url = "https://files.pythonhosted.org/packages/29/d4/1244ab8edf173a10fd601f7e13b9566c1b525c4f365d6bee918e68381889/pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13", size = 11504248 }, + { url = "https://files.pythonhosted.org/packages/64/22/3b8f4e0ed70644e85cfdcd57454686b9057c6c38d2f74fe4b8bc2527214a/pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015", size = 12477643 }, + { url = "https://files.pythonhosted.org/packages/e4/93/b3f5d1838500e22c8d793625da672f3eec046b1a99257666c94446969282/pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28", size = 11281573 }, + { url = "https://files.pythonhosted.org/packages/f5/94/6c79b07f0e5aab1dcfa35a75f4817f5c4f677931d4234afcd75f0e6a66ca/pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0", size = 15196085 }, + { url = "https://files.pythonhosted.org/packages/e8/31/aa8da88ca0eadbabd0a639788a6da13bb2ff6edbbb9f29aa786450a30a91/pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24", size = 12711809 }, + { url = "https://files.pythonhosted.org/packages/ee/7c/c6dbdb0cb2a4344cacfb8de1c5808ca885b2e4dcfde8008266608f9372af/pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659", size = 16356316 }, + { url = "https://files.pythonhosted.org/packages/57/b7/8b757e7d92023b832869fa8881a992696a0bfe2e26f72c9ae9f255988d42/pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb", size = 14022055 }, + { url = "https://files.pythonhosted.org/packages/3b/bc/4b18e2b8c002572c5a441a64826252ce5da2aa738855747247a971988043/pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d", size = 11481175 }, + { url = "https://files.pythonhosted.org/packages/76/a3/a5d88146815e972d40d19247b2c162e88213ef51c7c25993942c39dbf41d/pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468", size = 12615650 }, + { url = "https://files.pythonhosted.org/packages/9c/8c/f0fd18f6140ddafc0c24122c8a964e48294acc579d47def376fef12bcb4a/pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18", size = 11290177 }, + { url = "https://files.pythonhosted.org/packages/ed/f9/e995754eab9c0f14c6777401f7eece0943840b7a9fc932221c19d1abee9f/pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2", size = 14651526 }, + { url = "https://files.pythonhosted.org/packages/25/b0/98d6ae2e1abac4f35230aa756005e8654649d305df9a28b16b9ae4353bff/pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4", size = 11871013 }, + { url = "https://files.pythonhosted.org/packages/cc/57/0f72a10f9db6a4628744c8e8f0df4e6e21de01212c7c981d31e50ffc8328/pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d", size = 15711620 }, + { url = "https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436 }, +] + +[[package]] +name = "parso" +version = "0.8.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/94/68e2e17afaa9169cf6412ab0f28623903be73d1b32e208d9e8e541bb086d/parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d", size = 400609 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650 }, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess", marker = "sys_platform != 'emscripten'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772 }, +] + +[[package]] +name = "pfzy" +version 
= "0.3.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d9/5a/32b50c077c86bfccc7bed4881c5a2b823518f5450a30e639db5d3711952e/pfzy-0.3.4.tar.gz", hash = "sha256:717ea765dd10b63618e7298b2d98efd819e0b30cd5905c9707223dceeb94b3f1", size = 8396 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8c/d7/8ff98376b1acc4503253b685ea09981697385ce344d4e3935c2af49e044d/pfzy-0.3.4-py3-none-any.whl", hash = "sha256:5f50d5b2b3207fa72e7ec0ef08372ef652685470974a107d0d4999fc5a903a96", size = 8537 }, +] + +[[package]] +name = "pillow" +version = "11.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/cb/bb5c01fcd2a69335b86c22142b2bccfc3464087efb7fd382eee5ffc7fdf7/pillow-11.2.1.tar.gz", hash = "sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6", size = 47026707 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/08/3fbf4b98924c73037a8e8b4c2c774784805e0fb4ebca6c5bb60795c40125/pillow-11.2.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35ca289f712ccfc699508c4658a1d14652e8033e9b69839edf83cbdd0ba39e70", size = 3198450 }, + { url = "https://files.pythonhosted.org/packages/84/92/6505b1af3d2849d5e714fc75ba9e69b7255c05ee42383a35a4d58f576b16/pillow-11.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0409af9f829f87a2dfb7e259f78f317a5351f2045158be321fd135973fff7bf", size = 3030550 }, + { url = "https://files.pythonhosted.org/packages/3c/8c/ac2f99d2a70ff966bc7eb13dacacfaab57c0549b2ffb351b6537c7840b12/pillow-11.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4e5c5edee874dce4f653dbe59db7c73a600119fbea8d31f53423586ee2aafd7", size = 4415018 }, + { url = "https://files.pythonhosted.org/packages/1f/e3/0a58b5d838687f40891fff9cbaf8669f90c96b64dc8f91f87894413856c6/pillow-11.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:b93a07e76d13bff9444f1a029e0af2964e654bfc2e2c2d46bfd080df5ad5f3d8", size = 4498006 }, + { url = "https://files.pythonhosted.org/packages/21/f5/6ba14718135f08fbfa33308efe027dd02b781d3f1d5c471444a395933aac/pillow-11.2.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:e6def7eed9e7fa90fde255afaf08060dc4b343bbe524a8f69bdd2a2f0018f600", size = 4517773 }, + { url = "https://files.pythonhosted.org/packages/20/f2/805ad600fc59ebe4f1ba6129cd3a75fb0da126975c8579b8f57abeb61e80/pillow-11.2.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8f4f3724c068be008c08257207210c138d5f3731af6c155a81c2b09a9eb3a788", size = 4607069 }, + { url = "https://files.pythonhosted.org/packages/71/6b/4ef8a288b4bb2e0180cba13ca0a519fa27aa982875882392b65131401099/pillow-11.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a0a6709b47019dff32e678bc12c63008311b82b9327613f534e496dacaefb71e", size = 4583460 }, + { url = "https://files.pythonhosted.org/packages/62/ae/f29c705a09cbc9e2a456590816e5c234382ae5d32584f451c3eb41a62062/pillow-11.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f6b0c664ccb879109ee3ca702a9272d877f4fcd21e5eb63c26422fd6e415365e", size = 4661304 }, + { url = "https://files.pythonhosted.org/packages/6e/1a/c8217b6f2f73794a5e219fbad087701f412337ae6dbb956db37d69a9bc43/pillow-11.2.1-cp311-cp311-win32.whl", hash = "sha256:cc5d875d56e49f112b6def6813c4e3d3036d269c008bf8aef72cd08d20ca6df6", size = 2331809 }, + { url = "https://files.pythonhosted.org/packages/e2/72/25a8f40170dc262e86e90f37cb72cb3de5e307f75bf4b02535a61afcd519/pillow-11.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:0f5c7eda47bf8e3c8a283762cab94e496ba977a420868cb819159980b6709193", size = 2676338 }, + { url = "https://files.pythonhosted.org/packages/06/9e/76825e39efee61efea258b479391ca77d64dbd9e5804e4ad0fa453b4ba55/pillow-11.2.1-cp311-cp311-win_arm64.whl", hash = "sha256:4d375eb838755f2528ac8cbc926c3e31cc49ca4ad0cf79cff48b20e30634a4a7", size = 2414918 }, + { url = 
"https://files.pythonhosted.org/packages/c7/40/052610b15a1b8961f52537cc8326ca6a881408bc2bdad0d852edeb6ed33b/pillow-11.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f", size = 3190185 }, + { url = "https://files.pythonhosted.org/packages/e5/7e/b86dbd35a5f938632093dc40d1682874c33dcfe832558fc80ca56bfcb774/pillow-11.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b", size = 3030306 }, + { url = "https://files.pythonhosted.org/packages/a4/5c/467a161f9ed53e5eab51a42923c33051bf8d1a2af4626ac04f5166e58e0c/pillow-11.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d", size = 4416121 }, + { url = "https://files.pythonhosted.org/packages/62/73/972b7742e38ae0e2ac76ab137ca6005dcf877480da0d9d61d93b613065b4/pillow-11.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4", size = 4501707 }, + { url = "https://files.pythonhosted.org/packages/e4/3a/427e4cb0b9e177efbc1a84798ed20498c4f233abde003c06d2650a6d60cb/pillow-11.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d", size = 4522921 }, + { url = "https://files.pythonhosted.org/packages/fe/7c/d8b1330458e4d2f3f45d9508796d7caf0c0d3764c00c823d10f6f1a3b76d/pillow-11.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4", size = 4612523 }, + { url = "https://files.pythonhosted.org/packages/b3/2f/65738384e0b1acf451de5a573d8153fe84103772d139e1e0bdf1596be2ea/pillow-11.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443", size = 4587836 }, + { url = 
"https://files.pythonhosted.org/packages/6a/c5/e795c9f2ddf3debb2dedd0df889f2fe4b053308bb59a3cc02a0cd144d641/pillow-11.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c", size = 4669390 }, + { url = "https://files.pythonhosted.org/packages/96/ae/ca0099a3995976a9fce2f423166f7bff9b12244afdc7520f6ed38911539a/pillow-11.2.1-cp312-cp312-win32.whl", hash = "sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3", size = 2332309 }, + { url = "https://files.pythonhosted.org/packages/7c/18/24bff2ad716257fc03da964c5e8f05d9790a779a8895d6566e493ccf0189/pillow-11.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941", size = 2676768 }, + { url = "https://files.pythonhosted.org/packages/da/bb/e8d656c9543276517ee40184aaa39dcb41e683bca121022f9323ae11b39d/pillow-11.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb", size = 2415087 }, + { url = "https://files.pythonhosted.org/packages/36/9c/447528ee3776e7ab8897fe33697a7ff3f0475bb490c5ac1456a03dc57956/pillow-11.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fdec757fea0b793056419bca3e9932eb2b0ceec90ef4813ea4c1e072c389eb28", size = 3190098 }, + { url = "https://files.pythonhosted.org/packages/b5/09/29d5cd052f7566a63e5b506fac9c60526e9ecc553825551333e1e18a4858/pillow-11.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0e130705d568e2f43a17bcbe74d90958e8a16263868a12c3e0d9c8162690830", size = 3030166 }, + { url = "https://files.pythonhosted.org/packages/71/5d/446ee132ad35e7600652133f9c2840b4799bbd8e4adba881284860da0a36/pillow-11.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bdb5e09068332578214cadd9c05e3d64d99e0e87591be22a324bdbc18925be0", size = 4408674 }, + { url = 
"https://files.pythonhosted.org/packages/69/5f/cbe509c0ddf91cc3a03bbacf40e5c2339c4912d16458fcb797bb47bcb269/pillow-11.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d189ba1bebfbc0c0e529159631ec72bb9e9bc041f01ec6d3233d6d82eb823bc1", size = 4496005 }, + { url = "https://files.pythonhosted.org/packages/f9/b3/dd4338d8fb8a5f312021f2977fb8198a1184893f9b00b02b75d565c33b51/pillow-11.2.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:191955c55d8a712fab8934a42bfefbf99dd0b5875078240943f913bb66d46d9f", size = 4518707 }, + { url = "https://files.pythonhosted.org/packages/13/eb/2552ecebc0b887f539111c2cd241f538b8ff5891b8903dfe672e997529be/pillow-11.2.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:ad275964d52e2243430472fc5d2c2334b4fc3ff9c16cb0a19254e25efa03a155", size = 4610008 }, + { url = "https://files.pythonhosted.org/packages/72/d1/924ce51bea494cb6e7959522d69d7b1c7e74f6821d84c63c3dc430cbbf3b/pillow-11.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:750f96efe0597382660d8b53e90dd1dd44568a8edb51cb7f9d5d918b80d4de14", size = 4585420 }, + { url = "https://files.pythonhosted.org/packages/43/ab/8f81312d255d713b99ca37479a4cb4b0f48195e530cdc1611990eb8fd04b/pillow-11.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fe15238d3798788d00716637b3d4e7bb6bde18b26e5d08335a96e88564a36b6b", size = 4667655 }, + { url = "https://files.pythonhosted.org/packages/94/86/8f2e9d2dc3d308dfd137a07fe1cc478df0a23d42a6c4093b087e738e4827/pillow-11.2.1-cp313-cp313-win32.whl", hash = "sha256:3fe735ced9a607fee4f481423a9c36701a39719252a9bb251679635f99d0f7d2", size = 2332329 }, + { url = "https://files.pythonhosted.org/packages/6d/ec/1179083b8d6067a613e4d595359b5fdea65d0a3b7ad623fee906e1b3c4d2/pillow-11.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:74ee3d7ecb3f3c05459ba95eed5efa28d6092d751ce9bf20e3e253a4e497e691", size = 2676388 }, + { url = 
"https://files.pythonhosted.org/packages/23/f1/2fc1e1e294de897df39fa8622d829b8828ddad938b0eaea256d65b84dd72/pillow-11.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:5119225c622403afb4b44bad4c1ca6c1f98eed79db8d3bc6e4e160fc6339d66c", size = 2414950 }, + { url = "https://files.pythonhosted.org/packages/c4/3e/c328c48b3f0ead7bab765a84b4977acb29f101d10e4ef57a5e3400447c03/pillow-11.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8ce2e8411c7aaef53e6bb29fe98f28cd4fbd9a1d9be2eeea434331aac0536b22", size = 3192759 }, + { url = "https://files.pythonhosted.org/packages/18/0e/1c68532d833fc8b9f404d3a642991441d9058eccd5606eab31617f29b6d4/pillow-11.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9ee66787e095127116d91dea2143db65c7bb1e232f617aa5957c0d9d2a3f23a7", size = 3033284 }, + { url = "https://files.pythonhosted.org/packages/b7/cb/6faf3fb1e7705fd2db74e070f3bf6f88693601b0ed8e81049a8266de4754/pillow-11.2.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9622e3b6c1d8b551b6e6f21873bdcc55762b4b2126633014cea1803368a9aa16", size = 4445826 }, + { url = "https://files.pythonhosted.org/packages/07/94/8be03d50b70ca47fb434a358919d6a8d6580f282bbb7af7e4aa40103461d/pillow-11.2.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63b5dff3a68f371ea06025a1a6966c9a1e1ee452fc8020c2cd0ea41b83e9037b", size = 4527329 }, + { url = "https://files.pythonhosted.org/packages/fd/a4/bfe78777076dc405e3bd2080bc32da5ab3945b5a25dc5d8acaa9de64a162/pillow-11.2.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:31df6e2d3d8fc99f993fd253e97fae451a8db2e7207acf97859732273e108406", size = 4549049 }, + { url = "https://files.pythonhosted.org/packages/65/4d/eaf9068dc687c24979e977ce5677e253624bd8b616b286f543f0c1b91662/pillow-11.2.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:062b7a42d672c45a70fa1f8b43d1d38ff76b63421cbbe7f88146b39e8a558d91", size = 4635408 }, + { url = 
"https://files.pythonhosted.org/packages/1d/26/0fd443365d9c63bc79feb219f97d935cd4b93af28353cba78d8e77b61719/pillow-11.2.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4eb92eca2711ef8be42fd3f67533765d9fd043b8c80db204f16c8ea62ee1a751", size = 4614863 }, + { url = "https://files.pythonhosted.org/packages/49/65/dca4d2506be482c2c6641cacdba5c602bc76d8ceb618fd37de855653a419/pillow-11.2.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f91ebf30830a48c825590aede79376cb40f110b387c17ee9bd59932c961044f9", size = 4692938 }, + { url = "https://files.pythonhosted.org/packages/b3/92/1ca0c3f09233bd7decf8f7105a1c4e3162fb9142128c74adad0fb361b7eb/pillow-11.2.1-cp313-cp313t-win32.whl", hash = "sha256:e0b55f27f584ed623221cfe995c912c61606be8513bfa0e07d2c674b4516d9dd", size = 2335774 }, + { url = "https://files.pythonhosted.org/packages/a5/ac/77525347cb43b83ae905ffe257bbe2cc6fd23acb9796639a1f56aa59d191/pillow-11.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:36d6b82164c39ce5482f649b437382c0fb2395eabc1e2b1702a6deb8ad647d6e", size = 2681895 }, + { url = "https://files.pythonhosted.org/packages/67/32/32dc030cfa91ca0fc52baebbba2e009bb001122a1daa8b6a79ad830b38d3/pillow-11.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:225c832a13326e34f212d2072982bb1adb210e0cc0b153e688743018c94a2681", size = 2417234 }, + { url = "https://files.pythonhosted.org/packages/a4/ad/2613c04633c7257d9481ab21d6b5364b59fc5d75faafd7cb8693523945a3/pillow-11.2.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:80f1df8dbe9572b4b7abdfa17eb5d78dd620b1d55d9e25f834efdbee872d3aed", size = 3181734 }, + { url = "https://files.pythonhosted.org/packages/a4/fd/dcdda4471ed667de57bb5405bb42d751e6cfdd4011a12c248b455c778e03/pillow-11.2.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ea926cfbc3957090becbcbbb65ad177161a2ff2ad578b5a6ec9bb1e1cd78753c", size = 2999841 }, + { url = 
"https://files.pythonhosted.org/packages/ac/89/8a2536e95e77432833f0db6fd72a8d310c8e4272a04461fb833eb021bf94/pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:738db0e0941ca0376804d4de6a782c005245264edaa253ffce24e5a15cbdc7bd", size = 3437470 }, + { url = "https://files.pythonhosted.org/packages/9d/8f/abd47b73c60712f88e9eda32baced7bfc3e9bd6a7619bb64b93acff28c3e/pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9db98ab6565c69082ec9b0d4e40dd9f6181dab0dd236d26f7a50b8b9bfbd5076", size = 3460013 }, + { url = "https://files.pythonhosted.org/packages/f6/20/5c0a0aa83b213b7a07ec01e71a3d6ea2cf4ad1d2c686cc0168173b6089e7/pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:036e53f4170e270ddb8797d4c590e6dd14d28e15c7da375c18978045f7e6c37b", size = 3527165 }, + { url = "https://files.pythonhosted.org/packages/58/0e/2abab98a72202d91146abc839e10c14f7cf36166f12838ea0c4db3ca6ecb/pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:14f73f7c291279bd65fda51ee87affd7c1e097709f7fdd0188957a16c264601f", size = 3571586 }, + { url = "https://files.pythonhosted.org/packages/21/2c/5e05f58658cf49b6667762cca03d6e7d85cededde2caf2ab37b81f80e574/pillow-11.2.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:208653868d5c9ecc2b327f9b9ef34e0e42a4cdd172c2988fd81d62d2bc9bc044", size = 2674751 }, +] + +[[package]] +name = "platformdirs" +version = "4.3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/8b/3c73abc9c759ecd3f1f7ceff6685840859e8070c4d947c93fae71f6a0bf2/platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc", size = 21362 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/39/979e8e21520d4e47a0bbe349e2713c0aac6f3d853d0e5b34d76206c439aa/platformdirs-4.3.8-py3-none-any.whl", hash = 
"sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4", size = 18567 }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538 }, +] + +[[package]] +name = "polars" +version = "1.30.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/82/b6/8dbdf626c0705a57f052708c9fc0860ffc2aa97955930d5faaf6a66fcfd3/polars-1.30.0.tar.gz", hash = "sha256:dfe94ae84a5efd9ba74e616e3e125b24ca155494a931890a8f17480737c4db45", size = 4668318 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/48/e9b2cb379abcc9f7aff2e701098fcdb9fe6d85dc4ad4cec7b35d39c70951/polars-1.30.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:4c33bc97c29b7112f0e689a2f8a33143973a3ff466c70b25c7fd1880225de6dd", size = 35704342 }, + { url = "https://files.pythonhosted.org/packages/36/ca/f545f61282f75eea4dfde4db2944963dcd59abd50c20e33a1c894da44dad/polars-1.30.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:e3d05914c364b8e39a5b10dcf97e84d76e516b3b1693880bf189a93aab3ca00d", size = 32459857 }, + { url = "https://files.pythonhosted.org/packages/76/20/e018cd87d7cb6f8684355f31f4e193222455a6e8f7b942f4a2934f5969c7/polars-1.30.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a52af3862082b868c1febeae650af8ae8a2105d2cb28f0449179a7b44f54ccf", size = 36267243 }, + { url = 
"https://files.pythonhosted.org/packages/cb/e7/b88b973021be07b13d91b9301cc14392c994225ef5107a32a8ffd3fd6424/polars-1.30.0-cp39-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:ffb3ef133454275d4254442257c5f71dd6e393ce365c97997dadeb6fa9d6d4b5", size = 33416871 }, + { url = "https://files.pythonhosted.org/packages/dd/7c/d46d4381adeac537b8520b653dc30cb8b7edbf59883d71fbb989e9005de1/polars-1.30.0-cp39-abi3-win_amd64.whl", hash = "sha256:c26b633a9bd530c5fc09d317fca3bb3e16c772bd7df7549a9d8ec1934773cc5d", size = 36363630 }, + { url = "https://files.pythonhosted.org/packages/fb/b5/5056d0c12aadb57390d0627492bef8b1abf3549474abb9ae0fd4e2bfa885/polars-1.30.0-cp39-abi3-win_arm64.whl", hash = "sha256:476f1bde65bc7b4d9f80af370645c2981b5798d67c151055e58534e89e96f2a8", size = 32643590 }, +] + +[[package]] +name = "pre-commit" +version = "4.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cfgv" }, + { name = "identify" }, + { name = "nodeenv" }, + { name = "pyyaml" }, + { name = "virtualenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/39/679ca9b26c7bb2999ff122d50faa301e49af82ca9c066ec061cfbc0c6784/pre_commit-4.2.0.tar.gz", hash = "sha256:601283b9757afd87d40c4c4a9b2b5de9637a8ea02eaff7adc2d0fb4e04841146", size = 193424 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/74/a88bf1b1efeae488a0c0b7bdf71429c313722d1fc0f377537fbe554e6180/pre_commit-4.2.0-py2.py3-none-any.whl", hash = "sha256:a009ca7205f1eb497d10b845e52c838a98b6cdd2102a6c8e4540e94ee75c58bd", size = 220707 }, +] + +[[package]] +name = "promise" +version = "2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cf/9c/fb5d48abfe5d791cd496e4242ebcf87a4bb2e0c3dcd6e0ae68c11426a528/promise-2.3.tar.gz", hash = "sha256:dfd18337c523ba4b6a58801c164c1904a9d4d1b1747c7d5dbf45b693a49d93d0", size = 19534 } + +[[package]] +name = "prompt-toolkit" +version = "3.0.51" 
+source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/6e/9d084c929dfe9e3bfe0c6a47e31f78a25c54627d64a66e884a8bf5474f1c/prompt_toolkit-3.0.51.tar.gz", hash = "sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed", size = 428940 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/4f/5249960887b1fbe561d9ff265496d170b55a735b76724f10ef19f9e40716/prompt_toolkit-3.0.51-py3-none-any.whl", hash = "sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07", size = 387810 }, +] + +[[package]] +name = "propcache" +version = "0.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/07/c8/fdc6686a986feae3541ea23dcaa661bd93972d3940460646c6bb96e21c40/propcache-0.3.1.tar.gz", hash = "sha256:40d980c33765359098837527e18eddefc9a24cea5b45e078a7f3bb5b032c6ecf", size = 43651 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/90/0f/5a5319ee83bd651f75311fcb0c492c21322a7fc8f788e4eef23f44243427/propcache-0.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7f30241577d2fef2602113b70ef7231bf4c69a97e04693bde08ddab913ba0ce5", size = 80243 }, + { url = "https://files.pythonhosted.org/packages/ce/84/3db5537e0879942783e2256616ff15d870a11d7ac26541336fe1b673c818/propcache-0.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:43593c6772aa12abc3af7784bff4a41ffa921608dd38b77cf1dfd7f5c4e71371", size = 46503 }, + { url = "https://files.pythonhosted.org/packages/e2/c8/b649ed972433c3f0d827d7f0cf9ea47162f4ef8f4fe98c5f3641a0bc63ff/propcache-0.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a75801768bbe65499495660b777e018cbe90c7980f07f8aa57d6be79ea6f71da", size = 45934 }, + { url = "https://files.pythonhosted.org/packages/59/f9/4c0a5cf6974c2c43b1a6810c40d889769cc8f84cea676cbe1e62766a45f8/propcache-0.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash 
= "sha256:f6f1324db48f001c2ca26a25fa25af60711e09b9aaf4b28488602776f4f9a744", size = 233633 }, + { url = "https://files.pythonhosted.org/packages/e7/64/66f2f4d1b4f0007c6e9078bd95b609b633d3957fe6dd23eac33ebde4b584/propcache-0.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cdb0f3e1eb6dfc9965d19734d8f9c481b294b5274337a8cb5cb01b462dcb7e0", size = 241124 }, + { url = "https://files.pythonhosted.org/packages/aa/bf/7b8c9fd097d511638fa9b6af3d986adbdf567598a567b46338c925144c1b/propcache-0.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1eb34d90aac9bfbced9a58b266f8946cb5935869ff01b164573a7634d39fbcb5", size = 240283 }, + { url = "https://files.pythonhosted.org/packages/fa/c9/e85aeeeaae83358e2a1ef32d6ff50a483a5d5248bc38510d030a6f4e2816/propcache-0.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f35c7070eeec2cdaac6fd3fe245226ed2a6292d3ee8c938e5bb645b434c5f256", size = 232498 }, + { url = "https://files.pythonhosted.org/packages/8e/66/acb88e1f30ef5536d785c283af2e62931cb934a56a3ecf39105887aa8905/propcache-0.3.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b23c11c2c9e6d4e7300c92e022046ad09b91fd00e36e83c44483df4afa990073", size = 221486 }, + { url = "https://files.pythonhosted.org/packages/f5/f9/233ddb05ffdcaee4448508ee1d70aa7deff21bb41469ccdfcc339f871427/propcache-0.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3e19ea4ea0bf46179f8a3652ac1426e6dcbaf577ce4b4f65be581e237340420d", size = 222675 }, + { url = "https://files.pythonhosted.org/packages/98/b8/eb977e28138f9e22a5a789daf608d36e05ed93093ef12a12441030da800a/propcache-0.3.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:bd39c92e4c8f6cbf5f08257d6360123af72af9f4da75a690bef50da77362d25f", size = 215727 }, + { url = 
"https://files.pythonhosted.org/packages/89/2d/5f52d9c579f67b8ee1edd9ec073c91b23cc5b7ff7951a1e449e04ed8fdf3/propcache-0.3.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b0313e8b923b3814d1c4a524c93dfecea5f39fa95601f6a9b1ac96cd66f89ea0", size = 217878 }, + { url = "https://files.pythonhosted.org/packages/7a/fd/5283e5ed8a82b00c7a989b99bb6ea173db1ad750bf0bf8dff08d3f4a4e28/propcache-0.3.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e861ad82892408487be144906a368ddbe2dc6297074ade2d892341b35c59844a", size = 230558 }, + { url = "https://files.pythonhosted.org/packages/90/38/ab17d75938ef7ac87332c588857422ae126b1c76253f0f5b1242032923ca/propcache-0.3.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:61014615c1274df8da5991a1e5da85a3ccb00c2d4701ac6f3383afd3ca47ab0a", size = 233754 }, + { url = "https://files.pythonhosted.org/packages/06/5d/3b921b9c60659ae464137508d3b4c2b3f52f592ceb1964aa2533b32fcf0b/propcache-0.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:71ebe3fe42656a2328ab08933d420df5f3ab121772eef78f2dc63624157f0ed9", size = 226088 }, + { url = "https://files.pythonhosted.org/packages/54/6e/30a11f4417d9266b5a464ac5a8c5164ddc9dd153dfa77bf57918165eb4ae/propcache-0.3.1-cp311-cp311-win32.whl", hash = "sha256:58aa11f4ca8b60113d4b8e32d37e7e78bd8af4d1a5b5cb4979ed856a45e62005", size = 40859 }, + { url = "https://files.pythonhosted.org/packages/1d/3a/8a68dd867da9ca2ee9dfd361093e9cb08cb0f37e5ddb2276f1b5177d7731/propcache-0.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:9532ea0b26a401264b1365146c440a6d78269ed41f83f23818d4b79497aeabe7", size = 45153 }, + { url = "https://files.pythonhosted.org/packages/41/aa/ca78d9be314d1e15ff517b992bebbed3bdfef5b8919e85bf4940e57b6137/propcache-0.3.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f78eb8422acc93d7b69964012ad7048764bb45a54ba7a39bb9e146c72ea29723", size = 80430 }, + { url = 
"https://files.pythonhosted.org/packages/1a/d8/f0c17c44d1cda0ad1979af2e593ea290defdde9eaeb89b08abbe02a5e8e1/propcache-0.3.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:89498dd49c2f9a026ee057965cdf8192e5ae070ce7d7a7bd4b66a8e257d0c976", size = 46637 }, + { url = "https://files.pythonhosted.org/packages/ae/bd/c1e37265910752e6e5e8a4c1605d0129e5b7933c3dc3cf1b9b48ed83b364/propcache-0.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:09400e98545c998d57d10035ff623266927cb784d13dd2b31fd33b8a5316b85b", size = 46123 }, + { url = "https://files.pythonhosted.org/packages/d4/b0/911eda0865f90c0c7e9f0415d40a5bf681204da5fd7ca089361a64c16b28/propcache-0.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa8efd8c5adc5a2c9d3b952815ff8f7710cefdcaf5f2c36d26aff51aeca2f12f", size = 243031 }, + { url = "https://files.pythonhosted.org/packages/0a/06/0da53397c76a74271621807265b6eb61fb011451b1ddebf43213df763669/propcache-0.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2fe5c910f6007e716a06d269608d307b4f36e7babee5f36533722660e8c4a70", size = 249100 }, + { url = "https://files.pythonhosted.org/packages/f1/eb/13090e05bf6b963fc1653cdc922133ced467cb4b8dab53158db5a37aa21e/propcache-0.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a0ab8cf8cdd2194f8ff979a43ab43049b1df0b37aa64ab7eca04ac14429baeb7", size = 250170 }, + { url = "https://files.pythonhosted.org/packages/3b/4c/f72c9e1022b3b043ec7dc475a0f405d4c3e10b9b1d378a7330fecf0652da/propcache-0.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:563f9d8c03ad645597b8d010ef4e9eab359faeb11a0a2ac9f7b4bc8c28ebef25", size = 245000 }, + { url = "https://files.pythonhosted.org/packages/e8/fd/970ca0e22acc829f1adf5de3724085e778c1ad8a75bec010049502cb3a86/propcache-0.3.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:fb6e0faf8cb6b4beea5d6ed7b5a578254c6d7df54c36ccd3d8b3eb00d6770277", size = 230262 }, + { url = "https://files.pythonhosted.org/packages/c4/42/817289120c6b9194a44f6c3e6b2c3277c5b70bbad39e7df648f177cc3634/propcache-0.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1c5c7ab7f2bb3f573d1cb921993006ba2d39e8621019dffb1c5bc94cdbae81e8", size = 236772 }, + { url = "https://files.pythonhosted.org/packages/7c/9c/3b3942b302badd589ad6b672da3ca7b660a6c2f505cafd058133ddc73918/propcache-0.3.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:050b571b2e96ec942898f8eb46ea4bfbb19bd5502424747e83badc2d4a99a44e", size = 231133 }, + { url = "https://files.pythonhosted.org/packages/98/a1/75f6355f9ad039108ff000dfc2e19962c8dea0430da9a1428e7975cf24b2/propcache-0.3.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e1c4d24b804b3a87e9350f79e2371a705a188d292fd310e663483af6ee6718ee", size = 230741 }, + { url = "https://files.pythonhosted.org/packages/67/0c/3e82563af77d1f8731132166da69fdfd95e71210e31f18edce08a1eb11ea/propcache-0.3.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:e4fe2a6d5ce975c117a6bb1e8ccda772d1e7029c1cca1acd209f91d30fa72815", size = 244047 }, + { url = "https://files.pythonhosted.org/packages/f7/50/9fb7cca01532a08c4d5186d7bb2da6c4c587825c0ae134b89b47c7d62628/propcache-0.3.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:feccd282de1f6322f56f6845bf1207a537227812f0a9bf5571df52bb418d79d5", size = 246467 }, + { url = "https://files.pythonhosted.org/packages/a9/02/ccbcf3e1c604c16cc525309161d57412c23cf2351523aedbb280eb7c9094/propcache-0.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ec314cde7314d2dd0510c6787326bbffcbdc317ecee6b7401ce218b3099075a7", size = 241022 }, + { url = "https://files.pythonhosted.org/packages/db/19/e777227545e09ca1e77a6e21274ae9ec45de0f589f0ce3eca2a41f366220/propcache-0.3.1-cp312-cp312-win32.whl", hash = "sha256:7d2d5a0028d920738372630870e7d9644ce437142197f8c827194fca404bf03b", size = 40647 }, + { url = 
"https://files.pythonhosted.org/packages/24/bb/3b1b01da5dd04c77a204c84e538ff11f624e31431cfde7201d9110b092b1/propcache-0.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:88c423efef9d7a59dae0614eaed718449c09a5ac79a5f224a8b9664d603f04a3", size = 44784 }, + { url = "https://files.pythonhosted.org/packages/58/60/f645cc8b570f99be3cf46714170c2de4b4c9d6b827b912811eff1eb8a412/propcache-0.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f1528ec4374617a7a753f90f20e2f551121bb558fcb35926f99e3c42367164b8", size = 77865 }, + { url = "https://files.pythonhosted.org/packages/6f/d4/c1adbf3901537582e65cf90fd9c26fde1298fde5a2c593f987112c0d0798/propcache-0.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dc1915ec523b3b494933b5424980831b636fe483d7d543f7afb7b3bf00f0c10f", size = 45452 }, + { url = "https://files.pythonhosted.org/packages/d1/b5/fe752b2e63f49f727c6c1c224175d21b7d1727ce1d4873ef1c24c9216830/propcache-0.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a110205022d077da24e60b3df8bcee73971be9575dec5573dd17ae5d81751111", size = 44800 }, + { url = "https://files.pythonhosted.org/packages/62/37/fc357e345bc1971e21f76597028b059c3d795c5ca7690d7a8d9a03c9708a/propcache-0.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d249609e547c04d190e820d0d4c8ca03ed4582bcf8e4e160a6969ddfb57b62e5", size = 225804 }, + { url = "https://files.pythonhosted.org/packages/0d/f1/16e12c33e3dbe7f8b737809bad05719cff1dccb8df4dafbcff5575002c0e/propcache-0.3.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ced33d827625d0a589e831126ccb4f5c29dfdf6766cac441d23995a65825dcb", size = 230650 }, + { url = "https://files.pythonhosted.org/packages/3e/a2/018b9f2ed876bf5091e60153f727e8f9073d97573f790ff7cdf6bc1d1fb8/propcache-0.3.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4114c4ada8f3181af20808bedb250da6bae56660e4b8dfd9cd95d4549c0962f7", size = 234235 }, + { url = 
"https://files.pythonhosted.org/packages/45/5f/3faee66fc930dfb5da509e34c6ac7128870631c0e3582987fad161fcb4b1/propcache-0.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:975af16f406ce48f1333ec5e912fe11064605d5c5b3f6746969077cc3adeb120", size = 228249 }, + { url = "https://files.pythonhosted.org/packages/62/1e/a0d5ebda5da7ff34d2f5259a3e171a94be83c41eb1e7cd21a2105a84a02e/propcache-0.3.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a34aa3a1abc50740be6ac0ab9d594e274f59960d3ad253cd318af76b996dd654", size = 214964 }, + { url = "https://files.pythonhosted.org/packages/db/a0/d72da3f61ceab126e9be1f3bc7844b4e98c6e61c985097474668e7e52152/propcache-0.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9cec3239c85ed15bfaded997773fdad9fb5662b0a7cbc854a43f291eb183179e", size = 222501 }, + { url = "https://files.pythonhosted.org/packages/18/6d/a008e07ad7b905011253adbbd97e5b5375c33f0b961355ca0a30377504ac/propcache-0.3.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:05543250deac8e61084234d5fc54f8ebd254e8f2b39a16b1dce48904f45b744b", size = 217917 }, + { url = "https://files.pythonhosted.org/packages/98/37/02c9343ffe59e590e0e56dc5c97d0da2b8b19fa747ebacf158310f97a79a/propcache-0.3.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:5cb5918253912e088edbf023788de539219718d3b10aef334476b62d2b53de53", size = 217089 }, + { url = "https://files.pythonhosted.org/packages/53/1b/d3406629a2c8a5666d4674c50f757a77be119b113eedd47b0375afdf1b42/propcache-0.3.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f3bbecd2f34d0e6d3c543fdb3b15d6b60dd69970c2b4c822379e5ec8f6f621d5", size = 228102 }, + { url = "https://files.pythonhosted.org/packages/cd/a7/3664756cf50ce739e5f3abd48febc0be1a713b1f389a502ca819791a6b69/propcache-0.3.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aca63103895c7d960a5b9b044a83f544b233c95e0dcff114389d64d762017af7", size = 230122 }, + { url = 
"https://files.pythonhosted.org/packages/35/36/0bbabaacdcc26dac4f8139625e930f4311864251276033a52fd52ff2a274/propcache-0.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a0a9898fdb99bf11786265468571e628ba60af80dc3f6eb89a3545540c6b0ef", size = 226818 }, + { url = "https://files.pythonhosted.org/packages/cc/27/4e0ef21084b53bd35d4dae1634b6d0bad35e9c58ed4f032511acca9d4d26/propcache-0.3.1-cp313-cp313-win32.whl", hash = "sha256:3a02a28095b5e63128bcae98eb59025924f121f048a62393db682f049bf4ac24", size = 40112 }, + { url = "https://files.pythonhosted.org/packages/a6/2c/a54614d61895ba6dd7ac8f107e2b2a0347259ab29cbf2ecc7b94fa38c4dc/propcache-0.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:813fbb8b6aea2fc9659815e585e548fe706d6f663fa73dff59a1677d4595a037", size = 44034 }, + { url = "https://files.pythonhosted.org/packages/5a/a8/0a4fd2f664fc6acc66438370905124ce62e84e2e860f2557015ee4a61c7e/propcache-0.3.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a444192f20f5ce8a5e52761a031b90f5ea6288b1eef42ad4c7e64fef33540b8f", size = 82613 }, + { url = "https://files.pythonhosted.org/packages/4d/e5/5ef30eb2cd81576256d7b6caaa0ce33cd1d2c2c92c8903cccb1af1a4ff2f/propcache-0.3.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0fbe94666e62ebe36cd652f5fc012abfbc2342de99b523f8267a678e4dfdee3c", size = 47763 }, + { url = "https://files.pythonhosted.org/packages/87/9a/87091ceb048efeba4d28e903c0b15bcc84b7c0bf27dc0261e62335d9b7b8/propcache-0.3.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f011f104db880f4e2166bcdcf7f58250f7a465bc6b068dc84c824a3d4a5c94dc", size = 47175 }, + { url = "https://files.pythonhosted.org/packages/3e/2f/854e653c96ad1161f96194c6678a41bbb38c7947d17768e8811a77635a08/propcache-0.3.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e584b6d388aeb0001d6d5c2bd86b26304adde6d9bb9bfa9c4889805021b96de", size = 292265 }, + { url = 
"https://files.pythonhosted.org/packages/40/8d/090955e13ed06bc3496ba4a9fb26c62e209ac41973cb0d6222de20c6868f/propcache-0.3.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a17583515a04358b034e241f952f1715243482fc2c2945fd99a1b03a0bd77d6", size = 294412 }, + { url = "https://files.pythonhosted.org/packages/39/e6/d51601342e53cc7582449e6a3c14a0479fab2f0750c1f4d22302e34219c6/propcache-0.3.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5aed8d8308215089c0734a2af4f2e95eeb360660184ad3912686c181e500b2e7", size = 294290 }, + { url = "https://files.pythonhosted.org/packages/3b/4d/be5f1a90abc1881884aa5878989a1acdafd379a91d9c7e5e12cef37ec0d7/propcache-0.3.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d8e309ff9a0503ef70dc9a0ebd3e69cf7b3894c9ae2ae81fc10943c37762458", size = 282926 }, + { url = "https://files.pythonhosted.org/packages/57/2b/8f61b998c7ea93a2b7eca79e53f3e903db1787fca9373af9e2cf8dc22f9d/propcache-0.3.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b655032b202028a582d27aeedc2e813299f82cb232f969f87a4fde491a233f11", size = 267808 }, + { url = "https://files.pythonhosted.org/packages/11/1c/311326c3dfce59c58a6098388ba984b0e5fb0381ef2279ec458ef99bd547/propcache-0.3.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9f64d91b751df77931336b5ff7bafbe8845c5770b06630e27acd5dbb71e1931c", size = 290916 }, + { url = "https://files.pythonhosted.org/packages/4b/74/91939924b0385e54dc48eb2e4edd1e4903ffd053cf1916ebc5347ac227f7/propcache-0.3.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:19a06db789a4bd896ee91ebc50d059e23b3639c25d58eb35be3ca1cbe967c3bf", size = 262661 }, + { url = "https://files.pythonhosted.org/packages/c2/d7/e6079af45136ad325c5337f5dd9ef97ab5dc349e0ff362fe5c5db95e2454/propcache-0.3.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = 
"sha256:bef100c88d8692864651b5f98e871fb090bd65c8a41a1cb0ff2322db39c96c27", size = 264384 }, + { url = "https://files.pythonhosted.org/packages/b7/d5/ba91702207ac61ae6f1c2da81c5d0d6bf6ce89e08a2b4d44e411c0bbe867/propcache-0.3.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:87380fb1f3089d2a0b8b00f006ed12bd41bd858fabfa7330c954c70f50ed8757", size = 291420 }, + { url = "https://files.pythonhosted.org/packages/58/70/2117780ed7edcd7ba6b8134cb7802aada90b894a9810ec56b7bb6018bee7/propcache-0.3.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e474fc718e73ba5ec5180358aa07f6aded0ff5f2abe700e3115c37d75c947e18", size = 290880 }, + { url = "https://files.pythonhosted.org/packages/4a/1f/ecd9ce27710021ae623631c0146719280a929d895a095f6d85efb6a0be2e/propcache-0.3.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:17d1c688a443355234f3c031349da69444be052613483f3e4158eef751abcd8a", size = 287407 }, + { url = "https://files.pythonhosted.org/packages/3e/66/2e90547d6b60180fb29e23dc87bd8c116517d4255240ec6d3f7dc23d1926/propcache-0.3.1-cp313-cp313t-win32.whl", hash = "sha256:359e81a949a7619802eb601d66d37072b79b79c2505e6d3fd8b945538411400d", size = 42573 }, + { url = "https://files.pythonhosted.org/packages/cb/8f/50ad8599399d1861b4d2b6b45271f0ef6af1b09b0a2386a46dbaf19c9535/propcache-0.3.1-cp313-cp313t-win_amd64.whl", hash = "sha256:e7fb9a84c9abbf2b2683fa3e7b0d7da4d8ecf139a1c635732a8bda29c5214b0e", size = 46757 }, + { url = "https://files.pythonhosted.org/packages/b8/d3/c3cb8f1d6ae3b37f83e1de806713a9b3642c5895f0215a62e1a4bd6e5e34/propcache-0.3.1-py3-none-any.whl", hash = "sha256:9a8ecf38de50a7f518c21568c80f985e776397b902f1ce0b01f799aba1608b40", size = 12376 }, +] + +[[package]] +name = "proto-plus" +version = "1.26.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f4/ac/87285f15f7cce6d4a008f33f1757fb5a13611ea8914eb58c3d0d26243468/proto_plus-1.26.1.tar.gz", hash = 
"sha256:21a515a4c4c0088a773899e23c7bbade3d18f9c66c73edd4c7ee3816bc96a012", size = 56142 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/6d/280c4c2ce28b1593a19ad5239c8b826871fc6ec275c21afc8e1820108039/proto_plus-1.26.1-py3-none-any.whl", hash = "sha256:13285478c2dcf2abb829db158e1047e2f1e8d63a077d94263c2b88b043c75a66", size = 50163 }, +] + +[[package]] +name = "protobuf" +version = "4.25.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/01/34c8d2b6354906d728703cb9d546a0e534de479e25f1b581e4094c4a85cc/protobuf-4.25.8.tar.gz", hash = "sha256:6135cf8affe1fc6f76cced2641e4ea8d3e59518d1f24ae41ba97bcad82d397cd", size = 380920 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/ff/05f34305fe6b85bbfbecbc559d423a5985605cad5eda4f47eae9e9c9c5c5/protobuf-4.25.8-cp310-abi3-win32.whl", hash = "sha256:504435d831565f7cfac9f0714440028907f1975e4bed228e58e72ecfff58a1e0", size = 392745 }, + { url = "https://files.pythonhosted.org/packages/08/35/8b8a8405c564caf4ba835b1fdf554da869954712b26d8f2a98c0e434469b/protobuf-4.25.8-cp310-abi3-win_amd64.whl", hash = "sha256:bd551eb1fe1d7e92c1af1d75bdfa572eff1ab0e5bf1736716814cdccdb2360f9", size = 413736 }, + { url = "https://files.pythonhosted.org/packages/28/d7/ab27049a035b258dab43445eb6ec84a26277b16105b277cbe0a7698bdc6c/protobuf-4.25.8-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:ca809b42f4444f144f2115c4c1a747b9a404d590f18f37e9402422033e464e0f", size = 394537 }, + { url = "https://files.pythonhosted.org/packages/bd/6d/a4a198b61808dd3d1ee187082ccc21499bc949d639feb948961b48be9a7e/protobuf-4.25.8-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:9ad7ef62d92baf5a8654fbb88dac7fa5594cfa70fd3440488a5ca3bfc6d795a7", size = 294005 }, + { url = "https://files.pythonhosted.org/packages/d6/c6/c9deaa6e789b6fc41b88ccbdfe7a42d2b82663248b715f55aa77fbc00724/protobuf-4.25.8-cp37-abi3-manylinux2014_x86_64.whl", hash = 
"sha256:83e6e54e93d2b696a92cad6e6efc924f3850f82b52e1563778dfab8b355101b0", size = 294924 }, + { url = "https://files.pythonhosted.org/packages/0c/c1/6aece0ab5209981a70cd186f164c133fdba2f51e124ff92b73de7fd24d78/protobuf-4.25.8-py3-none-any.whl", hash = "sha256:15a0af558aa3b13efef102ae6e4f3efac06f1eea11afb3a57db2901447d9fb59", size = 156757 }, +] + +[[package]] +name = "psutil" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/80/336820c1ad9286a4ded7e845b2eccfcb27851ab8ac6abece774a6ff4d3de/psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456", size = 497003 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/e6/2d26234410f8b8abdbf891c9da62bee396583f713fb9f3325a4760875d22/psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25", size = 238051 }, + { url = "https://files.pythonhosted.org/packages/04/8b/30f930733afe425e3cbfc0e1468a30a18942350c1a8816acfade80c005c4/psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da", size = 239535 }, + { url = "https://files.pythonhosted.org/packages/2a/ed/d362e84620dd22876b55389248e522338ed1bf134a5edd3b8231d7207f6d/psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91", size = 275004 }, + { url = "https://files.pythonhosted.org/packages/bf/b9/b0eb3f3cbcb734d930fdf839431606844a825b23eaf9a6ab371edac8162c/psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34", size = 277986 }, + { url = 
"https://files.pythonhosted.org/packages/eb/a2/709e0fe2f093556c17fbafda93ac032257242cabcc7ff3369e2cb76a97aa/psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993", size = 279544 }, + { url = "https://files.pythonhosted.org/packages/50/e6/eecf58810b9d12e6427369784efe814a1eec0f492084ce8eb8f4d89d6d61/psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99", size = 241053 }, + { url = "https://files.pythonhosted.org/packages/50/1b/6921afe68c74868b4c9fa424dad3be35b095e16687989ebbb50ce4fceb7c/psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553", size = 244885 }, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993 }, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = 
"sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842 }, +] + +[[package]] +name = "pyarrow" +version = "20.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/ee/a7810cb9f3d6e9238e61d312076a9859bf3668fd21c69744de9532383912/pyarrow-20.0.0.tar.gz", hash = "sha256:febc4a913592573c8d5805091a6c2b5064c8bd6e002131f01061797d91c783c1", size = 1125187 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/a2/b7930824181ceadd0c63c1042d01fa4ef63eee233934826a7a2a9af6e463/pyarrow-20.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:24ca380585444cb2a31324c546a9a56abbe87e26069189e14bdba19c86c049f0", size = 30856035 }, + { url = "https://files.pythonhosted.org/packages/9b/18/c765770227d7f5bdfa8a69f64b49194352325c66a5c3bb5e332dfd5867d9/pyarrow-20.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:95b330059ddfdc591a3225f2d272123be26c8fa76e8c9ee1a77aad507361cfdb", size = 32309552 }, + { url = "https://files.pythonhosted.org/packages/44/fb/dfb2dfdd3e488bb14f822d7335653092dde150cffc2da97de6e7500681f9/pyarrow-20.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f0fb1041267e9968c6d0d2ce3ff92e3928b243e2b6d11eeb84d9ac547308232", size = 41334704 }, + { url = "https://files.pythonhosted.org/packages/58/0d/08a95878d38808051a953e887332d4a76bc06c6ee04351918ee1155407eb/pyarrow-20.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8ff87cc837601532cc8242d2f7e09b4e02404de1b797aee747dd4ba4bd6313f", size = 42399836 }, + { url = "https://files.pythonhosted.org/packages/f3/cd/efa271234dfe38f0271561086eedcad7bc0f2ddd1efba423916ff0883684/pyarrow-20.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:7a3a5dcf54286e6141d5114522cf31dd67a9e7c9133d150799f30ee302a7a1ab", size = 40711789 }, + { url = 
"https://files.pythonhosted.org/packages/46/1f/7f02009bc7fc8955c391defee5348f510e589a020e4b40ca05edcb847854/pyarrow-20.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a6ad3e7758ecf559900261a4df985662df54fb7fdb55e8e3b3aa99b23d526b62", size = 42301124 }, + { url = "https://files.pythonhosted.org/packages/4f/92/692c562be4504c262089e86757a9048739fe1acb4024f92d39615e7bab3f/pyarrow-20.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6bb830757103a6cb300a04610e08d9636f0cd223d32f388418ea893a3e655f1c", size = 42916060 }, + { url = "https://files.pythonhosted.org/packages/a4/ec/9f5c7e7c828d8e0a3c7ef50ee62eca38a7de2fa6eb1b8fa43685c9414fef/pyarrow-20.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:96e37f0766ecb4514a899d9a3554fadda770fb57ddf42b63d80f14bc20aa7db3", size = 44547640 }, + { url = "https://files.pythonhosted.org/packages/54/96/46613131b4727f10fd2ffa6d0d6f02efcc09a0e7374eff3b5771548aa95b/pyarrow-20.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3346babb516f4b6fd790da99b98bed9708e3f02e734c84971faccb20736848dc", size = 25781491 }, + { url = "https://files.pythonhosted.org/packages/a1/d6/0c10e0d54f6c13eb464ee9b67a68b8c71bcf2f67760ef5b6fbcddd2ab05f/pyarrow-20.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:75a51a5b0eef32727a247707d4755322cb970be7e935172b6a3a9f9ae98404ba", size = 30815067 }, + { url = "https://files.pythonhosted.org/packages/7e/e2/04e9874abe4094a06fd8b0cbb0f1312d8dd7d707f144c2ec1e5e8f452ffa/pyarrow-20.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:211d5e84cecc640c7a3ab900f930aaff5cd2702177e0d562d426fb7c4f737781", size = 32297128 }, + { url = "https://files.pythonhosted.org/packages/31/fd/c565e5dcc906a3b471a83273039cb75cb79aad4a2d4a12f76cc5ae90a4b8/pyarrow-20.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ba3cf4182828be7a896cbd232aa8dd6a31bd1f9e32776cc3796c012855e1199", size = 41334890 }, + { url = 
"https://files.pythonhosted.org/packages/af/a9/3bdd799e2c9b20c1ea6dc6fa8e83f29480a97711cf806e823f808c2316ac/pyarrow-20.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c3a01f313ffe27ac4126f4c2e5ea0f36a5fc6ab51f8726cf41fee4b256680bd", size = 42421775 }, + { url = "https://files.pythonhosted.org/packages/10/f7/da98ccd86354c332f593218101ae56568d5dcedb460e342000bd89c49cc1/pyarrow-20.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:a2791f69ad72addd33510fec7bb14ee06c2a448e06b649e264c094c5b5f7ce28", size = 40687231 }, + { url = "https://files.pythonhosted.org/packages/bb/1b/2168d6050e52ff1e6cefc61d600723870bf569cbf41d13db939c8cf97a16/pyarrow-20.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4250e28a22302ce8692d3a0e8ec9d9dde54ec00d237cff4dfa9c1fbf79e472a8", size = 42295639 }, + { url = "https://files.pythonhosted.org/packages/b2/66/2d976c0c7158fd25591c8ca55aee026e6d5745a021915a1835578707feb3/pyarrow-20.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:89e030dc58fc760e4010148e6ff164d2f44441490280ef1e97a542375e41058e", size = 42908549 }, + { url = "https://files.pythonhosted.org/packages/31/a9/dfb999c2fc6911201dcbf348247f9cc382a8990f9ab45c12eabfd7243a38/pyarrow-20.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6102b4864d77102dbbb72965618e204e550135a940c2534711d5ffa787df2a5a", size = 44557216 }, + { url = "https://files.pythonhosted.org/packages/a0/8e/9adee63dfa3911be2382fb4d92e4b2e7d82610f9d9f668493bebaa2af50f/pyarrow-20.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:96d6a0a37d9c98be08f5ed6a10831d88d52cac7b13f5287f1e0f625a0de8062b", size = 25660496 }, + { url = "https://files.pythonhosted.org/packages/9b/aa/daa413b81446d20d4dad2944110dcf4cf4f4179ef7f685dd5a6d7570dc8e/pyarrow-20.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:a15532e77b94c61efadde86d10957950392999503b3616b2ffcef7621a002893", size = 30798501 }, + { url = 
"https://files.pythonhosted.org/packages/ff/75/2303d1caa410925de902d32ac215dc80a7ce7dd8dfe95358c165f2adf107/pyarrow-20.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:dd43f58037443af715f34f1322c782ec463a3c8a94a85fdb2d987ceb5658e061", size = 32277895 }, + { url = "https://files.pythonhosted.org/packages/92/41/fe18c7c0b38b20811b73d1bdd54b1fccba0dab0e51d2048878042d84afa8/pyarrow-20.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa0d288143a8585806e3cc7c39566407aab646fb9ece164609dac1cfff45f6ae", size = 41327322 }, + { url = "https://files.pythonhosted.org/packages/da/ab/7dbf3d11db67c72dbf36ae63dcbc9f30b866c153b3a22ef728523943eee6/pyarrow-20.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6953f0114f8d6f3d905d98e987d0924dabce59c3cda380bdfaa25a6201563b4", size = 42411441 }, + { url = "https://files.pythonhosted.org/packages/90/c3/0c7da7b6dac863af75b64e2f827e4742161128c350bfe7955b426484e226/pyarrow-20.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:991f85b48a8a5e839b2128590ce07611fae48a904cae6cab1f089c5955b57eb5", size = 40677027 }, + { url = "https://files.pythonhosted.org/packages/be/27/43a47fa0ff9053ab5203bb3faeec435d43c0d8bfa40179bfd076cdbd4e1c/pyarrow-20.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:97c8dc984ed09cb07d618d57d8d4b67a5100a30c3818c2fb0b04599f0da2de7b", size = 42281473 }, + { url = "https://files.pythonhosted.org/packages/bc/0b/d56c63b078876da81bbb9ba695a596eabee9b085555ed12bf6eb3b7cab0e/pyarrow-20.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9b71daf534f4745818f96c214dbc1e6124d7daf059167330b610fc69b6f3d3e3", size = 42893897 }, + { url = "https://files.pythonhosted.org/packages/92/ac/7d4bd020ba9145f354012838692d48300c1b8fe5634bfda886abcada67ed/pyarrow-20.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e8b88758f9303fa5a83d6c90e176714b2fd3852e776fc2d7e42a22dd6c2fb368", size = 44543847 }, + { url = 
"https://files.pythonhosted.org/packages/9d/07/290f4abf9ca702c5df7b47739c1b2c83588641ddfa2cc75e34a301d42e55/pyarrow-20.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:30b3051b7975801c1e1d387e17c588d8ab05ced9b1e14eec57915f79869b5031", size = 25653219 }, + { url = "https://files.pythonhosted.org/packages/95/df/720bb17704b10bd69dde086e1400b8eefb8f58df3f8ac9cff6c425bf57f1/pyarrow-20.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:ca151afa4f9b7bc45bcc791eb9a89e90a9eb2772767d0b1e5389609c7d03db63", size = 30853957 }, + { url = "https://files.pythonhosted.org/packages/d9/72/0d5f875efc31baef742ba55a00a25213a19ea64d7176e0fe001c5d8b6e9a/pyarrow-20.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:4680f01ecd86e0dd63e39eb5cd59ef9ff24a9d166db328679e36c108dc993d4c", size = 32247972 }, + { url = "https://files.pythonhosted.org/packages/d5/bc/e48b4fa544d2eea72f7844180eb77f83f2030b84c8dad860f199f94307ed/pyarrow-20.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f4c8534e2ff059765647aa69b75d6543f9fef59e2cd4c6d18015192565d2b70", size = 41256434 }, + { url = "https://files.pythonhosted.org/packages/c3/01/974043a29874aa2cf4f87fb07fd108828fc7362300265a2a64a94965e35b/pyarrow-20.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e1f8a47f4b4ae4c69c4d702cfbdfe4d41e18e5c7ef6f1bb1c50918c1e81c57b", size = 42353648 }, + { url = "https://files.pythonhosted.org/packages/68/95/cc0d3634cde9ca69b0e51cbe830d8915ea32dda2157560dda27ff3b3337b/pyarrow-20.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:a1f60dc14658efaa927f8214734f6a01a806d7690be4b3232ba526836d216122", size = 40619853 }, + { url = "https://files.pythonhosted.org/packages/29/c2/3ad40e07e96a3e74e7ed7cc8285aadfa84eb848a798c98ec0ad009eb6bcc/pyarrow-20.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:204a846dca751428991346976b914d6d2a82ae5b8316a6ed99789ebf976551e6", size = 42241743 }, + { url = 
"https://files.pythonhosted.org/packages/eb/cb/65fa110b483339add6a9bc7b6373614166b14e20375d4daa73483755f830/pyarrow-20.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f3b117b922af5e4c6b9a9115825726cac7d8b1421c37c2b5e24fbacc8930612c", size = 42839441 }, + { url = "https://files.pythonhosted.org/packages/98/7b/f30b1954589243207d7a0fbc9997401044bf9a033eec78f6cb50da3f304a/pyarrow-20.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e724a3fd23ae5b9c010e7be857f4405ed5e679db5c93e66204db1a69f733936a", size = 44503279 }, + { url = "https://files.pythonhosted.org/packages/37/40/ad395740cd641869a13bcf60851296c89624662575621968dcfafabaa7f6/pyarrow-20.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:82f1ee5133bd8f49d31be1299dc07f585136679666b502540db854968576faf9", size = 25944982 }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135 }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = 
"sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259 }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552 }, +] + +[[package]] +name = "pydantic" +version = "2.11.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f0/86/8ce9040065e8f924d642c58e4a344e33163a07f6b57f836d0d734e0ad3fb/pydantic-2.11.5.tar.gz", hash = "sha256:7f853db3d0ce78ce8bbb148c401c2cdd6431b3473c0cdff2755c7690952a7b7a", size = 787102 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/69/831ed22b38ff9b4b64b66569f0e5b7b97cf3638346eb95a2147fdb49ad5f/pydantic-2.11.5-py3-none-any.whl", hash = "sha256:f9c26ba06f9747749ca1e5c94d6a85cb84254577553c8785576fd38fa64dc0f7", size = 444229 }, +] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/3f/8d/71db63483d518cbbf290261a1fc2839d17ff89fce7089e08cad07ccfce67/pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7", size = 2028584 }, + { url = "https://files.pythonhosted.org/packages/24/2f/3cfa7244ae292dd850989f328722d2aef313f74ffc471184dc509e1e4e5a/pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246", size = 1855071 }, + { url = "https://files.pythonhosted.org/packages/b3/d3/4ae42d33f5e3f50dd467761304be2fa0a9417fbf09735bc2cce003480f2a/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f", size = 1897823 }, + { url = "https://files.pythonhosted.org/packages/f4/f3/aa5976e8352b7695ff808599794b1fba2a9ae2ee954a3426855935799488/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc", size = 1983792 }, + { url = "https://files.pythonhosted.org/packages/d5/7a/cda9b5a23c552037717f2b2a5257e9b2bfe45e687386df9591eff7b46d28/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de", size = 2136338 }, + { url = "https://files.pythonhosted.org/packages/2b/9f/b8f9ec8dd1417eb9da784e91e1667d58a2a4a7b7b34cf4af765ef663a7e5/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a", size = 2730998 }, + { url = "https://files.pythonhosted.org/packages/47/bc/cd720e078576bdb8255d5032c5d63ee5c0bf4b7173dd955185a1d658c456/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef", size = 2003200 }, + { url = "https://files.pythonhosted.org/packages/ca/22/3602b895ee2cd29d11a2b349372446ae9727c32e78a94b3d588a40fdf187/pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e", size = 2113890 }, + { url = "https://files.pythonhosted.org/packages/ff/e6/e3c5908c03cf00d629eb38393a98fccc38ee0ce8ecce32f69fc7d7b558a7/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d", size = 2073359 }, + { url = "https://files.pythonhosted.org/packages/12/e7/6a36a07c59ebefc8777d1ffdaf5ae71b06b21952582e4b07eba88a421c79/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30", size = 2245883 }, + { url = "https://files.pythonhosted.org/packages/16/3f/59b3187aaa6cc0c1e6616e8045b284de2b6a87b027cce2ffcea073adf1d2/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf", size = 2241074 }, + { url = "https://files.pythonhosted.org/packages/e0/ed/55532bb88f674d5d8f67ab121a2a13c385df382de2a1677f30ad385f7438/pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51", size = 1910538 }, + { url = "https://files.pythonhosted.org/packages/fe/1b/25b7cccd4519c0b23c2dd636ad39d381abf113085ce4f7bec2b0dc755eb1/pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab", size = 1952909 }, + { url = "https://files.pythonhosted.org/packages/49/a9/d809358e49126438055884c4366a1f6227f0f84f635a9014e2deb9b9de54/pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65", 
size = 1897786 }, + { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000 }, + { url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996 }, + { url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957 }, + { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199 }, + { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296 }, + { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109 }, + { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028 }, + { url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044 }, + { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881 }, + { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034 }, + { url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187 }, + { url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628 }, + { url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866 }, + { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", 
size = 1888894 }, + { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688 }, + { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808 }, + { url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580 }, + { url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859 }, + { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810 }, + { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498 }, + { url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611 }, + { url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924 }, + { url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196 }, + { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389 }, + { url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223 }, + { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473 }, + { url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269 }, + { url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", 
size = 1893921 }, + { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162 }, + { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560 }, + { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777 }, + { url = "https://files.pythonhosted.org/packages/7b/27/d4ae6487d73948d6f20dddcd94be4ea43e74349b56eba82e9bdee2d7494c/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8", size = 2025200 }, + { url = "https://files.pythonhosted.org/packages/f1/b8/b3cb95375f05d33801024079b9392a5ab45267a63400bf1866e7ce0f0de4/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593", size = 1859123 }, + { url = "https://files.pythonhosted.org/packages/05/bc/0d0b5adeda59a261cd30a1235a445bf55c7e46ae44aea28f7bd6ed46e091/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612", size = 1892852 }, + { url = "https://files.pythonhosted.org/packages/3e/11/d37bdebbda2e449cb3f519f6ce950927b56d62f0b84fd9cb9e372a26a3d5/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7", size = 2067484 }, + { url = "https://files.pythonhosted.org/packages/8c/55/1f95f0a05ce72ecb02a8a8a1c3be0579bbc29b1d5ab68f1378b7bebc5057/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e", size = 2108896 }, + { url = "https://files.pythonhosted.org/packages/53/89/2b2de6c81fa131f423246a9109d7b2a375e83968ad0800d6e57d0574629b/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8", size = 2069475 }, + { url = "https://files.pythonhosted.org/packages/b8/e9/1f7efbe20d0b2b10f6718944b5d8ece9152390904f29a78e68d4e7961159/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf", size = 2239013 }, + { url = "https://files.pythonhosted.org/packages/3c/b2/5309c905a93811524a49b4e031e9851a6b00ff0fb668794472ea7746b448/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb", size = 2238715 }, + { url = "https://files.pythonhosted.org/packages/32/56/8a7ca5d2cd2cda1d245d34b1c9a942920a718082ae8e54e5f3e5a58b7add/pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1", size = 2066757 }, +] + +[[package]] +name = "pygments" +version = "2.19.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, +] + +[[package]] +name = "pymunk" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ec/9b/c0ac2fc7df5d81e3bf45c0e07668c69189f0feb4a102757394c80387b698/pymunk-7.0.0.tar.gz", hash = "sha256:ab763e81c03d9a35bbc542412629423f8d202ff90bf2c0771f89cc1a43a8fb23", size = 3354442 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/4f/a75d1783c9cef226db2d4a66db335b37b3d638cfa9942b08e9eab01729c4/pymunk-7.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:336ba0d7e5f13d2afc91cb8943b377080197f0ea41254a363f88711032517744", size = 366978 }, + { url = "https://files.pythonhosted.org/packages/38/70/7373111ffb11e4bf0b2a45f5b7be2c5efaaf6b811f66168bd6cbd87871cc/pymunk-7.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:742508b3b2f4c6cf64b7ac993b36851d39af22b72589ed0416f04a799a77267b", size = 350115 }, + { url = "https://files.pythonhosted.org/packages/0b/aa/0a6cb8865a2710b7a40a440fd42343218024f548a5f7d5f39c78194ad667/pymunk-7.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f318cc6cf24307d7931f59349b3c8ce35bb0181fb759cbd9a45c43868aa6fcd", size = 1028995 }, + { url = "https://files.pythonhosted.org/packages/73/c4/0d0f84a78bdd2ebe656ec401fbee089f8a26b9e7749f5a2a17ebf8616411/pymunk-7.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06dacd9baaf47ff7944de5aa0a1dc6100214cb1932fada73dd788c6f3fcf1d1", size = 1059362 }, + { url = "https://files.pythonhosted.org/packages/54/bc/4c698dd854106dae5598bab06c0e6429747e578713467d0aa86abcaf0ae9/pymunk-7.0.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:d0540e3fe3b72ae5fa2f1a9ee615566b57dafb0a7bac3e4dd3a6b208e3e920f8", size = 984862 }, + { url = "https://files.pythonhosted.org/packages/8f/12/3a1c113dbe993721cb633520b72c497901d6754f7e6b09d13ea7b13240fd/pymunk-7.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f1f3377535a9715ea262ec642058b181199541d9dc473466ace17acead23432f", size = 987204 }, + { url = "https://files.pythonhosted.org/packages/59/36/ced39d426123552e1abde49e02840acf72d74cbec741acf54a33854fb9ba/pymunk-7.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b3dd98078df688c1d5501c79c8ddb54b9414b659411bd9db2eeb936179ddff61", size = 943648 }, + { url = "https://files.pythonhosted.org/packages/fe/1d/1085f4db7f6a364125d3308a34d715ac65a191880972d91986caba04b610/pymunk-7.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:63364b785af5d30097cd20bdb0b7b1bc8dc4dc22ddaea5cade6ca46733700da3", size = 1010703 }, + { url = "https://files.pythonhosted.org/packages/d8/73/99de979e0c885c9e1f695a44b077f9588cef1c63a4971f07a0baa56d7d1e/pymunk-7.0.0-cp311-cp311-win32.whl", hash = "sha256:12718654d58bf2a95707f7c12f08994205436e5c24ab2ae8a5f3ddd2e0d7aa53", size = 317908 }, + { url = "https://files.pythonhosted.org/packages/9b/7d/0199c7806ffd2b387c743fa5b0ca6d94f8eb68f0221cdd15f2901cf361a2/pymunk-7.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:f375ff91ece98f88d005216c2156ae2418110997ed3bc6fd2522ad4230f4aad9", size = 369742 }, + { url = "https://files.pythonhosted.org/packages/aa/dd/9f1a733fc343ab85fd19538526c2556f6f2877dc2deec8e4770cffc08498/pymunk-7.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bea99b48a35a9d8aa6e524ad19d3e590b3e3238c56a3c094e59be82b8e4066c1", size = 367353 }, + { url = "https://files.pythonhosted.org/packages/3f/bc/9e33f0d043d50dc1cad72b0af7b630b72983aa8fd7839c6cf709bdb36da8/pymunk-7.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2363052c9b6a5151e6c95a86da548ecf00520f8a05254ec75456b85c28d62e33", size = 350170 }, + { url = 
"https://files.pythonhosted.org/packages/7c/7c/c18a616fd733cb97df86f146d49c56b0eadd595bc947d451c11848cd8a73/pymunk-7.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca18348af54a07e8dd7a62e1e64f382ecb06c11d3b2ca09b2a1753fdd3163821", size = 1032811 }, + { url = "https://files.pythonhosted.org/packages/f4/e4/0c6be5682cc3c9fc933e0171abccf4948a67c614ce93a0a66cf61c889aed/pymunk-7.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dc2e9fe5c98941836d9bbc09323f9e825540301cd72e87245cdb0408e876540", size = 1066076 }, + { url = "https://files.pythonhosted.org/packages/6a/16/51ae1c6cab12d3190d8d67eb1fe1044293e87e4defa9960bdcaf0c0eb9c7/pymunk-7.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aad59155924e8bd083abea240d6ea5a6c540d012d10f9a1782416c82abe77e5c", size = 986957 }, + { url = "https://files.pythonhosted.org/packages/3e/9a/8115f48fe2a4a999ec1deb91d7113741a068aeb2c6ac9263ba4e1b75a8b0/pymunk-7.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:872287e7bd31d0d8a7900b212b5b67c82e9cc3b11015310a25fe427aa24d2c76", size = 989309 }, + { url = "https://files.pythonhosted.org/packages/1f/c4/e3fd70881c421eb96c2321ab063295204614022a370ed8cfc31a7e2a213c/pymunk-7.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ed95c18ebfb43f4157fd2448baa3263dcec456358344ebf1f7b9f5b1dac1132d", size = 945119 }, + { url = "https://files.pythonhosted.org/packages/72/79/ff88c89ab67e87d44cc8f05c7a0a29bba970b7e697ee413ec16d827e40d9/pymunk-7.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7a7a2afeec6820e7a0a198639c54f08cc073ea6ef18110486063531038f52552", size = 1016275 }, + { url = "https://files.pythonhosted.org/packages/ed/6d/e4930c1e71e9ffbbc11cf713062badd164afced70b7e277e1c973717f6e9/pymunk-7.0.0-cp312-cp312-win32.whl", hash = "sha256:2eeba4d38287de280dfb75fe73941f0139a8742a3aafdd8da3d4e8bcf420f1b1", size = 317775 }, + { url = 
"https://files.pythonhosted.org/packages/40/54/2f640abbc0e8af3c7e57ed8c58e4301fb5d4bde0b9664b340aefb0fbdb05/pymunk-7.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:41270dbdc0250adfce0e005ce9dfd1793961b1b6724da905a257298dacfb2589", size = 369884 }, + { url = "https://files.pythonhosted.org/packages/a2/71/d7b46e2f526ff3a6a59e8828cb3c0bd21bfbfa5541294e54aa529625bb76/pymunk-7.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d05cf99fd06e0c5248595002137076eabccb31f90aa4fb200f35579b45d1769b", size = 367358 }, + { url = "https://files.pythonhosted.org/packages/fa/d6/639ec8366a14f0b16495d9b423184bcddf30130826a9d006eb6800b974fa/pymunk-7.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:134f464a3a270a182819eb2e235db3622639945e4b6eecaa2d7ae1548a0edfc9", size = 350168 }, + { url = "https://files.pythonhosted.org/packages/54/2f/c09513e58421e6963588a541322172abc31be646cb9fdb59ea5edf26b6c9/pymunk-7.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa5961f53b95b6d090a019dcb51206c79f78f3cfebd7754ef453cf1b00b65afb", size = 1032872 }, + { url = "https://files.pythonhosted.org/packages/2c/df/ab4792875c1b6a121892a761f453255477b0df576ca5ac628cec052faca6/pymunk-7.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6ae930ba7d69bd94e78a544018029316e234d88bd3de5c433752a1ea60397df", size = 1065832 }, + { url = "https://files.pythonhosted.org/packages/a7/33/937dbd9bf1ae422e89f42ea2a81842c4594a27f1a2afe663ad05578a270f/pymunk-7.0.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af85e17d7a6b7cfdfc72f4340b0b4163baebf480968b91e199fdabaead218995", size = 986949 }, + { url = "https://files.pythonhosted.org/packages/06/b9/d40a80c3919b246f177fd45d87615ae312eab842dc9ea63a50c54375cd84/pymunk-7.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:728ddf67f17a763c4327cc5d9592b1e11b6e22cfa8acf7222a9344154019cc40", size = 989332 }, + { url = 
"https://files.pythonhosted.org/packages/db/e7/90bfe49123da047a73681a49afa6684cbcc37ee1e16837940676da2fe3f8/pymunk-7.0.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:212fc971e063d3e3464ae938120a03798095d94e7af713f6ffb5670c5df06462", size = 945135 }, + { url = "https://files.pythonhosted.org/packages/bb/84/66eda7f264c89ce294dd2d1cfc66b881cd8adc6fed9c84de83f6baee94c6/pymunk-7.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3fb516f7003dfc060e4dbddb3f38e4172adfae7f5bf10baf6d97b7aed7d35f0b", size = 1016219 }, + { url = "https://files.pythonhosted.org/packages/53/2e/e418227f078ecd28f15199bcf36050a0be335592d26a729a75bce269a9ee/pymunk-7.0.0-cp313-cp313-win32.whl", hash = "sha256:7c5fb620343ef83a79af78ee50aba976923236ca205a153fc5bfe08eaf7d991d", size = 317779 }, + { url = "https://files.pythonhosted.org/packages/53/aa/bc8bda26d6dc5c75b1ecfbe6da1258fa9a1ce4e0ee96fbc55bb8571f091c/pymunk-7.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:22bf62056ddfd5cb43cb3ec0cd3f41dd899c205fb1df5f677abcd5d0f24c0bf7", size = 369886 }, + { url = "https://files.pythonhosted.org/packages/51/3e/bfb4d428fcda647fc5c35536c467ac10c28faf0aa36299d9b30f084c94e7/pymunk-7.0.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:069b5c1ad256464612c0a832fb72eb140f29e401728cd7445ae6bb99b56f05af", size = 293045 }, + { url = "https://files.pythonhosted.org/packages/c2/7b/07db84e31f70fa51619eb253c7fdd633cd2c60695e2871596c259adcf966/pymunk-7.0.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:b131ecab6fb7051378e446d7e7006397efcdba8bf45756c13c50cf327c8e578f", size = 284932 }, + { url = "https://files.pythonhosted.org/packages/e7/b3/b11619a2f80dbad6f02a3278b87591c0db75a5211037cb1b98a60ebadd80/pymunk-7.0.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bf0bfe4f6086083b28a65157a1d81fc72bc9918de3e07b3159cf1077d8b6c29", size = 320141 }, + { url = 
"https://files.pythonhosted.org/packages/7a/60/3b1f2ffdedb4a176edf18d7c46acef183cf68ba591b36b56ecc6b0c3921e/pymunk-7.0.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c46a53e46520a6d78bbb79a1d82299a7d6957977989da45f0dcfa686023c1f61", size = 328383 }, + { url = "https://files.pythonhosted.org/packages/a5/2d/45d019cf0714faabb9b3e318278426c50562210b15a43301e01d1f3efd0b/pymunk-7.0.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b61d18f4927bd0ba4d247d494c12602f3286d974016819027a0f026fe7ff9e0c", size = 337040 }, + { url = "https://files.pythonhosted.org/packages/06/19/e5596adf49b0acd9d445d3dbf03ef2d241e12c3dc4b90ce56e9ac7cb1b20/pymunk-7.0.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:98d72a1c804e87243d6c973221debfeb5445c56725af19581be728982e9f1bb1", size = 311032 }, +] + +[[package]] +name = "pynput" +version = "1.8.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "evdev", marker = "'linux' in sys_platform" }, + { name = "pyobjc-framework-applicationservices", marker = "sys_platform == 'darwin'" }, + { name = "pyobjc-framework-quartz", marker = "sys_platform == 'darwin'" }, + { name = "python-xlib", marker = "'linux' in sys_platform" }, + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f0/c3/dccf44c68225046df5324db0cc7d563a560635355b3e5f1d249468268a6f/pynput-1.8.1.tar.gz", hash = "sha256:70d7c8373ee98911004a7c938742242840a5628c004573d84ba849d4601df81e", size = 82289 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/59/4f/ac3fa906ae8a375a536b12794128c5efacade9eaa917a35dfd27ce0c7400/pynput-1.8.1-py2.py3-none-any.whl", hash = "sha256:42dfcf27404459ca16ca889c8fb8ffe42a9fe54f722fd1a3e130728e59e768d2", size = 91693 }, +] + +[[package]] +name = "pynvml" +version = "12.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-ml-py" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/26/6f/6b5880ed0239e85b9a39aed103b65b2ef81425beef9f45e5c035bf008330/pynvml-12.0.0.tar.gz", hash = "sha256:299ce2451a6a17e6822d6faee750103e25b415f06f59abb8db65d30f794166f5", size = 33636 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/df/f7cf07a65a96dd11d71f346f9c2863accdd4784da83af7181b067d556cbc/pynvml-12.0.0-py3-none-any.whl", hash = "sha256:fdff84b62a27dbe98e08e1a647eb77342bef1aebe0878bcd15e99a83fcbecb9e", size = 26560 }, +] + +[[package]] +name = "pyobjc-core" +version = "11.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5c/94/a111239b98260869780a5767e5d74bfd3a8c13a40457f479c28dcd91f89d/pyobjc_core-11.0.tar.gz", hash = "sha256:63bced211cb8a8fb5c8ff46473603da30e51112861bd02c438fbbbc8578d9a70", size = 994931 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/05/fa97309c3b1bc1ec90d701db89902e0bd5e1024023aa2c5387b889458b1b/pyobjc_core-11.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:50675c0bb8696fe960a28466f9baf6943df2928a1fd85625d678fa2f428bd0bd", size = 727295 }, + { url = "https://files.pythonhosted.org/packages/56/ce/bf3ff9a9347721a398c3dfb83e29b43fb166b7ef590f3f7b7ddcd283df39/pyobjc_core-11.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a03061d4955c62ddd7754224a80cdadfdf17b6b5f60df1d9169a3b1b02923f0b", size = 739750 }, + { url = "https://files.pythonhosted.org/packages/72/16/0c468e73dbecb821e3da8819236fe832dfc53eb5f66a11775b055a7589ea/pyobjc_core-11.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c338c1deb7ab2e9436d4175d1127da2eeed4a1b564b3d83b9f3ae4844ba97e86", size = 743900 }, + { url = "https://files.pythonhosted.org/packages/f3/88/cecec88fd51f62a6cd7775cc4fb6bfde16652f97df88d28c84fb77ca0c18/pyobjc_core-11.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b4e9dc4296110f251a4033ff3f40320b35873ea7f876bd29a1c9705bb5e08c59", size = 791905 }, +] + +[[package]] +name = 
"pyobjc-framework-applicationservices" +version = "11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyobjc-core", marker = "sys_platform == 'darwin'" }, + { name = "pyobjc-framework-cocoa", marker = "sys_platform == 'darwin'" }, + { name = "pyobjc-framework-coretext", marker = "sys_platform == 'darwin'" }, + { name = "pyobjc-framework-quartz", marker = "sys_platform == 'darwin'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ba/fb/4e42573b0d3baa3fa18ec53614cf979f951313f1451e8f2e17df9429da1f/pyobjc_framework_applicationservices-11.0.tar.gz", hash = "sha256:d6ea18dfc7d5626a3ecf4ac72d510405c0d3a648ca38cae8db841acdebecf4d2", size = 224334 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/37/3d4dc6c004aaeb67bd43f7261d7c169ff45b8fc0eefbc7ba8cd6b0c881bc/pyobjc_framework_ApplicationServices-11.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61a99eef23abb704257310db4f5271137707e184768f6407030c01de4731b67b", size = 30846 }, + { url = "https://files.pythonhosted.org/packages/74/a9/7a45a67e126d32c61ea22ffd80e87ff7e05b4acf32bede6cce071fbfffc8/pyobjc_framework_ApplicationServices-11.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:5fbeb425897d6129471d451ec61a29ddd5b1386eb26b1dd49cb313e34616ee21", size = 30908 }, + { url = "https://files.pythonhosted.org/packages/82/47/ab4155ec966aff2f8f0f6978b40f12255e8ef46111ca0bda7987959b4052/pyobjc_framework_ApplicationServices-11.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:59becf3cd87a4f4cedf4be02ff6cf46ed736f5c1123ce629f788aaafad91eff0", size = 30924 }, + { url = "https://files.pythonhosted.org/packages/a3/73/747aab95970e0b7b5d38c650028e5e034c0432d9451335ff790ca104f11a/pyobjc_framework_ApplicationServices-11.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:44b466e8745fb49e8ac20f29f2ffd7895b45e97aa63a844b2a80a97c3a34346f", size = 31279 }, +] + +[[package]] +name = "pyobjc-framework-cocoa" +version = "11.0" +source = { 
registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyobjc-core", marker = "sys_platform == 'darwin'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c5/32/53809096ad5fc3e7a2c5ddea642590a5f2cb5b81d0ad6ea67fdb2263d9f9/pyobjc_framework_cocoa-11.0.tar.gz", hash = "sha256:00346a8cb81ad7b017b32ff7bf596000f9faa905807b1bd234644ebd47f692c5", size = 6173848 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/23/97/81fd41ad90e9c241172110aa635a6239d56f50d75923aaedbbe351828580/pyobjc_framework_Cocoa-11.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3ea7be6e6dd801b297440de02d312ba3fa7fd3c322db747ae1cb237e975f5d33", size = 385534 }, + { url = "https://files.pythonhosted.org/packages/5b/8d/0e2558447c26b3ba64f7c9776a5a6c9d2ae8abf9d34308b174ae0934402e/pyobjc_framework_Cocoa-11.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:280a577b83c68175a28b2b7138d1d2d3111f2b2b66c30e86f81a19c2b02eae71", size = 385811 }, + { url = "https://files.pythonhosted.org/packages/1d/a5/609281a7e89efefbef9db1d8fe66bc0458c3b4e74e2227c644f9c18926fa/pyobjc_framework_Cocoa-11.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:15b2bd977ed340074f930f1330f03d42912d5882b697d78bd06f8ebe263ef92e", size = 385889 }, + { url = "https://files.pythonhosted.org/packages/93/f6/2d5a863673ef7b85a3cba875c43e6c495fb1307427a6801001ae94bb5e54/pyobjc_framework_Cocoa-11.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:5750001db544e67f2b66f02067d8f0da96bb2ef71732bde104f01b8628f9d7ea", size = 389831 }, +] + +[[package]] +name = "pyobjc-framework-coretext" +version = "11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyobjc-core", marker = "sys_platform == 'darwin'" }, + { name = "pyobjc-framework-cocoa", marker = "sys_platform == 'darwin'" }, + { name = "pyobjc-framework-quartz", marker = "sys_platform == 'darwin'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/9d/e8/9b68dc788828e38143a3e834e66346713751cb83d7f0955016323005c1a2/pyobjc_framework_coretext-11.0.tar.gz", hash = "sha256:a68437153e627847e3898754dd3f13ae0cb852246b016a91f9c9cbccb9f91a43", size = 274222 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/20/b8a967101b585a2425ffe645135f8618edd51e1430aeb668373475a07d1f/pyobjc_framework_CoreText-11.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:56a4889858308b0d9f147d568b4d91c441cc0ffd332497cb4f709bb1990450c1", size = 30397 }, + { url = "https://files.pythonhosted.org/packages/0d/14/d300b8bf18acd1d98d40820d2a9b5c5b6cf96325bdfc5020bc963218e001/pyobjc_framework_CoreText-11.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fb90e7f370b3fd7cb2fb442e3dc63fedf0b4af6908db1c18df694d10dc94669d", size = 30456 }, + { url = "https://files.pythonhosted.org/packages/94/f0/53b681481e9429e8f9ac2c039da6a820d7417ca92f763f01d629db36c530/pyobjc_framework_CoreText-11.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7947f755782456bd663e0b00c7905eeffd10f839f0bf2af031f68ded6a1ea360", size = 30453 }, + { url = "https://files.pythonhosted.org/packages/2a/3f/a6d09952e83d70be6d337a5f1d457018459a57a110a91c3e771a2f2a7de0/pyobjc_framework_CoreText-11.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:5356116bae33ec49f1f212c301378a7d08000440a2d6a7281aab351945528ab9", size = 31092 }, +] + +[[package]] +name = "pyobjc-framework-quartz" +version = "11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyobjc-core", marker = "sys_platform == 'darwin'" }, + { name = "pyobjc-framework-cocoa", marker = "sys_platform == 'darwin'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a5/ad/f00f3f53387c23bbf4e0bb1410e11978cbf87c82fa6baff0ee86f74c5fb6/pyobjc_framework_quartz-11.0.tar.gz", hash = "sha256:3205bf7795fb9ae34747f701486b3db6dfac71924894d1f372977c4d70c3c619", size = 3952463 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/a3/6a/68957c8c5e8f0128d4d419728bac397d48fa7ad7a66e82b70e64d129ffca/pyobjc_framework_Quartz-11.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d251696bfd8e8ef72fbc90eb29fec95cb9d1cc409008a183d5cc3246130ae8c2", size = 212349 }, + { url = "https://files.pythonhosted.org/packages/60/5d/df827b78dcb5140652ad08af8038c9ddd7e01e6bdf84462bfee644e6e661/pyobjc_framework_Quartz-11.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:cb4a9f2d9d580ea15e25e6b270f47681afb5689cafc9e25712445ce715bcd18e", size = 212061 }, + { url = "https://files.pythonhosted.org/packages/a6/9e/54c48fe8faab06ee5eb80796c8c17ec61fc313d84398540ee70abeaf7070/pyobjc_framework_Quartz-11.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:973b4f9b8ab844574461a038bd5269f425a7368d6e677e3cc81fcc9b27b65498", size = 212478 }, + { url = "https://files.pythonhosted.org/packages/4a/28/456b54a59bfe11a91b7b4e94f8ffdcf174ffd1efa169f4283e5b3bc10194/pyobjc_framework_Quartz-11.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:66ab58d65348863b8707e63b2ec5cdc54569ee8189d1af90d52f29f5fdf6272c", size = 217973 }, +] + +[[package]] +name = "pyopengl" +version = "3.1.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c0/42/71080db298df3ddb7e3090bfea8fd7c300894d8b10954c22f8719bd434eb/pyopengl-3.1.9.tar.gz", hash = "sha256:28ebd82c5f4491a418aeca9672dffb3adbe7d33b39eada4548a5b4e8c03f60c8", size = 1913642 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/92/44/8634af40b0db528b5b37e901c0dc67321354880d251bf8965901d57693a5/PyOpenGL-3.1.9-py3-none-any.whl", hash = "sha256:15995fd3b0deb991376805da36137a4ae5aba6ddbb5e29ac1f35462d130a3f77", size = 3190341 }, +] + +[[package]] +name = "pyparsing" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/bb/22/f1129e69d94ffff626bdb5c835506b3a5b4f3d070f17ea295e12c2c6f60f/pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be", size = 1088608 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/e7/df2285f3d08fee213f2d041540fa4fc9ca6c2d44cf36d3a035bf2a8d2bcc/pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf", size = 111120 }, +] + +[[package]] +name = "pysocks" +version = "1.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bd/11/293dd436aea955d45fc4e8a35b6ae7270f5b8e00b53cf6c024c83b657a11/PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0", size = 284429 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/59/b4572118e098ac8e46e399a1dd0f2d85403ce8bbaad9ec79373ed6badaf9/PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5", size = 16725 }, +] + +[[package]] +name = "pytest" +version = "8.3.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634 }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } 
+dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 }, +] + +[[package]] +name = "python-xlib" +version = "0.33" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/86/f5/8c0653e5bb54e0cbdfe27bf32d41f27bc4e12faa8742778c17f2a71be2c0/python-xlib-0.33.tar.gz", hash = "sha256:55af7906a2c75ce6cb280a584776080602444f75815a7aff4d287bb2d7018b32", size = 269068 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/b8/ff33610932e0ee81ae7f1269c890f697d56ff74b9f5b2ee5d9b7fa2c5355/python_xlib-0.33-py2.py3-none-any.whl", hash = "sha256:c3534038d42e0df2f1392a1b30a15a4ff5fdc2b86cfa94f072bf11b10a164398", size = 182185 }, +] + +[[package]] +name = "pytz" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225 }, +] + +[[package]] +name = "pywin32" +version = "310" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/f7/b1/68aa2986129fb1011dabbe95f0136f44509afaf072b12b8f815905a39f33/pywin32-310-cp311-cp311-win32.whl", hash = "sha256:1e765f9564e83011a63321bb9d27ec456a0ed90d3732c4b2e312b855365ed8bd", size = 8784284 }, + { url = "https://files.pythonhosted.org/packages/b3/bd/d1592635992dd8db5bb8ace0551bc3a769de1ac8850200cfa517e72739fb/pywin32-310-cp311-cp311-win_amd64.whl", hash = "sha256:126298077a9d7c95c53823934f000599f66ec9296b09167810eb24875f32689c", size = 9520748 }, + { url = "https://files.pythonhosted.org/packages/90/b1/ac8b1ffce6603849eb45a91cf126c0fa5431f186c2e768bf56889c46f51c/pywin32-310-cp311-cp311-win_arm64.whl", hash = "sha256:19ec5fc9b1d51c4350be7bb00760ffce46e6c95eaf2f0b2f1150657b1a43c582", size = 8455941 }, + { url = "https://files.pythonhosted.org/packages/6b/ec/4fdbe47932f671d6e348474ea35ed94227fb5df56a7c30cbbb42cd396ed0/pywin32-310-cp312-cp312-win32.whl", hash = "sha256:8a75a5cc3893e83a108c05d82198880704c44bbaee4d06e442e471d3c9ea4f3d", size = 8796239 }, + { url = "https://files.pythonhosted.org/packages/e3/e5/b0627f8bb84e06991bea89ad8153a9e50ace40b2e1195d68e9dff6b03d0f/pywin32-310-cp312-cp312-win_amd64.whl", hash = "sha256:bf5c397c9a9a19a6f62f3fb821fbf36cac08f03770056711f765ec1503972060", size = 9503839 }, + { url = "https://files.pythonhosted.org/packages/1f/32/9ccf53748df72301a89713936645a664ec001abd35ecc8578beda593d37d/pywin32-310-cp312-cp312-win_arm64.whl", hash = "sha256:2349cc906eae872d0663d4d6290d13b90621eaf78964bb1578632ff20e152966", size = 8459470 }, + { url = "https://files.pythonhosted.org/packages/1c/09/9c1b978ffc4ae53999e89c19c77ba882d9fce476729f23ef55211ea1c034/pywin32-310-cp313-cp313-win32.whl", hash = "sha256:5d241a659c496ada3253cd01cfaa779b048e90ce4b2b38cd44168ad555ce74ab", size = 8794384 }, + { url = "https://files.pythonhosted.org/packages/45/3c/b4640f740ffebadd5d34df35fecba0e1cfef8fde9f3e594df91c28ad9b50/pywin32-310-cp313-cp313-win_amd64.whl", hash = 
"sha256:667827eb3a90208ddbdcc9e860c81bde63a135710e21e4cb3348968e4bd5249e", size = 9503039 }, + { url = "https://files.pythonhosted.org/packages/b4/f4/f785020090fb050e7fb6d34b780f2231f302609dc964672f72bfaeb59a28/pywin32-310-cp313-cp313-win_arm64.whl", hash = "sha256:e308f831de771482b7cf692a1f308f8fca701b2d8f9dde6cc440c7da17e47b33", size = 8458152 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612 }, + { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040 }, + { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829 }, + { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167 }, + { url = 
"https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952 }, + { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301 }, + { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638 }, + { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850 }, + { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980 }, + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 }, + { url = 
"https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, + { url = 
"https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = 
"https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, +] + +[[package]] +name = "pyyaml-include" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7f/be/2d07ad85e3d593d69640876a8686eae2c533db8cb7bf298d25c421b4d2d5/pyyaml-include-1.4.1.tar.gz", hash = "sha256:1a96e33a99a3e56235f5221273832464025f02ff3d8539309a3bf00dec624471", size = 20592 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/ca/6a2cc3a73170d10b5af1f1613baa2ed1f8f46f62dd0bfab2bffd2c2fe260/pyyaml_include-1.4.1-py3-none-any.whl", hash = "sha256:323c7f3a19c82fbc4d73abbaab7ef4f793e146a13383866831631b26ccc7fb00", size = 19079 }, +] + +[[package]] +name = "pyzmq" +version = "26.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "implementation_name == 'pypy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/11/b9213d25230ac18a71b39b3723494e57adebe36e066397b961657b3b41c1/pyzmq-26.4.0.tar.gz", hash = "sha256:4bd13f85f80962f91a651a7356fe0472791a5f7a92f227822b5acf44795c626d", size = 278293 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/6d/234e3b0aa82fd0290b1896e9992f56bdddf1f97266110be54d0177a9d2d9/pyzmq-26.4.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:bfcf82644c9b45ddd7cd2a041f3ff8dce4a0904429b74d73a439e8cab1bd9e54", size = 1339723 }, + { url = 
"https://files.pythonhosted.org/packages/4f/11/6d561efe29ad83f7149a7cd48e498e539ed09019c6cd7ecc73f4cc725028/pyzmq-26.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9bcae3979b2654d5289d3490742378b2f3ce804b0b5fd42036074e2bf35b030", size = 672645 }, + { url = "https://files.pythonhosted.org/packages/19/fd/81bfe3e23f418644660bad1a90f0d22f0b3eebe33dd65a79385530bceb3d/pyzmq-26.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ccdff8ac4246b6fb60dcf3982dfaeeff5dd04f36051fe0632748fc0aa0679c01", size = 910133 }, + { url = "https://files.pythonhosted.org/packages/97/68/321b9c775595ea3df832a9516252b653fe32818db66fdc8fa31c9b9fce37/pyzmq-26.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4550af385b442dc2d55ab7717837812799d3674cb12f9a3aa897611839c18e9e", size = 867428 }, + { url = "https://files.pythonhosted.org/packages/4e/6e/159cbf2055ef36aa2aa297e01b24523176e5b48ead283c23a94179fb2ba2/pyzmq-26.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:2f9f7ffe9db1187a253fca95191854b3fda24696f086e8789d1d449308a34b88", size = 862409 }, + { url = "https://files.pythonhosted.org/packages/05/1c/45fb8db7be5a7d0cadea1070a9cbded5199a2d578de2208197e592f219bd/pyzmq-26.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3709c9ff7ba61589b7372923fd82b99a81932b592a5c7f1a24147c91da9a68d6", size = 1205007 }, + { url = "https://files.pythonhosted.org/packages/f8/fa/658c7f583af6498b463f2fa600f34e298e1b330886f82f1feba0dc2dd6c3/pyzmq-26.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f8f3c30fb2d26ae5ce36b59768ba60fb72507ea9efc72f8f69fa088450cff1df", size = 1514599 }, + { url = "https://files.pythonhosted.org/packages/4d/d7/44d641522353ce0a2bbd150379cb5ec32f7120944e6bfba4846586945658/pyzmq-26.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:382a4a48c8080e273427fc692037e3f7d2851959ffe40864f2db32646eeb3cef", size = 1414546 }, + { url = 
"https://files.pythonhosted.org/packages/72/76/c8ed7263218b3d1e9bce07b9058502024188bd52cc0b0a267a9513b431fc/pyzmq-26.4.0-cp311-cp311-win32.whl", hash = "sha256:d56aad0517d4c09e3b4f15adebba8f6372c5102c27742a5bdbfc74a7dceb8fca", size = 579247 }, + { url = "https://files.pythonhosted.org/packages/c3/d0/2d9abfa2571a0b1a67c0ada79a8aa1ba1cce57992d80f771abcdf99bb32c/pyzmq-26.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:963977ac8baed7058c1e126014f3fe58b3773f45c78cce7af5c26c09b6823896", size = 644727 }, + { url = "https://files.pythonhosted.org/packages/0d/d1/c8ad82393be6ccedfc3c9f3adb07f8f3976e3c4802640fe3f71441941e70/pyzmq-26.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:c0c8e8cadc81e44cc5088fcd53b9b3b4ce9344815f6c4a03aec653509296fae3", size = 559942 }, + { url = "https://files.pythonhosted.org/packages/10/44/a778555ebfdf6c7fc00816aad12d185d10a74d975800341b1bc36bad1187/pyzmq-26.4.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:5227cb8da4b6f68acfd48d20c588197fd67745c278827d5238c707daf579227b", size = 1341586 }, + { url = "https://files.pythonhosted.org/packages/9c/4f/f3a58dc69ac757e5103be3bd41fb78721a5e17da7cc617ddb56d973a365c/pyzmq-26.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1c07a7fa7f7ba86554a2b1bef198c9fed570c08ee062fd2fd6a4dcacd45f905", size = 665880 }, + { url = "https://files.pythonhosted.org/packages/fe/45/50230bcfb3ae5cb98bee683b6edeba1919f2565d7cc1851d3c38e2260795/pyzmq-26.4.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae775fa83f52f52de73183f7ef5395186f7105d5ed65b1ae65ba27cb1260de2b", size = 902216 }, + { url = "https://files.pythonhosted.org/packages/41/59/56bbdc5689be5e13727491ad2ba5efd7cd564365750514f9bc8f212eef82/pyzmq-26.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66c760d0226ebd52f1e6b644a9e839b5db1e107a23f2fcd46ec0569a4fdd4e63", size = 859814 }, + { url = 
"https://files.pythonhosted.org/packages/81/b1/57db58cfc8af592ce94f40649bd1804369c05b2190e4cbc0a2dad572baeb/pyzmq-26.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ef8c6ecc1d520debc147173eaa3765d53f06cd8dbe7bd377064cdbc53ab456f5", size = 855889 }, + { url = "https://files.pythonhosted.org/packages/e8/92/47542e629cbac8f221c230a6d0f38dd3d9cff9f6f589ed45fdf572ffd726/pyzmq-26.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3150ef4084e163dec29ae667b10d96aad309b668fac6810c9e8c27cf543d6e0b", size = 1197153 }, + { url = "https://files.pythonhosted.org/packages/07/e5/b10a979d1d565d54410afc87499b16c96b4a181af46e7645ab4831b1088c/pyzmq-26.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4448c9e55bf8329fa1dcedd32f661bf611214fa70c8e02fee4347bc589d39a84", size = 1507352 }, + { url = "https://files.pythonhosted.org/packages/ab/58/5a23db84507ab9c01c04b1232a7a763be66e992aa2e66498521bbbc72a71/pyzmq-26.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e07dde3647afb084d985310d067a3efa6efad0621ee10826f2cb2f9a31b89d2f", size = 1406834 }, + { url = "https://files.pythonhosted.org/packages/22/74/aaa837b331580c13b79ac39396601fb361454ee184ca85e8861914769b99/pyzmq-26.4.0-cp312-cp312-win32.whl", hash = "sha256:ba034a32ecf9af72adfa5ee383ad0fd4f4e38cdb62b13624278ef768fe5b5b44", size = 577992 }, + { url = "https://files.pythonhosted.org/packages/30/0f/55f8c02c182856743b82dde46b2dc3e314edda7f1098c12a8227eeda0833/pyzmq-26.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:056a97aab4064f526ecb32f4343917a4022a5d9efb6b9df990ff72e1879e40be", size = 640466 }, + { url = "https://files.pythonhosted.org/packages/e4/29/073779afc3ef6f830b8de95026ef20b2d1ec22d0324d767748d806e57379/pyzmq-26.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:2f23c750e485ce1eb639dbd576d27d168595908aa2d60b149e2d9e34c9df40e0", size = 556342 }, + { url = 
"https://files.pythonhosted.org/packages/d7/20/fb2c92542488db70f833b92893769a569458311a76474bda89dc4264bd18/pyzmq-26.4.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:c43fac689880f5174d6fc864857d1247fe5cfa22b09ed058a344ca92bf5301e3", size = 1339484 }, + { url = "https://files.pythonhosted.org/packages/58/29/2f06b9cabda3a6ea2c10f43e67ded3e47fc25c54822e2506dfb8325155d4/pyzmq-26.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:902aca7eba477657c5fb81c808318460328758e8367ecdd1964b6330c73cae43", size = 666106 }, + { url = "https://files.pythonhosted.org/packages/77/e4/dcf62bd29e5e190bd21bfccaa4f3386e01bf40d948c239239c2f1e726729/pyzmq-26.4.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5e48a830bfd152fe17fbdeaf99ac5271aa4122521bf0d275b6b24e52ef35eb6", size = 902056 }, + { url = "https://files.pythonhosted.org/packages/1a/cf/b36b3d7aea236087d20189bec1a87eeb2b66009731d7055e5c65f845cdba/pyzmq-26.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31be2b6de98c824c06f5574331f805707c667dc8f60cb18580b7de078479891e", size = 860148 }, + { url = "https://files.pythonhosted.org/packages/18/a6/f048826bc87528c208e90604c3bf573801e54bd91e390cbd2dfa860e82dc/pyzmq-26.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:6332452034be001bbf3206ac59c0d2a7713de5f25bb38b06519fc6967b7cf771", size = 855983 }, + { url = "https://files.pythonhosted.org/packages/0a/27/454d34ab6a1d9772a36add22f17f6b85baf7c16e14325fa29e7202ca8ee8/pyzmq-26.4.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:da8c0f5dd352136853e6a09b1b986ee5278dfddfebd30515e16eae425c872b30", size = 1197274 }, + { url = "https://files.pythonhosted.org/packages/f4/3d/7abfeab6b83ad38aa34cbd57c6fc29752c391e3954fd12848bd8d2ec0df6/pyzmq-26.4.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f4ccc1a0a2c9806dda2a2dd118a3b7b681e448f3bb354056cad44a65169f6d86", size = 1507120 }, + { url = 
"https://files.pythonhosted.org/packages/13/ff/bc8d21dbb9bc8705126e875438a1969c4f77e03fc8565d6901c7933a3d01/pyzmq-26.4.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:1c0b5fceadbab461578daf8d1dcc918ebe7ddd2952f748cf30c7cf2de5d51101", size = 1406738 }, + { url = "https://files.pythonhosted.org/packages/f5/5d/d4cd85b24de71d84d81229e3bbb13392b2698432cf8fdcea5afda253d587/pyzmq-26.4.0-cp313-cp313-win32.whl", hash = "sha256:28e2b0ff5ba4b3dd11062d905682bad33385cfa3cc03e81abd7f0822263e6637", size = 577826 }, + { url = "https://files.pythonhosted.org/packages/c6/6c/f289c1789d7bb6e5a3b3bef7b2a55089b8561d17132be7d960d3ff33b14e/pyzmq-26.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:23ecc9d241004c10e8b4f49d12ac064cd7000e1643343944a10df98e57bc544b", size = 640406 }, + { url = "https://files.pythonhosted.org/packages/b3/99/676b8851cb955eb5236a0c1e9ec679ea5ede092bf8bf2c8a68d7e965cac3/pyzmq-26.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:1edb0385c7f025045d6e0f759d4d3afe43c17a3d898914ec6582e6f464203c08", size = 556216 }, + { url = "https://files.pythonhosted.org/packages/65/c2/1fac340de9d7df71efc59d9c50fc7a635a77b103392d1842898dd023afcb/pyzmq-26.4.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:93a29e882b2ba1db86ba5dd5e88e18e0ac6b627026c5cfbec9983422011b82d4", size = 1333769 }, + { url = "https://files.pythonhosted.org/packages/5c/c7/6c03637e8d742c3b00bec4f5e4cd9d1c01b2f3694c6f140742e93ca637ed/pyzmq-26.4.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb45684f276f57110bb89e4300c00f1233ca631f08f5f42528a5c408a79efc4a", size = 658826 }, + { url = "https://files.pythonhosted.org/packages/a5/97/a8dca65913c0f78e0545af2bb5078aebfc142ca7d91cdaffa1fbc73e5dbd/pyzmq-26.4.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f72073e75260cb301aad4258ad6150fa7f57c719b3f498cb91e31df16784d89b", size = 891650 }, + { url = 
"https://files.pythonhosted.org/packages/7d/7e/f63af1031eb060bf02d033732b910fe48548dcfdbe9c785e9f74a6cc6ae4/pyzmq-26.4.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be37e24b13026cfedd233bcbbccd8c0bcd2fdd186216094d095f60076201538d", size = 849776 }, + { url = "https://files.pythonhosted.org/packages/f6/fa/1a009ce582802a895c0d5fe9413f029c940a0a8ee828657a3bb0acffd88b/pyzmq-26.4.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:237b283044934d26f1eeff4075f751b05d2f3ed42a257fc44386d00df6a270cf", size = 842516 }, + { url = "https://files.pythonhosted.org/packages/6e/bc/f88b0bad0f7a7f500547d71e99f10336f2314e525d4ebf576a1ea4a1d903/pyzmq-26.4.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:b30f862f6768b17040929a68432c8a8be77780317f45a353cb17e423127d250c", size = 1189183 }, + { url = "https://files.pythonhosted.org/packages/d9/8c/db446a3dd9cf894406dec2e61eeffaa3c07c3abb783deaebb9812c4af6a5/pyzmq-26.4.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:c80fcd3504232f13617c6ab501124d373e4895424e65de8b72042333316f64a8", size = 1495501 }, + { url = "https://files.pythonhosted.org/packages/05/4c/bf3cad0d64c3214ac881299c4562b815f05d503bccc513e3fd4fdc6f67e4/pyzmq-26.4.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:26a2a7451606b87f67cdeca2c2789d86f605da08b4bd616b1a9981605ca3a364", size = 1395540 }, + { url = "https://files.pythonhosted.org/packages/04/52/a70fcd5592715702248306d8e1729c10742c2eac44529984413b05c68658/pyzmq-26.4.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4478b14cb54a805088299c25a79f27eaf530564a7a4f72bf432a040042b554eb", size = 834405 }, + { url = "https://files.pythonhosted.org/packages/25/f9/1a03f1accff16b3af1a6fa22cbf7ced074776abbf688b2e9cb4629700c62/pyzmq-26.4.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a28ac29c60e4ba84b5f58605ace8ad495414a724fe7aceb7cf06cd0598d04e1", size = 569578 }, + { url = 
"https://files.pythonhosted.org/packages/76/0c/3a633acd762aa6655fcb71fa841907eae0ab1e8582ff494b137266de341d/pyzmq-26.4.0-pp311-pypy311_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43b03c1ceea27c6520124f4fb2ba9c647409b9abdf9a62388117148a90419494", size = 798248 }, + { url = "https://files.pythonhosted.org/packages/cd/cc/6c99c84aa60ac1cc56747bed6be8ce6305b9b861d7475772e7a25ce019d3/pyzmq-26.4.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7731abd23a782851426d4e37deb2057bf9410848a4459b5ede4fe89342e687a9", size = 756757 }, + { url = "https://files.pythonhosted.org/packages/13/9c/d8073bd898eb896e94c679abe82e47506e2b750eb261cf6010ced869797c/pyzmq-26.4.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a222ad02fbe80166b0526c038776e8042cd4e5f0dec1489a006a1df47e9040e0", size = 555371 }, +] + +[[package]] +name = "regex" +version = "2024.11.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/5f/bd69653fbfb76cf8604468d3b4ec4c403197144c7bfe0e6a5fc9e02a07cb/regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519", size = 399494 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/58/58/7e4d9493a66c88a7da6d205768119f51af0f684fe7be7bac8328e217a52c/regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638", size = 482669 }, + { url = "https://files.pythonhosted.org/packages/34/4c/8f8e631fcdc2ff978609eaeef1d6994bf2f028b59d9ac67640ed051f1218/regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7", size = 287684 }, + { url = "https://files.pythonhosted.org/packages/c5/1b/f0e4d13e6adf866ce9b069e191f303a30ab1277e037037a365c3aad5cc9c/regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20", size = 284589 }, + { url = "https://files.pythonhosted.org/packages/25/4d/ab21047f446693887f25510887e6820b93f791992994f6498b0318904d4a/regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114", size = 792121 }, + { url = "https://files.pythonhosted.org/packages/45/ee/c867e15cd894985cb32b731d89576c41a4642a57850c162490ea34b78c3b/regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3", size = 831275 }, + { url = "https://files.pythonhosted.org/packages/b3/12/b0f480726cf1c60f6536fa5e1c95275a77624f3ac8fdccf79e6727499e28/regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f", size = 818257 }, + { url = "https://files.pythonhosted.org/packages/bf/ce/0d0e61429f603bac433910d99ef1a02ce45a8967ffbe3cbee48599e62d88/regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0", size = 792727 }, + { url = "https://files.pythonhosted.org/packages/e4/c1/243c83c53d4a419c1556f43777ccb552bccdf79d08fda3980e4e77dd9137/regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55", size = 780667 }, + { url = "https://files.pythonhosted.org/packages/c5/f4/75eb0dd4ce4b37f04928987f1d22547ddaf6c4bae697623c1b05da67a8aa/regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89", size = 776963 }, + { url = 
"https://files.pythonhosted.org/packages/16/5d/95c568574e630e141a69ff8a254c2f188b4398e813c40d49228c9bbd9875/regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d", size = 784700 }, + { url = "https://files.pythonhosted.org/packages/8e/b5/f8495c7917f15cc6fee1e7f395e324ec3e00ab3c665a7dc9d27562fd5290/regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34", size = 848592 }, + { url = "https://files.pythonhosted.org/packages/1c/80/6dd7118e8cb212c3c60b191b932dc57db93fb2e36fb9e0e92f72a5909af9/regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d", size = 852929 }, + { url = "https://files.pythonhosted.org/packages/11/9b/5a05d2040297d2d254baf95eeeb6df83554e5e1df03bc1a6687fc4ba1f66/regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45", size = 781213 }, + { url = "https://files.pythonhosted.org/packages/26/b7/b14e2440156ab39e0177506c08c18accaf2b8932e39fb092074de733d868/regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9", size = 261734 }, + { url = "https://files.pythonhosted.org/packages/80/32/763a6cc01d21fb3819227a1cc3f60fd251c13c37c27a73b8ff4315433a8e/regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60", size = 274052 }, + { url = "https://files.pythonhosted.org/packages/ba/30/9a87ce8336b172cc232a0db89a3af97929d06c11ceaa19d97d84fa90a8f8/regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a", size = 483781 }, + { url = 
"https://files.pythonhosted.org/packages/01/e8/00008ad4ff4be8b1844786ba6636035f7ef926db5686e4c0f98093612add/regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9", size = 288455 }, + { url = "https://files.pythonhosted.org/packages/60/85/cebcc0aff603ea0a201667b203f13ba75d9fc8668fab917ac5b2de3967bc/regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2", size = 284759 }, + { url = "https://files.pythonhosted.org/packages/94/2b/701a4b0585cb05472a4da28ee28fdfe155f3638f5e1ec92306d924e5faf0/regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4", size = 794976 }, + { url = "https://files.pythonhosted.org/packages/4b/bf/fa87e563bf5fee75db8915f7352e1887b1249126a1be4813837f5dbec965/regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577", size = 833077 }, + { url = "https://files.pythonhosted.org/packages/a1/56/7295e6bad94b047f4d0834e4779491b81216583c00c288252ef625c01d23/regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3", size = 823160 }, + { url = "https://files.pythonhosted.org/packages/fb/13/e3b075031a738c9598c51cfbc4c7879e26729c53aa9cca59211c44235314/regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e", size = 796896 }, + { url = "https://files.pythonhosted.org/packages/24/56/0b3f1b66d592be6efec23a795b37732682520b47c53da5a32c33ed7d84e3/regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe", size = 783997 }, + { url = "https://files.pythonhosted.org/packages/f9/a1/eb378dada8b91c0e4c5f08ffb56f25fcae47bf52ad18f9b2f33b83e6d498/regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e", size = 781725 }, + { url = "https://files.pythonhosted.org/packages/83/f2/033e7dec0cfd6dda93390089864732a3409246ffe8b042e9554afa9bff4e/regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29", size = 789481 }, + { url = "https://files.pythonhosted.org/packages/83/23/15d4552ea28990a74e7696780c438aadd73a20318c47e527b47a4a5a596d/regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39", size = 852896 }, + { url = "https://files.pythonhosted.org/packages/e3/39/ed4416bc90deedbfdada2568b2cb0bc1fdb98efe11f5378d9892b2a88f8f/regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51", size = 860138 }, + { url = "https://files.pythonhosted.org/packages/93/2d/dd56bb76bd8e95bbce684326302f287455b56242a4f9c61f1bc76e28360e/regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad", size = 787692 }, + { url = "https://files.pythonhosted.org/packages/0b/55/31877a249ab7a5156758246b9c59539abbeba22461b7d8adc9e8475ff73e/regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54", size = 262135 }, + { url = "https://files.pythonhosted.org/packages/38/ec/ad2d7de49a600cdb8dd78434a1aeffe28b9d6fc42eb36afab4a27ad23384/regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b", size = 273567 }, + { url = 
"https://files.pythonhosted.org/packages/90/73/bcb0e36614601016552fa9344544a3a2ae1809dc1401b100eab02e772e1f/regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84", size = 483525 }, + { url = "https://files.pythonhosted.org/packages/0f/3f/f1a082a46b31e25291d830b369b6b0c5576a6f7fb89d3053a354c24b8a83/regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4", size = 288324 }, + { url = "https://files.pythonhosted.org/packages/09/c9/4e68181a4a652fb3ef5099e077faf4fd2a694ea6e0f806a7737aff9e758a/regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0", size = 284617 }, + { url = "https://files.pythonhosted.org/packages/fc/fd/37868b75eaf63843165f1d2122ca6cb94bfc0271e4428cf58c0616786dce/regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0", size = 795023 }, + { url = "https://files.pythonhosted.org/packages/c4/7c/d4cd9c528502a3dedb5c13c146e7a7a539a3853dc20209c8e75d9ba9d1b2/regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7", size = 833072 }, + { url = "https://files.pythonhosted.org/packages/4f/db/46f563a08f969159c5a0f0e722260568425363bea43bb7ae370becb66a67/regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7", size = 823130 }, + { url = "https://files.pythonhosted.org/packages/db/60/1eeca2074f5b87df394fccaa432ae3fc06c9c9bfa97c5051aed70e6e00c2/regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c", size = 796857 }, + { url = 
"https://files.pythonhosted.org/packages/10/db/ac718a08fcee981554d2f7bb8402f1faa7e868c1345c16ab1ebec54b0d7b/regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3", size = 784006 }, + { url = "https://files.pythonhosted.org/packages/c2/41/7da3fe70216cea93144bf12da2b87367590bcf07db97604edeea55dac9ad/regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07", size = 781650 }, + { url = "https://files.pythonhosted.org/packages/a7/d5/880921ee4eec393a4752e6ab9f0fe28009435417c3102fc413f3fe81c4e5/regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e", size = 789545 }, + { url = "https://files.pythonhosted.org/packages/dc/96/53770115e507081122beca8899ab7f5ae28ae790bfcc82b5e38976df6a77/regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6", size = 853045 }, + { url = "https://files.pythonhosted.org/packages/31/d3/1372add5251cc2d44b451bd94f43b2ec78e15a6e82bff6a290ef9fd8f00a/regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4", size = 860182 }, + { url = "https://files.pythonhosted.org/packages/ed/e3/c446a64984ea9f69982ba1a69d4658d5014bc7a0ea468a07e1a1265db6e2/regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d", size = 787733 }, + { url = "https://files.pythonhosted.org/packages/2b/f1/e40c8373e3480e4f29f2692bd21b3e05f296d3afebc7e5dcf21b9756ca1c/regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff", size = 262122 }, + { url = 
"https://files.pythonhosted.org/packages/45/94/bc295babb3062a731f52621cdc992d123111282e291abaf23faa413443ea/regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a", size = 273545 }, +] + +[[package]] +name = "requests" +version = "2.32.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, +] + +[package.optional-dependencies] +socks = [ + { name = "pysocks" }, +] + +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/f2/05f29bc3913aea15eb670be136045bf5c5bbf4b99ecb839da9b422bb2c85/requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9", size = 55650 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/5d/63d4ae3b9daea098d5d6f5da83984853c1bbacd5dc826764b249fe119d24/requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", size = 24179 }, +] + +[[package]] +name = "rerun-sdk" +version = "0.23.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "numpy" }, + { name = "pillow" }, + { name = "pyarrow" 
}, + { name = "typing-extensions" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/dd/6e/a125f4fe2de3269f443b7cb65d465ffd37a836a2dac7e4318e21239d78c8/rerun_sdk-0.23.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:fe06d21cfcf4d84a9396f421d4779efabec7e9674d232a2c552c8a91d871c375", size = 66094053 }, + { url = "https://files.pythonhosted.org/packages/55/f6/b6d13322b05dc77bd9a0127e98155c2b7ee987a236fd4d331eed2e547a90/rerun_sdk-0.23.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:823ae87bfa644e06fb70bada08a83690dd23d9824a013947f80a22c6731bdc0d", size = 62047843 }, + { url = "https://files.pythonhosted.org/packages/a5/7f/6a7422cb727e14a65b55b0089988eeea8d0532c429397a863e6ba395554a/rerun_sdk-0.23.1-cp39-abi3-manylinux_2_31_aarch64.whl", hash = "sha256:dc5129f8744f71249bf45558c853422c51ef39b6b5eea0ea1f602c6049ce732f", size = 68214509 }, + { url = "https://files.pythonhosted.org/packages/4f/86/3aee9eadbfe55188a2c7d739378545b4319772a4d3b165e8d3fc598fa630/rerun_sdk-0.23.1-cp39-abi3-manylinux_2_31_x86_64.whl", hash = "sha256:ee0d0e17df0e08be13b77cc74884c5d8ba8edb39b6f5a60dc2429d39033d90f6", size = 71442196 }, + { url = "https://files.pythonhosted.org/packages/a7/ba/028bd382e2ae21e6643cec25f423285dbc6b328ce56d55727b4101ef9443/rerun_sdk-0.23.1-cp39-abi3-win_amd64.whl", hash = "sha256:d4273db55b56310b053a2de6bf5927a8692cf65f4d234c6e6928fb24ed8a960d", size = 57583198 }, +] + +[[package]] +name = "rich" +version = "14.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/53/830aa4c3066a8ab0ae9a9955976fb770fe9c6102117c8ec4ab3ea62d89e8/rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725", size = 224078 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl", hash = 
"sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0", size = 243229 }, +] + +[[package]] +name = "rsa" +version = "4.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696 }, +] + +[[package]] +name = "ruff" +version = "0.11.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/15/0a/92416b159ec00cdf11e5882a9d80d29bf84bba3dbebc51c4898bfbca1da6/ruff-0.11.12.tar.gz", hash = "sha256:43cf7f69c7d7c7d7513b9d59c5d8cafd704e05944f978614aa9faff6ac202603", size = 4202289 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/cc/53eb79f012d15e136d40a8e8fc519ba8f55a057f60b29c2df34efd47c6e3/ruff-0.11.12-py3-none-linux_armv6l.whl", hash = "sha256:c7680aa2f0d4c4f43353d1e72123955c7a2159b8646cd43402de6d4a3a25d7cc", size = 10285597 }, + { url = "https://files.pythonhosted.org/packages/e7/d7/73386e9fb0232b015a23f62fea7503f96e29c29e6c45461d4a73bac74df9/ruff-0.11.12-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:2cad64843da9f134565c20bcc430642de897b8ea02e2e79e6e02a76b8dcad7c3", size = 11053154 }, + { url = "https://files.pythonhosted.org/packages/4e/eb/3eae144c5114e92deb65a0cb2c72326c8469e14991e9bc3ec0349da1331c/ruff-0.11.12-py3-none-macosx_11_0_arm64.whl", hash = "sha256:9b6886b524a1c659cee1758140138455d3c029783d1b9e643f3624a5ee0cb0aa", size = 10403048 }, + { url = 
"https://files.pythonhosted.org/packages/29/64/20c54b20e58b1058db6689e94731f2a22e9f7abab74e1a758dfba058b6ca/ruff-0.11.12-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cc3a3690aad6e86c1958d3ec3c38c4594b6ecec75c1f531e84160bd827b2012", size = 10597062 }, + { url = "https://files.pythonhosted.org/packages/29/3a/79fa6a9a39422a400564ca7233a689a151f1039110f0bbbabcb38106883a/ruff-0.11.12-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f97fdbc2549f456c65b3b0048560d44ddd540db1f27c778a938371424b49fe4a", size = 10155152 }, + { url = "https://files.pythonhosted.org/packages/e5/a4/22c2c97b2340aa968af3a39bc38045e78d36abd4ed3fa2bde91c31e712e3/ruff-0.11.12-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74adf84960236961090e2d1348c1a67d940fd12e811a33fb3d107df61eef8fc7", size = 11723067 }, + { url = "https://files.pythonhosted.org/packages/bc/cf/3e452fbd9597bcd8058856ecd42b22751749d07935793a1856d988154151/ruff-0.11.12-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:b56697e5b8bcf1d61293ccfe63873aba08fdbcbbba839fc046ec5926bdb25a3a", size = 12460807 }, + { url = "https://files.pythonhosted.org/packages/2f/ec/8f170381a15e1eb7d93cb4feef8d17334d5a1eb33fee273aee5d1f8241a3/ruff-0.11.12-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d47afa45e7b0eaf5e5969c6b39cbd108be83910b5c74626247e366fd7a36a13", size = 12063261 }, + { url = "https://files.pythonhosted.org/packages/0d/bf/57208f8c0a8153a14652a85f4116c0002148e83770d7a41f2e90b52d2b4e/ruff-0.11.12-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:692bf9603fe1bf949de8b09a2da896f05c01ed7a187f4a386cdba6760e7f61be", size = 11329601 }, + { url = "https://files.pythonhosted.org/packages/c3/56/edf942f7fdac5888094d9ffa303f12096f1a93eb46570bcf5f14c0c70880/ruff-0.11.12-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08033320e979df3b20dba567c62f69c45e01df708b0f9c83912d7abd3e0801cd", 
size = 11522186 }, + { url = "https://files.pythonhosted.org/packages/ed/63/79ffef65246911ed7e2290aeece48739d9603b3a35f9529fec0fc6c26400/ruff-0.11.12-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:929b7706584f5bfd61d67d5070f399057d07c70585fa8c4491d78ada452d3bef", size = 10449032 }, + { url = "https://files.pythonhosted.org/packages/88/19/8c9d4d8a1c2a3f5a1ea45a64b42593d50e28b8e038f1aafd65d6b43647f3/ruff-0.11.12-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7de4a73205dc5756b8e09ee3ed67c38312dce1aa28972b93150f5751199981b5", size = 10129370 }, + { url = "https://files.pythonhosted.org/packages/bc/0f/2d15533eaa18f460530a857e1778900cd867ded67f16c85723569d54e410/ruff-0.11.12-py3-none-musllinux_1_2_i686.whl", hash = "sha256:2635c2a90ac1b8ca9e93b70af59dfd1dd2026a40e2d6eebaa3efb0465dd9cf02", size = 11123529 }, + { url = "https://files.pythonhosted.org/packages/4f/e2/4c2ac669534bdded835356813f48ea33cfb3a947dc47f270038364587088/ruff-0.11.12-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d05d6a78a89166f03f03a198ecc9d18779076ad0eec476819467acb401028c0c", size = 11577642 }, + { url = "https://files.pythonhosted.org/packages/a7/9b/c9ddf7f924d5617a1c94a93ba595f4b24cb5bc50e98b94433ab3f7ad27e5/ruff-0.11.12-py3-none-win32.whl", hash = "sha256:f5a07f49767c4be4772d161bfc049c1f242db0cfe1bd976e0f0886732a4765d6", size = 10475511 }, + { url = "https://files.pythonhosted.org/packages/fd/d6/74fb6d3470c1aada019ffff33c0f9210af746cca0a4de19a1f10ce54968a/ruff-0.11.12-py3-none-win_amd64.whl", hash = "sha256:5a4d9f8030d8c3a45df201d7fb3ed38d0219bccd7955268e863ee4a115fa0832", size = 11523573 }, + { url = "https://files.pythonhosted.org/packages/44/42/d58086ec20f52d2b0140752ae54b355ea2be2ed46f914231136dd1effcc7/ruff-0.11.12-py3-none-win_arm64.whl", hash = "sha256:65194e37853158d368e333ba282217941029a28ea90913c67e558c611d04daa5", size = 10697770 }, +] + +[[package]] +name = "safetensors" +version = "0.5.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/71/7e/2d5d6ee7b40c0682315367ec7475693d110f512922d582fef1bd4a63adc3/safetensors-0.5.3.tar.gz", hash = "sha256:b6b0d6ecacec39a4fdd99cc19f4576f5219ce858e6fd8dbe7609df0b8dc56965", size = 67210 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/ae/88f6c49dbd0cc4da0e08610019a3c78a7d390879a919411a410a1876d03a/safetensors-0.5.3-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bd20eb133db8ed15b40110b7c00c6df51655a2998132193de2f75f72d99c7073", size = 436917 }, + { url = "https://files.pythonhosted.org/packages/b8/3b/11f1b4a2f5d2ab7da34ecc062b0bc301f2be024d110a6466726bec8c055c/safetensors-0.5.3-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:21d01c14ff6c415c485616b8b0bf961c46b3b343ca59110d38d744e577f9cce7", size = 418419 }, + { url = "https://files.pythonhosted.org/packages/5d/9a/add3e6fef267658075c5a41573c26d42d80c935cdc992384dfae435feaef/safetensors-0.5.3-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11bce6164887cd491ca75c2326a113ba934be596e22b28b1742ce27b1d076467", size = 459493 }, + { url = "https://files.pythonhosted.org/packages/df/5c/bf2cae92222513cc23b3ff85c4a1bb2811a2c3583ac0f8e8d502751de934/safetensors-0.5.3-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4a243be3590bc3301c821da7a18d87224ef35cbd3e5f5727e4e0728b8172411e", size = 472400 }, + { url = "https://files.pythonhosted.org/packages/58/11/7456afb740bd45782d0f4c8e8e1bb9e572f1bf82899fb6ace58af47b4282/safetensors-0.5.3-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8bd84b12b1670a6f8e50f01e28156422a2bc07fb16fc4e98bded13039d688a0d", size = 522891 }, + { url = "https://files.pythonhosted.org/packages/57/3d/fe73a9d2ace487e7285f6e157afee2383bd1ddb911b7cb44a55cf812eae3/safetensors-0.5.3-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:391ac8cab7c829452175f871fcaf414aa1e292b5448bd02620f675a7f3e7abb9", size = 537694 }, + { url = 
"https://files.pythonhosted.org/packages/a6/f8/dae3421624fcc87a89d42e1898a798bc7ff72c61f38973a65d60df8f124c/safetensors-0.5.3-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cead1fa41fc54b1e61089fa57452e8834f798cb1dc7a09ba3524f1eb08e0317a", size = 471642 }, + { url = "https://files.pythonhosted.org/packages/ce/20/1fbe16f9b815f6c5a672f5b760951e20e17e43f67f231428f871909a37f6/safetensors-0.5.3-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1077f3e94182d72618357b04b5ced540ceb71c8a813d3319f1aba448e68a770d", size = 502241 }, + { url = "https://files.pythonhosted.org/packages/5f/18/8e108846b506487aa4629fe4116b27db65c3dde922de2c8e0cc1133f3f29/safetensors-0.5.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:799021e78287bac619c7b3f3606730a22da4cda27759ddf55d37c8db7511c74b", size = 638001 }, + { url = "https://files.pythonhosted.org/packages/82/5a/c116111d8291af6c8c8a8b40628fe833b9db97d8141c2a82359d14d9e078/safetensors-0.5.3-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:df26da01aaac504334644e1b7642fa000bfec820e7cef83aeac4e355e03195ff", size = 734013 }, + { url = "https://files.pythonhosted.org/packages/7d/ff/41fcc4d3b7de837963622e8610d998710705bbde9a8a17221d85e5d0baad/safetensors-0.5.3-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:32c3ef2d7af8b9f52ff685ed0bc43913cdcde135089ae322ee576de93eae5135", size = 670687 }, + { url = "https://files.pythonhosted.org/packages/40/ad/2b113098e69c985a3d8fbda4b902778eae4a35b7d5188859b4a63d30c161/safetensors-0.5.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:37f1521be045e56fc2b54c606d4455573e717b2d887c579ee1dbba5f868ece04", size = 643147 }, + { url = "https://files.pythonhosted.org/packages/0a/0c/95aeb51d4246bd9a3242d3d8349c1112b4ee7611a4b40f0c5c93b05f001d/safetensors-0.5.3-cp38-abi3-win32.whl", hash = "sha256:cfc0ec0846dcf6763b0ed3d1846ff36008c6e7290683b61616c4b040f6a54ace", size = 296677 }, + { url = 
"https://files.pythonhosted.org/packages/69/e2/b011c38e5394c4c18fb5500778a55ec43ad6106126e74723ffaee246f56e/safetensors-0.5.3-cp38-abi3-win_amd64.whl", hash = "sha256:836cbbc320b47e80acd40e44c8682db0e8ad7123209f69b093def21ec7cafd11", size = 308878 }, +] + +[[package]] +name = "scipy" +version = "1.15.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0f/37/6964b830433e654ec7485e45a00fc9a27cf868d622838f6b6d9c5ec0d532/scipy-1.15.3.tar.gz", hash = "sha256:eae3cf522bc7df64b42cad3925c876e1b0b6c35c1337c93e12c0f366f55b0eaf", size = 59419214 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/96/ab/5cc9f80f28f6a7dff646c5756e559823614a42b1939d86dd0ed550470210/scipy-1.15.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:993439ce220d25e3696d1b23b233dd010169b62f6456488567e830654ee37a6b", size = 38714255 }, + { url = "https://files.pythonhosted.org/packages/4a/4a/66ba30abe5ad1a3ad15bfb0b59d22174012e8056ff448cb1644deccbfed2/scipy-1.15.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:34716e281f181a02341ddeaad584205bd2fd3c242063bd3423d61ac259ca7eba", size = 30111035 }, + { url = "https://files.pythonhosted.org/packages/4b/fa/a7e5b95afd80d24313307f03624acc65801846fa75599034f8ceb9e2cbf6/scipy-1.15.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3b0334816afb8b91dab859281b1b9786934392aa3d527cd847e41bb6f45bee65", size = 22384499 }, + { url = "https://files.pythonhosted.org/packages/17/99/f3aaddccf3588bb4aea70ba35328c204cadd89517a1612ecfda5b2dd9d7a/scipy-1.15.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:6db907c7368e3092e24919b5e31c76998b0ce1684d51a90943cb0ed1b4ffd6c1", size = 25152602 }, + { url = "https://files.pythonhosted.org/packages/56/c5/1032cdb565f146109212153339f9cb8b993701e9fe56b1c97699eee12586/scipy-1.15.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:721d6b4ef5dc82ca8968c25b111e307083d7ca9091bc38163fb89243e85e3889", size = 35503415 }, + { url = "https://files.pythonhosted.org/packages/bd/37/89f19c8c05505d0601ed5650156e50eb881ae3918786c8fd7262b4ee66d3/scipy-1.15.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39cb9c62e471b1bb3750066ecc3a3f3052b37751c7c3dfd0fd7e48900ed52982", size = 37652622 }, + { url = "https://files.pythonhosted.org/packages/7e/31/be59513aa9695519b18e1851bb9e487de66f2d31f835201f1b42f5d4d475/scipy-1.15.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:795c46999bae845966368a3c013e0e00947932d68e235702b5c3f6ea799aa8c9", size = 37244796 }, + { url = "https://files.pythonhosted.org/packages/10/c0/4f5f3eeccc235632aab79b27a74a9130c6c35df358129f7ac8b29f562ac7/scipy-1.15.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:18aaacb735ab38b38db42cb01f6b92a2d0d4b6aabefeb07f02849e47f8fb3594", size = 40047684 }, + { url = "https://files.pythonhosted.org/packages/ab/a7/0ddaf514ce8a8714f6ed243a2b391b41dbb65251affe21ee3077ec45ea9a/scipy-1.15.3-cp311-cp311-win_amd64.whl", hash = "sha256:ae48a786a28412d744c62fd7816a4118ef97e5be0bee968ce8f0a2fba7acf3bb", size = 41246504 }, + { url = "https://files.pythonhosted.org/packages/37/4b/683aa044c4162e10ed7a7ea30527f2cbd92e6999c10a8ed8edb253836e9c/scipy-1.15.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6ac6310fdbfb7aa6612408bd2f07295bcbd3fda00d2d702178434751fe48e019", size = 38766735 }, + { url = "https://files.pythonhosted.org/packages/7b/7e/f30be3d03de07f25dc0ec926d1681fed5c732d759ac8f51079708c79e680/scipy-1.15.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:185cd3d6d05ca4b44a8f1595af87f9c372bb6acf9c808e99aa3e9aa03bd98cf6", size = 30173284 }, + { url = "https://files.pythonhosted.org/packages/07/9c/0ddb0d0abdabe0d181c1793db51f02cd59e4901da6f9f7848e1f96759f0d/scipy-1.15.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:05dc6abcd105e1a29f95eada46d4a3f251743cfd7d3ae8ddb4088047f24ea477", size = 22446958 }, + { 
url = "https://files.pythonhosted.org/packages/af/43/0bce905a965f36c58ff80d8bea33f1f9351b05fad4beaad4eae34699b7a1/scipy-1.15.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:06efcba926324df1696931a57a176c80848ccd67ce6ad020c810736bfd58eb1c", size = 25242454 }, + { url = "https://files.pythonhosted.org/packages/56/30/a6f08f84ee5b7b28b4c597aca4cbe545535c39fe911845a96414700b64ba/scipy-1.15.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05045d8b9bfd807ee1b9f38761993297b10b245f012b11b13b91ba8945f7e45", size = 35210199 }, + { url = "https://files.pythonhosted.org/packages/0b/1f/03f52c282437a168ee2c7c14a1a0d0781a9a4a8962d84ac05c06b4c5b555/scipy-1.15.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271e3713e645149ea5ea3e97b57fdab61ce61333f97cfae392c28ba786f9bb49", size = 37309455 }, + { url = "https://files.pythonhosted.org/packages/89/b1/fbb53137f42c4bf630b1ffdfc2151a62d1d1b903b249f030d2b1c0280af8/scipy-1.15.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6cfd56fc1a8e53f6e89ba3a7a7251f7396412d655bca2aa5611c8ec9a6784a1e", size = 36885140 }, + { url = "https://files.pythonhosted.org/packages/2e/2e/025e39e339f5090df1ff266d021892694dbb7e63568edcfe43f892fa381d/scipy-1.15.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0ff17c0bb1cb32952c09217d8d1eed9b53d1463e5f1dd6052c7857f83127d539", size = 39710549 }, + { url = "https://files.pythonhosted.org/packages/e6/eb/3bf6ea8ab7f1503dca3a10df2e4b9c3f6b3316df07f6c0ded94b281c7101/scipy-1.15.3-cp312-cp312-win_amd64.whl", hash = "sha256:52092bc0472cfd17df49ff17e70624345efece4e1a12b23783a1ac59a1b728ed", size = 40966184 }, + { url = "https://files.pythonhosted.org/packages/73/18/ec27848c9baae6e0d6573eda6e01a602e5649ee72c27c3a8aad673ebecfd/scipy-1.15.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2c620736bcc334782e24d173c0fdbb7590a0a436d2fdf39310a8902505008759", size = 38728256 }, + { url = 
"https://files.pythonhosted.org/packages/74/cd/1aef2184948728b4b6e21267d53b3339762c285a46a274ebb7863c9e4742/scipy-1.15.3-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:7e11270a000969409d37ed399585ee530b9ef6aa99d50c019de4cb01e8e54e62", size = 30109540 }, + { url = "https://files.pythonhosted.org/packages/5b/d8/59e452c0a255ec352bd0a833537a3bc1bfb679944c4938ab375b0a6b3a3e/scipy-1.15.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:8c9ed3ba2c8a2ce098163a9bdb26f891746d02136995df25227a20e71c396ebb", size = 22383115 }, + { url = "https://files.pythonhosted.org/packages/08/f5/456f56bbbfccf696263b47095291040655e3cbaf05d063bdc7c7517f32ac/scipy-1.15.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0bdd905264c0c9cfa74a4772cdb2070171790381a5c4d312c973382fc6eaf730", size = 25163884 }, + { url = "https://files.pythonhosted.org/packages/a2/66/a9618b6a435a0f0c0b8a6d0a2efb32d4ec5a85f023c2b79d39512040355b/scipy-1.15.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79167bba085c31f38603e11a267d862957cbb3ce018d8b38f79ac043bc92d825", size = 35174018 }, + { url = "https://files.pythonhosted.org/packages/b5/09/c5b6734a50ad4882432b6bb7c02baf757f5b2f256041da5df242e2d7e6b6/scipy-1.15.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9deabd6d547aee2c9a81dee6cc96c6d7e9a9b1953f74850c179f91fdc729cb7", size = 37269716 }, + { url = "https://files.pythonhosted.org/packages/77/0a/eac00ff741f23bcabd352731ed9b8995a0a60ef57f5fd788d611d43d69a1/scipy-1.15.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:dde4fc32993071ac0c7dd2d82569e544f0bdaff66269cb475e0f369adad13f11", size = 36872342 }, + { url = "https://files.pythonhosted.org/packages/fe/54/4379be86dd74b6ad81551689107360d9a3e18f24d20767a2d5b9253a3f0a/scipy-1.15.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f77f853d584e72e874d87357ad70f44b437331507d1c311457bed8ed2b956126", size = 39670869 }, + { url = 
"https://files.pythonhosted.org/packages/87/2e/892ad2862ba54f084ffe8cc4a22667eaf9c2bcec6d2bff1d15713c6c0703/scipy-1.15.3-cp313-cp313-win_amd64.whl", hash = "sha256:b90ab29d0c37ec9bf55424c064312930ca5f4bde15ee8619ee44e69319aab163", size = 40988851 }, + { url = "https://files.pythonhosted.org/packages/1b/e9/7a879c137f7e55b30d75d90ce3eb468197646bc7b443ac036ae3fe109055/scipy-1.15.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3ac07623267feb3ae308487c260ac684b32ea35fd81e12845039952f558047b8", size = 38863011 }, + { url = "https://files.pythonhosted.org/packages/51/d1/226a806bbd69f62ce5ef5f3ffadc35286e9fbc802f606a07eb83bf2359de/scipy-1.15.3-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6487aa99c2a3d509a5227d9a5e889ff05830a06b2ce08ec30df6d79db5fcd5c5", size = 30266407 }, + { url = "https://files.pythonhosted.org/packages/e5/9b/f32d1d6093ab9eeabbd839b0f7619c62e46cc4b7b6dbf05b6e615bbd4400/scipy-1.15.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:50f9e62461c95d933d5c5ef4a1f2ebf9a2b4e83b0db374cb3f1de104d935922e", size = 22540030 }, + { url = "https://files.pythonhosted.org/packages/e7/29/c278f699b095c1a884f29fda126340fcc201461ee8bfea5c8bdb1c7c958b/scipy-1.15.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:14ed70039d182f411ffc74789a16df3835e05dc469b898233a245cdfd7f162cb", size = 25218709 }, + { url = "https://files.pythonhosted.org/packages/24/18/9e5374b617aba742a990581373cd6b68a2945d65cc588482749ef2e64467/scipy-1.15.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a769105537aa07a69468a0eefcd121be52006db61cdd8cac8a0e68980bbb723", size = 34809045 }, + { url = "https://files.pythonhosted.org/packages/e1/fe/9c4361e7ba2927074360856db6135ef4904d505e9b3afbbcb073c4008328/scipy-1.15.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9db984639887e3dffb3928d118145ffe40eff2fa40cb241a306ec57c219ebbbb", size = 36703062 }, + { url = 
"https://files.pythonhosted.org/packages/b7/8e/038ccfe29d272b30086b25a4960f757f97122cb2ec42e62b460d02fe98e9/scipy-1.15.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:40e54d5c7e7ebf1aa596c374c49fa3135f04648a0caabcb66c52884b943f02b4", size = 36393132 }, + { url = "https://files.pythonhosted.org/packages/10/7e/5c12285452970be5bdbe8352c619250b97ebf7917d7a9a9e96b8a8140f17/scipy-1.15.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5e721fed53187e71d0ccf382b6bf977644c533e506c4d33c3fb24de89f5c3ed5", size = 38979503 }, + { url = "https://files.pythonhosted.org/packages/81/06/0a5e5349474e1cbc5757975b21bd4fad0e72ebf138c5592f191646154e06/scipy-1.15.3-cp313-cp313t-win_amd64.whl", hash = "sha256:76ad1fb5f8752eabf0fa02e4cc0336b4e8f021e2d5f061ed37d6d264db35e3ca", size = 40308097 }, +] + +[[package]] +name = "sentencepiece" +version = "0.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/d2/b9c7ca067c26d8ff085d252c89b5f69609ca93fb85a00ede95f4857865d4/sentencepiece-0.2.0.tar.gz", hash = "sha256:a52c19171daaf2e697dc6cbe67684e0fa341b1248966f6aebb541de654d15843", size = 2632106 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/43/8f8885168a47a02eba1455bd3f4f169f50ad5b8cebd2402d0f5e20854d04/sentencepiece-0.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17982700c4f6dbb55fa3594f3d7e5dd1c8659a274af3738e33c987d2a27c9d5c", size = 2409036 }, + { url = "https://files.pythonhosted.org/packages/0f/35/e63ba28062af0a3d688a9f128e407a1a2608544b2f480cb49bf7f4b1cbb9/sentencepiece-0.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7c867012c0e8bcd5bdad0f791609101cb5c66acb303ab3270218d6debc68a65e", size = 1238921 }, + { url = "https://files.pythonhosted.org/packages/de/42/ae30952c4a0bd773e90c9bf2579f5533037c886dfc8ec68133d5694f4dd2/sentencepiece-0.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7fd6071249c74f779c5b27183295b9202f8dedb68034e716784364443879eaa6", size = 1181477 
}, + { url = "https://files.pythonhosted.org/packages/e3/ac/2f2ab1d60bb2d795d054eebe5e3f24b164bc21b5a9b75fba7968b3b91b5a/sentencepiece-0.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27f90c55a65013cbb8f4d7aab0599bf925cde4adc67ae43a0d323677b5a1c6cb", size = 1259182 }, + { url = "https://files.pythonhosted.org/packages/45/fb/14633c6ecf262c468759ffcdb55c3a7ee38fe4eda6a70d75ee7c7d63c58b/sentencepiece-0.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b293734059ef656dcd65be62ff771507bea8fed0a711b6733976e1ed3add4553", size = 1355537 }, + { url = "https://files.pythonhosted.org/packages/fb/12/2f5c8d4764b00033cf1c935b702d3bb878d10be9f0b87f0253495832d85f/sentencepiece-0.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e58b47f933aca74c6a60a79dcb21d5b9e47416256c795c2d58d55cec27f9551d", size = 1301464 }, + { url = "https://files.pythonhosted.org/packages/4e/b1/67afc0bde24f6dcb3acdea0dd8dcdf4b8b0db240f6bacd39378bd32d09f8/sentencepiece-0.2.0-cp311-cp311-win32.whl", hash = "sha256:c581258cf346b327c62c4f1cebd32691826306f6a41d8c4bec43b010dee08e75", size = 936749 }, + { url = "https://files.pythonhosted.org/packages/a2/f6/587c62fd21fc988555b85351f50bbde43a51524caafd63bc69240ded14fd/sentencepiece-0.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:0993dbc665f4113017892f1b87c3904a44d0640eda510abcacdfb07f74286d36", size = 991520 }, + { url = "https://files.pythonhosted.org/packages/27/5a/141b227ed54293360a9ffbb7bf8252b4e5efc0400cdeac5809340e5d2b21/sentencepiece-0.2.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ea5f536e32ea8ec96086ee00d7a4a131ce583a1b18d130711707c10e69601cb2", size = 2409370 }, + { url = "https://files.pythonhosted.org/packages/2e/08/a4c135ad6fc2ce26798d14ab72790d66e813efc9589fd30a5316a88ca8d5/sentencepiece-0.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d0cb51f53b6aae3c36bafe41e86167c71af8370a039f542c43b0cce5ef24a68c", size = 1239288 }, + { url = 
"https://files.pythonhosted.org/packages/49/0a/2fe387f825ac5aad5a0bfe221904882106cac58e1b693ba7818785a882b6/sentencepiece-0.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3212121805afc58d8b00ab4e7dd1f8f76c203ddb9dc94aa4079618a31cf5da0f", size = 1181597 }, + { url = "https://files.pythonhosted.org/packages/cc/38/e4698ee2293fe4835dc033c49796a39b3eebd8752098f6bd0aa53a14af1f/sentencepiece-0.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a3149e3066c2a75e0d68a43eb632d7ae728c7925b517f4c05c40f6f7280ce08", size = 1259220 }, + { url = "https://files.pythonhosted.org/packages/12/24/fd7ef967c9dad2f6e6e5386d0cadaf65cda8b7be6e3861a9ab3121035139/sentencepiece-0.2.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:632f3594d3e7ac8b367bca204cb3fd05a01d5b21455acd097ea4c0e30e2f63d7", size = 1355962 }, + { url = "https://files.pythonhosted.org/packages/4f/d2/18246f43ca730bb81918f87b7e886531eda32d835811ad9f4657c54eee35/sentencepiece-0.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f295105c6bdbb05bd5e1b0cafbd78ff95036f5d3641e7949455a3f4e5e7c3109", size = 1301706 }, + { url = "https://files.pythonhosted.org/packages/8a/47/ca237b562f420044ab56ddb4c278672f7e8c866e183730a20e413b38a989/sentencepiece-0.2.0-cp312-cp312-win32.whl", hash = "sha256:fb89f811e5efd18bab141afc3fea3de141c3f69f3fe9e898f710ae7fe3aab251", size = 936941 }, + { url = "https://files.pythonhosted.org/packages/c6/97/d159c32642306ee2b70732077632895438867b3b6df282354bd550cf2a67/sentencepiece-0.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:7a673a72aab81fef5ebe755c6e0cc60087d1f3a4700835d40537183c1703a45f", size = 991994 }, +] + +[[package]] +name = "sentry-sdk" +version = "2.29.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "urllib3" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/22/67/d552a5f8e5a6a56b2feea6529e2d8ccd54349084c84176d5a1f7295044bc/sentry_sdk-2.29.1.tar.gz", hash = "sha256:8d4a0206b95fa5fe85e5e7517ed662e3888374bdc342c00e435e10e6d831aa6d", size = 325518 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f0/e5/da07b0bd832cefd52d16f2b9bbbe31624d57552602c06631686b93ccb1bd/sentry_sdk-2.29.1-py2.py3-none-any.whl", hash = "sha256:90862fe0616ded4572da6c9dadb363121a1ae49a49e21c418f0634e9d10b4c19", size = 341553 }, +] + +[[package]] +name = "setproctitle" +version = "1.3.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9e/af/56efe21c53ac81ac87e000b15e60b3d8104224b4313b6eacac3597bd183d/setproctitle-1.3.6.tar.gz", hash = "sha256:c9f32b96c700bb384f33f7cf07954bb609d35dd82752cef57fb2ee0968409169", size = 26889 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/3b/8288d0cd969a63500dd62fc2c99ce6980f9909ccef0770ab1f86c361e0bf/setproctitle-1.3.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a1d856b0f4e4a33e31cdab5f50d0a14998f3a2d726a3fd5cb7c4d45a57b28d1b", size = 17412 }, + { url = "https://files.pythonhosted.org/packages/39/37/43a5a3e25ca1048dbbf4db0d88d346226f5f1acd131bb8e660f4bfe2799f/setproctitle-1.3.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:50706b9c0eda55f7de18695bfeead5f28b58aa42fd5219b3b1692d554ecbc9ec", size = 11963 }, + { url = "https://files.pythonhosted.org/packages/5b/47/f103c40e133154783c91a10ab08ac9fc410ed835aa85bcf7107cb882f505/setproctitle-1.3.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af188f3305f0a65c3217c30c6d4c06891e79144076a91e8b454f14256acc7279", size = 31718 }, + { url = "https://files.pythonhosted.org/packages/1f/13/7325dd1c008dd6c0ebd370ddb7505977054a87e406f142318e395031a792/setproctitle-1.3.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:cce0ed8b3f64c71c140f0ec244e5fdf8ecf78ddf8d2e591d4a8b6aa1c1214235", size = 33027 }, + { url = "https://files.pythonhosted.org/packages/0c/0a/6075bfea05a71379d77af98a9ac61163e8b6e5ef1ae58cd2b05871b2079c/setproctitle-1.3.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70100e2087fe05359f249a0b5f393127b3a1819bf34dec3a3e0d4941138650c9", size = 30223 }, + { url = "https://files.pythonhosted.org/packages/cc/41/fbf57ec52f4f0776193bd94334a841f0bc9d17e745f89c7790f336420c65/setproctitle-1.3.6-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1065ed36bd03a3fd4186d6c6de5f19846650b015789f72e2dea2d77be99bdca1", size = 31204 }, + { url = "https://files.pythonhosted.org/packages/97/b5/f799fb7a00de29fb0ac1dfd015528dea425b9e31a8f1068a0b3df52d317f/setproctitle-1.3.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4adf6a0013fe4e0844e3ba7583ec203ca518b9394c6cc0d3354df2bf31d1c034", size = 31181 }, + { url = "https://files.pythonhosted.org/packages/b5/b7/81f101b612014ec61723436022c31146178813d6ca6b947f7b9c84e9daf4/setproctitle-1.3.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:eb7452849f6615871eabed6560ffedfe56bc8af31a823b6be4ce1e6ff0ab72c5", size = 30101 }, + { url = "https://files.pythonhosted.org/packages/67/23/681232eed7640eab96719daa8647cc99b639e3daff5c287bd270ef179a73/setproctitle-1.3.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a094b7ce455ca341b59a0f6ce6be2e11411ba6e2860b9aa3dbb37468f23338f4", size = 32438 }, + { url = "https://files.pythonhosted.org/packages/19/f8/4d075a7bdc3609ac71535b849775812455e4c40aedfbf0778a6f123b1774/setproctitle-1.3.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ad1c2c2baaba62823a7f348f469a967ece0062140ca39e7a48e4bbb1f20d54c4", size = 30625 }, + { url = 
"https://files.pythonhosted.org/packages/5f/73/a2a8259ebee166aee1ca53eead75de0e190b3ddca4f716e5c7470ebb7ef6/setproctitle-1.3.6-cp311-cp311-win32.whl", hash = "sha256:8050c01331135f77ec99d99307bfbc6519ea24d2f92964b06f3222a804a3ff1f", size = 11488 }, + { url = "https://files.pythonhosted.org/packages/c9/15/52cf5e1ff0727d53704cfdde2858eaf237ce523b0b04db65faa84ff83e13/setproctitle-1.3.6-cp311-cp311-win_amd64.whl", hash = "sha256:9b73cf0fe28009a04a35bb2522e4c5b5176cc148919431dcb73fdbdfaab15781", size = 12201 }, + { url = "https://files.pythonhosted.org/packages/8f/fb/99456fd94d4207c5f6c40746a048a33a52b4239cd7d9c8d4889e2210ec82/setproctitle-1.3.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:af44bb7a1af163806bbb679eb8432fa7b4fb6d83a5d403b541b675dcd3798638", size = 17399 }, + { url = "https://files.pythonhosted.org/packages/d5/48/9699191fe6062827683c43bfa9caac33a2c89f8781dd8c7253fa3dba85fd/setproctitle-1.3.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3cca16fd055316a48f0debfcbfb6af7cea715429fc31515ab3fcac05abd527d8", size = 11966 }, + { url = "https://files.pythonhosted.org/packages/33/03/b085d192b9ecb9c7ce6ad6ef30ecf4110b7f39430b58a56245569827fcf4/setproctitle-1.3.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea002088d5554fd75e619742cefc78b84a212ba21632e59931b3501f0cfc8f67", size = 32017 }, + { url = "https://files.pythonhosted.org/packages/ae/68/c53162e645816f97212002111420d1b2f75bf6d02632e37e961dc2cd6d8b/setproctitle-1.3.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb465dd5825356c1191a038a86ee1b8166e3562d6e8add95eec04ab484cfb8a2", size = 33419 }, + { url = "https://files.pythonhosted.org/packages/ac/0d/119a45d15a816a6cf5ccc61b19729f82620095b27a47e0a6838216a95fae/setproctitle-1.3.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d2c8e20487b3b73c1fa72c56f5c89430617296cd380373e7af3a538a82d4cd6d", size = 30711 }, + { url = 
"https://files.pythonhosted.org/packages/e3/fb/5e9b5068df9e9f31a722a775a5e8322a29a638eaaa3eac5ea7f0b35e6314/setproctitle-1.3.6-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d6252098e98129a1decb59b46920d4eca17b0395f3d71b0d327d086fefe77d", size = 31742 }, + { url = "https://files.pythonhosted.org/packages/35/88/54de1e73e8fce87d587889c7eedb48fc4ee2bbe4e4ca6331690d03024f86/setproctitle-1.3.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cf355fbf0d4275d86f9f57be705d8e5eaa7f8ddb12b24ced2ea6cbd68fdb14dc", size = 31925 }, + { url = "https://files.pythonhosted.org/packages/f3/01/65948d7badd66e63e3db247b923143da142790fa293830fdecf832712c2d/setproctitle-1.3.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e288f8a162d663916060beb5e8165a8551312b08efee9cf68302687471a6545d", size = 30981 }, + { url = "https://files.pythonhosted.org/packages/22/20/c495e61786f1d38d5dc340b9d9077fee9be3dfc7e89f515afe12e1526dbc/setproctitle-1.3.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b2e54f4a2dc6edf0f5ea5b1d0a608d2af3dcb5aa8c8eeab9c8841b23e1b054fe", size = 33209 }, + { url = "https://files.pythonhosted.org/packages/98/3f/a457b8550fbd34d5b482fe20b8376b529e76bf1fbf9a474a6d9a641ab4ad/setproctitle-1.3.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b6f4abde9a2946f57e8daaf1160b2351bcf64274ef539e6675c1d945dbd75e2a", size = 31587 }, + { url = "https://files.pythonhosted.org/packages/44/fe/743517340e5a635e3f1c4310baea20c16c66202f96a6f4cead222ffd6d84/setproctitle-1.3.6-cp312-cp312-win32.whl", hash = "sha256:db608db98ccc21248370d30044a60843b3f0f3d34781ceeea67067c508cd5a28", size = 11487 }, + { url = "https://files.pythonhosted.org/packages/60/9a/d88f1c1f0f4efff1bd29d9233583ee341114dda7d9613941453984849674/setproctitle-1.3.6-cp312-cp312-win_amd64.whl", hash = "sha256:082413db8a96b1f021088e8ec23f0a61fec352e649aba20881895815388b66d3", size = 12208 }, + { url = 
"https://files.pythonhosted.org/packages/89/76/f1a2fdbf9b9602945a7489ba5c52e9863de37381ef1a85a2b9ed0ff8bc79/setproctitle-1.3.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e2a9e62647dc040a76d55563580bf3bb8fe1f5b6ead08447c2ed0d7786e5e794", size = 17392 }, + { url = "https://files.pythonhosted.org/packages/5c/5b/4e0db8b10b4543afcb3dbc0827793d46e43ec1de6b377e313af3703d08e0/setproctitle-1.3.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:751ba352ed922e0af60458e961167fa7b732ac31c0ddd1476a2dfd30ab5958c5", size = 11951 }, + { url = "https://files.pythonhosted.org/packages/dc/fe/d5d00aaa700fe1f6160b6e95c225b29c01f4d9292176d48fd968815163ea/setproctitle-1.3.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7890e291bf4708e3b61db9069ea39b3ab0651e42923a5e1f4d78a7b9e4b18301", size = 32087 }, + { url = "https://files.pythonhosted.org/packages/9f/b3/894b827b93ef813c082479bebf88185860f01ac243df737823dd705e7fff/setproctitle-1.3.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2b17855ed7f994f3f259cf2dfbfad78814538536fa1a91b50253d84d87fd88d", size = 33502 }, + { url = "https://files.pythonhosted.org/packages/b2/cd/5330734cca1a4cfcb721432c22cb7899ff15a4101ba868b2ef452ffafea1/setproctitle-1.3.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e51ec673513465663008ce402171192a053564865c2fc6dc840620871a9bd7c", size = 30713 }, + { url = "https://files.pythonhosted.org/packages/fa/d3/c2590c5daa2e9a008d3f2b16c0f4a351826193be55f147cb32af49c6d814/setproctitle-1.3.6-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63cc10352dc6cf35a33951656aa660d99f25f574eb78132ce41a85001a638aa7", size = 31792 }, + { url = "https://files.pythonhosted.org/packages/e6/b1/c553ed5af8cfcecd5ae7737e63af58a17a03d26f3d61868c7eb20bf7e3cf/setproctitle-1.3.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:0dba8faee2e4a96e934797c9f0f2d093f8239bf210406a99060b3eabe549628e", size = 31927 }, + { url = "https://files.pythonhosted.org/packages/70/78/2d5385206540127a3dca0ff83225b1ac66873f5cc89d4a6d3806c92f5ae2/setproctitle-1.3.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e3e44d08b61de0dd6f205528498f834a51a5c06689f8fb182fe26f3a3ce7dca9", size = 30981 }, + { url = "https://files.pythonhosted.org/packages/31/62/e3e4a4e006d0e549748e53cded4ff3b667be0602860fc61b7de8b412b667/setproctitle-1.3.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:de004939fc3fd0c1200d26ea9264350bfe501ffbf46c8cf5dc7f345f2d87a7f1", size = 33244 }, + { url = "https://files.pythonhosted.org/packages/aa/05/4b223fd4ef94e105dc7aff27fa502fb7200cf52be2bb0c064bd2406b5611/setproctitle-1.3.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3f8194b4d631b003a1176a75d1acd545e04b1f54b821638e098a93e6e62830ef", size = 31630 }, + { url = "https://files.pythonhosted.org/packages/1b/ba/5f68eb969f7336f54b54a599fd3ffbd7662f9733b080bc8598705971b3dd/setproctitle-1.3.6-cp313-cp313-win32.whl", hash = "sha256:d714e002dd3638170fe7376dc1b686dbac9cb712cde3f7224440af722cc9866a", size = 11480 }, + { url = "https://files.pythonhosted.org/packages/ba/f5/7f47f0ca35c9c357f16187cee9229f3eda0237bc6fdd3061441336f361c0/setproctitle-1.3.6-cp313-cp313-win_amd64.whl", hash = "sha256:b70c07409d465f3a8b34d52f863871fb8a00755370791d2bd1d4f82b3cdaf3d5", size = 12198 }, + { url = "https://files.pythonhosted.org/packages/39/ad/c3941b8fc6b32a976c9e2d9615a90ae793b69cd010ca8c3575dbc822104f/setproctitle-1.3.6-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:23a57d3b8f1549515c2dbe4a2880ebc1f27780dc126c5e064167563e015817f5", size = 17401 }, + { url = "https://files.pythonhosted.org/packages/04/38/a184f857b988d3a9c401e470a4e38182a5c99ee77bf90432d7665e9d35a3/setproctitle-1.3.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:81c443310831e29fabbd07b75ebbfa29d0740b56f5907c6af218482d51260431", size = 11959 }, + { url = 
"https://files.pythonhosted.org/packages/b7/b9/4878ef9d8483adfd1edf6bf95151362aaec0d05aac306a97ff0383f491b5/setproctitle-1.3.6-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d88c63bd395c787b0aa81d8bbc22c1809f311032ce3e823a6517b711129818e4", size = 33463 }, + { url = "https://files.pythonhosted.org/packages/cc/60/3ef49d1931aff2a36a7324a49cca10d77ef03e0278452fd468c33a52d7e3/setproctitle-1.3.6-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73f14b86d0e2858ece6bf5807c9889670e392c001d414b4293d0d9b291942c3", size = 34959 }, + { url = "https://files.pythonhosted.org/packages/81/c6/dee0a973acecefb0db6c9c2e0ea7f18b7e4db773a72e534741ebdee8bbb8/setproctitle-1.3.6-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3393859eb8f19f5804049a685bf286cb08d447e28ba5c6d8543c7bf5500d5970", size = 32055 }, + { url = "https://files.pythonhosted.org/packages/ea/a5/5dd5c4192cf18d16349a32a07f728a9a48a2a05178e16966cabd6645903e/setproctitle-1.3.6-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:785cd210c0311d9be28a70e281a914486d62bfd44ac926fcd70cf0b4d65dff1c", size = 32986 }, + { url = "https://files.pythonhosted.org/packages/df/a6/1508d37eb8008670d33f13fcdb91cbd8ef54697276469abbfdd3d4428c59/setproctitle-1.3.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c051f46ed1e13ba8214b334cbf21902102807582fbfaf0fef341b9e52f0fafbf", size = 32736 }, + { url = "https://files.pythonhosted.org/packages/1a/73/c84ec8880d543766a12fcd6b65dbd013770974a40577889f357409b0441e/setproctitle-1.3.6-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:49498ebf68ca3e75321ffe634fcea5cc720502bfaa79bd6b03ded92ce0dc3c24", size = 31945 }, + { url = "https://files.pythonhosted.org/packages/95/0a/126b9ff7a406a69a62825fe5bd6d1ba8671919a7018c4f9e2c63f49bfcb6/setproctitle-1.3.6-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = 
"sha256:4431629c178193f23c538cb1de3da285a99ccc86b20ee91d81eb5f1a80e0d2ba", size = 34333 }, + { url = "https://files.pythonhosted.org/packages/9a/fd/5474b04f1c013ff460129d2bc774557dd6e186da4667865efef9a83bf378/setproctitle-1.3.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d136fbf8ad4321716e44d6d6b3d8dffb4872626010884e07a1db54b7450836cf", size = 32508 }, + { url = "https://files.pythonhosted.org/packages/32/21/2503e38520cb076a7ecaef6a35d6a6fa89cf02af3541c84c811fd7500d20/setproctitle-1.3.6-cp313-cp313t-win32.whl", hash = "sha256:d483cc23cc56ab32911ea0baa0d2d9ea7aa065987f47de847a0a93a58bf57905", size = 11482 }, + { url = "https://files.pythonhosted.org/packages/65/23/7833d75a27fba25ddc5cd3b54cd03c4bf8e18b8e2dbec622eb6326278ce8/setproctitle-1.3.6-cp313-cp313t-win_amd64.whl", hash = "sha256:74973aebea3543ad033b9103db30579ec2b950a466e09f9c2180089e8346e0ec", size = 12209 }, +] + +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486 }, +] + +[[package]] +name = "shtab" +version = "1.7.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/3e/837067b970c1d2ffa936c72f384a63fdec4e186b74da781e921354a94024/shtab-1.7.2.tar.gz", hash = "sha256:8c16673ade76a2d42417f03e57acf239bfb5968e842204c17990cae357d07d6f", size = 45751 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/74/03/3271b7bb470fbab4adf5bd30b0d32143909d96f3608d815b447357f47f2b/shtab-1.7.2-py3-none-any.whl", hash = "sha256:858a5805f6c137bb0cda4f282d27d08fd44ca487ab4a6a36d2a400263cd0b5c1", size = 14214 }, +] + +[[package]] +name = "simple-parsing" +version = "0.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docstring-parser" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/c5/f1e2fcb3a81085cdf3cfed48b8c8ce0e7cc30c95dee734cbb35d6265336a/simple_parsing-0.1.7.tar.gz", hash = "sha256:225e6b35252d68f7894716101fe3bd7e6dd3d30ab7b1c3c023f77a42dbe1336f", size = 96375 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/9c/e9ea38750027a6de3e3c5e68a19fda0e7b0cd3db8045f30d0f6bc113b911/simple_parsing-0.1.7-py3-none-any.whl", hash = "sha256:5276e6c90c157362dd0173d1eecebe58361a66b457129cc9bba13b78a4e85092", size = 112782 }, +] + +[[package]] +name = "simplejson" +version = "3.20.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/92/51b417685abd96b31308b61b9acce7ec50d8e1de8fbc39a7fd4962c60689/simplejson-3.20.1.tar.gz", hash = "sha256:e64139b4ec4f1f24c142ff7dcafe55a22b811a74d86d66560c8815687143037d", size = 85591 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/59/74bc90d1c051bc2432c96b34bd4e8036875ab58b4fcbe4d6a5a76985f853/simplejson-3.20.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:325b8c107253d3217e89d7b50c71015b5b31e2433e6c5bf38967b2f80630a8ca", size = 92132 }, + { url = "https://files.pythonhosted.org/packages/71/c7/1970916e0c51794fff89f76da2f632aaf0b259b87753c88a8c409623d3e1/simplejson-3.20.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88a7baa8211089b9e58d78fbc1b0b322103f3f3d459ff16f03a36cece0d0fcf0", size = 74956 }, + { url = 
"https://files.pythonhosted.org/packages/c8/0d/98cc5909180463f1d75fac7180de62d4cdb4e82c4fef276b9e591979372c/simplejson-3.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:299b1007b8101d50d95bc0db1bf5c38dc372e85b504cf77f596462083ee77e3f", size = 74772 }, + { url = "https://files.pythonhosted.org/packages/e1/94/a30a5211a90d67725a3e8fcc1c788189f2ae2ed2b96b63ed15d0b7f5d6bb/simplejson-3.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ec618ed65caab48e81e3ed29586236a8e57daef792f1f3bb59504a7e98cd10", size = 143575 }, + { url = "https://files.pythonhosted.org/packages/ee/08/cdb6821f1058eb5db46d252de69ff7e6c53f05f1bae6368fe20d5b51d37e/simplejson-3.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2cdead1d3197f0ff43373cf4730213420523ba48697743e135e26f3d179f38", size = 153241 }, + { url = "https://files.pythonhosted.org/packages/4c/2d/ca3caeea0bdc5efc5503d5f57a2dfb56804898fb196dfada121323ee0ccb/simplejson-3.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3466d2839fdc83e1af42e07b90bc8ff361c4e8796cd66722a40ba14e458faddd", size = 141500 }, + { url = "https://files.pythonhosted.org/packages/e1/33/d3e0779d5c58245e7370c98eb969275af6b7a4a5aec3b97cbf85f09ad328/simplejson-3.20.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d492ed8e92f3a9f9be829205f44b1d0a89af6582f0cf43e0d129fa477b93fe0c", size = 144757 }, + { url = "https://files.pythonhosted.org/packages/54/53/2d93128bb55861b2fa36c5944f38da51a0bc6d83e513afc6f7838440dd15/simplejson-3.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f924b485537b640dc69434565463fd6fc0c68c65a8c6e01a823dd26c9983cf79", size = 144409 }, + { url = "https://files.pythonhosted.org/packages/99/4c/dac310a98f897ad3435b4bdc836d92e78f09e38c5dbf28211ed21dc59fa2/simplejson-3.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:9e8eacf6a3491bf76ea91a8d46726368a6be0eb94993f60b8583550baae9439e", size = 146082 }, + { url = "https://files.pythonhosted.org/packages/ee/22/d7ba958cfed39827335b82656b1c46f89678faecda9a7677b47e87b48ee6/simplejson-3.20.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d34d04bf90b4cea7c22d8b19091633908f14a096caa301b24c2f3d85b5068fb8", size = 154339 }, + { url = "https://files.pythonhosted.org/packages/b8/c8/b072b741129406a7086a0799c6f5d13096231bf35fdd87a0cffa789687fc/simplejson-3.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:69dd28d4ce38390ea4aaf212902712c0fd1093dc4c1ff67e09687c3c3e15a749", size = 147915 }, + { url = "https://files.pythonhosted.org/packages/6c/46/8347e61e9cf3db5342a42f7fd30a81b4f5cf85977f916852d7674a540907/simplejson-3.20.1-cp311-cp311-win32.whl", hash = "sha256:dfe7a9da5fd2a3499436cd350f31539e0a6ded5da6b5b3d422df016444d65e43", size = 73972 }, + { url = "https://files.pythonhosted.org/packages/01/85/b52f24859237b4e9d523d5655796d911ba3d46e242eb1959c45b6af5aedd/simplejson-3.20.1-cp311-cp311-win_amd64.whl", hash = "sha256:896a6c04d7861d507d800da7642479c3547060bf97419d9ef73d98ced8258766", size = 75595 }, + { url = "https://files.pythonhosted.org/packages/8d/eb/34c16a1ac9ba265d024dc977ad84e1659d931c0a700967c3e59a98ed7514/simplejson-3.20.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f31c4a3a7ab18467ee73a27f3e59158255d1520f3aad74315edde7a940f1be23", size = 93100 }, + { url = "https://files.pythonhosted.org/packages/41/fc/2c2c007d135894971e6814e7c0806936e5bade28f8db4dd7e2a58b50debd/simplejson-3.20.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:884e6183d16b725e113b83a6fc0230152ab6627d4d36cb05c89c2c5bccfa7bc6", size = 75464 }, + { url = "https://files.pythonhosted.org/packages/0f/05/2b5ecb33b776c34bb5cace5de5d7669f9b60e3ca13c113037b2ca86edfbd/simplejson-3.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03d7a426e416fe0d3337115f04164cd9427eb4256e843a6b8751cacf70abc832", size = 75112 }, + { url = 
"https://files.pythonhosted.org/packages/fe/36/1f3609a2792f06cd4b71030485f78e91eb09cfd57bebf3116bf2980a8bac/simplejson-3.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:000602141d0bddfcff60ea6a6e97d5e10c9db6b17fd2d6c66199fa481b6214bb", size = 150182 }, + { url = "https://files.pythonhosted.org/packages/2f/b0/053fbda38b8b602a77a4f7829def1b4f316cd8deb5440a6d3ee90790d2a4/simplejson-3.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:af8377a8af78226e82e3a4349efdde59ffa421ae88be67e18cef915e4023a595", size = 158363 }, + { url = "https://files.pythonhosted.org/packages/d1/4b/2eb84ae867539a80822e92f9be4a7200dffba609275faf99b24141839110/simplejson-3.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15c7de4c88ab2fbcb8781a3b982ef883696736134e20b1210bca43fb42ff1acf", size = 148415 }, + { url = "https://files.pythonhosted.org/packages/e0/bd/400b0bd372a5666addf2540c7358bfc3841b9ce5cdbc5cc4ad2f61627ad8/simplejson-3.20.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:455a882ff3f97d810709f7b620007d4e0aca8da71d06fc5c18ba11daf1c4df49", size = 152213 }, + { url = "https://files.pythonhosted.org/packages/50/12/143f447bf6a827ee9472693768dc1a5eb96154f8feb140a88ce6973a3cfa/simplejson-3.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fc0f523ce923e7f38eb67804bc80e0a028c76d7868500aa3f59225574b5d0453", size = 150048 }, + { url = "https://files.pythonhosted.org/packages/5e/ea/dd9b3e8e8ed710a66f24a22c16a907c9b539b6f5f45fd8586bd5c231444e/simplejson-3.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76461ec929282dde4a08061071a47281ad939d0202dc4e63cdd135844e162fbc", size = 151668 }, + { url = "https://files.pythonhosted.org/packages/99/af/ee52a8045426a0c5b89d755a5a70cc821815ef3c333b56fbcad33c4435c0/simplejson-3.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = 
"sha256:ab19c2da8c043607bde4d4ef3a6b633e668a7d2e3d56f40a476a74c5ea71949f", size = 158840 }, + { url = "https://files.pythonhosted.org/packages/68/db/ab32869acea6b5de7d75fa0dac07a112ded795d41eaa7e66c7813b17be95/simplejson-3.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b2578bedaedf6294415197b267d4ef678fea336dd78ee2a6d2f4b028e9d07be3", size = 154212 }, + { url = "https://files.pythonhosted.org/packages/fa/7a/e3132d454977d75a3bf9a6d541d730f76462ebf42a96fea2621498166f41/simplejson-3.20.1-cp312-cp312-win32.whl", hash = "sha256:339f407373325a36b7fd744b688ba5bae0666b5d340ec6d98aebc3014bf3d8ea", size = 74101 }, + { url = "https://files.pythonhosted.org/packages/bc/5d/4e243e937fa3560107c69f6f7c2eed8589163f5ed14324e864871daa2dd9/simplejson-3.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:627d4486a1ea7edf1f66bb044ace1ce6b4c1698acd1b05353c97ba4864ea2e17", size = 75736 }, + { url = "https://files.pythonhosted.org/packages/c4/03/0f453a27877cb5a5fff16a975925f4119102cc8552f52536b9a98ef0431e/simplejson-3.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:71e849e7ceb2178344998cbe5ade101f1b329460243c79c27fbfc51c0447a7c3", size = 93109 }, + { url = "https://files.pythonhosted.org/packages/74/1f/a729f4026850cabeaff23e134646c3f455e86925d2533463420635ae54de/simplejson-3.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b63fdbab29dc3868d6f009a59797cefaba315fd43cd32ddd998ee1da28e50e29", size = 75475 }, + { url = "https://files.pythonhosted.org/packages/e2/14/50a2713fee8ff1f8d655b1a14f4a0f1c0c7246768a1b3b3d12964a4ed5aa/simplejson-3.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1190f9a3ce644fd50ec277ac4a98c0517f532cfebdcc4bd975c0979a9f05e1fb", size = 75112 }, + { url = "https://files.pythonhosted.org/packages/45/86/ea9835abb646755140e2d482edc9bc1e91997ed19a59fd77ae4c6a0facea/simplejson-3.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1336ba7bcb722ad487cd265701ff0583c0bb6de638364ca947bb84ecc0015d1", size = 150245 
}, + { url = "https://files.pythonhosted.org/packages/12/b4/53084809faede45da829fe571c65fbda8479d2a5b9c633f46b74124d56f5/simplejson-3.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e975aac6a5acd8b510eba58d5591e10a03e3d16c1cf8a8624ca177491f7230f0", size = 158465 }, + { url = "https://files.pythonhosted.org/packages/a9/7d/d56579468d1660b3841e1f21c14490d103e33cf911886b22652d6e9683ec/simplejson-3.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a6dd11ee282937ad749da6f3b8d87952ad585b26e5edfa10da3ae2536c73078", size = 148514 }, + { url = "https://files.pythonhosted.org/packages/19/e3/874b1cca3d3897b486d3afdccc475eb3a09815bf1015b01cf7fcb52a55f0/simplejson-3.20.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab980fcc446ab87ea0879edad41a5c28f2d86020014eb035cf5161e8de4474c6", size = 152262 }, + { url = "https://files.pythonhosted.org/packages/32/84/f0fdb3625292d945c2bd13a814584603aebdb38cfbe5fe9be6b46fe598c4/simplejson-3.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f5aee2a4cb6b146bd17333ac623610f069f34e8f31d2f4f0c1a2186e50c594f0", size = 150164 }, + { url = "https://files.pythonhosted.org/packages/95/51/6d625247224f01eaaeabace9aec75ac5603a42f8ebcce02c486fbda8b428/simplejson-3.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:652d8eecbb9a3b6461b21ec7cf11fd0acbab144e45e600c817ecf18e4580b99e", size = 151795 }, + { url = "https://files.pythonhosted.org/packages/7f/d9/bb921df6b35be8412f519e58e86d1060fddf3ad401b783e4862e0a74c4c1/simplejson-3.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8c09948f1a486a89251ee3a67c9f8c969b379f6ffff1a6064b41fea3bce0a112", size = 159027 }, + { url = "https://files.pythonhosted.org/packages/03/c5/5950605e4ad023a6621cf4c931b29fd3d2a9c1f36be937230bfc83d7271d/simplejson-3.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:cbbd7b215ad4fc6f058b5dd4c26ee5c59f72e031dfda3ac183d7968a99e4ca3a", size = 154380 }, + { url = "https://files.pythonhosted.org/packages/66/ad/b74149557c5ec1e4e4d55758bda426f5d2ec0123cd01a53ae63b8de51fa3/simplejson-3.20.1-cp313-cp313-win32.whl", hash = "sha256:ae81e482476eaa088ef9d0120ae5345de924f23962c0c1e20abbdff597631f87", size = 74102 }, + { url = "https://files.pythonhosted.org/packages/db/a9/25282fdd24493e1022f30b7f5cdf804255c007218b2bfaa655bd7ad34b2d/simplejson-3.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:1b9fd15853b90aec3b1739f4471efbf1ac05066a2c7041bf8db821bb73cd2ddc", size = 75736 }, + { url = "https://files.pythonhosted.org/packages/4b/30/00f02a0a921556dd5a6db1ef2926a1bc7a8bbbfb1c49cfed68a275b8ab2b/simplejson-3.20.1-py3-none-any.whl", hash = "sha256:8a6c1bbac39fa4a79f83cbf1df6ccd8ff7069582a9fd8db1e52cea073bc2c697", size = 57121 }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050 }, +] + +[[package]] +name = "smmap" +version = "5.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/44/cd/a040c4b3119bbe532e5b0732286f805445375489fceaec1f48306068ee3b/smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5", size = 22329 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/be/d09147ad1ec7934636ad912901c5fd7667e1c858e19d355237db0d0cd5e4/smmap-5.0.2-py3-none-any.whl", hash = 
"sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e", size = 24303 }, +] + +[[package]] +name = "soupsieve" +version = "2.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/f4/4a80cd6ef364b2e8b65b15816a843c0980f7a5a2b4dc701fc574952aa19f/soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a", size = 103418 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/9c/0e6afc12c269578be5c0c1c9f4b49a8d32770a080260c333ac04cc1c832d/soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4", size = 36677 }, +] + +[[package]] +name = "stack-data" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asttokens" }, + { name = "executing" }, + { name = "pure-eval" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521 }, +] + +[[package]] +name = "svgwrite" +version = "1.4.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/16/c1/263d4e93b543390d86d8eb4fc23d9ce8a8d6efd146f9427364109004fa9b/svgwrite-1.4.3.zip", hash = "sha256:a8fbdfd4443302a6619a7f76bc937fc683daf2628d9b737c891ec08b8ce524c3", size = 189516 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/15/640e399579024a6875918839454025bb1d5f850bb70d96a11eabb644d11c/svgwrite-1.4.3-py3-none-any.whl", hash = 
"sha256:bb6b2b5450f1edbfa597d924f9ac2dd099e625562e492021d7dd614f65f8a22d", size = 67122 }, +] + +[[package]] +name = "sympy" +version = "1.14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mpmath" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353 }, +] + +[[package]] +name = "tensorboard" +version = "2.15.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "google-auth" }, + { name = "google-auth-oauthlib" }, + { name = "grpcio" }, + { name = "markdown" }, + { name = "numpy" }, + { name = "protobuf" }, + { name = "requests" }, + { name = "setuptools" }, + { name = "six" }, + { name = "tensorboard-data-server" }, + { name = "werkzeug" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/37/12/f6e9b9dcc310263cbd3948274e286538bd6800fd0c268850788f14a0c6d0/tensorboard-2.15.2-py3-none-any.whl", hash = "sha256:a6f6443728064d962caea6d34653e220e34ef8df764cb06a8212c17e1a8f0622", size = 5539713 }, +] + +[[package]] +name = "tensorboard-data-server" +version = "0.7.2" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/13/e503968fefabd4c6b2650af21e110aa8466fe21432cd7c43a84577a89438/tensorboard_data_server-0.7.2-py3-none-any.whl", hash = "sha256:7e0610d205889588983836ec05dc098e80f97b7e7bbff7e994ebb78f578d0ddb", size = 2356 }, + { url = 
"https://files.pythonhosted.org/packages/b7/85/dabeaf902892922777492e1d253bb7e1264cadce3cea932f7ff599e53fea/tensorboard_data_server-0.7.2-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:9fe5d24221b29625dbc7328b0436ca7fc1c23de4acf4d272f1180856e32f9f60", size = 4823598 }, + { url = "https://files.pythonhosted.org/packages/73/c6/825dab04195756cf8ff2e12698f22513b3db2f64925bdd41671bfb33aaa5/tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:ef687163c24185ae9754ed5650eb5bc4d84ff257aabdc33f0cc6f74d8ba54530", size = 6590363 }, +] + +[[package]] +name = "tensorflow" +version = "2.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "astunparse" }, + { name = "flatbuffers" }, + { name = "gast" }, + { name = "google-pasta" }, + { name = "grpcio" }, + { name = "h5py" }, + { name = "keras" }, + { name = "libclang" }, + { name = "ml-dtypes" }, + { name = "numpy" }, + { name = "opt-einsum" }, + { name = "packaging" }, + { name = "protobuf" }, + { name = "setuptools" }, + { name = "six" }, + { name = "tensorboard" }, + { name = "tensorflow-estimator" }, + { name = "tensorflow-io-gcs-filesystem" }, + { name = "termcolor" }, + { name = "typing-extensions" }, + { name = "wrapt" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/92/2d/880fcd65e4414b05088193e6f2cfb86fdf90003dd2dd0f4d1bc465348f0e/tensorflow-2.15.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:1e0716622ed7af867d8b1997b00a2940f1a1587dee923ff53efa2ee506992f32", size = 239098779 }, + { url = "https://files.pythonhosted.org/packages/85/15/cf99a373812d37f8ae99752a34a9f5f690d820ceb5b302e922705bc18944/tensorflow-2.15.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:124930e7d4f5d74c61a5c80d642a26c22fe0c42fdd383fe9ee5803c3ac9ed4ce", size = 2137 }, + { url = 
"https://files.pythonhosted.org/packages/cf/ac/6d884eba6d30196baf8f8284448f4d5388681f386f1150ad2d54398bc33a/tensorflow-2.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:852efeb4d18beedac0120c4f2d4f4dccf4c090bb6740c5199d395ff609e85e98", size = 2151 }, + { url = "https://files.pythonhosted.org/packages/93/c0/a774286d0383419f558deb27096e5de9f9facd6c27df8e9f9af6fba2f77e/tensorflow-2.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dee8ec2b2c6c942ae65d25746e53cdc475e82d5fcbbb3009ce47f5963d69ebfc", size = 475258658 }, + { url = "https://files.pythonhosted.org/packages/93/21/9b035a4f823d6aee2917c75415be9a95861ff3d73a0a65e48edbf210cec1/tensorflow-2.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:e05a48006930e4e9e68468e7affed3bbce8a1c7fe6df86500496ad1558804a78", size = 2129 }, +] + +[[package]] +name = "tensorflow-cpu" +version = "2.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "astunparse" }, + { name = "flatbuffers" }, + { name = "gast" }, + { name = "google-pasta" }, + { name = "grpcio" }, + { name = "h5py" }, + { name = "keras" }, + { name = "libclang" }, + { name = "ml-dtypes" }, + { name = "numpy" }, + { name = "opt-einsum" }, + { name = "packaging" }, + { name = "protobuf" }, + { name = "setuptools" }, + { name = "six" }, + { name = "tensorboard" }, + { name = "tensorflow-estimator" }, + { name = "tensorflow-io-gcs-filesystem" }, + { name = "termcolor" }, + { name = "typing-extensions" }, + { name = "wrapt" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/d0/5b1288c11011a63e0027a8e8524928dc5ae9e0ad3134ec619937c019d0e7/tensorflow_cpu-2.15.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:b0b2986a6cf63053c1f63bc751b228f5478283c0aa66a58271e931ae318978ce", size = 239098829 }, + { url = 
"https://files.pythonhosted.org/packages/fa/44/a1698c62942d20cab378ba201a6cbfcce579418351a0c6e4ea9d66c9adf2/tensorflow_cpu-2.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f46c795177f6311c83562e05d38dc7d4618f8d3150e6902a4499b875f3f97270", size = 207208171 }, + { url = "https://files.pythonhosted.org/packages/ec/b2/b76e4b3c0a9dbdb0feacdfa393d6d3df78e2232514eec0659471e7cbc5a3/tensorflow_cpu-2.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:4487d0991e6f71bb56000f49a8ba467786b1ed7fafc7a6c0fad6d10ea46fc304", size = 2166 }, +] + +[[package]] +name = "tensorflow-datasets" +version = "4.9.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "array-record", marker = "sys_platform == 'linux'" }, + { name = "dm-tree", version = "0.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.13'" }, + { name = "dm-tree", version = "0.1.9", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.13'" }, + { name = "etils", extra = ["edc", "enp", "epath", "epy", "etree"] }, + { name = "immutabledict" }, + { name = "numpy" }, + { name = "promise" }, + { name = "protobuf" }, + { name = "psutil" }, + { name = "pyarrow" }, + { name = "requests" }, + { name = "simple-parsing" }, + { name = "tensorflow-metadata" }, + { name = "termcolor" }, + { name = "toml" }, + { name = "tqdm" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/92/a436764aeea5aa0c85774770afdc6063b1016dd38b67e39c5b6240cf1deb/tensorflow_datasets-4.9.9.tar.gz", hash = "sha256:9cb245cad97e7d227f0b8e006491cfef860ff8d4b9d84a3c68f8b96d6295355e", size = 3943946 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/16/e0/657192dbc03636532ccbd5c90669d31a65187365b99ba685db36bb31dd67/tensorflow_datasets-4.9.9-py3-none-any.whl", hash = "sha256:b94902d414cdc12a1014cda9ee5815c502c3d44215b780e06dacbd7949abd14e", size = 5319309 }, +] + 
+[[package]] +name = "tensorflow-estimator" +version = "2.15.0" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/c8/2f823c8958d5342eafc6dd3e922f0cc4fcf8c2e0460284cc462dae3b60a0/tensorflow_estimator-2.15.0-py2.py3-none-any.whl", hash = "sha256:aedf21eec7fb2dc91150fc91a1ce12bc44dbb72278a08b58e79ff87c9e28f153", size = 441974 }, +] + +[[package]] +name = "tensorflow-io-gcs-filesystem" +version = "0.37.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/9b/b2fb82d0da673b17a334f785fc19c23483165019ddc33b275ef25ca31173/tensorflow_io_gcs_filesystem-0.37.1-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:32c50ab4e29a23c1f91cd0f9ab8c381a0ab10f45ef5c5252e94965916041737c", size = 2470224 }, + { url = "https://files.pythonhosted.org/packages/5b/cc/16634e76f3647fbec18187258da3ba11184a6232dcf9073dc44579076d36/tensorflow_io_gcs_filesystem-0.37.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:b02f9c5f94fd62773954a04f69b68c4d576d076fd0db4ca25d5479f0fbfcdbad", size = 3479613 }, + { url = "https://files.pythonhosted.org/packages/de/bf/ba597d3884c77d05a78050f3c178933d69e3f80200a261df6eaa920656cd/tensorflow_io_gcs_filesystem-0.37.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e1f2796b57e799a8ca1b75bf47c2aaa437c968408cc1a402a9862929e104cda", size = 4842079 }, + { url = "https://files.pythonhosted.org/packages/66/7f/e36ae148c2f03d61ca1bff24bc13a0fef6d6825c966abef73fc6f880a23b/tensorflow_io_gcs_filesystem-0.37.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee7c8ee5fe2fd8cb6392669ef16e71841133041fee8a330eff519ad9b36e4556", size = 5085736 }, + { url = "https://files.pythonhosted.org/packages/70/83/4422804257fe2942ae0af4ea5bcc9df59cb6cb1bd092202ef240751d16aa/tensorflow_io_gcs_filesystem-0.37.1-cp312-cp312-macosx_10_14_x86_64.whl", hash = 
"sha256:ffebb6666a7bfc28005f4fbbb111a455b5e7d6cd3b12752b7050863ecb27d5cc", size = 2470224 }, + { url = "https://files.pythonhosted.org/packages/43/9b/be27588352d7bd971696874db92d370f578715c17c0ccb27e4b13e16751e/tensorflow_io_gcs_filesystem-0.37.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:fe8dcc6d222258a080ac3dfcaaaa347325ce36a7a046277f6b3e19abc1efb3c5", size = 3479614 }, + { url = "https://files.pythonhosted.org/packages/d3/46/962f47af08bd39fc9feb280d3192825431a91a078c856d17a78ae4884eb1/tensorflow_io_gcs_filesystem-0.37.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fbb33f1745f218464a59cecd9a18e32ca927b0f4d77abd8f8671b645cc1a182f", size = 4842077 }, + { url = "https://files.pythonhosted.org/packages/f0/9b/790d290c232bce9b691391cf16e95a96e469669c56abfb1d9d0f35fa437c/tensorflow_io_gcs_filesystem-0.37.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:286389a203a5aee1a4fa2e53718c661091aa5fea797ff4fa6715ab8436b02e6c", size = 5085733 }, +] + +[[package]] +name = "tensorflow-metadata" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "googleapis-common-protos" }, + { name = "protobuf" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/30/944470f0ec0f00ccf25f0fdc84cf28be83838da5636c2b2b002960ba7ac1/tensorflow_metadata-1.17.1-py3-none-any.whl", hash = "sha256:f60d6605a16094c46921ffcf064747ba4b57840adad9fad682e2f28d0bac20eb", size = 31555 }, +] + +[[package]] +name = "tensorstore" +version = "0.1.74" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ml-dtypes" }, + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3c/b9/ea25aba62c688a87d7d7d9cc5926d602e2f9e84fa72586825486fb180b7e/tensorstore-0.1.74.tar.gz", hash = "sha256:a062875f27283d30ce4959c408c253ecb336fce8e3f9837c064e3d30cda79203", size = 6795605 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/0d/3e/d67bb3d9bb7409469d15fb90ef5756e6ac8b835af7f27c02fc542c4b4059/tensorstore-0.1.74-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:8353e619d9140ca50fc0cb5b846e07c68462dd5015b4714752a0a664e48a03d3", size = 15294582 }, + { url = "https://files.pythonhosted.org/packages/01/f4/49cb5ea8e63303fcb0a6ebf0ed546aaec63982a4abca0e9801da5e3a24e3/tensorstore-0.1.74-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3ad1bfbb257ab84de1a5c9b79a60cebb5fbb7a411ddb1c246c21c9795789ba1", size = 13261395 }, + { url = "https://files.pythonhosted.org/packages/ad/7b/9c12d4687e6ff19222f12719286c13a546f1714e5dbed75d52a4267534ed/tensorstore-0.1.74-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3ad9daf4c757db41ad091a1a5502807baeb848be0937986d8766049c39c8466", size = 17042621 }, + { url = "https://files.pythonhosted.org/packages/b5/07/cf0dc4540a78bc715fbcf4417c5dc708f3d12ed1664bf117f22463f411fc/tensorstore-0.1.74-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a35364804e7d71bf5e86d2dae4de04c90249b61ff71448b9713b4e72b2389bd", size = 18393581 }, + { url = "https://files.pythonhosted.org/packages/ac/42/edf004c5a101e021f052ea3564250d773d7cf6458f92934456ffa967383f/tensorstore-0.1.74-cp311-cp311-win_amd64.whl", hash = "sha256:15dcb6ce282e32d005caad34d595b0be070947578448a2861c63fdd608fc7394", size = 12431849 }, + { url = "https://files.pythonhosted.org/packages/a1/14/2e6d1cad744af9e9a1a78d881a908a859ad95b61b15de10397069f55fbd8/tensorstore-0.1.74-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:7218722ee5d74e4d01f357917d3b1b7b1d6b1c068aa73e3d801cb3d58fc45116", size = 15334307 }, + { url = "https://files.pythonhosted.org/packages/b2/ac/8d572b8c6d689eb50db0252e9d35ee6278a6aed481b64d7e025cf51e32c4/tensorstore-0.1.74-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a6926554a8633d0210bdba619d3996fff6a6af4214237fbca626e6ddfcc8ea39", size = 13288669 }, + { url = 
"https://files.pythonhosted.org/packages/9d/6c/3e76d614ad70b61670686d91abaa3ddee6b01255bf2b40f050beb15b7970/tensorstore-0.1.74-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d584e468eb4ef8195f5d21a9da4780cf96c6074b87ef219b43a89efce3d503ca", size = 17031720 }, + { url = "https://files.pythonhosted.org/packages/31/f3/09d7c3ad7c9517f89b5be9b4460b83333e98dce1c9ab0a52464ded0bab67/tensorstore-0.1.74-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0af2225431d59f8a2bb4db4c1519252f10ee407e6550875d78212d3d34ee743", size = 18378829 }, + { url = "https://files.pythonhosted.org/packages/a7/f2/45ece38705280ed9ebf4ccaf084ed1e76e35b1eeec8c510e589978ac8dcd/tensorstore-0.1.74-cp312-cp312-win_amd64.whl", hash = "sha256:4e35f3679873cdc488aae20b9ae2cea4589c7b147a80edb07eb3f09eba47d43d", size = 12432300 }, + { url = "https://files.pythonhosted.org/packages/fb/e9/a08c6a6eb7d6b4b26053d4575196a06c6fccf4e89f9bc625f81e7c91bb5d/tensorstore-0.1.74-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:f7d2c80de9ab352ca14aeca798d6650c5670725e6f8eac73f4fcc8f3147ca614", size = 15334469 }, + { url = "https://files.pythonhosted.org/packages/9a/a9/64b90c6e66e0b8043e641090144c6614b0c78d9a719b9110d953d13a516d/tensorstore-0.1.74-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ceef7d2dcfd1caf61356f7eeb9a37896b4825b4be2750b00615cf5fb1ae47a8b", size = 13288791 }, + { url = "https://files.pythonhosted.org/packages/62/e8/226cfc25d7eac00e783ff2ee4994830c4a42cd8690e207c4a8b93210f3d9/tensorstore-0.1.74-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e71637002a806bc1b0f0f05556d1c33493a43f3ab35f9632b3d48855677d93dc", size = 17031815 }, + { url = "https://files.pythonhosted.org/packages/9a/09/dce8a0942d84f6bb039b5ea3e8bc6a479b1a9535cd216b0d42dd03c4f761/tensorstore-0.1.74-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c799edf9000aee68d6676e3d2f73d4e1a56fc817c47e150732f6d3bd2b1ef46d", size = 
18378091 }, + { url = "https://files.pythonhosted.org/packages/a6/23/5218575d25de9d8debfb3faf290a1e3b9a7b6be9e77ba07ff3a63a0bc899/tensorstore-0.1.74-cp313-cp313-win_amd64.whl", hash = "sha256:5da86437ffa1ee0f0c590c38daa2f4b548890ce66b1f470ac98714cb0eabdbf5", size = 12432635 }, +] + +[[package]] +name = "termcolor" +version = "3.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/6c/3d75c196ac07ac8749600b60b03f4f6094d54e132c4d94ebac6ee0e0add0/termcolor-3.1.0.tar.gz", hash = "sha256:6a6dd7fbee581909eeec6a756cff1d7f7c376063b14e4a298dc4980309e55970", size = 14324 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/bd/de8d508070629b6d84a30d01d57e4a65c69aa7f5abe7560b8fad3b50ea59/termcolor-3.1.0-py3-none-any.whl", hash = "sha256:591dd26b5c2ce03b9e43f391264626557873ce1d379019786f99b0c2bee140aa", size = 7684 }, +] + +[[package]] +name = "tokenizers" +version = "0.21.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "huggingface-hub" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/92/76/5ac0c97f1117b91b7eb7323dcd61af80d72f790b4df71249a7850c195f30/tokenizers-0.21.1.tar.gz", hash = "sha256:a1bb04dc5b448985f86ecd4b05407f5a8d97cb2c0532199b2a302a604a0165ab", size = 343256 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/1f/328aee25f9115bf04262e8b4e5a2050b7b7cf44b59c74e982db7270c7f30/tokenizers-0.21.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:e78e413e9e668ad790a29456e677d9d3aa50a9ad311a40905d6861ba7692cf41", size = 2780767 }, + { url = "https://files.pythonhosted.org/packages/ae/1a/4526797f3719b0287853f12c5ad563a9be09d446c44ac784cdd7c50f76ab/tokenizers-0.21.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:cd51cd0a91ecc801633829fcd1fda9cf8682ed3477c6243b9a095539de4aecf3", size = 2650555 }, + { url = 
"https://files.pythonhosted.org/packages/4d/7a/a209b29f971a9fdc1da86f917fe4524564924db50d13f0724feed37b2a4d/tokenizers-0.21.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28da6b72d4fb14ee200a1bd386ff74ade8992d7f725f2bde2c495a9a98cf4d9f", size = 2937541 }, + { url = "https://files.pythonhosted.org/packages/3c/1e/b788b50ffc6191e0b1fc2b0d49df8cff16fe415302e5ceb89f619d12c5bc/tokenizers-0.21.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:34d8cfde551c9916cb92014e040806122295a6800914bab5865deb85623931cf", size = 2819058 }, + { url = "https://files.pythonhosted.org/packages/36/aa/3626dfa09a0ecc5b57a8c58eeaeb7dd7ca9a37ad9dd681edab5acd55764c/tokenizers-0.21.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aaa852d23e125b73d283c98f007e06d4595732104b65402f46e8ef24b588d9f8", size = 3133278 }, + { url = "https://files.pythonhosted.org/packages/a4/4d/8fbc203838b3d26269f944a89459d94c858f5b3f9a9b6ee9728cdcf69161/tokenizers-0.21.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a21a15d5c8e603331b8a59548bbe113564136dc0f5ad8306dd5033459a226da0", size = 3144253 }, + { url = "https://files.pythonhosted.org/packages/d8/1b/2bd062adeb7c7511b847b32e356024980c0ffcf35f28947792c2d8ad2288/tokenizers-0.21.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2fdbd4c067c60a0ac7eca14b6bd18a5bebace54eb757c706b47ea93204f7a37c", size = 3398225 }, + { url = "https://files.pythonhosted.org/packages/8a/63/38be071b0c8e06840bc6046991636bcb30c27f6bb1e670f4f4bc87cf49cc/tokenizers-0.21.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dd9a0061e403546f7377df940e866c3e678d7d4e9643d0461ea442b4f89e61a", size = 3038874 }, + { url = "https://files.pythonhosted.org/packages/ec/83/afa94193c09246417c23a3c75a8a0a96bf44ab5630a3015538d0c316dd4b/tokenizers-0.21.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = 
"sha256:db9484aeb2e200c43b915a1a0150ea885e35f357a5a8fabf7373af333dcc8dbf", size = 9014448 }, + { url = "https://files.pythonhosted.org/packages/ae/b3/0e1a37d4f84c0f014d43701c11eb8072704f6efe8d8fc2dcdb79c47d76de/tokenizers-0.21.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:ed248ab5279e601a30a4d67bdb897ecbe955a50f1e7bb62bd99f07dd11c2f5b6", size = 8937877 }, + { url = "https://files.pythonhosted.org/packages/ac/33/ff08f50e6d615eb180a4a328c65907feb6ded0b8f990ec923969759dc379/tokenizers-0.21.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:9ac78b12e541d4ce67b4dfd970e44c060a2147b9b2a21f509566d556a509c67d", size = 9186645 }, + { url = "https://files.pythonhosted.org/packages/5f/aa/8ae85f69a9f6012c6f8011c6f4aa1c96154c816e9eea2e1b758601157833/tokenizers-0.21.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:e5a69c1a4496b81a5ee5d2c1f3f7fbdf95e90a0196101b0ee89ed9956b8a168f", size = 9384380 }, + { url = "https://files.pythonhosted.org/packages/e8/5b/a5d98c89f747455e8b7a9504910c865d5e51da55e825a7ae641fb5ff0a58/tokenizers-0.21.1-cp39-abi3-win32.whl", hash = "sha256:1039a3a5734944e09de1d48761ade94e00d0fa760c0e0551151d4dd851ba63e3", size = 2239506 }, + { url = "https://files.pythonhosted.org/packages/e6/b6/072a8e053ae600dcc2ac0da81a23548e3b523301a442a6ca900e92ac35be/tokenizers-0.21.1-cp39-abi3-win_amd64.whl", hash = "sha256:0f0dcbcc9f6e13e675a66d7a5f2f225a736745ce484c1a4e07476a89ccdad382", size = 2435481 }, +] + +[[package]] +name = "toml" +version = "0.10.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/be/ba/1f744cdc819428fc6b5084ec34d9b30660f6f9daaf70eead706e3203ec3c/toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f", size = 22253 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/6f/7120676b6d73228c96e17f1f794d8ab046fc910d781c8d151120c3f1569e/toml-0.10.2-py2.py3-none-any.whl", hash = 
"sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", size = 16588 }, +] + +[[package]] +name = "toolz" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/0b/d80dfa675bf592f636d1ea0b835eab4ec8df6e9415d8cfd766df54456123/toolz-1.0.0.tar.gz", hash = "sha256:2c86e3d9a04798ac556793bced838816296a2f085017664e4995cb40a1047a02", size = 66790 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/98/eb27cc78ad3af8e302c9d8ff4977f5026676e130d28dd7578132a457170c/toolz-1.0.0-py3-none-any.whl", hash = "sha256:292c8f1c4e7516bf9086f8850935c799a874039c8bcf959d47b600e4c44a6236", size = 56383 }, +] + +[[package]] +name = "torch" +version = "2.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "filelock" }, + { name = "fsspec" }, + { name = "jinja2" }, + { name = "networkx" }, + { name = "nvidia-cublas-cu12", version = "12.6.4.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-cupti-cu12", version = "12.6.80", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-runtime-cu12", version = "12.6.77", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cudnn-cu12", version = "9.5.1.17", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cufft-cu12", version = "11.3.0.4", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cufile-cu12", marker = "platform_machine == 'x86_64' and 
sys_platform == 'linux'" }, + { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusolver-cu12", version = "11.7.1.2", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparse-cu12", version = "12.5.4.2", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nccl-cu12", version = "2.26.2", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvjitlink-cu12", version = "12.6.85", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "setuptools", marker = "python_full_version >= '3.12'" }, + { name = "sympy" }, + { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "typing-extensions" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/56/2eae3494e3d375533034a8e8cf0ba163363e996d85f0629441fa9d9843fe/torch-2.7.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:236f501f2e383f1cb861337bdf057712182f910f10aeaf509065d54d339e49b2", size = 99093039 }, + { url = "https://files.pythonhosted.org/packages/e5/94/34b80bd172d0072c9979708ccd279c2da2f55c3ef318eceec276ab9544a4/torch-2.7.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:06eea61f859436622e78dd0cdd51dbc8f8c6d76917a9cf0555a333f9eac31ec1", size = 821174704 }, + { url = "https://files.pythonhosted.org/packages/50/9e/acf04ff375b0b49a45511c55d188bcea5c942da2aaf293096676110086d1/torch-2.7.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:8273145a2e0a3c6f9fd2ac36762d6ee89c26d430e612b95a99885df083b04e52", size = 216095937 }, + { url = "https://files.pythonhosted.org/packages/5b/2b/d36d57c66ff031f93b4fa432e86802f84991477e522adcdffd314454326b/torch-2.7.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:aea4fc1bf433d12843eb2c6b2204861f43d8364597697074c8d38ae2507f8730", size = 68640034 }, + { url = "https://files.pythonhosted.org/packages/87/93/fb505a5022a2e908d81fe9a5e0aa84c86c0d5f408173be71c6018836f34e/torch-2.7.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:27ea1e518df4c9de73af7e8a720770f3628e7f667280bce2be7a16292697e3fa", size = 98948276 }, + { url = "https://files.pythonhosted.org/packages/56/7e/67c3fe2b8c33f40af06326a3d6ae7776b3e3a01daa8f71d125d78594d874/torch-2.7.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c33360cfc2edd976c2633b3b66c769bdcbbf0e0b6550606d188431c81e7dd1fc", size = 821025792 }, + { url = "https://files.pythonhosted.org/packages/a1/37/a37495502bc7a23bf34f89584fa5a78e25bae7b8da513bc1b8f97afb7009/torch-2.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:d8bf6e1856ddd1807e79dc57e54d3335f2b62e6f316ed13ed3ecfe1fc1df3d8b", size = 216050349 }, + { url = "https://files.pythonhosted.org/packages/3a/60/04b77281c730bb13460628e518c52721257814ac6c298acd25757f6a175c/torch-2.7.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:787687087412c4bd68d315e39bc1223f08aae1d16a9e9771d95eabbb04ae98fb", size = 68645146 }, + { url = "https://files.pythonhosted.org/packages/66/81/e48c9edb655ee8eb8c2a6026abdb6f8d2146abd1f150979ede807bb75dcb/torch-2.7.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:03563603d931e70722dce0e11999d53aa80a375a3d78e6b39b9f6805ea0a8d28", size = 98946649 }, + { url = "https://files.pythonhosted.org/packages/3a/24/efe2f520d75274fc06b695c616415a1e8a1021d87a13c68ff9dce733d088/torch-2.7.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:d632f5417b6980f61404a125b999ca6ebd0b8b4bbdbb5fbbba44374ab619a412", size = 821033192 }, + { url = 
"https://files.pythonhosted.org/packages/dd/d9/9c24d230333ff4e9b6807274f6f8d52a864210b52ec794c5def7925f4495/torch-2.7.1-cp313-cp313-win_amd64.whl", hash = "sha256:23660443e13995ee93e3d844786701ea4ca69f337027b05182f5ba053ce43b38", size = 216055668 }, + { url = "https://files.pythonhosted.org/packages/95/bf/e086ee36ddcef9299f6e708d3b6c8487c1651787bb9ee2939eb2a7f74911/torch-2.7.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:0da4f4dba9f65d0d203794e619fe7ca3247a55ffdcbd17ae8fb83c8b2dc9b585", size = 68925988 }, + { url = "https://files.pythonhosted.org/packages/69/6a/67090dcfe1cf9048448b31555af6efb149f7afa0a310a366adbdada32105/torch-2.7.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:e08d7e6f21a617fe38eeb46dd2213ded43f27c072e9165dc27300c9ef9570934", size = 99028857 }, + { url = "https://files.pythonhosted.org/packages/90/1c/48b988870823d1cc381f15ec4e70ed3d65e043f43f919329b0045ae83529/torch-2.7.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:30207f672328a42df4f2174b8f426f354b2baa0b7cca3a0adb3d6ab5daf00dc8", size = 821098066 }, + { url = "https://files.pythonhosted.org/packages/7b/eb/10050d61c9d5140c5dc04a89ed3257ef1a6b93e49dd91b95363d757071e0/torch-2.7.1-cp313-cp313t-win_amd64.whl", hash = "sha256:79042feca1c634aaf6603fe6feea8c6b30dfa140a6bbc0b973e2260c7e79a22e", size = 216336310 }, + { url = "https://files.pythonhosted.org/packages/b1/29/beb45cdf5c4fc3ebe282bf5eafc8dfd925ead7299b3c97491900fe5ed844/torch-2.7.1-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:988b0cbc4333618a1056d2ebad9eb10089637b659eb645434d0809d8d937b946", size = 68645708 }, +] + +[[package]] +name = "torchcodec" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/09/b7/481cec9d5d3d679919632bf873720c905cb4af8b157a363c8f4b470bfd35/torchcodec-0.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4a1c488df253c62ed67b945f3be27a800acbc3fecacda52127fbabd72a2c6e2b", size = 2972557 }, + { url = 
"https://files.pythonhosted.org/packages/31/d8/7e00a46cb6f8d5dc01c88f67f5014835c39e1189f7ff0bbd82c363aeef0f/torchcodec-0.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a2b3ee4c40236eec82faa61f5941f1bb746bed81bb0a0e00751142f0cbf0e5e0", size = 1328647 }, + { url = "https://files.pythonhosted.org/packages/04/9d/18944c18f5c29516fc5e920d764904b703775812c4b4756b11ed6970f1df/torchcodec-0.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:29ee0ad7514d5636d3a889cbdc92d4ed68e8f283d8da971fc6faa001e3e5dd67", size = 3461019 }, + { url = "https://files.pythonhosted.org/packages/17/26/2ac91c004d2c7cf813c8ccc151e7760b0d4b4f8ba26648d873e8fa7654be/torchcodec-0.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:0c1211bc2fb68cac5080d71635880e5a1ddc0d95f038cad1f7c3d5c32492f770", size = 1327571 }, + { url = "https://files.pythonhosted.org/packages/c6/7b/c15be1378e4816d72d2cb544cd161154131aedae2121667019452e47d78f/torchcodec-0.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:89d95adb8cf89cf85ff1c09c15f3de0df3b63c2e6cae5be0b387af0e8c84dbec", size = 3398659 }, + { url = "https://files.pythonhosted.org/packages/be/ce/451a1e79964790866d58f005a8789334434076457912ba295c73961a1ccf/torchcodec-0.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:474b476880b70f44dce47672a98e3516cd15bad2ddde2d0537319d12c0a3e80e", size = 1327372 }, + { url = "https://files.pythonhosted.org/packages/8f/04/bc5c72c279e77bdeaf0b26178c650e61800798c1fc4ff6b9353760f8ee5a/torchcodec-0.4.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:51e94f4eb63bac48e7ec4fc11c03ddb0cfa9af7210077d507666ecb2aa81e0ac", size = 3304288 }, + { url = "https://files.pythonhosted.org/packages/04/a5/9ff2b9819058fd3114a794c34df7992874ab62a0ad180879ba4d9d3f392d/torchcodec-0.4.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:87a85dfd1e1555c48c61bc152f03545d11940b71cf55079aa4c06cd41492467f", size = 1327762 }, +] + +[[package]] +name = "torchvision" +version = "0.22.1" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "pillow" }, + { name = "torch" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/00/bdab236ef19da050290abc2b5203ff9945c84a1f2c7aab73e8e9c8c85669/torchvision-0.22.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4addf626e2b57fc22fd6d329cf1346d474497672e6af8383b7b5b636fba94a53", size = 1947827 }, + { url = "https://files.pythonhosted.org/packages/ac/d0/18f951b2be3cfe48c0027b349dcc6fde950e3dc95dd83e037e86f284f6fd/torchvision-0.22.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:8b4a53a6067d63adba0c52f2b8dd2290db649d642021674ee43c0c922f0c6a69", size = 2514021 }, + { url = "https://files.pythonhosted.org/packages/c3/1a/63eb241598b36d37a0221e10af357da34bd33402ccf5c0765e389642218a/torchvision-0.22.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:b7866a3b326413e67724ac46f1ee594996735e10521ba9e6cdbe0fa3cd98c2f2", size = 7487300 }, + { url = "https://files.pythonhosted.org/packages/e5/73/1b009b42fe4a7774ba19c23c26bb0f020d68525c417a348b166f1c56044f/torchvision-0.22.1-cp311-cp311-win_amd64.whl", hash = "sha256:bb3f6df6f8fd415ce38ec4fd338376ad40c62e86052d7fc706a0dd51efac1718", size = 1707989 }, + { url = "https://files.pythonhosted.org/packages/02/90/f4e99a5112dc221cf68a485e853cc3d9f3f1787cb950b895f3ea26d1ea98/torchvision-0.22.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:153f1790e505bd6da123e21eee6e83e2e155df05c0fe7d56347303067d8543c5", size = 1947827 }, + { url = "https://files.pythonhosted.org/packages/25/f6/53e65384cdbbe732cc2106bb04f7fb908487e4fb02ae4a1613ce6904a122/torchvision-0.22.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:964414eef19459d55a10e886e2fca50677550e243586d1678f65e3f6f6bac47a", size = 2514576 }, + { url = "https://files.pythonhosted.org/packages/17/8b/155f99042f9319bd7759536779b2a5b67cbd4f89c380854670850f89a2f4/torchvision-0.22.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = 
"sha256:699c2d70d33951187f6ed910ea05720b9b4aaac1dcc1135f53162ce7d42481d3", size = 7485962 }, + { url = "https://files.pythonhosted.org/packages/05/17/e45d5cd3627efdb47587a0634179a3533593436219de3f20c743672d2a79/torchvision-0.22.1-cp312-cp312-win_amd64.whl", hash = "sha256:75e0897da7a8e43d78632f66f2bdc4f6e26da8d3f021a7c0fa83746073c2597b", size = 1707992 }, + { url = "https://files.pythonhosted.org/packages/7a/30/fecdd09fb973e963da68207fe9f3d03ec6f39a935516dc2a98397bf495c6/torchvision-0.22.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c3ae3319624c43cc8127020f46c14aa878406781f0899bb6283ae474afeafbf", size = 1947818 }, + { url = "https://files.pythonhosted.org/packages/55/f4/b45f6cd92fa0acfac5e31b8e9258232f25bcdb0709a604e8b8a39d76e411/torchvision-0.22.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:4a614a6a408d2ed74208d0ea6c28a2fbb68290e9a7df206c5fef3f0b6865d307", size = 2471597 }, + { url = "https://files.pythonhosted.org/packages/8d/b0/3cffd6a285b5ffee3fe4a31caff49e350c98c5963854474d1c4f7a51dea5/torchvision-0.22.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:7ee682be589bb1a002b7704f06b8ec0b89e4b9068f48e79307d2c6e937a9fdf4", size = 7485894 }, + { url = "https://files.pythonhosted.org/packages/fd/1d/0ede596fedc2080d18108149921278b59f220fbb398f29619495337b0f86/torchvision-0.22.1-cp313-cp313-win_amd64.whl", hash = "sha256:2566cafcfa47ecfdbeed04bab8cef1307c8d4ef75046f7624b9e55f384880dfe", size = 1708020 }, + { url = "https://files.pythonhosted.org/packages/0f/ca/e9a06bd61ee8e04fb4962a3fb524fe6ee4051662db07840b702a9f339b24/torchvision-0.22.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:043d9e35ed69c2e586aff6eb9e2887382e7863707115668ac9d140da58f42cba", size = 2137623 }, + { url = "https://files.pythonhosted.org/packages/ab/c8/2ebe90f18e7ffa2120f5c3eab62aa86923185f78d2d051a455ea91461608/torchvision-0.22.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:27142bcc8a984227a6dcf560985e83f52b82a7d3f5fe9051af586a2ccc46ef26", size = 
2476561 }, + { url = "https://files.pythonhosted.org/packages/94/8b/04c6b15f8c29b39f0679589753091cec8b192ab296d4fdaf9055544c4ec9/torchvision-0.22.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:ef46e065502f7300ad6abc98554131c35dc4c837b978d91306658f1a65c00baa", size = 7658543 }, + { url = "https://files.pythonhosted.org/packages/ab/c0/131628e6d42682b0502c63fd7f647b8b5ca4bd94088f6c85ca7225db8ac4/torchvision-0.22.1-cp313-cp313t-win_amd64.whl", hash = "sha256:7414eeacfb941fa21acddcd725f1617da5630ec822e498660a4b864d7d998075", size = 1629892 }, +] + +[[package]] +name = "tornado" +version = "6.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/51/89/c72771c81d25d53fe33e3dca61c233b665b2780f21820ba6fd2c6793c12b/tornado-6.5.1.tar.gz", hash = "sha256:84ceece391e8eb9b2b95578db65e920d2a61070260594819589609ba9bc6308c", size = 509934 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/89/f4532dee6843c9e0ebc4e28d4be04c67f54f60813e4bf73d595fe7567452/tornado-6.5.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d50065ba7fd11d3bd41bcad0825227cc9a95154bad83239357094c36708001f7", size = 441948 }, + { url = "https://files.pythonhosted.org/packages/15/9a/557406b62cffa395d18772e0cdcf03bed2fff03b374677348eef9f6a3792/tornado-6.5.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9e9ca370f717997cb85606d074b0e5b247282cf5e2e1611568b8821afe0342d6", size = 440112 }, + { url = "https://files.pythonhosted.org/packages/55/82/7721b7319013a3cf881f4dffa4f60ceff07b31b394e459984e7a36dc99ec/tornado-6.5.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b77e9dfa7ed69754a54c89d82ef746398be82f749df69c4d3abe75c4d1ff4888", size = 443672 }, + { url = "https://files.pythonhosted.org/packages/7d/42/d11c4376e7d101171b94e03cef0cbce43e823ed6567ceda571f54cf6e3ce/tornado-6.5.1-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:253b76040ee3bab8bcf7ba9feb136436a3787208717a1fb9f2c16b744fba7331", size = 443019 }, + { url = "https://files.pythonhosted.org/packages/7d/f7/0c48ba992d875521ac761e6e04b0a1750f8150ae42ea26df1852d6a98942/tornado-6.5.1-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:308473f4cc5a76227157cdf904de33ac268af770b2c5f05ca6c1161d82fdd95e", size = 443252 }, + { url = "https://files.pythonhosted.org/packages/89/46/d8d7413d11987e316df4ad42e16023cd62666a3c0dfa1518ffa30b8df06c/tornado-6.5.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:caec6314ce8a81cf69bd89909f4b633b9f523834dc1a352021775d45e51d9401", size = 443930 }, + { url = "https://files.pythonhosted.org/packages/78/b2/f8049221c96a06df89bed68260e8ca94beca5ea532ffc63b1175ad31f9cc/tornado-6.5.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:13ce6e3396c24e2808774741331638ee6c2f50b114b97a55c5b442df65fd9692", size = 443351 }, + { url = "https://files.pythonhosted.org/packages/76/ff/6a0079e65b326cc222a54720a748e04a4db246870c4da54ece4577bfa702/tornado-6.5.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5cae6145f4cdf5ab24744526cc0f55a17d76f02c98f4cff9daa08ae9a217448a", size = 443328 }, + { url = "https://files.pythonhosted.org/packages/49/18/e3f902a1d21f14035b5bc6246a8c0f51e0eef562ace3a2cea403c1fb7021/tornado-6.5.1-cp39-abi3-win32.whl", hash = "sha256:e0a36e1bc684dca10b1aa75a31df8bdfed656831489bc1e6a6ebed05dc1ec365", size = 444396 }, + { url = "https://files.pythonhosted.org/packages/7b/09/6526e32bf1049ee7de3bebba81572673b19a2a8541f795d887e92af1a8bc/tornado-6.5.1-cp39-abi3-win_amd64.whl", hash = "sha256:908e7d64567cecd4c2b458075589a775063453aeb1d2a1853eedb806922f568b", size = 444840 }, + { url = "https://files.pythonhosted.org/packages/55/a7/535c44c7bea4578e48281d83c615219f3ab19e6abc67625ef637c73987be/tornado-6.5.1-cp39-abi3-win_arm64.whl", hash = "sha256:02420a0eb7bf617257b9935e2b754d1b63897525d8a289c9d65690d580b4dcf7", size = 443596 }, +] + 
+[[package]] +name = "tqdm" +version = "4.67.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540 }, +] + +[[package]] +name = "tqdm-loggable" +version = "0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "tqdm" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/65/96/d924c326727dbdcac6043065dba08b1455aaaca4f7ef1e79d4fea889b34d/tqdm_loggable-0.2.tar.gz", hash = "sha256:175abec3e1f63bbd2eac192fa5da075e80c7bb715d7ccf3cd1a29b7ab5af0617", size = 7442 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/1f/1acb36a85797beba22934f124be6b51a7c18a4f408ce31443bec073181c7/tqdm_loggable-0.2-py3-none-any.whl", hash = "sha256:9703046302b93a667166487759e6f3f49597e86c89eb132ba1f31caa07bf0941", size = 9264 }, +] + +[[package]] +name = "traitlets" +version = "5.14.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359 }, +] + +[[package]] +name = 
"transformers" +version = "4.53.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "filelock" }, + { name = "huggingface-hub" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pyyaml" }, + { name = "regex" }, + { name = "requests" }, + { name = "safetensors" }, + { name = "tokenizers" }, + { name = "tqdm" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4c/67/80f51466ec447028fd84469b208eb742533ce06cc8fad2e3181380199e5c/transformers-4.53.2.tar.gz", hash = "sha256:6c3ed95edfb1cba71c4245758f1b4878c93bf8cde77d076307dacb2cbbd72be2", size = 9201233 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/96/88/beb33a79a382fcd2aed0be5222bdc47f41e4bfe7aaa90ae1374f1d8ea2af/transformers-4.53.2-py3-none-any.whl", hash = "sha256:db8f4819bb34f000029c73c3c557e7d06fc1b8e612ec142eecdae3947a9c78bf", size = 10826609 }, +] + +[[package]] +name = "tree" +version = "0.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "pillow" }, + { name = "setuptools" }, + { name = "svgwrite" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/29/3f/63cbed2909786f0e5ac30a4ae5791ad597c6b5fec7167e161c55bba511ce/Tree-0.2.4.tar.gz", hash = "sha256:f84d8ec9bf50dd69f551da78925a23d110864e7706551f590cdade27646f7883", size = 6489 } + +[[package]] +name = "treescope" +version = "0.1.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/26/27/80ad254da167e0055d5679aefd224ab08844a4cd55aeee7ef72c999d5fc6/treescope-0.1.9.tar.gz", hash = "sha256:ba6cdbdc9c5b52691d5f3bb4c5d5c7daa5627119acac8640b46d37e6aabe63a6", size = 544385 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/09/b7e7bc5f21313d227e4fb98d2037646457ec06746327c5dd8ffed75e41e1/treescope-0.1.9-py3-none-any.whl", hash = "sha256:68677013a9f0228212fccf835f3fb037be07ae8b4c5f6f58eefab11198f83cf7", size = 182162 
}, +] + +[[package]] +name = "triton" +version = "3.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "setuptools", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/21/2f/3e56ea7b58f80ff68899b1dbe810ff257c9d177d288c6b0f55bf2fe4eb50/triton-3.3.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b31e3aa26f8cb3cc5bf4e187bf737cbacf17311e1112b781d4a059353dfd731b", size = 155689937 }, + { url = "https://files.pythonhosted.org/packages/24/5f/950fb373bf9c01ad4eb5a8cd5eaf32cdf9e238c02f9293557a2129b9c4ac/triton-3.3.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9999e83aba21e1a78c1f36f21bce621b77bcaa530277a50484a7cb4a822f6e43", size = 155669138 }, + { url = "https://files.pythonhosted.org/packages/74/1f/dfb531f90a2d367d914adfee771babbd3f1a5b26c3f5fbc458dee21daa78/triton-3.3.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b89d846b5a4198317fec27a5d3a609ea96b6d557ff44b56c23176546023c4240", size = 155673035 }, + { url = "https://files.pythonhosted.org/packages/28/71/bd20ffcb7a64c753dc2463489a61bf69d531f308e390ad06390268c4ea04/triton-3.3.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3198adb9d78b77818a5388bff89fa72ff36f9da0bc689db2f0a651a67ce6a42", size = 155735832 }, +] + +[[package]] +name = "typeguard" +version = "4.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/60/8cd6a3d78d00ceeb2193c02b7ed08f063d5341ccdfb24df88e61f383048e/typeguard-4.4.2.tar.gz", hash = "sha256:a6f1065813e32ef365bc3b3f503af8a96f9dd4e0033a02c28c4a4983de8c6c49", size = 75746 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/4b/9a77dc721aa0b7f74440a42e4ef6f9a4fae7324e17f64f88b96f4c25cc05/typeguard-4.4.2-py3-none-any.whl", hash 
= "sha256:77a78f11f09777aeae7fa08585f33b5f4ef0e7335af40005b0c422ed398ff48c", size = 35801 }, +] + +[[package]] +name = "typing-extensions" +version = "4.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/37/23083fcd6e35492953e8d2aaaa68b860eb422b34627b13f2ce3eb6106061/typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef", size = 106967 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8b/54/b1ae86c0973cc6f0210b53d508ca3641fb6d0c56823f288d108bc7ab3cc8/typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c", size = 45806 }, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/74/1789779d91f1961fa9438e9a8710cdae6bd138c80d7303996933d117264a/typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78", size = 13825 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/65/f3/107a22063bf27bdccf2024833d3445f4eea42b2e598abfbd46f6a63b6cb0/typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f", size = 8827 }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552 }, +] + +[[package]] +name = "tyro" +version = "0.9.22" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "docstring-parser" }, + { name = "rich" }, + { name = "shtab" }, + { name = "typeguard" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7c/27/0f96255f378be5dea9e222ab96f6f5d76c637aaf998846b949b0e362c326/tyro-0.9.22.tar.gz", hash = "sha256:727124cb82874ee28b07b35c534b0e2da5cf65da7d19acf52bc5bc0869b19974", size = 284332 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/4a/b489665cfeb52ce2364d9b997c900fd72eac628ed3b8600d45f04e878b06/tyro-0.9.22-py3-none-any.whl", hash = "sha256:90fce6169c40abf4fab48ae6d8fd013c909e0e63e16d6c33d2e9481947a63e58", size = 125335 }, +] + +[[package]] +name = "tzdata" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839 }, +] + +[[package]] +name = "urllib3" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/78/16493d9c386d8e60e442a35feac5e00f0913c0f4b7c217c11e8ec2ff53e0/urllib3-2.4.0.tar.gz", hash = 
"sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466", size = 390672 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/11/cc635220681e93a0183390e26485430ca2c7b5f9d33b15c74c2861cb8091/urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813", size = 128680 }, +] + +[[package]] +name = "virtualenv" +version = "20.31.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "distlib" }, + { name = "filelock" }, + { name = "platformdirs" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/56/2c/444f465fb2c65f40c3a104fd0c495184c4f2336d65baf398e3c75d72ea94/virtualenv-20.31.2.tar.gz", hash = "sha256:e10c0a9d02835e592521be48b332b6caee6887f332c111aa79a09b9e79efc2af", size = 6076316 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/40/b1c265d4b2b62b58576588510fc4d1fe60a86319c8de99fd8e9fec617d2c/virtualenv-20.31.2-py3-none-any.whl", hash = "sha256:36efd0d9650ee985f0cad72065001e66d49a6f24eb44d98980f630686243cf11", size = 6057982 }, +] + +[[package]] +name = "wadler-lindig" +version = "0.1.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4d/c8/e2112ecb627e01c9e2911f9b388167231c23a114946946d046f4e9535118/wadler_lindig-0.1.6.tar.gz", hash = "sha256:8b6adad9718291a7d82fb088a596b93659ce2346321ca76819810affbc66102b", size = 15812 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/9a/937038f3efc70871fb26b0ee6148efcfcfb96643c517c2aaddd7ed07ad76/wadler_lindig-0.1.6-py3-none-any.whl", hash = "sha256:d707f63994c7d3e1e125e7fb7e196f4adb6f80f4a11beb955c6da937754026a3", size = 20483 }, +] + +[[package]] +name = "wandb" +version = "0.19.11" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "docker-pycreds" }, + { name = "gitpython" }, + { name = "platformdirs" }, + { name = "protobuf" }, + { name = "psutil" }, + { 
name = "pydantic" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sentry-sdk" }, + { name = "setproctitle" }, + { name = "setuptools" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/39/98/0ff2925a21b998d4b84731429f4554ca3d9b5cad42c09c075e7306c3aca0/wandb-0.19.11.tar.gz", hash = "sha256:3f50a27dfadbb25946a513ffe856c0e8e538b5626ef207aa50b00c3b0356bff8", size = 39511477 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/2c/f8bab58c73fdde4442f1baffd9ea5d1bb3113906a97a27e8d9ab72db7a69/wandb-0.19.11-py3-none-any.whl", hash = "sha256:ff3bf050ba25ebae7aedc9a775ffab90c28068832edfe5458423f488c2558f82", size = 6481327 }, + { url = "https://files.pythonhosted.org/packages/45/4a/34b364280f690f4c6d7660f528fba9f13bdecabc4c869d266a4632cf836e/wandb-0.19.11-py3-none-macosx_10_14_x86_64.whl", hash = "sha256:0823fd9aa6343f40c04e01959997ca8c6d6adf1bd81c8d45261fa4915f1c6b67", size = 20555751 }, + { url = "https://files.pythonhosted.org/packages/d8/e6/a27868fdb83a60df37b9d15e52c3353dd88d74442f27ae48cf765c6b9554/wandb-0.19.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c758ef5439599d9023db5b3cf1698477055d82f9fae48af2779f63f1d289167c", size = 20377587 }, + { url = "https://files.pythonhosted.org/packages/21/f7/d5cf5b58c2b3015364c7b2b6af6a440cbeda4103b67332e1e64b30f6252d/wandb-0.19.11-py3-none-macosx_11_0_x86_64.whl", hash = "sha256:de2dfd4911e7691735e271654c735e7b90cdee9d29a3796fbf06e9e92d48f3d7", size = 20985041 }, + { url = "https://files.pythonhosted.org/packages/68/06/8b827f16a0b8f18002d2fffa7c5a7fd447946e0d0c68aeec0dd7eb18cdd3/wandb-0.19.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfff738850770d26b13f8f3fe400a6456f1e39e87f3f29d5aa241b249476df95", size = 20017696 }, + { url = "https://files.pythonhosted.org/packages/f9/31/eeb2878b26566c04c3e9b8b20b3ec3c54a2be50535088d36a37c008e07a3/wandb-0.19.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e8ff673007448df11cc69379ae0df28ead866800dc1ec7bc151b402db0bbcf40", size = 21425857 }, + { url = "https://files.pythonhosted.org/packages/10/30/08988360678ae78334bb16625c28260fcaba49f500b89f8766807cb74d71/wandb-0.19.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:858bc5023fa1b3285d89d15f62be78afdb28301064daa49ea3f4ebde5dcedad2", size = 20023145 }, + { url = "https://files.pythonhosted.org/packages/c8/e9/a639c42c8ca517c4d25e8970d64d0c5a9bd35b784faed5f47d9cca3dcd12/wandb-0.19.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:90e4b57649896acb16c3dd41b3093df1a169c2f1d94ff15d76af86b8a60dcdac", size = 21504842 }, + { url = "https://files.pythonhosted.org/packages/44/74/dbe9277dd935b77dd16939cdf15357766fec0813a6e336cf5f1d07eb016e/wandb-0.19.11-py3-none-win32.whl", hash = "sha256:38dea43c7926d8800405a73b80b9adfe81eb315fc6f2ac6885c77eb966634421", size = 20767584 }, + { url = "https://files.pythonhosted.org/packages/36/d5/215cac3edec5c5ac6e7231beb9d22466d5d4e4a132fa3a1d044f7d682c15/wandb-0.19.11-py3-none-win_amd64.whl", hash = "sha256:73402003c56ddc2198878492ab2bff55bb49bce5587eae5960e737d27c0c48f7", size = 20767588 }, +] + +[[package]] +name = "wcwidth" +version = "0.2.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 }, +] + +[[package]] +name = "websockets" +version = "15.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9f/32/18fcd5919c293a398db67443acd33fde142f283853076049824fc58e6f75/websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431", size = 175423 }, + { url = "https://files.pythonhosted.org/packages/76/70/ba1ad96b07869275ef42e2ce21f07a5b0148936688c2baf7e4a1f60d5058/websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57", size = 173082 }, + { url = "https://files.pythonhosted.org/packages/86/f2/10b55821dd40eb696ce4704a87d57774696f9451108cff0d2824c97e0f97/websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905", size = 173330 }, + { url = "https://files.pythonhosted.org/packages/a5/90/1c37ae8b8a113d3daf1065222b6af61cc44102da95388ac0018fcb7d93d9/websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562", size = 182878 }, + { url = "https://files.pythonhosted.org/packages/8e/8d/96e8e288b2a41dffafb78e8904ea7367ee4f891dafc2ab8d87e2124cb3d3/websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792", size = 181883 }, + { url = "https://files.pythonhosted.org/packages/93/1f/5d6dbf551766308f6f50f8baf8e9860be6182911e8106da7a7f73785f4c4/websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413", size = 182252 }, + { url = "https://files.pythonhosted.org/packages/d4/78/2d4fed9123e6620cbf1706c0de8a1632e1a28e7774d94346d7de1bba2ca3/websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8", size = 182521 }, + { url = "https://files.pythonhosted.org/packages/e7/3b/66d4c1b444dd1a9823c4a81f50231b921bab54eee2f69e70319b4e21f1ca/websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3", size = 181958 }, + { url = "https://files.pythonhosted.org/packages/08/ff/e9eed2ee5fed6f76fdd6032ca5cd38c57ca9661430bb3d5fb2872dc8703c/websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf", size = 181918 }, + { url = "https://files.pythonhosted.org/packages/d8/75/994634a49b7e12532be6a42103597b71098fd25900f7437d6055ed39930a/websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85", size = 176388 }, + { url = "https://files.pythonhosted.org/packages/98/93/e36c73f78400a65f5e236cd376713c34182e6663f6889cd45a4a04d8f203/websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065", size = 176828 }, + { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437 }, + { url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096 }, + { url = 
"https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332 }, + { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", size = 183152 }, + { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096 }, + { url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523 }, + { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790 }, + { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165 }, + { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160 }, + { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395 }, + { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841 }, + { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440 }, + { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098 }, + { url = "https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329 }, + { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111 }, + { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054 }, + { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496 }, + { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829 }, + { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217 }, + { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195 }, + { url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393 }, + { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837 }, + { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = 
"sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743 }, +] + +[[package]] +name = "werkzeug" +version = "3.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/69/83029f1f6300c5fb2471d621ab06f6ec6b3324685a2ce0f9777fd4a8b71e/werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746", size = 806925 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/24/ab44c871b0f07f491e5d2ad12c9bd7358e527510618cb1b803a88e986db1/werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e", size = 224498 }, +] + +[[package]] +name = "wheel" +version = "0.45.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/98/2d9906746cdc6a6ef809ae6338005b3f21bb568bea3165cfc6a243fdc25c/wheel-0.45.1.tar.gz", hash = "sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729", size = 107545 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0b/2c/87f3254fd8ffd29e4c02732eee68a83a1d3c346ae39bc6822dcbcb697f2b/wheel-0.45.1-py3-none-any.whl", hash = "sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248", size = 72494 }, +] + +[[package]] +name = "widgetsnbextension" +version = "4.0.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/41/53/2e0253c5efd69c9656b1843892052a31c36d37ad42812b5da45c62191f7e/widgetsnbextension-4.0.14.tar.gz", hash = "sha256:a3629b04e3edb893212df862038c7232f62973373869db5084aed739b437b5af", size = 1097428 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/51/5447876806d1088a0f8f71e16542bf350918128d0a69437df26047c8e46f/widgetsnbextension-4.0.14-py3-none-any.whl", hash = 
"sha256:4875a9eaf72fbf5079dc372a51a9f268fc38d46f767cbf85c43a36da5cb9b575", size = 2196503 }, +] + +[[package]] +name = "wrapt" +version = "1.14.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/11/eb/e06e77394d6cf09977d92bff310cb0392930c08a338f99af6066a5a98f92/wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d", size = 50890 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/f9/8c078b4973604cd968b23eb3dff52028b5c48f2a02c4f1f975f4d5e344d1/wrapt-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecee4132c6cd2ce5308e21672015ddfed1ff975ad0ac8d27168ea82e71413f55", size = 35432 }, + { url = "https://files.pythonhosted.org/packages/6e/79/aec8185eefe20e8f49e5adeb0c2e20e016d5916d10872c17705ddac41be2/wrapt-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2020f391008ef874c6d9e208b24f28e31bcb85ccff4f335f15a3251d222b92d9", size = 36219 }, + { url = "https://files.pythonhosted.org/packages/d1/71/8d68004e5d5a676177342a56808af51e1df3b0e54b203e3295a8cd96b53b/wrapt-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2feecf86e1f7a86517cab34ae6c2f081fd2d0dac860cb0c0ded96d799d20b335", size = 78509 }, + { url = "https://files.pythonhosted.org/packages/5a/27/604d6ad71fe5935446df1b7512d491b47fe2aef8c95e9813d03d78024a28/wrapt-1.14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:240b1686f38ae665d1b15475966fe0472f78e71b1b4903c143a842659c8e4cb9", size = 70972 }, + { url = "https://files.pythonhosted.org/packages/7f/1b/e0439eec0db6520968c751bc7e12480bb80bb8d939190e0e55ed762f3c7a/wrapt-1.14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9008dad07d71f68487c91e96579c8567c98ca4c3881b9b113bc7b33e9fd78b8", size = 78402 }, + { url = 
"https://files.pythonhosted.org/packages/b9/45/2cc612ff64061d4416baf8d0daf27bea7f79f0097638ddc2af51a3e647f3/wrapt-1.14.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6447e9f3ba72f8e2b985a1da758767698efa72723d5b59accefd716e9e8272bf", size = 83373 }, + { url = "https://files.pythonhosted.org/packages/ad/b7/332692b8d0387922da0f1323ad36a14e365911def3c78ea0d102f83ac592/wrapt-1.14.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:acae32e13a4153809db37405f5eba5bac5fbe2e2ba61ab227926a22901051c0a", size = 76299 }, + { url = "https://files.pythonhosted.org/packages/f2/31/cbce966b6760e62d005c237961e839a755bf0c907199248394e2ee03ab05/wrapt-1.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49ef582b7a1152ae2766557f0550a9fcbf7bbd76f43fbdc94dd3bf07cc7168be", size = 83361 }, + { url = "https://files.pythonhosted.org/packages/9a/aa/ab46fb18072b86e87e0965a402f8723217e8c0312d1b3e2a91308df924ab/wrapt-1.14.1-cp311-cp311-win32.whl", hash = "sha256:358fe87cc899c6bb0ddc185bf3dbfa4ba646f05b1b0b9b5a27c2cb92c2cea204", size = 33454 }, + { url = "https://files.pythonhosted.org/packages/ba/7e/14113996bc6ee68eb987773b4139c87afd3ceff60e27e37648aa5eb2798a/wrapt-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:26046cd03936ae745a502abf44dac702a5e6880b2b01c29aea8ddf3353b68224", size = 35616 }, +] + +[[package]] +name = "xxhash" +version = "3.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/5e/d6e5258d69df8b4ed8c83b6664f2b47d30d2dec551a29ad72a6c69eafd31/xxhash-3.5.0.tar.gz", hash = "sha256:84f2caddf951c9cbf8dc2e22a89d4ccf5d86391ac6418fe81e3c67d0cf60b45f", size = 84241 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b8/c7/afed0f131fbda960ff15eee7f304fa0eeb2d58770fade99897984852ef23/xxhash-3.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:02c2e816896dc6f85922ced60097bcf6f008dedfc5073dcba32f9c8dd786f3c1", size = 31969 }, + { url = 
"https://files.pythonhosted.org/packages/8c/0c/7c3bc6d87e5235672fcc2fb42fd5ad79fe1033925f71bf549ee068c7d1ca/xxhash-3.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6027dcd885e21581e46d3c7f682cfb2b870942feeed58a21c29583512c3f09f8", size = 30800 }, + { url = "https://files.pythonhosted.org/packages/04/9e/01067981d98069eec1c20201f8c145367698e9056f8bc295346e4ea32dd1/xxhash-3.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1308fa542bbdbf2fa85e9e66b1077eea3a88bef38ee8a06270b4298a7a62a166", size = 221566 }, + { url = "https://files.pythonhosted.org/packages/d4/09/d4996de4059c3ce5342b6e1e6a77c9d6c91acce31f6ed979891872dd162b/xxhash-3.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c28b2fdcee797e1c1961cd3bcd3d545cab22ad202c846235197935e1df2f8ef7", size = 201214 }, + { url = "https://files.pythonhosted.org/packages/62/f5/6d2dc9f8d55a7ce0f5e7bfef916e67536f01b85d32a9fbf137d4cadbee38/xxhash-3.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:924361811732ddad75ff23e90efd9ccfda4f664132feecb90895bade6a1b4623", size = 429433 }, + { url = "https://files.pythonhosted.org/packages/d9/72/9256303f10e41ab004799a4aa74b80b3c5977d6383ae4550548b24bd1971/xxhash-3.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89997aa1c4b6a5b1e5b588979d1da048a3c6f15e55c11d117a56b75c84531f5a", size = 194822 }, + { url = "https://files.pythonhosted.org/packages/34/92/1a3a29acd08248a34b0e6a94f4e0ed9b8379a4ff471f1668e4dce7bdbaa8/xxhash-3.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:685c4f4e8c59837de103344eb1c8a3851f670309eb5c361f746805c5471b8c88", size = 208538 }, + { url = "https://files.pythonhosted.org/packages/53/ad/7fa1a109663366de42f724a1cdb8e796a260dbac45047bce153bc1e18abf/xxhash-3.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbd2ecfbfee70bc1a4acb7461fa6af7748ec2ab08ac0fa298f281c51518f982c", 
size = 216953 }, + { url = "https://files.pythonhosted.org/packages/35/02/137300e24203bf2b2a49b48ce898ecce6fd01789c0fcd9c686c0a002d129/xxhash-3.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:25b5a51dc3dfb20a10833c8eee25903fd2e14059e9afcd329c9da20609a307b2", size = 203594 }, + { url = "https://files.pythonhosted.org/packages/23/03/aeceb273933d7eee248c4322b98b8e971f06cc3880e5f7602c94e5578af5/xxhash-3.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a8fb786fb754ef6ff8c120cb96629fb518f8eb5a61a16aac3a979a9dbd40a084", size = 210971 }, + { url = "https://files.pythonhosted.org/packages/e3/64/ed82ec09489474cbb35c716b189ddc1521d8b3de12b1b5ab41ce7f70253c/xxhash-3.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a905ad00ad1e1c34fe4e9d7c1d949ab09c6fa90c919860c1534ff479f40fd12d", size = 415050 }, + { url = "https://files.pythonhosted.org/packages/71/43/6db4c02dcb488ad4e03bc86d70506c3d40a384ee73c9b5c93338eb1f3c23/xxhash-3.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:963be41bcd49f53af6d795f65c0da9b4cc518c0dd9c47145c98f61cb464f4839", size = 192216 }, + { url = "https://files.pythonhosted.org/packages/22/6d/db4abec29e7a567455344433d095fdb39c97db6955bb4a2c432e486b4d28/xxhash-3.5.0-cp311-cp311-win32.whl", hash = "sha256:109b436096d0a2dd039c355fa3414160ec4d843dfecc64a14077332a00aeb7da", size = 30120 }, + { url = "https://files.pythonhosted.org/packages/52/1c/fa3b61c0cf03e1da4767213672efe186b1dfa4fc901a4a694fb184a513d1/xxhash-3.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:b702f806693201ad6c0a05ddbbe4c8f359626d0b3305f766077d51388a6bac58", size = 30003 }, + { url = "https://files.pythonhosted.org/packages/6b/8e/9e6fc572acf6e1cc7ccb01973c213f895cb8668a9d4c2b58a99350da14b7/xxhash-3.5.0-cp311-cp311-win_arm64.whl", hash = "sha256:c4dcb4120d0cc3cc448624147dba64e9021b278c63e34a38789b688fd0da9bf3", size = 26777 }, + { url = 
"https://files.pythonhosted.org/packages/07/0e/1bfce2502c57d7e2e787600b31c83535af83746885aa1a5f153d8c8059d6/xxhash-3.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:14470ace8bd3b5d51318782cd94e6f94431974f16cb3b8dc15d52f3b69df8e00", size = 31969 }, + { url = "https://files.pythonhosted.org/packages/3f/d6/8ca450d6fe5b71ce521b4e5db69622383d039e2b253e9b2f24f93265b52c/xxhash-3.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:59aa1203de1cb96dbeab595ded0ad0c0056bb2245ae11fac11c0ceea861382b9", size = 30787 }, + { url = "https://files.pythonhosted.org/packages/5b/84/de7c89bc6ef63d750159086a6ada6416cc4349eab23f76ab870407178b93/xxhash-3.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08424f6648526076e28fae6ea2806c0a7d504b9ef05ae61d196d571e5c879c84", size = 220959 }, + { url = "https://files.pythonhosted.org/packages/fe/86/51258d3e8a8545ff26468c977101964c14d56a8a37f5835bc0082426c672/xxhash-3.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61a1ff00674879725b194695e17f23d3248998b843eb5e933007ca743310f793", size = 200006 }, + { url = "https://files.pythonhosted.org/packages/02/0a/96973bd325412feccf23cf3680fd2246aebf4b789122f938d5557c54a6b2/xxhash-3.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2f2c61bee5844d41c3eb015ac652a0229e901074951ae48581d58bfb2ba01be", size = 428326 }, + { url = "https://files.pythonhosted.org/packages/11/a7/81dba5010f7e733de88af9555725146fc133be97ce36533867f4c7e75066/xxhash-3.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d32a592cac88d18cc09a89172e1c32d7f2a6e516c3dfde1b9adb90ab5df54a6", size = 194380 }, + { url = "https://files.pythonhosted.org/packages/fb/7d/f29006ab398a173f4501c0e4977ba288f1c621d878ec217b4ff516810c04/xxhash-3.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70dabf941dede727cca579e8c205e61121afc9b28516752fd65724be1355cc90", size = 
207934 }, + { url = "https://files.pythonhosted.org/packages/8a/6e/6e88b8f24612510e73d4d70d9b0c7dff62a2e78451b9f0d042a5462c8d03/xxhash-3.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e5d0ddaca65ecca9c10dcf01730165fd858533d0be84c75c327487c37a906a27", size = 216301 }, + { url = "https://files.pythonhosted.org/packages/af/51/7862f4fa4b75a25c3b4163c8a873f070532fe5f2d3f9b3fc869c8337a398/xxhash-3.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e5b5e16c5a480fe5f59f56c30abdeba09ffd75da8d13f6b9b6fd224d0b4d0a2", size = 203351 }, + { url = "https://files.pythonhosted.org/packages/22/61/8d6a40f288f791cf79ed5bb113159abf0c81d6efb86e734334f698eb4c59/xxhash-3.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149b7914451eb154b3dfaa721315117ea1dac2cc55a01bfbd4df7c68c5dd683d", size = 210294 }, + { url = "https://files.pythonhosted.org/packages/17/02/215c4698955762d45a8158117190261b2dbefe9ae7e5b906768c09d8bc74/xxhash-3.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:eade977f5c96c677035ff39c56ac74d851b1cca7d607ab3d8f23c6b859379cab", size = 414674 }, + { url = "https://files.pythonhosted.org/packages/31/5c/b7a8db8a3237cff3d535261325d95de509f6a8ae439a5a7a4ffcff478189/xxhash-3.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fa9f547bd98f5553d03160967866a71056a60960be00356a15ecc44efb40ba8e", size = 192022 }, + { url = "https://files.pythonhosted.org/packages/78/e3/dd76659b2811b3fd06892a8beb850e1996b63e9235af5a86ea348f053e9e/xxhash-3.5.0-cp312-cp312-win32.whl", hash = "sha256:f7b58d1fd3551b8c80a971199543379be1cee3d0d409e1f6d8b01c1a2eebf1f8", size = 30170 }, + { url = "https://files.pythonhosted.org/packages/d9/6b/1c443fe6cfeb4ad1dcf231cdec96eb94fb43d6498b4469ed8b51f8b59a37/xxhash-3.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:fa0cafd3a2af231b4e113fba24a65d7922af91aeb23774a8b78228e6cd785e3e", size = 30040 }, + { url = 
"https://files.pythonhosted.org/packages/0f/eb/04405305f290173acc0350eba6d2f1a794b57925df0398861a20fbafa415/xxhash-3.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:586886c7e89cb9828bcd8a5686b12e161368e0064d040e225e72607b43858ba2", size = 26796 }, + { url = "https://files.pythonhosted.org/packages/c9/b8/e4b3ad92d249be5c83fa72916c9091b0965cb0faeff05d9a0a3870ae6bff/xxhash-3.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:37889a0d13b0b7d739cfc128b1c902f04e32de17b33d74b637ad42f1c55101f6", size = 31795 }, + { url = "https://files.pythonhosted.org/packages/fc/d8/b3627a0aebfbfa4c12a41e22af3742cf08c8ea84f5cc3367b5de2d039cce/xxhash-3.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:97a662338797c660178e682f3bc180277b9569a59abfb5925e8620fba00b9fc5", size = 30792 }, + { url = "https://files.pythonhosted.org/packages/c3/cc/762312960691da989c7cd0545cb120ba2a4148741c6ba458aa723c00a3f8/xxhash-3.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f85e0108d51092bdda90672476c7d909c04ada6923c14ff9d913c4f7dc8a3bc", size = 220950 }, + { url = "https://files.pythonhosted.org/packages/fe/e9/cc266f1042c3c13750e86a535496b58beb12bf8c50a915c336136f6168dc/xxhash-3.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2fd827b0ba763ac919440042302315c564fdb797294d86e8cdd4578e3bc7f3", size = 199980 }, + { url = "https://files.pythonhosted.org/packages/bf/85/a836cd0dc5cc20376de26b346858d0ac9656f8f730998ca4324921a010b9/xxhash-3.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82085c2abec437abebf457c1d12fccb30cc8b3774a0814872511f0f0562c768c", size = 428324 }, + { url = "https://files.pythonhosted.org/packages/b4/0e/15c243775342ce840b9ba34aceace06a1148fa1630cd8ca269e3223987f5/xxhash-3.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07fda5de378626e502b42b311b049848c2ef38784d0d67b6f30bb5008642f8eb", size = 194370 }, + { url = 
"https://files.pythonhosted.org/packages/87/a1/b028bb02636dfdc190da01951d0703b3d904301ed0ef6094d948983bef0e/xxhash-3.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c279f0d2b34ef15f922b77966640ade58b4ccdfef1c4d94b20f2a364617a493f", size = 207911 }, + { url = "https://files.pythonhosted.org/packages/80/d5/73c73b03fc0ac73dacf069fdf6036c9abad82de0a47549e9912c955ab449/xxhash-3.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:89e66ceed67b213dec5a773e2f7a9e8c58f64daeb38c7859d8815d2c89f39ad7", size = 216352 }, + { url = "https://files.pythonhosted.org/packages/b6/2a/5043dba5ddbe35b4fe6ea0a111280ad9c3d4ba477dd0f2d1fe1129bda9d0/xxhash-3.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bcd51708a633410737111e998ceb3b45d3dbc98c0931f743d9bb0a209033a326", size = 203410 }, + { url = "https://files.pythonhosted.org/packages/a2/b2/9a8ded888b7b190aed75b484eb5c853ddd48aa2896e7b59bbfbce442f0a1/xxhash-3.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3ff2c0a34eae7df88c868be53a8dd56fbdf592109e21d4bfa092a27b0bf4a7bf", size = 210322 }, + { url = "https://files.pythonhosted.org/packages/98/62/440083fafbc917bf3e4b67c2ade621920dd905517e85631c10aac955c1d2/xxhash-3.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4e28503dccc7d32e0b9817aa0cbfc1f45f563b2c995b7a66c4c8a0d232e840c7", size = 414725 }, + { url = "https://files.pythonhosted.org/packages/75/db/009206f7076ad60a517e016bb0058381d96a007ce3f79fa91d3010f49cc2/xxhash-3.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a6c50017518329ed65a9e4829154626f008916d36295b6a3ba336e2458824c8c", size = 192070 }, + { url = "https://files.pythonhosted.org/packages/1f/6d/c61e0668943a034abc3a569cdc5aeae37d686d9da7e39cf2ed621d533e36/xxhash-3.5.0-cp313-cp313-win32.whl", hash = "sha256:53a068fe70301ec30d868ece566ac90d873e3bb059cf83c32e76012c889b8637", size = 30172 }, + { url = 
"https://files.pythonhosted.org/packages/96/14/8416dce965f35e3d24722cdf79361ae154fa23e2ab730e5323aa98d7919e/xxhash-3.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:80babcc30e7a1a484eab952d76a4f4673ff601f54d5142c26826502740e70b43", size = 30041 }, + { url = "https://files.pythonhosted.org/packages/27/ee/518b72faa2073f5aa8e3262408d284892cb79cf2754ba0c3a5870645ef73/xxhash-3.5.0-cp313-cp313-win_arm64.whl", hash = "sha256:4811336f1ce11cac89dcbd18f3a25c527c16311709a89313c3acaf771def2d4b", size = 26801 }, +] + +[[package]] +name = "yarl" +version = "1.20.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, + { name = "propcache" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/62/51/c0edba5219027f6eab262e139f73e2417b0f4efffa23bf562f6e18f76ca5/yarl-1.20.0.tar.gz", hash = "sha256:686d51e51ee5dfe62dec86e4866ee0e9ed66df700d55c828a615640adc885307", size = 185258 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/82/a59d8e21b20ffc836775fa7daedac51d16bb8f3010c4fcb495c4496aa922/yarl-1.20.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fdb5204d17cb32b2de2d1e21c7461cabfacf17f3645e4b9039f210c5d3378bf3", size = 145178 }, + { url = "https://files.pythonhosted.org/packages/ba/81/315a3f6f95947cfbf37c92d6fbce42a1a6207b6c38e8c2b452499ec7d449/yarl-1.20.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:eaddd7804d8e77d67c28d154ae5fab203163bd0998769569861258e525039d2a", size = 96859 }, + { url = "https://files.pythonhosted.org/packages/ad/17/9b64e575583158551b72272a1023cdbd65af54fe13421d856b2850a6ddb7/yarl-1.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:634b7ba6b4a85cf67e9df7c13a7fb2e44fa37b5d34501038d174a63eaac25ee2", size = 94647 }, + { url = "https://files.pythonhosted.org/packages/2c/29/8f291e7922a58a21349683f6120a85701aeefaa02e9f7c8a2dc24fe3f431/yarl-1.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6d409e321e4addf7d97ee84162538c7258e53792eb7c6defd0c33647d754172e", size = 355788 }, + { url = "https://files.pythonhosted.org/packages/26/6d/b4892c80b805c42c228c6d11e03cafabf81662d371b0853e7f0f513837d5/yarl-1.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ea52f7328a36960ba3231c6677380fa67811b414798a6e071c7085c57b6d20a9", size = 344613 }, + { url = "https://files.pythonhosted.org/packages/d7/0e/517aa28d3f848589bae9593717b063a544b86ba0a807d943c70f48fcf3bb/yarl-1.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c8703517b924463994c344dcdf99a2d5ce9eca2b6882bb640aa555fb5efc706a", size = 370953 }, + { url = "https://files.pythonhosted.org/packages/5f/9b/5bd09d2f1ad6e6f7c2beae9e50db78edd2cca4d194d227b958955573e240/yarl-1.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:077989b09ffd2f48fb2d8f6a86c5fef02f63ffe6b1dd4824c76de7bb01e4f2e2", size = 369204 }, + { url = "https://files.pythonhosted.org/packages/9c/85/d793a703cf4bd0d4cd04e4b13cc3d44149470f790230430331a0c1f52df5/yarl-1.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0acfaf1da020253f3533526e8b7dd212838fdc4109959a2c53cafc6db611bff2", size = 358108 }, + { url = "https://files.pythonhosted.org/packages/6f/54/b6c71e13549c1f6048fbc14ce8d930ac5fb8bafe4f1a252e621a24f3f1f9/yarl-1.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4230ac0b97ec5eeb91d96b324d66060a43fd0d2a9b603e3327ed65f084e41f8", size = 346610 }, + { url = "https://files.pythonhosted.org/packages/a0/1a/d6087d58bdd0d8a2a37bbcdffac9d9721af6ebe50d85304d9f9b57dfd862/yarl-1.20.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a6a1e6ae21cdd84011c24c78d7a126425148b24d437b5702328e4ba640a8902", size = 365378 }, + { url = 
"https://files.pythonhosted.org/packages/02/84/e25ddff4cbc001dbc4af76f8d41a3e23818212dd1f0a52044cbc60568872/yarl-1.20.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:86de313371ec04dd2531f30bc41a5a1a96f25a02823558ee0f2af0beaa7ca791", size = 356919 }, + { url = "https://files.pythonhosted.org/packages/04/76/898ae362353bf8f64636495d222c8014c8e5267df39b1a9fe1e1572fb7d0/yarl-1.20.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:dd59c9dd58ae16eaa0f48c3d0cbe6be8ab4dc7247c3ff7db678edecbaf59327f", size = 364248 }, + { url = "https://files.pythonhosted.org/packages/1b/b0/9d9198d83a622f1c40fdbf7bd13b224a6979f2e1fc2cf50bfb1d8773c495/yarl-1.20.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a0bc5e05f457b7c1994cc29e83b58f540b76234ba6b9648a4971ddc7f6aa52da", size = 378418 }, + { url = "https://files.pythonhosted.org/packages/c7/ce/1f50c1cc594cf5d3f5bf4a9b616fca68680deaec8ad349d928445ac52eb8/yarl-1.20.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:c9471ca18e6aeb0e03276b5e9b27b14a54c052d370a9c0c04a68cefbd1455eb4", size = 383850 }, + { url = "https://files.pythonhosted.org/packages/89/1e/a59253a87b35bfec1a25bb5801fb69943330b67cfd266278eb07e0609012/yarl-1.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:40ed574b4df723583a26c04b298b283ff171bcc387bc34c2683235e2487a65a5", size = 381218 }, + { url = "https://files.pythonhosted.org/packages/85/b0/26f87df2b3044b0ef1a7cf66d321102bdca091db64c5ae853fcb2171c031/yarl-1.20.0-cp311-cp311-win32.whl", hash = "sha256:db243357c6c2bf3cd7e17080034ade668d54ce304d820c2a58514a4e51d0cfd6", size = 86606 }, + { url = "https://files.pythonhosted.org/packages/33/46/ca335c2e1f90446a77640a45eeb1cd8f6934f2c6e4df7db0f0f36ef9f025/yarl-1.20.0-cp311-cp311-win_amd64.whl", hash = "sha256:8c12cd754d9dbd14204c328915e23b0c361b88f3cffd124129955e60a4fbfcfb", size = 93374 }, + { url = 
"https://files.pythonhosted.org/packages/c3/e8/3efdcb83073df978bb5b1a9cc0360ce596680e6c3fac01f2a994ccbb8939/yarl-1.20.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e06b9f6cdd772f9b665e5ba8161968e11e403774114420737f7884b5bd7bdf6f", size = 147089 }, + { url = "https://files.pythonhosted.org/packages/60/c3/9e776e98ea350f76f94dd80b408eaa54e5092643dbf65fd9babcffb60509/yarl-1.20.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b9ae2fbe54d859b3ade40290f60fe40e7f969d83d482e84d2c31b9bff03e359e", size = 97706 }, + { url = "https://files.pythonhosted.org/packages/0c/5b/45cdfb64a3b855ce074ae607b9fc40bc82e7613b94e7612b030255c93a09/yarl-1.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6d12b8945250d80c67688602c891237994d203d42427cb14e36d1a732eda480e", size = 95719 }, + { url = "https://files.pythonhosted.org/packages/2d/4e/929633b249611eeed04e2f861a14ed001acca3ef9ec2a984a757b1515889/yarl-1.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:087e9731884621b162a3e06dc0d2d626e1542a617f65ba7cc7aeab279d55ad33", size = 343972 }, + { url = "https://files.pythonhosted.org/packages/49/fd/047535d326c913f1a90407a3baf7ff535b10098611eaef2c527e32e81ca1/yarl-1.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:69df35468b66c1a6e6556248e6443ef0ec5f11a7a4428cf1f6281f1879220f58", size = 339639 }, + { url = "https://files.pythonhosted.org/packages/48/2f/11566f1176a78f4bafb0937c0072410b1b0d3640b297944a6a7a556e1d0b/yarl-1.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b2992fe29002fd0d4cbaea9428b09af9b8686a9024c840b8a2b8f4ea4abc16f", size = 353745 }, + { url = "https://files.pythonhosted.org/packages/26/17/07dfcf034d6ae8837b33988be66045dd52f878dfb1c4e8f80a7343f677be/yarl-1.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4c903e0b42aab48abfbac668b5a9d7b6938e721a6341751331bcd7553de2dcae", size = 354178 }, + { url = 
"https://files.pythonhosted.org/packages/15/45/212604d3142d84b4065d5f8cab6582ed3d78e4cc250568ef2a36fe1cf0a5/yarl-1.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf099e2432131093cc611623e0b0bcc399b8cddd9a91eded8bfb50402ec35018", size = 349219 }, + { url = "https://files.pythonhosted.org/packages/e6/e0/a10b30f294111c5f1c682461e9459935c17d467a760c21e1f7db400ff499/yarl-1.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a7f62f5dc70a6c763bec9ebf922be52aa22863d9496a9a30124d65b489ea672", size = 337266 }, + { url = "https://files.pythonhosted.org/packages/33/a6/6efa1d85a675d25a46a167f9f3e80104cde317dfdf7f53f112ae6b16a60a/yarl-1.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:54ac15a8b60382b2bcefd9a289ee26dc0920cf59b05368c9b2b72450751c6eb8", size = 360873 }, + { url = "https://files.pythonhosted.org/packages/77/67/c8ab718cb98dfa2ae9ba0f97bf3cbb7d45d37f13fe1fbad25ac92940954e/yarl-1.20.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:25b3bc0763a7aca16a0f1b5e8ef0f23829df11fb539a1b70476dcab28bd83da7", size = 360524 }, + { url = "https://files.pythonhosted.org/packages/bd/e8/c3f18660cea1bc73d9f8a2b3ef423def8dadbbae6c4afabdb920b73e0ead/yarl-1.20.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b2586e36dc070fc8fad6270f93242124df68b379c3a251af534030a4a33ef594", size = 365370 }, + { url = "https://files.pythonhosted.org/packages/c9/99/33f3b97b065e62ff2d52817155a89cfa030a1a9b43fee7843ef560ad9603/yarl-1.20.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:866349da9d8c5290cfefb7fcc47721e94de3f315433613e01b435473be63daa6", size = 373297 }, + { url = "https://files.pythonhosted.org/packages/3d/89/7519e79e264a5f08653d2446b26d4724b01198a93a74d2e259291d538ab1/yarl-1.20.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:33bb660b390a0554d41f8ebec5cd4475502d84104b27e9b42f5321c5192bfcd1", size = 378771 }, + { url = 
"https://files.pythonhosted.org/packages/3a/58/6c460bbb884abd2917c3eef6f663a4a873f8dc6f498561fc0ad92231c113/yarl-1.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:737e9f171e5a07031cbee5e9180f6ce21a6c599b9d4b2c24d35df20a52fabf4b", size = 375000 }, + { url = "https://files.pythonhosted.org/packages/3b/2a/dd7ed1aa23fea996834278d7ff178f215b24324ee527df53d45e34d21d28/yarl-1.20.0-cp312-cp312-win32.whl", hash = "sha256:839de4c574169b6598d47ad61534e6981979ca2c820ccb77bf70f4311dd2cc64", size = 86355 }, + { url = "https://files.pythonhosted.org/packages/ca/c6/333fe0338305c0ac1c16d5aa7cc4841208d3252bbe62172e0051006b5445/yarl-1.20.0-cp312-cp312-win_amd64.whl", hash = "sha256:3d7dbbe44b443b0c4aa0971cb07dcb2c2060e4a9bf8d1301140a33a93c98e18c", size = 92904 }, + { url = "https://files.pythonhosted.org/packages/0f/6f/514c9bff2900c22a4f10e06297714dbaf98707143b37ff0bcba65a956221/yarl-1.20.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2137810a20b933b1b1b7e5cf06a64c3ed3b4747b0e5d79c9447c00db0e2f752f", size = 145030 }, + { url = "https://files.pythonhosted.org/packages/4e/9d/f88da3fa319b8c9c813389bfb3463e8d777c62654c7168e580a13fadff05/yarl-1.20.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:447c5eadd750db8389804030d15f43d30435ed47af1313303ed82a62388176d3", size = 96894 }, + { url = "https://files.pythonhosted.org/packages/cd/57/92e83538580a6968b2451d6c89c5579938a7309d4785748e8ad42ddafdce/yarl-1.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:42fbe577272c203528d402eec8bf4b2d14fd49ecfec92272334270b850e9cd7d", size = 94457 }, + { url = "https://files.pythonhosted.org/packages/e9/ee/7ee43bd4cf82dddd5da97fcaddb6fa541ab81f3ed564c42f146c83ae17ce/yarl-1.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18e321617de4ab170226cd15006a565d0fa0d908f11f724a2c9142d6b2812ab0", size = 343070 }, + { url = 
"https://files.pythonhosted.org/packages/4a/12/b5eccd1109e2097bcc494ba7dc5de156e41cf8309fab437ebb7c2b296ce3/yarl-1.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:4345f58719825bba29895011e8e3b545e6e00257abb984f9f27fe923afca2501", size = 337739 }, + { url = "https://files.pythonhosted.org/packages/7d/6b/0eade8e49af9fc2585552f63c76fa59ef469c724cc05b29519b19aa3a6d5/yarl-1.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d9b980d7234614bc4674468ab173ed77d678349c860c3af83b1fffb6a837ddc", size = 351338 }, + { url = "https://files.pythonhosted.org/packages/45/cb/aaaa75d30087b5183c7b8a07b4fb16ae0682dd149a1719b3a28f54061754/yarl-1.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af4baa8a445977831cbaa91a9a84cc09debb10bc8391f128da2f7bd070fc351d", size = 353636 }, + { url = "https://files.pythonhosted.org/packages/98/9d/d9cb39ec68a91ba6e66fa86d97003f58570327d6713833edf7ad6ce9dde5/yarl-1.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:123393db7420e71d6ce40d24885a9e65eb1edefc7a5228db2d62bcab3386a5c0", size = 348061 }, + { url = "https://files.pythonhosted.org/packages/72/6b/103940aae893d0cc770b4c36ce80e2ed86fcb863d48ea80a752b8bda9303/yarl-1.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab47acc9332f3de1b39e9b702d9c916af7f02656b2a86a474d9db4e53ef8fd7a", size = 334150 }, + { url = "https://files.pythonhosted.org/packages/ef/b2/986bd82aa222c3e6b211a69c9081ba46484cffa9fab2a5235e8d18ca7a27/yarl-1.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4a34c52ed158f89876cba9c600b2c964dfc1ca52ba7b3ab6deb722d1d8be6df2", size = 362207 }, + { url = "https://files.pythonhosted.org/packages/14/7c/63f5922437b873795d9422cbe7eb2509d4b540c37ae5548a4bb68fd2c546/yarl-1.20.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:04d8cfb12714158abf2618f792c77bc5c3d8c5f37353e79509608be4f18705c9", size = 361277 }, + { url = "https://files.pythonhosted.org/packages/81/83/450938cccf732466953406570bdb42c62b5ffb0ac7ac75a1f267773ab5c8/yarl-1.20.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7dc63ad0d541c38b6ae2255aaa794434293964677d5c1ec5d0116b0e308031f5", size = 364990 }, + { url = "https://files.pythonhosted.org/packages/b4/de/af47d3a47e4a833693b9ec8e87debb20f09d9fdc9139b207b09a3e6cbd5a/yarl-1.20.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d02b591a64e4e6ca18c5e3d925f11b559c763b950184a64cf47d74d7e41877", size = 374684 }, + { url = "https://files.pythonhosted.org/packages/62/0b/078bcc2d539f1faffdc7d32cb29a2d7caa65f1a6f7e40795d8485db21851/yarl-1.20.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:95fc9876f917cac7f757df80a5dda9de59d423568460fe75d128c813b9af558e", size = 382599 }, + { url = "https://files.pythonhosted.org/packages/74/a9/4fdb1a7899f1fb47fd1371e7ba9e94bff73439ce87099d5dd26d285fffe0/yarl-1.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bb769ae5760cd1c6a712135ee7915f9d43f11d9ef769cb3f75a23e398a92d384", size = 378573 }, + { url = "https://files.pythonhosted.org/packages/fd/be/29f5156b7a319e4d2e5b51ce622b4dfb3aa8d8204cd2a8a339340fbfad40/yarl-1.20.0-cp313-cp313-win32.whl", hash = "sha256:70e0c580a0292c7414a1cead1e076c9786f685c1fc4757573d2967689b370e62", size = 86051 }, + { url = "https://files.pythonhosted.org/packages/52/56/05fa52c32c301da77ec0b5f63d2d9605946fe29defacb2a7ebd473c23b81/yarl-1.20.0-cp313-cp313-win_amd64.whl", hash = "sha256:4c43030e4b0af775a85be1fa0433119b1565673266a70bf87ef68a9d5ba3174c", size = 92742 }, + { url = "https://files.pythonhosted.org/packages/d4/2f/422546794196519152fc2e2f475f0e1d4d094a11995c81a465faf5673ffd/yarl-1.20.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b6c4c3d0d6a0ae9b281e492b1465c72de433b782e6b5001c8e7249e085b69051", size = 163575 }, + { url = 
"https://files.pythonhosted.org/packages/90/fc/67c64ddab6c0b4a169d03c637fb2d2a212b536e1989dec8e7e2c92211b7f/yarl-1.20.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8681700f4e4df891eafa4f69a439a6e7d480d64e52bf460918f58e443bd3da7d", size = 106121 }, + { url = "https://files.pythonhosted.org/packages/6d/00/29366b9eba7b6f6baed7d749f12add209b987c4cfbfa418404dbadc0f97c/yarl-1.20.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:84aeb556cb06c00652dbf87c17838eb6d92cfd317799a8092cee0e570ee11229", size = 103815 }, + { url = "https://files.pythonhosted.org/packages/28/f4/a2a4c967c8323c03689383dff73396281ced3b35d0ed140580825c826af7/yarl-1.20.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f166eafa78810ddb383e930d62e623d288fb04ec566d1b4790099ae0f31485f1", size = 408231 }, + { url = "https://files.pythonhosted.org/packages/0f/a1/66f7ffc0915877d726b70cc7a896ac30b6ac5d1d2760613603b022173635/yarl-1.20.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:5d3d6d14754aefc7a458261027a562f024d4f6b8a798adb472277f675857b1eb", size = 390221 }, + { url = "https://files.pythonhosted.org/packages/41/15/cc248f0504610283271615e85bf38bc014224122498c2016d13a3a1b8426/yarl-1.20.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a8f64df8ed5d04c51260dbae3cc82e5649834eebea9eadfd829837b8093eb00", size = 411400 }, + { url = "https://files.pythonhosted.org/packages/5c/af/f0823d7e092bfb97d24fce6c7269d67fcd1aefade97d0a8189c4452e4d5e/yarl-1.20.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4d9949eaf05b4d30e93e4034a7790634bbb41b8be2d07edd26754f2e38e491de", size = 411714 }, + { url = "https://files.pythonhosted.org/packages/83/70/be418329eae64b9f1b20ecdaac75d53aef098797d4c2299d82ae6f8e4663/yarl-1.20.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c366b254082d21cc4f08f522ac201d0d83a8b8447ab562732931d31d80eb2a5", size = 404279 
}, + { url = "https://files.pythonhosted.org/packages/19/f5/52e02f0075f65b4914eb890eea1ba97e6fd91dd821cc33a623aa707b2f67/yarl-1.20.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91bc450c80a2e9685b10e34e41aef3d44ddf99b3a498717938926d05ca493f6a", size = 384044 }, + { url = "https://files.pythonhosted.org/packages/6a/36/b0fa25226b03d3f769c68d46170b3e92b00ab3853d73127273ba22474697/yarl-1.20.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9c2aa4387de4bc3a5fe158080757748d16567119bef215bec643716b4fbf53f9", size = 416236 }, + { url = "https://files.pythonhosted.org/packages/cb/3a/54c828dd35f6831dfdd5a79e6c6b4302ae2c5feca24232a83cb75132b205/yarl-1.20.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:d2cbca6760a541189cf87ee54ff891e1d9ea6406079c66341008f7ef6ab61145", size = 402034 }, + { url = "https://files.pythonhosted.org/packages/10/97/c7bf5fba488f7e049f9ad69c1b8fdfe3daa2e8916b3d321aa049e361a55a/yarl-1.20.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:798a5074e656f06b9fad1a162be5a32da45237ce19d07884d0b67a0aa9d5fdda", size = 407943 }, + { url = "https://files.pythonhosted.org/packages/fd/a4/022d2555c1e8fcff08ad7f0f43e4df3aba34f135bff04dd35d5526ce54ab/yarl-1.20.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:f106e75c454288472dbe615accef8248c686958c2e7dd3b8d8ee2669770d020f", size = 423058 }, + { url = "https://files.pythonhosted.org/packages/4c/f6/0873a05563e5df29ccf35345a6ae0ac9e66588b41fdb7043a65848f03139/yarl-1.20.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:3b60a86551669c23dc5445010534d2c5d8a4e012163218fc9114e857c0586fdd", size = 423792 }, + { url = "https://files.pythonhosted.org/packages/9e/35/43fbbd082708fa42e923f314c24f8277a28483d219e049552e5007a9aaca/yarl-1.20.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:3e429857e341d5e8e15806118e0294f8073ba9c4580637e59ab7b238afca836f", size = 422242 }, + { url = 
"https://files.pythonhosted.org/packages/ed/f7/f0f2500cf0c469beb2050b522c7815c575811627e6d3eb9ec7550ddd0bfe/yarl-1.20.0-cp313-cp313t-win32.whl", hash = "sha256:65a4053580fe88a63e8e4056b427224cd01edfb5f951498bfefca4052f0ce0ac", size = 93816 }, + { url = "https://files.pythonhosted.org/packages/3f/93/f73b61353b2a699d489e782c3f5998b59f974ec3156a2050a52dfd7e8946/yarl-1.20.0-cp313-cp313t-win_amd64.whl", hash = "sha256:53b2da3a6ca0a541c1ae799c349788d480e5144cac47dba0266c7cb6c76151fe", size = 101093 }, + { url = "https://files.pythonhosted.org/packages/ea/1f/70c57b3d7278e94ed22d85e09685d3f0a38ebdd8c5c73b65ba4c0d0fe002/yarl-1.20.0-py3-none-any.whl", hash = "sha256:5d0fe6af927a47a230f31e6004621fd0959eaa915fc62acfafa67ff7229a3124", size = 46124 }, +] + +[[package]] +name = "zarr" +version = "3.0.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "donfig" }, + { name = "numcodecs", extra = ["crc32c"] }, + { name = "numpy" }, + { name = "packaging" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/52/60/9652fd0536fbaca8d08cbc1a5572c52e0ce01773297df75da8bb47e45907/zarr-3.0.8.tar.gz", hash = "sha256:88505d095af899a88ae8ac4db02f4650ef0801d2ff6f65b6d1f0a45dcf760a6d", size = 256825 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/3b/e20bdf84088c11f2c396d034506cbffadd53e024111c1aa4585c2aba1523/zarr-3.0.8-py3-none-any.whl", hash = "sha256:7f81e7aec086437d98882aa432209107114bd7f3a9f4958b2af9c6b5928a70a7", size = 205364 }, +] + +[[package]] +name = "zipp" +version = "3.22.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/b6/7b3d16792fdf94f146bed92be90b4eb4563569eca91513c8609aebf0c167/zipp-3.22.0.tar.gz", hash = "sha256:dd2f28c3ce4bc67507bfd3781d21b7bb2be31103b51a4553ad7d90b84e57ace5", size = 25257 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/ad/da/f64669af4cae46f17b90798a827519ce3737d31dbafad65d391e49643dc4/zipp-3.22.0-py3-none-any.whl", hash = "sha256:fe208f65f2aca48b81f9e6fd8cf7b8b32c26375266b009b413d45306b6148343", size = 9796 }, +]