CoACT initialize (#292)
mm_agents/coact/autogen/oai/oai_models/completion_usage.py (new file, 60 lines)
@@ -0,0 +1,60 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0

# Taken over from https://github.com/openai/openai-python/blob/3e69750d47df4f0759d4a28ddc68e4b38756d9ca/src/openai/types/completion_usage.py

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional

from ._models import BaseModel

__all__ = ["CompletionTokensDetails", "CompletionUsage", "PromptTokensDetails"]


class CompletionTokensDetails(BaseModel):
    accepted_prediction_tokens: Optional[int] = None
    """
    When using Predicted Outputs, the number of tokens in the prediction that
    appeared in the completion.
    """

    audio_tokens: Optional[int] = None
    """Audio input tokens generated by the model."""

    reasoning_tokens: Optional[int] = None
    """Tokens generated by the model for reasoning."""

    rejected_prediction_tokens: Optional[int] = None
    """
    When using Predicted Outputs, the number of tokens in the prediction that did
    not appear in the completion. However, like reasoning tokens, these tokens are
    still counted in the total completion tokens for purposes of billing, output,
    and context window limits.
    """


class PromptTokensDetails(BaseModel):
    audio_tokens: Optional[int] = None
    """Audio input tokens present in the prompt."""

    cached_tokens: Optional[int] = None
    """Cached tokens present in the prompt."""


class CompletionUsage(BaseModel):
    completion_tokens: int
    """Number of tokens in the generated completion."""

    prompt_tokens: int
    """Number of tokens in the prompt."""

    total_tokens: int
    """Total number of tokens used in the request (prompt + completion)."""

    completion_tokens_details: Optional[CompletionTokensDetails] = None
    """Breakdown of tokens used in a completion."""

    prompt_tokens_details: Optional[PromptTokensDetails] = None
    """Breakdown of tokens used in the prompt."""