CoACT initialize (#292)
This commit is contained in:
11
mm_agents/coact/autogen/oai/oai_models/__init__.py
Normal file
11
mm_agents/coact/autogen/oai/oai_models/__init__.py
Normal file
@@ -0,0 +1,11 @@
|
||||
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Public re-exports for the vendored OpenAI chat-completion models.
# NOTE: ChatCompletionExtended is intentionally exposed under the upstream
# name "ChatCompletion" so downstream code can use it as a drop-in type.
from .chat_completion import ChatCompletionExtended as ChatCompletion
from .chat_completion import Choice
from .chat_completion_message import ChatCompletionMessage
from .chat_completion_message_tool_call import ChatCompletionMessageToolCall
from .completion_usage import CompletionUsage

__all__ = ["ChatCompletion", "ChatCompletionMessage", "ChatCompletionMessageToolCall", "Choice", "CompletionUsage"]
|
||||
16
mm_agents/coact/autogen/oai/oai_models/_models.py
Normal file
16
mm_agents/coact/autogen/oai/oai_models/_models.py
Normal file
@@ -0,0 +1,16 @@
|
||||
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Taken over from https://github.com/openai/openai-python/blob/main/src/openai/_models.py
|
||||
|
||||
import pydantic
|
||||
import pydantic.generics
|
||||
from pydantic import ConfigDict
|
||||
from typing_extensions import ClassVar
|
||||
|
||||
__all__ = ["BaseModel"]
|
||||
|
||||
|
||||
class BaseModel(pydantic.BaseModel):
    """Shared base for the vendored OpenAI response models.

    ``extra="allow"`` keeps any fields the API returns that are not declared
    on the model, so newer server-side fields do not break validation.
    """

    # ClassVar so pydantic treats this as configuration rather than a field.
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow")
|
||||
87
mm_agents/coact/autogen/oai/oai_models/chat_completion.py
Normal file
87
mm_agents/coact/autogen/oai/oai_models/chat_completion.py
Normal file
@@ -0,0 +1,87 @@
|
||||
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Taken over from https://github.com/openai/openai-python/blob/3e69750d47df4f0759d4a28ddc68e4b38756d9ca/src/openai/types/chat/chat_completion.py
|
||||
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from typing import Any, Callable, List, Optional
|
||||
|
||||
from typing_extensions import Literal
|
||||
|
||||
from ._models import BaseModel
|
||||
from .chat_completion_message import ChatCompletionMessage
|
||||
from .chat_completion_token_logprob import ChatCompletionTokenLogprob
|
||||
from .completion_usage import CompletionUsage
|
||||
|
||||
__all__ = ["ChatCompletion", "Choice", "ChoiceLogprobs"]
|
||||
|
||||
|
||||
class ChoiceLogprobs(BaseModel):
    """Log probability information for a single completion choice."""

    content: Optional[List[ChatCompletionTokenLogprob]] = None
    """A list of message content tokens with log probability information."""

    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
    """A list of message refusal tokens with log probability information."""
|
||||
|
||||
|
||||
class Choice(BaseModel):
    """One completion choice from a chat completion response."""

    finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
    """The reason the model stopped generating tokens.

    This will be `stop` if the model hit a natural stop point or a provided stop
    sequence, `length` if the maximum number of tokens specified in the request was
    reached, `content_filter` if content was omitted due to a flag from our content
    filters, `tool_calls` if the model called a tool, or `function_call`
    (deprecated) if the model called a function.
    """

    index: int
    """The index of the choice in the list of choices."""

    logprobs: Optional[ChoiceLogprobs] = None
    """Log probability information for the choice."""

    message: ChatCompletionMessage
    """A chat completion message generated by the model."""
|
||||
|
||||
|
||||
class ChatCompletion(BaseModel):
    """A chat completion response as returned by the OpenAI API."""

    id: str
    """A unique identifier for the chat completion."""

    choices: List[Choice]
    """A list of chat completion choices.

    Can be more than one if `n` is greater than 1.
    """

    created: int
    """The Unix timestamp (in seconds) of when the chat completion was created."""

    model: str
    """The model used for the chat completion."""

    object: Literal["chat.completion"]
    """The object type, which is always `chat.completion`."""

    service_tier: Optional[Literal["auto", "default", "flex", "scale"]] = None
    """The service tier used for processing the request."""

    system_fingerprint: Optional[str] = None
    """This fingerprint represents the backend configuration that the model runs with.

    Can be used in conjunction with the `seed` request parameter to understand when
    backend changes have been made that might impact determinism.
    """

    usage: Optional[CompletionUsage] = None
    """Usage statistics for the completion request."""
|
||||
|
||||
|
||||
class ChatCompletionExtended(ChatCompletion):
    """``ChatCompletion`` augmented with AG2 client bookkeeping fields.

    Adds hooks and metadata that the client layer attaches to a response
    after a request completes; all extra fields default to ``None`` so the
    model still validates a plain API response.
    """

    # Hook used to extract messages from a response; called with two
    # positional arguments, the second being the response itself.
    # NOTE(review): first argument is typed Any — presumably the client
    # instance; confirm against the caller.
    # Fixed: was `list[ChatCompletionMessage]` (PEP 585), which pydantic
    # evaluates at class-creation time and which fails on Python < 3.9;
    # use `List` for consistency with the rest of this file.
    message_retrieval_function: Optional[Callable[[Any, "ChatCompletion"], List[ChatCompletionMessage]]] = None

    # Identifier of the config entry that produced this response.
    config_id: Optional[str] = None

    # Filter callback deciding whether the response passes validation.
    pass_filter: Optional[Callable[..., bool]] = None

    # Cost of the request, filled in by the client after completion.
    cost: Optional[float] = None
|
||||
@@ -0,0 +1,32 @@
|
||||
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Taken over from https://github.com/openai/openai-python/blob/3e69750d47df4f0759d4a28ddc68e4b38756d9ca/src/openai/types/chat/chat_completion_audio.py
|
||||
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
|
||||
from ._models import BaseModel
|
||||
|
||||
__all__ = ["ChatCompletionAudio"]
|
||||
|
||||
|
||||
class ChatCompletionAudio(BaseModel):
    """Audio payload attached to an assistant message (audio output modality)."""

    id: str
    """Unique identifier for this audio response."""

    data: str
    """
    Base64 encoded audio bytes generated by the model, in the format specified in
    the request.
    """

    expires_at: int
    """
    The Unix timestamp (in seconds) for when this audio response will no longer be
    accessible on the server for use in multi-turn conversations.
    """

    transcript: str
    """Transcript of the audio generated by the model."""
|
||||
@@ -0,0 +1,86 @@
|
||||
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Taken over from https://github.com/openai/openai-python/blob/16a10604fbd0d82c1382b84b417a1d6a2d33a7f1/src/openai/types/chat/chat_completion_message.py
|
||||
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from typing import List, Optional
|
||||
|
||||
from typing_extensions import Literal
|
||||
|
||||
from ._models import BaseModel
|
||||
from .chat_completion_audio import ChatCompletionAudio
|
||||
from .chat_completion_message_tool_call import ChatCompletionMessageToolCall
|
||||
|
||||
__all__ = ["Annotation", "AnnotationURLCitation", "ChatCompletionMessage", "FunctionCall"]
|
||||
|
||||
|
||||
class AnnotationURLCitation(BaseModel):
    """A citation of a web resource, located by character span in the message."""

    end_index: int
    """The index of the last character of the URL citation in the message."""

    start_index: int
    """The index of the first character of the URL citation in the message."""

    title: str
    """The title of the web resource."""

    url: str
    """The URL of the web resource."""
|
||||
|
||||
|
||||
class Annotation(BaseModel):
    """An annotation attached to message content (currently only URL citations)."""

    type: Literal["url_citation"]
    """The type of the URL citation. Always `url_citation`."""

    url_citation: AnnotationURLCitation
    """A URL citation when using web search."""
|
||||
|
||||
|
||||
class FunctionCall(BaseModel):
    """Function-call payload generated by the model (deprecated in favor of tool calls)."""

    arguments: str
    """
    The arguments to call the function with, as generated by the model in JSON
    format. Note that the model does not always generate valid JSON, and may
    hallucinate parameters not defined by your function schema. Validate the
    arguments in your code before calling your function.
    """

    name: str
    """The name of the function to call."""
|
||||
|
||||
|
||||
class ChatCompletionMessage(BaseModel):
    """An assistant message generated by the model as part of a completion."""

    content: Optional[str] = None
    """The contents of the message."""

    refusal: Optional[str] = None
    """The refusal message generated by the model."""

    role: Literal["assistant"]
    """The role of the author of this message."""

    annotations: Optional[List[Annotation]] = None
    """
    Annotations for the message, when applicable, as when using the
    [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
    """

    audio: Optional[ChatCompletionAudio] = None
    """
    If the audio output modality is requested, this object contains data about the
    audio response from the model.
    [Learn more](https://platform.openai.com/docs/guides/audio).
    """

    function_call: Optional[FunctionCall] = None
    """Deprecated and replaced by `tool_calls`.

    The name and arguments of a function that should be called, as generated by the
    model.
    """

    tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None
    """The tool calls generated by the model, such as function calls."""
|
||||
@@ -0,0 +1,37 @@
|
||||
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Taken over from https://github.com/openai/openai-python/blob/3e69750d47df4f0759d4a28ddc68e4b38756d9ca/src/openai/types/chat/chat_completion_message_tool_call.py
|
||||
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from typing_extensions import Literal
|
||||
|
||||
from ._models import BaseModel
|
||||
|
||||
__all__ = ["ChatCompletionMessageToolCall", "Function"]
|
||||
|
||||
|
||||
class Function(BaseModel):
    """The function a tool call invokes, with its JSON-encoded arguments."""

    arguments: str
    """
    The arguments to call the function with, as generated by the model in JSON
    format. Note that the model does not always generate valid JSON, and may
    hallucinate parameters not defined by your function schema. Validate the
    arguments in your code before calling your function.
    """

    name: str
    """The name of the function to call."""
|
||||
|
||||
|
||||
class ChatCompletionMessageToolCall(BaseModel):
    """A single tool call generated by the model."""

    id: str
    """The ID of the tool call."""

    function: Function
    """The function that the model called."""

    type: Literal["function"]
    """The type of the tool. Currently, only `function` is supported."""
|
||||
@@ -0,0 +1,63 @@
|
||||
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Taken over from https://github.com/openai/openai-python/blob/3e69750d47df4f0759d4a28ddc68e4b38756d9ca/src/openai/types/chat/chat_completion_token_logprob.py
|
||||
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from typing import List, Optional
|
||||
|
||||
from ._models import BaseModel
|
||||
|
||||
__all__ = ["ChatCompletionTokenLogprob", "TopLogprob"]
|
||||
|
||||
|
||||
class TopLogprob(BaseModel):
    """One of the most likely alternative tokens at a given position."""

    token: str
    """The token."""

    bytes: Optional[List[int]] = None
    """A list of integers representing the UTF-8 bytes representation of the token.

    Useful in instances where characters are represented by multiple tokens and
    their byte representations must be combined to generate the correct text
    representation. Can be `null` if there is no bytes representation for the token.
    """

    logprob: float
    """The log probability of this token, if it is within the top 20 most likely
    tokens.

    Otherwise, the value `-9999.0` is used to signify that the token is very
    unlikely.
    """
|
||||
|
||||
|
||||
class ChatCompletionTokenLogprob(BaseModel):
    """Log probability details for a single generated token."""

    token: str
    """The token."""

    bytes: Optional[List[int]] = None
    """A list of integers representing the UTF-8 bytes representation of the token.

    Useful in instances where characters are represented by multiple tokens and
    their byte representations must be combined to generate the correct text
    representation. Can be `null` if there is no bytes representation for the token.
    """

    logprob: float
    """The log probability of this token, if it is within the top 20 most likely
    tokens.

    Otherwise, the value `-9999.0` is used to signify that the token is very
    unlikely.
    """

    top_logprobs: List[TopLogprob]
    """List of the most likely tokens and their log probability, at this token
    position.

    In rare cases, there may be fewer than the number of requested `top_logprobs`
    returned.
    """
|
||||
60
mm_agents/coact/autogen/oai/oai_models/completion_usage.py
Normal file
60
mm_agents/coact/autogen/oai/oai_models/completion_usage.py
Normal file
@@ -0,0 +1,60 @@
|
||||
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Taken over from https://github.com/openai/openai-python/blob/3e69750d47df4f0759d4a28ddc68e4b38756d9ca/src/openai/types/completion_usage.py
|
||||
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from ._models import BaseModel
|
||||
|
||||
__all__ = ["CompletionTokensDetails", "CompletionUsage", "PromptTokensDetails"]
|
||||
|
||||
|
||||
class CompletionTokensDetails(BaseModel):
    """Breakdown of the tokens counted in a completion."""

    accepted_prediction_tokens: Optional[int] = None
    """
    When using Predicted Outputs, the number of tokens in the prediction that
    appeared in the completion.
    """

    audio_tokens: Optional[int] = None
    """Audio input tokens generated by the model."""

    reasoning_tokens: Optional[int] = None
    """Tokens generated by the model for reasoning."""

    rejected_prediction_tokens: Optional[int] = None
    """
    When using Predicted Outputs, the number of tokens in the prediction that did
    not appear in the completion. However, like reasoning tokens, these tokens are
    still counted in the total completion tokens for purposes of billing, output,
    and context window limits.
    """
|
||||
|
||||
|
||||
class PromptTokensDetails(BaseModel):
    """Breakdown of the tokens counted in the prompt."""

    audio_tokens: Optional[int] = None
    """Audio input tokens present in the prompt."""

    cached_tokens: Optional[int] = None
    """Cached tokens present in the prompt."""
|
||||
|
||||
|
||||
class CompletionUsage(BaseModel):
    """Token usage statistics for a completion request."""

    completion_tokens: int
    """Number of tokens in the generated completion."""

    prompt_tokens: int
    """Number of tokens in the prompt."""

    total_tokens: int
    """Total number of tokens used in the request (prompt + completion)."""

    completion_tokens_details: Optional[CompletionTokensDetails] = None
    """Breakdown of tokens used in a completion."""

    prompt_tokens_details: Optional[PromptTokensDetails] = None
    """Breakdown of tokens used in the prompt."""
|
||||
Reference in New Issue
Block a user