import asyncio
from typing import Sequence

from autogen_agentchat.agents import AssistantAgent, SocietyOfMindAgent
from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination
from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage, ToolCallExecutionEvent
from autogen_agentchat.teams import SelectorGroupChat, RoundRobinGroupChat
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient

from constant import MODEL, OPENAI_API_KEY, OPENAI_BASE_URL
from scientist_team import create_scientist_team
from engineer_team import create_engineer_team

# Shared model client used by the planning agent and the group-chat selector.
model_client = OpenAIChatCompletionClient(
    model=MODEL,
    base_url=OPENAI_BASE_URL,
    api_key=OPENAI_API_KEY,
    model_info={
        "vision": True,
        "function_calling": True,
        "json_output": True,
        "family": "unknown",
    },
)


async def main(task: str = "") -> dict:
    scientist_team = create_scientist_team()
    engineer_team = create_engineer_team()
    result = {}

    planning_agent = AssistantAgent(
        "PlanningAgent",
        description="An agent for planning tasks; this agent should be the first to engage when given a new task.",
        model_client=model_client,
        system_message="""
        You are a planning agent.
        Your job is to break down complex materials science research tasks into smaller, manageable subtasks.
        Assign these subtasks to the appropriate sub-teams; not all sub-teams are required to participate in every task.
        Your sub-teams are:
            Scientist team: A professional team of materials scientists who are mainly responsible for consulting on material synthesis, structure, applications and properties.
            Engineer team: A team of professional engineers who are responsible for writing code, visualizing experimental schemes, converting experimental schemes to machine code, and more.

        You only plan and delegate tasks - you do not execute them yourself.

        When assigning subtasks, use this format:
        1. <team> : <subtask>

        When assigning subtasks, also give a flow chart in the following format (or mermaid) to visualize the collaboration between the various teams, such as:
        <team A> -> <team B> -> <team C>

        After planning and delegating tasks are complete, end with "START".
        Determine whether all sub-teams have completed their tasks; if so, summarize the findings and end with "TERMINATE".
        """,
        reflect_on_tool_use=False,
    )

    # The termination condition is a combination of text mention termination and max message termination.
    text_mention_termination = TextMentionTermination("TERMINATE")
    max_messages_termination = MaxMessageTermination(max_messages=25)
    termination = text_mention_termination | max_messages_termination

    # The selector function takes the current message thread of the group chat
    # and returns the next speaker's name. If None is returned, the LLM-based selection method is used.
    def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:
        if messages[-1].source != planning_agent.name:
            # Always return to the planning agent after the other agents have spoken.
            return planning_agent.name
        return None

    team = SelectorGroupChat(
        [planning_agent, scientist_team, engineer_team],
        model_client=model_client,  # Use a smaller model for the selector.
        termination_condition=termination,
        selector_func=selector_func,
    )

    await Console(team.run_stream(task=task))

    # NOTE: with Console streaming above, `result` stays empty; use the loop below instead
    # to collect each agent's messages into `result`.
    # async for message in team.run_stream(task=task):
    #     if isinstance(message, TextMessage):
    #         print(f"----------------{message.source}----------------\n {message.content}")
    #         result[message.source] = message.content
    #     elif isinstance(message, ToolCallExecutionEvent):
    #         print(f"----------------{message.source}----------------\n {message.content}")
    #         result[message.source] = [content.content for content in message.content]

    return result


# Example usage in another function
async def main_1():
    result = await main("Let the robot synthesize CsPbBr3 nanocubes at room temperature")
    # result = await main("Look up the crystal structure of CsPbBr3")
    print(result)


if __name__ == "__main__":
    asyncio.run(main_1())