"""Agent definitions for the PL/UV analysis and recipe-optimization group chat."""
from autogen.coding import LocalCommandLineCodeExecutor
from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
import autogen
from .utils import get_max_uv_wavelength_from_txt, get_max_pl_peak_from_txt
from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE
# Shared LLM configuration: model/config list plus streaming and cache-seed
# settings imported from .constant.
llm_config = dict(config_list=config_list, stream=STREAM, cache_seed=CACHE)
def init_optimize_group(work_dir, venv_context):
    """Create the agents and group chat for the analysis/optimization loop.

    Parameters
    ----------
    work_dir:
        Working directory in which the local code executor runs generated code.
    venv_context:
        Virtual-environment context forwarded to ``LocalCommandLineCodeExecutor``.

    Returns
    -------
    tuple
        ``(inner_analysis_admin, outer_analysis_agent, optimizer)`` — the
        group-chat manager for the inner analysis round-robin, the outer admin
        agent that drives it, and the recipe-optimizer agent.
    """
    # Local executor that exposes the UV/PL txt-analysis helpers as callable
    # functions inside the sandboxed working directory.
    optimize_env = LocalCommandLineCodeExecutor(
        work_dir=work_dir,
        virtual_env_context=venv_context,
        functions=[get_max_uv_wavelength_from_txt, get_max_pl_peak_from_txt],
    )

    # Pure code runner: no LLM of its own, replies at most once per turn.
    analysis_executor = autogen.UserProxyAgent(
        name="analysis_executor",
        human_input_mode="NEVER",
        llm_config=False,
        code_execution_config={
            "executor": optimize_env,
        },
        description="analysis_executor",
        max_consecutive_auto_reply=1,
    )

    # Code-writer agent: emits code that calls both analysis helpers; the
    # executor above runs it. The helper signatures are appended to the prompt.
    analysis_pl_uv_system_message = """
You are a analysis_pl_uv.
You are given a task and some code snippets. Your task is to write code to call the functions of get_max_uv_wavelength_from_txt and get_max_pl_peak_from_txt at the same time.
You need to write the rest of the code to answer the user's request.
DONOT ANSWER THE USER'S REQUEST DIRECTLY.
"""
    analysis_pl_uv_system_message += optimize_env.format_functions_for_prompt()
    analysis_pl_uv = autogen.AssistantAgent(
        name="analysis_pl_uv",
        human_input_mode="NEVER",
        system_message=analysis_pl_uv_system_message,
        llm_config={"config_list": config_list},
        description="analysis_pl_uv",
        max_consecutive_auto_reply=1,
    )

    # Multimodal agent for image inputs (electron microscopy, XRD patterns).
    # NOTE(review): constructed but never added to analysis_group below and not
    # returned — confirm whether it should be wired into the group chat.
    # (Typo fixed in prompt: "USER'SREQUEST" -> "USER'S REQUEST".)
    analysis_picturer_system_message = """
You are a analysis_picture.
You are given a task and some code snippets. Your task is to analyze image information, which mainly includes electron microscope images and XRD patterns.
You need to write the rest of the code to answer the user's request.
DONOT ANSWER THE USER'S REQUEST DIRECTLY."""
    analysis_picturer_system_message += optimize_env.format_functions_for_prompt()
    analysis_picturer = MultimodalConversableAgent(
        name="analysis_picturer",
        human_input_mode="NEVER",
        system_message=analysis_picturer_system_message,
        llm_config={"config_list": config_list},
        description="analysis_picturer",
        max_consecutive_auto_reply=1,
    )

    # Optimizer: proposes an improved recipe from recipe/result/target data,
    # keeping the collector's recipe format; ends its answer with TERMINATE.
    optimizer_system_message = """
You are an experimental optimizer.
You will receive the recipe, the results, and the expected results from the data collector, and you will be asked to use this information to optimize the recipe so that the results are closer to the expected results.
It is important to note that your experimental recipe must adhere to the format of the experimental recipe given by the collector.
Add 'TERMINATE' to the end of the answer to indicate that the answer is complete."""
    optimizer = autogen.AssistantAgent(
        name="Experiment_Optimizer",
        llm_config=llm_config,
        system_message=optimizer_system_message,
        is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
        human_input_mode="TERMINATE",
        description="Experiment_Optimizer",
    )

    # Critic that sanity-checks the optimizer's proposal against fundamental
    # physicochemical laws.
    # NOTE(review): constructed but neither returned nor added to a group
    # chat — confirm whether the caller is meant to use it.
    optimizer_critic_system_message = "You are the optimizer_critic, and your job is to judge whether the solution given by the optimizer is correct or not according to some fundamental physicochemical laws."
    optimizer_critic = autogen.AssistantAgent(
        name="optimizer_critic",
        system_message=optimizer_critic_system_message,
        llm_config=llm_config,
        human_input_mode="TERMINATE",
        description="optimizer_critic",
        max_consecutive_auto_reply=3,
    )

    # Outer admin: drives the inner group, auto-replies "continue" at most
    # once, stops on TERMINATE; executes code locally (no Docker).
    outer_analysis_agent = autogen.ConversableAgent(
        name="Outer_Analysis_Admin",
        human_input_mode="NEVER",
        code_execution_config={
            "work_dir": WORK_DIR,
            "use_docker": False,
        },
        is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
        description="Outer_Analysis_Admin",
        default_auto_reply="continue",
        max_consecutive_auto_reply=1,
    )

    # Inner round-robin: admin -> writer -> executor, up to 10 rounds.
    analysis_group = autogen.GroupChat(
        agents=[outer_analysis_agent, analysis_pl_uv, analysis_executor],
        messages=[],
        speaker_selection_method='round_robin',
        max_round=10,
    )

    inner_analysis_admin = autogen.GroupChatManager(
        name="Analysis_Group_Admin",
        description="Analysis_Group_Admin",
        groupchat=analysis_group,
        llm_config=llm_config,
    )

    return inner_analysis_admin, outer_analysis_agent, optimizer