migrate 2to4

2025-01-10 09:57:42 +08:00
parent 1fd031d6e6
commit ba7fd2d13a
40 changed files with 203 additions and 2184 deletions

@@ -1,28 +0,0 @@
"""
ASGI config for matagent project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
import backend.routing
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
#application = get_asgi_application()
application = ProtocolTypeRouter({
"http": get_asgi_application(),
"websocket": AuthMiddlewareStack(
URLRouter(
backend.routing.websocket_urlpatterns # replace with your application's routing module
)
),
})

@@ -3,11 +3,8 @@ import os
# Define your API keys and configurations
# OPENAI_API_KEY = "sk-QQq3pi7xjy24YuY207Cc9fD9Af51479fA2B584058a7593Fa"
OPENAI_API_KEY = "sk-urFGAQRThR6pysea0aC93bD27fA34bA69811A9254aAaD8B2"
# OPENAI_API_KEY = "sk-FTmEt110USDrY7py99Af4a3eAbF6432f97Aa1103Ba9cCdE8"
OPENAI_API_KEY = "sk-4aJj5ygdQ9rw6lS6920712Ef9bB848439522E72318439eCd"
OPENAI_BASE_URL = "http://8.218.238.241:17935/v1"
# OPENAI_BASE_URL = "https://pro.aiskt.com/v1"
MODEL = "gpt-4o"
# MODEL = "gpt-4o-2024-08-06"
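Note: hard-coding API keys in constant.py, as above, leaks credentials through version control. A minimal environment-based alternative, as a sketch assuming only the standard library and the same constant names:

import os

# Read configuration from the environment, keeping the current model default.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
OPENAI_BASE_URL = os.environ.get("OPENAI_BASE_URL", "https://api.openai.com/v1")
MODEL = os.environ.get("MODEL", "gpt-4o")

if not OPENAI_API_KEY:
    raise RuntimeError("OPENAI_API_KEY is not set")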

@@ -1,25 +0,0 @@
# your_app_name/consumers.py
import json
from channels.generic.websocket import AsyncWebsocketConsumer
class ChatConsumer(AsyncWebsocketConsumer):
async def connect(self):
await self.accept()
async def disconnect(self, close_code):
pass
async def receive(self, text_data):
#text_data_json = json.loads(text_data)
#message = text_data_json['message']
# handle the received message here
print(f"Received message: {text_data}")
# send the response back to the client
response_text = {
'message': f'You said: {text_data}'
}
await self.send(text_data=json.dumps(response_text))
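The consumer above simply echoes what it receives. A quick way to exercise it is a synchronous client; a sketch assuming the dev server listens on localhost:8000 and the 'matagent/chat' route from backend/routing.py:

import json
from websockets.sync.client import connect

# Connect to ChatConsumer and round-trip one message.
with connect("ws://localhost:8000/matagent/chat") as ws:
    ws.send("hello")
    reply = json.loads(ws.recv())
    print(reply["message"])  # expected: "You said: hello"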

@@ -1,131 +0,0 @@
from autogen.code_utils import create_virtual_env
from autogen.coding import LocalCommandLineCodeExecutor
from autogen.agentchat.contrib.capabilities.teachability import Teachability
from autogen.agentchat.contrib.capabilities.vision_capability import VisionCapability
from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
from pathlib import Path
import autogen
from .utils import load_agent_configs
import os
from .retrieval_group import init_retrieval_group
from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE
agent_configs = load_agent_configs(os.path.join(os.path.dirname(os.path.abspath(__file__)), "config/converter_group.yaml"))
venv_context = create_virtual_env(WORK_DIR)
llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}
def init_converter_group():
outer_converter_agent = autogen.ConversableAgent(
name="Outer_Converter_Admin",
human_input_mode="NEVER",
# human_input_mode="TERMINATE",
code_execution_config={
"work_dir": WORK_DIR,
"use_docker": False,
},
is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
description="Outer_Converter_Admin",
default_auto_reply="continue",
max_consecutive_auto_reply=1
)
scheme_converter_name = "scheme_converter"
scheme_converter = autogen.AssistantAgent(
name=scheme_converter_name,
system_message=agent_configs[scheme_converter_name]["system_message"],
llm_config=llm_config,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
human_input_mode="TERMINATE",
description="scheme_converter",
max_consecutive_auto_reply=3
)
converter_critic_name = "converter_critic"
converter_critic = autogen.AssistantAgent(
name=converter_critic_name,
system_message=agent_configs[converter_critic_name]['system_message'],
llm_config=llm_config,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
human_input_mode="TERMINATE",
description="converter_critic",
max_consecutive_auto_reply=3
)
mergrid_ploter_name = "mergrid_ploter"
mergrid_ploter = autogen.AssistantAgent(
name=mergrid_ploter_name,
system_message=agent_configs[mergrid_ploter_name]['system_message'],
llm_config=llm_config,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
human_input_mode="TERMINATE",
description="mergrid_ploter",
max_consecutive_auto_reply=1
)
scheme_code_writer_name = "scheme_code_writer"
scheme_code_writer = autogen.AssistantAgent(
name=scheme_code_writer_name,
system_message=agent_configs[scheme_code_writer_name]['system_message'],
llm_config=llm_config,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
human_input_mode="TERMINATE",
description="scheme_code_writer",
max_consecutive_auto_reply=3
)
scheme_code_critic_name = "scheme_code_critic"
scheme_code_critic = autogen.AssistantAgent(
name=scheme_code_critic_name,
system_message=agent_configs[scheme_code_critic_name]['system_message'],
llm_config=llm_config,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
human_input_mode="TERMINATE",
description="scheme_code_critic",
max_consecutive_auto_reply=3
)
def state_transition(last_speaker, groupchat):
messages = groupchat.messages
if last_speaker is outer_converter_agent:
return scheme_converter
elif last_speaker is scheme_converter:
return converter_critic
elif last_speaker is converter_critic:
if "OPTIMIZE" in messages[-1]["content"]:
return scheme_converter
elif "TERMINATE" in messages[-1]["content"]:
return mergrid_ploter
elif last_speaker is mergrid_ploter:
return scheme_code_writer
elif last_speaker is scheme_code_writer:
return scheme_code_critic
elif last_speaker is scheme_code_critic:
if "OPTIMIZE" in messages[-1]["content"]:
return scheme_code_writer
elif "TERMINATE" in messages[-1]["content"]:
return outer_converter_agent
converter_group = autogen.GroupChat(
agents=[outer_converter_agent, scheme_converter, converter_critic, mergrid_ploter, scheme_code_writer, scheme_code_critic],
messages=[],
speaker_selection_method=state_transition, # custom speaker selection method
max_round=12,
)
inner_converter_admin = autogen.GroupChatManager(
name="Converter_Group_Admin",
description="Converter_Group_Admin",
groupchat=converter_group,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
llm_config=llm_config,
system_message=agent_configs["admin"]['system_message']
)
return inner_converter_admin, outer_converter_agent
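For reference, the returned pair can be driven the same way the other groups in this repo are started; a hypothetical driver (the message is illustrative):

if __name__ == "__main__":
    inner_converter_admin, outer_converter_agent = init_converter_group()
    # One conversion round through the nested group chat.
    outer_converter_agent.initiate_chat(
        inner_converter_admin,
        message="Convert the synthesis scheme into executable robot instructions.",
    )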

@@ -1,108 +0,0 @@
from autogen.coding import LocalCommandLineCodeExecutor
import autogen
from .utils import send_instruction_to_robot, get_uv_latest_file, get_pl_latest_file
from .constant import config_list, STREAM, CACHE
llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}
def init_executor_group(work_dir, venv_context):
outer_executor_agent = autogen.ConversableAgent(
name="Outer_Executor_Admin",
human_input_mode="NEVER",
# human_input_mode="TERMINATE",
code_execution_config={
"work_dir": work_dir,
"use_docker": False,
},
is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
description="Outer_Executor_Admin",
default_auto_reply="continue",
max_consecutive_auto_reply=1
)
executor_env = LocalCommandLineCodeExecutor(
work_dir=work_dir,
virtual_env_context=venv_context,
functions=[send_instruction_to_robot, get_uv_latest_file, get_pl_latest_file]
)
expriment_executor = autogen.UserProxyAgent(
name="experiment_executor",
human_input_mode="NEVER",
llm_config=False,
code_execution_config={
"executor": executor_env,
},
description="experiment_executor",
max_consecutive_auto_reply=1
)
expriment_coder_system_message = """
You are an expriment_coder.
You are given a task and some code snippets. Your task is to write code that calls the send_instruction_to_robot function, which sends the instruction to the robot.
You need to write the rest of the code to answer the user's request.
DO NOT ANSWER THE USER'S REQUEST DIRECTLY.
"""
expriment_coder_system_message += executor_env.format_functions_for_prompt()
expriment_code_writer = autogen.AssistantAgent(
name="expriment_code_writer",
human_input_mode="NEVER",
system_message=expriment_coder_system_message,
# llm_config={"config_list": config_list},
llm_config=llm_config,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
# human_input_mode="TERMINATE",
description="expriment_code_writer",
max_consecutive_auto_reply=1
)
data_collector = autogen.UserProxyAgent(
name="data_collector",
human_input_mode="NEVER",
llm_config=False,
code_execution_config={
"executor": executor_env,
},
description="data_collector",
max_consecutive_auto_reply=1
)
collector_coder_system_message = """
You are a collector_coder.
You are given a task and some code snippets. Your task is to write code that calls both get_uv_latest_file and get_pl_latest_file in the same script.
You need to write the rest of the code to answer the user's request.
DO NOT ANSWER THE USER'S REQUEST DIRECTLY.
"""
collector_coder_system_message += executor_env.format_functions_for_prompt()
collector_code_writer = autogen.AssistantAgent(
name="collector_code_writer",
human_input_mode="NEVER",
system_message=collector_coder_system_message,
# llm_config={"config_list": config_list},
llm_config=llm_config,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
# human_input_mode="TERMINATE",
description="collector_code_writer",
max_consecutive_auto_reply=1
)
executor_group = autogen.GroupChat(
agents=[outer_executor_agent, expriment_code_writer, expriment_executor, collector_code_writer, data_collector],
messages=[],
speaker_selection_method='round_robin', # built-in round-robin ordering over the five agents
allow_repeat_speaker=False,
max_round=6,
)
inner_executor_admin = autogen.GroupChatManager(
name="Inner_Executor_Admin",
groupchat=executor_group,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
# llm_config={"config_list": config_list},
llm_config=llm_config,
human_input_mode="NEVER",
description="Inner_Executor_Admin",
)
return inner_executor_admin, outer_executor_agent
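The writer/executor pairing above hinges on LocalCommandLineCodeExecutor's function registration: the writer agent sees function stubs via format_functions_for_prompt(), and the executor makes those functions importable when running generated code. A standalone sketch of the pattern (greet is a hypothetical function, and we assume a plain callable is accepted where this repo uses @with_requirements):

from autogen.coding import LocalCommandLineCodeExecutor

def greet(name: str) -> str:
    """Return a greeting for name."""
    return f"Hello, {name}!"

executor = LocalCommandLineCodeExecutor(work_dir="coding", functions=[greet])
# This prompt excerpt is what a writer agent receives appended to its
# system message, so the LLM knows the callable's signature.
print(executor.format_functions_for_prompt())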

@@ -1,178 +0,0 @@
from autogen.code_utils import create_virtual_env
from autogen.coding import LocalCommandLineCodeExecutor
from autogen.agentchat.contrib.capabilities.teachability import Teachability
from autogen.agentchat.contrib.capabilities.vision_capability import VisionCapability
from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
from pathlib import Path
import autogen
from .utils import load_agent_configs
import os
from .retrieval_group import init_retrieval_group
from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE
agent_configs = load_agent_configs(os.path.join(os.path.dirname(os.path.abspath(__file__)), "config/generate_group.yaml"))
venv_context = create_virtual_env(WORK_DIR)
llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}
def init_generate_group(outer_retrieval_agent, inner_retrieval_admin):
outer_generate_agent = autogen.ConversableAgent(
name="Outer_Generate_Admin",
human_input_mode="NEVER",
# human_input_mode="TERMINATE",
code_execution_config={
"work_dir": WORK_DIR,
"use_docker": False,
},
is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
description="Outer_Generate_Admin",
default_auto_reply="continue",
max_consecutive_auto_reply=1
)
structure_scientist_name = "structure_scientist"
structure_scientist = MultimodalConversableAgent(
name=structure_scientist_name,
system_message=agent_configs[structure_scientist_name]['system_message'],
llm_config=llm_config,
is_termination_msg=lambda x: x.get("content", "")[-1].get("text", "").find("TERMINATE") >= 0,
human_input_mode="TERMINATE",
description="structure_scientist",
max_consecutive_auto_reply=1
)
property_scientist_name = "property_scientist"
property_scientist = MultimodalConversableAgent(
name=property_scientist_name,
system_message=agent_configs[property_scientist_name]['system_message'],
llm_config=llm_config,
is_termination_msg=lambda x: x.get("content", "")[-1].get("text", "").find("TERMINATE") >= 0,
human_input_mode="TERMINATE",
description="property_scientist",
max_consecutive_auto_reply=1
)
application_scientist_name = "application_scientist"
application_scientist = MultimodalConversableAgent(
name=application_scientist_name,
system_message=agent_configs[application_scientist_name]['system_message'],
llm_config=llm_config,
is_termination_msg=lambda x: x.get("content", "")[-1].get("text", "").find("TERMINATE") >= 0,
human_input_mode="TERMINATE",
description="application_scientist",
max_consecutive_auto_reply=1
)
synthesis_scientist_name = "synthesis_scientist"
synthesis_scientist = autogen.AssistantAgent(
name=synthesis_scientist_name,
system_message=agent_configs[synthesis_scientist_name]['system_message'],
llm_config=llm_config,
is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
human_input_mode="TERMINATE",
description="synthesis_scientist",
max_consecutive_auto_reply=3
)
scheme_critic_name = "scheme_critic"
scheme_critic = autogen.AssistantAgent(
name=scheme_critic_name,
system_message=agent_configs[scheme_critic_name]['system_message'],
llm_config=llm_config,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
human_input_mode="TERMINATE",
description="scheme_critic",
max_consecutive_auto_reply=3
)
def state_transition(last_speaker, groupchat):
messages = groupchat.messages
if last_speaker is outer_generate_agent:
# user -> retrieve
if len(messages) <= 1:
return outer_retrieval_agent
else:
return "auto"
elif last_speaker is outer_retrieval_agent:
# retrieve -> auto select scientist
return "auto"
# scheme-related questions
elif last_speaker is synthesis_scientist:
return scheme_critic
elif last_speaker is scheme_critic:
groupchat.messages[-1]["content"] += "\n\nSYNTHESIS"
if "OPTIMIZE" in messages[-1]["content"]:
return synthesis_scientist
elif ("TERMINATE" not in messages[-1]["content"] and "OPTIMIZE" not in messages[-1]["content"]):
groupchat.messages[-1]["content"] += "\n\nTERMINATE"
return outer_generate_agent
else:
return outer_generate_agent
else:
if ("TERMINATE" not in messages[-1]["content"] and "OPTIMIZE" not in messages[-1]["content"]):
messages[-1]["content"] += "\n\nTERMINATE"
return outer_generate_agent
generate_group = autogen.GroupChat(
agents=[outer_generate_agent, outer_retrieval_agent, structure_scientist, property_scientist, application_scientist, synthesis_scientist, scheme_critic],
messages=[],
speaker_selection_method=state_transition, # custom speaker selection method
max_round=10,
)
inner_generate_admin = autogen.GroupChatManager(
name="Generate_Group_Admin",
description="Generate_Group_Admin",
groupchat=generate_group,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
llm_config=llm_config,
system_message=agent_configs["admin"]['system_message']
)
outer_retrieval_agent.register_nested_chats(
[
{"recipient": inner_retrieval_admin, "max_turn": 1, "summary_method": "last_msg", "silent": SILENT},
],
trigger=inner_generate_admin,
)
return inner_generate_admin, outer_generate_agent
if __name__ == "__main__":
outer_retrieval_agent, inner_retrieval_admin = init_retrieval_group(WORK_DIR, venv_context)
inner_generate_admin, outer_generate_agent = init_generate_group(outer_retrieval_agent, inner_retrieval_admin)
outer_generate_agent.initiate_chat(
inner_generate_admin,
# message="如何在常温条件下制备CsPbBr3纳米立方体",
# message="how to synthesis of CsPbBr3 Perovskite NCs at room temperature?"
# message="how to synthesis CsPbBr3 nanocubes at room temperature?"
# message="什么是钙钛矿?"
message="钙钛矿有什么作用?"
# message="Please prepare few layers graphene from graphite powder.",
# message="Can you please prepare black phosphorusene with improved stability from black phosphorus crystals powder?",
# message="Can you synthesize gold nanorods by seed-mediated method with absorption peaks at 820 nm?",
# message="Please synthesize CsPbBr3 nanocubes with a fluorescence emission wavelength of 520 nm at room temperature?",
# message="Please design a new hybridized halide perovskite composite material that is biocompatible and water-stable",
# message="please use phospholipid membrane as shell to encapsulate hybrid perovskite"
# Now I want a novel bright perovskite composite based CH3NH3PbBr3 and phospholipid membrane(PM) to improve the stability and biocompatibility, please synthesis this materials under room temperature
)

_backend/main.py Normal file
@@ -0,0 +1,116 @@
import asyncio
from typing import Sequence
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination
from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage, ToolCallExecutionEvent
from autogen_agentchat.teams import SelectorGroupChat, RoundRobinGroupChat
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
from constant import MODEL, OPENAI_API_KEY, OPENAI_BASE_URL
from tools import retrieval_from_knowledge_base, search_from_oqmd_by_composition
model_client = OpenAIChatCompletionClient(
model=MODEL,
base_url=OPENAI_BASE_URL,
api_key=OPENAI_API_KEY,
model_info={
"vision": True,
"function_calling": True,
"json_output": True,
"family": "unknown",
},
)
def create_team() -> SelectorGroupChat:
planning_agent = AssistantAgent(
"PlanningAgent",
description="An agent for planning tasks, this agent should be the first to engage when given a new task.",
model_client=model_client,
system_message="""
You are a planning agent.
Your job is to break down complex search tasks into smaller, manageable subtasks.
Assign these subtasks to the appropriate team members; not all team members are required to participate in every task.
Your team members are:
Vector search agent: Searches for paper information in Vector database of knowledge base.
OQMD search agent: Searches for crystal structure and property information in OQMD database by composition.
You only plan and delegate tasks - you do not execute them yourself.
When assigning tasks, use this format:
1. <agent> : <task>
After all search tasks are complete, summarize the findings and end with "TERMINATE".
""",
)
vector_search_agent = AssistantAgent(
"VectorSearcher",
description="A vector search agent.",
tools=[retrieval_from_knowledge_base],
model_client=model_client,
system_message="""
You are a vector search agent.
Your only tool is retrieval_from_knowledge_base - use it to find information.
You make only one search call at a time.
Once you have the results, you never do calculations based on them.
""",
reflect_on_tool_use=False, # Set to True to have the model reflect on the tool use, set to False to return the tool call result directly.
)
oqmd_database_search_agent = AssistantAgent(
"OQMDDatabaseSearcher",
description="A database search agent.",
tools=[search_from_oqmd_by_composition],
model_client=model_client,
system_message="""
You are a database search agent of OQMD.
Your only tool is search_from_oqmd_by_composition - use it to find information.
You make only one search call at a time.
Once you have the results, you never do calculations based on them.
""",
reflect_on_tool_use=False, # Set to True to have the model reflect on the tool use, set to False to return the tool call result directly.
)
# The termination condition is a combination of text mention termination and max message termination.
text_mention_termination = TextMentionTermination("TERMINATE")
max_messages_termination = MaxMessageTermination(max_messages=25)
termination = text_mention_termination | max_messages_termination
# The selector function is a function that takes the current message thread of the group chat
# and returns the next speaker's name. If None is returned, the LLM-based selection method will be used.
def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:
if messages[-1].source != planning_agent.name:
return planning_agent.name # Always return to the planning agent after the other agents have spoken.
return None
team = SelectorGroupChat(
[planning_agent, vector_search_agent, oqmd_database_search_agent],
model_client=model_client, # A smaller model could be used for the selector; here the same client is reused.
termination_condition=termination,
selector_func=selector_func,
)
return team
async def main(task: str = "") -> dict:
team = create_team()
result = {}
async for message in team.run_stream(task=task):
if isinstance(message, TextMessage):
print(f"----------------{message.source}----------------\n {message.content}")
result[message.source] = message.content
elif isinstance(message, ToolCallExecutionEvent):
print(f"----------------{message.source}----------------\n {message.content}")
result[message.source] = [content.content for content in message.content]
return result
# Example usage in another function
async def main_1():
result = await main("How to synthesize CsPbBr3 nanocubes at room temperature?")
# Now you can use result in main_1
print(result)
if __name__ == "__main__":
asyncio.run(main_1())
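This new file is the v0.4 counterpart of the deleted v0.2 group modules: per-agent is_termination_msg lambdas become termination conditions attached to a team object. A minimal sketch of the idiom shift (agent names are illustrative; model_client as defined above):

# v0.2 (pyautogen): termination is a per-agent predicate on the last message.
# critic = autogen.AssistantAgent(
#     name="critic",
#     llm_config=llm_config,
#     is_termination_msg=lambda x: "TERMINATE" in x.get("content", ""),
# )

# v0.4 (autogen-agentchat): termination is a composable condition on the team.
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.conditions import TextMentionTermination
from autogen_agentchat.teams import RoundRobinGroupChat

critic = AssistantAgent("critic", model_client=model_client)
team = RoundRobinGroupChat([critic], termination_condition=TextMentionTermination("TERMINATE"))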

@@ -1,128 +0,0 @@
from autogen.code_utils import create_virtual_env
from autogen.coding import LocalCommandLineCodeExecutor
from autogen.agentchat.contrib.capabilities.teachability import Teachability
from autogen.agentchat.contrib.capabilities.vision_capability import VisionCapability
from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
from pathlib import Path
import autogen
import os
from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE
from .utils import load_agent_configs
from .retrieval_group import init_retrieval_group
from .generate_group import init_generate_group
from .converter_group import init_converter_group
from .executor_group import init_executor_group
from .optimize_group import init_optimize_group
venv_context = create_virtual_env(WORK_DIR)
llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}
def main():
agent_configs = load_agent_configs(os.path.join(os.path.dirname(os.path.abspath(__file__)), "config/plan_group.yaml"))
user = autogen.UserProxyAgent(
name="User",
human_input_mode="ALWAYS",
code_execution_config={
"work_dir": WORK_DIR,
"use_docker": False,
},
is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
description="User",
)
inner_retrieval_admin, outer_retrieval_agent = init_retrieval_group(WORK_DIR, venv_context)
inner_generate_admin, outer_generate_agent = init_generate_group(outer_retrieval_agent, inner_retrieval_admin)
inner_converter_admin, outer_converter_agent = init_converter_group()
inner_executor_admin, outer_executor_agent = init_executor_group(WORK_DIR, venv_context)
inner_analysis_admin, outer_analysis_agent, optimizer = init_optimize_group(WORK_DIR, venv_context)
def state_transition(last_speaker, groupchat):
messages = groupchat.messages
if last_speaker is user:
if len(messages) <= 1:
return outer_generate_agent
else:
return "auto"
elif last_speaker is outer_generate_agent:
if "synthesis" in messages[-1]["content"].lower():
return outer_converter_agent
else:
return user
elif last_speaker is outer_converter_agent:
return outer_executor_agent
elif last_speaker is outer_executor_agent:
return outer_analysis_agent
elif last_speaker is outer_analysis_agent:
return optimizer
else:
return user
matagent_group = autogen.GroupChat(
agents=[user, outer_generate_agent, outer_converter_agent, outer_executor_agent, outer_analysis_agent, optimizer],
messages=[],
speaker_selection_method=state_transition,
max_round=50,
)
matagent_admin_name = "Planer"
matagent_admin = autogen.GroupChatManager(
name=matagent_admin_name,
groupchat=matagent_group,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
llm_config=llm_config,
system_message=agent_configs[matagent_admin_name]['system_message'],
description=matagent_admin_name
)
outer_generate_agent.register_nested_chats(
[
{"recipient": inner_generate_admin, "max_turn": 1, "summary_method": "last_msg", "silent": SILENT},
],
trigger=matagent_admin,
)
outer_converter_agent.register_nested_chats(
[
{"recipient": inner_converter_admin, "max_turn": 1, "summary_method": "last_msg", "silent": SILENT},
],
trigger=matagent_admin,
)
outer_executor_agent.register_nested_chats(
[
{"recipient": inner_executor_admin, "max_turn": 1, "summary_method": "last_msg", "silent": SILENT},
],
trigger=matagent_admin,
)
outer_analysis_agent.register_nested_chats(
[
{"recipient": inner_analysis_admin, "max_turn": 1, "summary_method": "last_msg", "silent": SILENT},
],
trigger=matagent_admin,
)
user.initiate_chat(
matagent_admin,
# message="如何在常温条件下制备CsPbBr3纳米立方体",
message="how to synthesis of CsPbBr3 Perovskite NCs at room temperature?"
# message="how to synthesis CsPbBr3 nanocubes at room temperature?"
# message="什么是钙钛矿?"
# message="Please prepare few layers graphene from graphite powder.",
# message="Can you please prepare black phosphorusene with improved stability from black phosphorus crystals powder?",
# message="Can you synthesize gold nanorods by seed-mediated method with absorption peaks at 820 nm?",
# message="Please synthesize CsPbBr3 nanocubes with a fluorescence emission wavelength of 520 nm at room temperature?",
# message="Please design a new hybridized halide perovskite composite material that is biocompatible and water-stable",
# message="please use phospholipid membrane as shell to encapsulate hybrid perovskite"
# Now I want a novel bright perovskite composite based CH3NH3PbBr3 and phospholipid membrane(PM) to improve the stability and biocompatibility, please synthesis this materials under room temperature
)
print(outer_generate_agent.last_message(matagent_admin))
print(matagent_admin)
if __name__ == "__main__":
main()

@@ -1,117 +0,0 @@
from autogen.coding import LocalCommandLineCodeExecutor
from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
import autogen
from .utils import get_max_uv_wavelength_from_txt, get_max_pl_peak_from_txt
from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE
llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}
def init_optimize_group(work_dir, venv_context):
optimize_env = LocalCommandLineCodeExecutor(
work_dir=work_dir,
virtual_env_context=venv_context,
functions=[get_max_uv_wavelength_from_txt, get_max_pl_peak_from_txt]
)
analysis_executor = autogen.UserProxyAgent(
name="analysis_executor",
human_input_mode="NEVER",
llm_config=False,
code_execution_config={
"executor": optimize_env,
},
description="analysis_executor",
max_consecutive_auto_reply=1
)
analysis_pl_uv_system_message = """
You are an analysis_pl_uv agent.
You are given a task and some code snippets. Your task is to write code that calls both get_max_uv_wavelength_from_txt and get_max_pl_peak_from_txt in the same script.
You need to write the rest of the code to answer the user's request.
DO NOT ANSWER THE USER'S REQUEST DIRECTLY.
"""
analysis_pl_uv_system_message += optimize_env.format_functions_for_prompt()
analysis_pl_uv = autogen.AssistantAgent(
name="analysis_pl_uv",
human_input_mode="NEVER",
system_message=analysis_pl_uv_system_message,
llm_config={"config_list": config_list},
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
# human_input_mode="TERMINATE",
description="analysis_pl_uv",
max_consecutive_auto_reply=1
)
analysis_picturer_system_message = """
You are an analysis_picturer agent.
You are given a task and some code snippets. Your task is to analyze image information, mainly electron microscope images and XRD patterns.
You need to write the rest of the code to answer the user's request.
DO NOT ANSWER THE USER'S REQUEST DIRECTLY."""
analysis_picturer_system_message += optimize_env.format_functions_for_prompt()
analysis_picturer = MultimodalConversableAgent(
name="analysis_picturer",
human_input_mode="NEVER",
system_message=analysis_picturer_system_message,
llm_config={"config_list": config_list},
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
# human_input_mode="TERMINATE",
description="analysis_picturer",
max_consecutive_auto_reply=1
)
# optimizer
optimizer_system_message = """
You are an experimental optimizer.
You will receive the recipe, the results, and the expected results from the data collector, and you will be asked to use this information to optimize the recipe so that the results are closer to the expected results.
It is important to note that your experimental recipe must adhere to the format of the experimental recipe given by the collector.
Add 'TERMINATE' to the end of the answer to indicate that the answer is complete."""
optimizer = autogen.AssistantAgent(
name="Experiment_Optimizer",
llm_config=llm_config,
system_message=optimizer_system_message,
is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
human_input_mode="TERMINATE",
description="Experiment_Optimizer",
)
optimizer_critic_system_message = "You are the optimizer_critic, and your job is to judge whether the solution given by the optimizer is correct or not according to some fundamental physicochemical laws."
optimizer_critic = autogen.AssistantAgent(
name="optimizer_critic",
system_message=optimizer_critic_system_message,
llm_config=llm_config,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
human_input_mode="TERMINATE",
description="optimizer_critic",
max_consecutive_auto_reply=3
)
outer_analysis_agent = autogen.ConversableAgent(
name="Outer_Analysis_Admin",
human_input_mode="NEVER",
# human_input_mode="TERMINATE",
code_execution_config={
"work_dir": WORK_DIR,
"use_docker": False,
},
is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
description="Outer_Analysis_Admin",
default_auto_reply="continue",
max_consecutive_auto_reply=1
)
analysis_group = autogen.GroupChat(
agents=[outer_analysis_agent, analysis_pl_uv, analysis_executor],
messages=[],
speaker_selection_method='round_robin', # built-in round-robin selection
max_round=10,
)
inner_analysis_admin = autogen.GroupChatManager(
name="Analysis_Group_Admin",
description="Analysis_Group_Admin",
groupchat=analysis_group,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
llm_config=llm_config,
)
return inner_analysis_admin, outer_analysis_agent, optimizer

18 binary image files removed (not shown); sizes ranged from 4.9 KiB to 5.0 MiB.

@@ -1,6 +0,0 @@
body {
background-image: url('./bakg.png');
background-size: cover; /* or adjust as needed, e.g. contain */
background-position: center;
background-repeat: no-repeat;
}

@@ -1 +0,0 @@
pyautogen==0.2.34

@@ -1,173 +0,0 @@
from autogen.coding import LocalCommandLineCodeExecutor
import autogen
import os
from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE
from .utils import load_agent_configs,retrieval_from_knowledge_base, web_searcher, retrieval_from_graphrag
agent_configs = load_agent_configs(os.path.join(os.path.dirname(os.path.abspath(__file__)), "config/retrieval_group.yaml"))
llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}
def init_retrieval_group(work_dir, venv_context):
# vector retrieval
vector_executor = LocalCommandLineCodeExecutor(
work_dir=work_dir,
virtual_env_context=venv_context,
functions=[retrieval_from_knowledge_base]
)
vector_code_executor = autogen.UserProxyAgent(
name="vector_code_executor",
human_input_mode="NEVER",
llm_config=False,
code_execution_config={
"executor": vector_executor,
},
description="vector_code_executor",
max_consecutive_auto_reply=1
)
vector_searcher_name = "vector_searcher"
vector_searcher_system_message = agent_configs[vector_searcher_name]["system_message"] + vector_executor.format_functions_for_prompt()
vector_searcher = autogen.AssistantAgent(
name=vector_searcher_name,
human_input_mode="NEVER",
system_message=vector_searcher_system_message,
llm_config={"config_list": config_list},
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
# human_input_mode="TERMINATE",
description=vector_searcher_name,
max_consecutive_auto_reply=1
)
# graphrag retrieval agent
graphrag_executor = LocalCommandLineCodeExecutor(
work_dir=work_dir,
virtual_env_context=venv_context,
functions=[retrieval_from_graphrag]
)
graphrag_code_executor = autogen.UserProxyAgent(
name="graphrag_code_executor",
human_input_mode="NEVER",
llm_config=False,
code_execution_config={
"executor": graphrag_executor,
},
description="graphrag_code_executor",
max_consecutive_auto_reply=1
)
graphrag_searcher_name = "graphrag_searcher"
graphrag_searcher_system_message = agent_configs[graphrag_searcher_name]["system_message"] + graphrag_executor.format_functions_for_prompt()
graphrag_searcher = autogen.AssistantAgent(
name=graphrag_searcher_name,
human_input_mode="NEVER",
system_message=graphrag_searcher_system_message,
llm_config={"config_list": config_list},
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
# human_input_mode="TERMINATE",
description=graphrag_searcher_name,
max_consecutive_auto_reply=1
)
# web searcher with jina reader
web_executor = LocalCommandLineCodeExecutor(
work_dir=work_dir,
virtual_env_context=venv_context,
functions=[web_searcher]
)
web_code_executor = autogen.UserProxyAgent(
name="web_code_executor",
human_input_mode="NEVER",
llm_config=False,
code_execution_config={
"executor": web_executor,
},
description="web_code_executor",
)
web_seacher_name = "web_searcher"
web_seacher_system_message = agent_configs[web_seacher_name]["system_message"] + web_executor.format_functions_for_prompt()
web_seacher = autogen.AssistantAgent(
name=web_seacher_name,
human_input_mode="NEVER",
system_message=web_seacher_system_message,
llm_config={"config_list": config_list},
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
# human_input_mode="TERMINATE",
description=web_seacher_name,
)
web_summary = autogen.AssistantAgent(
name="web_summary",
human_input_mode="NEVER",
system_message="You are a web summary agent. You are given a task and a web search result. You need to summarize the web search result to answer the user's request. DONOT ANSWER THE USER'S REQUEST DIRECTLY.",
llm_config={"config_list": config_list},
description="web_summary",
)
outer_retrieval_agent = autogen.ConversableAgent(
name="Outer_Retrieval_Admin",
human_input_mode="NEVER",
# human_input_mode="TERMINATE",
code_execution_config={
"work_dir": work_dir,
"use_docker": False,
},
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
description="Outer_Retrieval_Admin",
)
def state_transition(last_speaker, groupchat):
messages = groupchat.messages
if last_speaker is outer_retrieval_agent:
return vector_searcher
# vector retrieval
elif last_speaker is vector_searcher:
return vector_code_executor
elif last_speaker is vector_code_executor:
if "NULL" in messages[-1]["content"]:
return 'auto'
else:
return outer_retrieval_agent
# knowledge-graph retrieval
elif last_speaker is graphrag_searcher:
return graphrag_code_executor
elif last_speaker is graphrag_code_executor:
return outer_retrieval_agent
# fall back to web search when vector retrieval comes back empty
elif last_speaker is web_seacher:
return web_code_executor
elif last_speaker is web_code_executor:
return web_summary
elif last_speaker is web_summary:
return outer_retrieval_agent
retrieval_group = autogen.GroupChat(
agents=[outer_retrieval_agent, vector_searcher, vector_code_executor, graphrag_searcher, graphrag_code_executor, web_seacher, web_code_executor, web_summary],
messages=[],
speaker_selection_method=state_transition, # custom speaker selection method
allow_repeat_speaker=False,
max_round=8,
)
inner_retrieval_admin = autogen.GroupChatManager(
name="Inner_Retrieval_Admin",
groupchat=retrieval_group,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
llm_config={"config_list": config_list},
human_input_mode="NEVER",
description="Inner_Retrieval_Admin",
)
return inner_retrieval_admin, outer_retrieval_agent

@@ -1,13 +0,0 @@
# your_app_name/routing.py
from django.urls import path
from . import views
# HTTP routes
# urlpatterns = [
# path('login/', views.UserView, name='userlogin'),
# ]
websocket_urlpatterns = [
path('matagent/chat', views.ChatConsumer.as_asgi()), # replace with the actual path and consumer class
]

@@ -1,142 +0,0 @@
"""
Django settings for matagent project.
Generated by 'django-admin startproject' using Django 4.2.17.
For more information on this file, see
https://docs.djangoproject.com/en/4.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-e%))57j&i60=su8d&y1+g-y%hd86k^xti@n5md#xb525fi=4rb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'yourdomain.com', '192.168.42.130', '47.121.220.134']
REST_FRAMEWORK = {
# additional configuration options can be added here
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'backend',
'channels',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.2/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# ASGI application
ASGI_APPLICATION = 'backend.asgi.application'
# Channels layer configuration
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels.layers.InMemoryChannelLayer',
},
}
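The in-memory channel layer above works for a single development process but is not shared across workers. For deployment, a Redis-backed layer is the common choice; a sketch assuming channels_redis is installed and Redis runs locally:

CHANNEL_LAYERS = {
    'default': {
        'BACKEND': 'channels_redis.core.RedisChannelLayer',
        'CONFIG': {
            'hosts': [('127.0.0.1', 6379)],
        },
    },
}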

@@ -1,9 +0,0 @@
import _socket
def main():
print('aaa')
print(_socket)
if __name__ == "__main__":
main()

@@ -1,20 +0,0 @@
from websockets.sync.client import connect as ws_connect
uri = "ws://0.0.0.0:8765"
with ws_connect(uri) as websocket:
print(f" - Connected to server on {uri}", flush=True)
print(" - Sending message to server.", flush=True)
# websocket.send("2+2=?")
websocket.send("Check out the weather in Paris and write a poem about it.")
while True:
message = websocket.recv()
message = message.decode("utf-8") if isinstance(message, bytes) else message
print(message, end="", flush=True)
if "TERMINATE" in message:
print()
print(" - Received TERMINATE message. Exiting.", flush=True)
break

_backend/tools.py Normal file
@@ -0,0 +1,86 @@
import ast
import requests
def retrieval_from_knowledge_base(
query: str,
topk: int = 3
) -> str:
"""
Retrieval for knowledge from the knowledge base based on the specified query and returns the topk results.
Parameters:
query (str): The query for knowledge retrieval.
topk (int): The number of top results to return, default is 3.
Returns:
str: The result of the knowledge retrieval in JSON format.
"""
url = 'http://100.85.52.31:7080/v1/chat-messages'
headers = {
'Authorization': f'Bearer app-uJgo3TQKcS1O9PMCDHko71Fp',
'Content-Type': 'application/json'
}
data = {
"inputs": {"topK": topk},
"query": query,
"response_mode": "blocking",
"user": "tangger",
"files": []
}
response = requests.post(url, headers=headers, json=data)
if response.status_code == 524:
print("Server is not responding. Please try again later. Maybe GPU was down in the container.")
return None
try:
result = response.json()
except ValueError:
return [{"error": "Response is not in JSON format"}]
useful_results = []
try:
answer = ast.literal_eval(result.get("answer", "[]")) # parse the literal list safely instead of eval
for item in answer:
metadata = item.get("metadata", {})
useful_info = {
"id": metadata.get("document_id"),
"title": item.get("title"),
"content": item.get("content"),
"metadata": None,
"embedding": None,
"score": metadata.get("score")
}
useful_results.append(useful_info)
except Exception as e:
return [{"error": f"Error processing result: {e}", "status": "TERMINATE"}]
if useful_results == []:
useful_results = "NULL"
return str(useful_results)
def search_from_oqmd_by_composition(composition: str) -> str:
"""
Search materials from OQMD database by chemical composition.
Args:
composition (str): Chemical composition string. Example: "CsPbBr3"
Returns:
str: JSON string containing material data or error message
"""
# build the request parameters
param = {
'composition': composition
}
tool_endpoint = "http://100.84.94.73:8020"
try:
# send the request to the /oqmd/search route
response = requests.get(
tool_endpoint + "/oqmd/search",
params=param
)
response.raise_for_status()
return str(response.json()['data'])
except requests.exceptions.RequestException as e:
return f"Error: {str(e)}"

@@ -1,26 +0,0 @@
"""
URL configuration for matagent project.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path('matagent/login', views.UserView.as_view(), name='login'),
path('matagent/model', views.ModelView.as_view(), name='model'),
]

@@ -1,460 +0,0 @@
import ast
import requests
import json
import yaml
from typing_extensions import Annotated, List
from autogen.coding.func_with_reqs import with_requirements
# load the YAML configuration file
def load_agent_configs(config_path):
with open(config_path, 'r') as file:
return yaml.safe_load(file)
# Define the termination message function
def termination_msg(x):
return isinstance(x, dict) and "TERMINATE" == str(x.get("content", ""))[-9:].upper()
@with_requirements(python_packages=["requests"], global_imports=["requests", "ast"])
def retrieval_from_knowledge_base(
query: str,
topk: int = 3
) -> str:
"""
Retrieval for knowledge from the knowledge base based on the specified query and returns the topk results.
Parameters:
query (str): The query for knowledge retrieval.
topk (int): The number of top results to return, default is 3.
Returns:
str: The result of the knowledge retrieval in JSON format.
"""
url = 'http://127.0.0.1:7080/v1/chat-messages'
headers = {
'Authorization': f'Bearer app-uJgo3TQKcS1O9PMCDHko71Fp',
'Content-Type': 'application/json'
}
data = {
"inputs": {"topK": topk},
"query": query,
"response_mode": "blocking",
"user": "tangger",
"files": []
}
response = requests.post(url, headers=headers, json=data)
if response.status_code == 524:
print("Server is not responding. Please try again later. Maybe GPU was down in the container.")
return None
try:
result = response.json()
except ValueError:
return [{"error": "Response is not in JSON format"}]
useful_results = []
try:
answer = ast.literal_eval(result.get("answer", "[]")) # parse the literal list safely instead of eval
for item in answer:
metadata = item.get("metadata", {})
useful_info = {
"id": metadata.get("document_id"),
"title": item.get("title"),
"content": item.get("content"),
"metadata": None,
"embedding": None,
"score": metadata.get("score")
}
useful_results.append(useful_info)
except Exception as e:
return [{"error": f"Error processing result: {e}", "status": "TERMINATE"}]
if useful_results == []:
useful_results = "NULL"
return str(useful_results)
@with_requirements(python_packages=["graphrag", "graphrag_api"], global_imports=["graphrag", "graphrag_api"])
def retrieval_from_graphrag(query: str,) -> str:
from graphrag_api.search import SearchRunner
search_runner = SearchRunner(root_dir="/home/ubuntu/workplace/LYT/llm-agent/psk-graphrag")
result = search_runner.remove_sources(search_runner.run_local_search(query=query))
return result
@with_requirements(python_packages=["requests"], global_imports=["requests", "urllib.parse"])
def web_searcher(query: str):
from urllib.parse import quote
encoded_query = quote(query)
url = 'https://s.jina.ai/' + encoded_query
headers = {
'Authorization': 'Bearer jina_8fc99db105ed48d7ab6a76ecd117e8e9GkNoWHY3tbmIMYSwQ6GAgzmr6c7z',
'X-Return-Format': 'markdown'
}
response = requests.get(url, headers=headers)
# print(response.text)
return str(response.text)
@with_requirements(python_packages=["requests", "mp-api"], global_imports=["requests", "mp-api"])
def mp_database(api_key: str, query: str):
"""
Query the Materials Project database using an API key and a material ID.
Parameters:
api_key (str): The API key used to access the Materials Project database.
query (str): The material ID to query in the database.
Returns:
list: A list of document objects related to the material ID query.
"""
from mp_api.client import MPRester
import requests.exceptions
# Validate API key and query parameters
if not isinstance(api_key, str) or not api_key.strip():
raise ValueError("The API key must be a non-empty string.")
if not isinstance(query, str) or not query.strip():
raise ValueError("The query must be a non-empty string.")
try:
# Accessing the MPRester with provided API key
with MPRester(api_key) as mpr:
# Search for the material based on the query
docs = mpr.materials.summary.search(
material_ids=[query]
)
# Check if any documents are returned
if not docs:
return f"No results found for the given material ID: {query}"
return docs
except requests.exceptions.HTTPError as http_err:
return f"HTTP error occurred: {http_err}"
except requests.exceptions.ConnectionError as conn_err:
return f"Connection error occurred: {conn_err}"
except requests.exceptions.Timeout as timeout_err:
return f"Request timed out: {timeout_err}"
except requests.exceptions.RequestException as req_err:
return f"An error occurred while making the request: {req_err}"
except Exception as e:
return f"An unexpected error occurred: {e}"
@with_requirements(python_packages=["requests"], global_imports=["requests"])
def oqdm_database(query: str):
"""
Query the OQMD (Open Quantum Materials Database) to retrieve information about a material composition.
Parameters:
query (str): The material composition to query in the database.
Returns:
dict: A dictionary containing information related to the queried material composition.
"""
import requests
# Validate query parameter
if not isinstance(query, str) or not query.strip():
raise ValueError("The query must be a non-empty string.")
url = f"https://oqmd.org/materials/composition/{query}"
try:
# Make the request to the OQMD API
response = requests.get(url)
response.raise_for_status()
# Parse the response
data = response.json()
# Check if data is available
if not data:
return f"No results found for the given material composition: {query}"
return data
except requests.exceptions.HTTPError as http_err:
return f"HTTP error occurred: {http_err}"
except requests.exceptions.ConnectionError as conn_err:
return f"Connection error occurred: {conn_err}"
except requests.exceptions.Timeout as timeout_err:
return f"Request timed out: {timeout_err}"
except requests.exceptions.RequestException as req_err:
return f"An error occurred while making the request: {req_err}"
except Exception as e:
return f"An unexpected error occurred: {e}"
@with_requirements(python_packages=["requests"], global_imports=["requests"])
def aflow_database(query: list[str]):
"""
Query the AFLOW database to retrieve information about materials based on a list of species.
Parameters:
query (list[str]): A list of chemical species to query in the AFLOW database.
Returns:
dict: A dictionary containing information related to the queried species.
"""
import requests
# Validate query parameter
if not isinstance(query, list) or not query:
raise ValueError("The query must be a non-empty list of species.")
if not all(isinstance(species, str) and species.strip() for species in query):
raise ValueError("All species in the query must be non-empty strings.")
query_str = ",".join(query)
url = f"https://aflowlib.duke.edu/search/ui/API/aflux/?species({query_str}),$catalog(ICSD),$paging(1,1000)"
try:
# Make the request to the AFLOW API
response = requests.get(url)
response.raise_for_status()
# Parse the response
data = response.json()
# Check if data is available
if not data:
return f"No results found for the given species: {query_str}"
return data
except requests.exceptions.HTTPError as http_err:
return f"HTTP error occurred: {http_err}"
except requests.exceptions.ConnectionError as conn_err:
return f"Connection error occurred: {conn_err}"
except requests.exceptions.Timeout as timeout_err:
return f"Request timed out: {timeout_err}"
except requests.exceptions.RequestException as req_err:
return f"An error occurred while making the request: {req_err}"
except Exception as e:
return f"An unexpected error occurred: {e}"
@with_requirements(python_packages=["requests"], global_imports=["socket, json, re"])
def send_instruction_to_robot(json_data: str):
import socket
import json
import re
# 去掉可能存在的 ```json 和 ``` 标记
json_data_cleaned = re.sub(r'```json|```', '', json_data).strip()
try:
# 尝试解析清理后的JSON数据
data = json.loads(json_data_cleaned)
except json.JSONDecodeError as e:
print(f"JSON解析错误: {e}")
return
# 创建UDP套接字
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# 目标地址和端口
server_address = ('172.20.103.79', 10000)
try:
# 序列化为JSON字符串并编码为字节
json_bytes = json.dumps(data).encode('utf-8')
# 发送数据
sock.sendto(json_bytes, server_address)
print("指令发送成功")
except Exception as e:
print(f"发送数据时发生错误: {e}")
finally:
# 关闭套接字
sock.close()
@with_requirements(python_packages=["pandas"], global_imports=["pandas", "os", "glob"])
def get_uv_latest_file():
import os
import glob
# UV data cache folder path (change this to the actual folder path)
current_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
folder_path = os.path.join(current_folder, 'data/UV/')
# find all .txt files in the folder
uv_files = sorted(glob.glob(os.path.join(folder_path, '*.[Tt][Xx][Tt]')))
if not uv_files:
res = f"ERROR: no UV files found in cache folder {current_folder}"
return res
# take the latest file
latest_file = uv_files[-1]
res = f"Found the latest UV data file: {latest_file}"
return res
@with_requirements(python_packages=["pandas"], global_imports=["pandas", "os", "glob"])
def get_max_uv_wavelength_from_txt(latest_file_path: str):
import pandas as pd
import os
# check that the file exists
if not os.path.isfile(latest_file_path):
res = "ERROR: the specified file does not exist"
return res
# open and read the latest file
with open(latest_file_path, 'r') as file:
lines = file.readlines()
# find the line where the data starts
data_start_index = -1
for i, line in enumerate(lines):
if "Wavelength Scan Data Record" in line:
data_start_index = i + 2 # the data starts two lines below this line
break
if data_start_index == -1:
res = "ERROR: could not find the data record section"
return res
# parse the data and build a table
data = []
for line in lines[data_start_index:]:
parts = line.split()
if len(parts) == 7: # each row must have 7 columns
no, wavelength, abs_value, trans, energy, energy_100, energy_0 = parts
try:
data.append({
'No': int(no),
'Wavelength(nm)': float(wavelength),
'Abs': float(abs_value),
'Trans(%T)': float(trans),
'Energy': float(energy),
'Energy(100%T)': float(energy_100),
'Energy(0%T)': float(energy_0)
})
except ValueError:
print(f"Skipping unparsable line: {line}")
if not data:
res = "ERROR: no valid data was parsed"
return res
# build a DataFrame
df = pd.DataFrame(data)
# find the row with the maximum Abs value
max_abs_row = df.loc[df['Abs'].idxmax()]
# wavelength corresponding to the maximum Abs value
max_abs_wavelength = max_abs_row['Wavelength(nm)']
res = f"UV absorption peak for this experiment: {max_abs_wavelength} nm"
print(res)
return res
@with_requirements(python_packages=["pandas"], global_imports=["pandas", "os", "glob"])
def get_pl_latest_file():
import os
import glob
# PL data cache folder path (change this to the actual folder path)
current_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
folder_path = os.path.join(current_folder, 'data/PL/')
# find all .txt or .TXT files in the folder
pl_files = sorted(glob.glob(os.path.join(folder_path, '*.[Tt][Xx][Tt]')))
if not pl_files:
res = f"ERROR: no PL files found in cache folder {current_folder}"
return res
# take the latest file
latest_file = pl_files[-1]
res = f"Found the latest PL data file: {latest_file}"
# print(res)
return res
@with_requirements(python_packages=["pandas"], global_imports=["pandas", "os", "glob"])
def get_max_pl_peak_from_txt(latest_file_path: str):
import pandas as pd
import os
# check that the file exists
if not os.path.isfile(latest_file_path):
res = "ERROR: the specified file does not exist"
return res
# open and read the latest file
with open(latest_file_path, 'r') as file:
lines = file.readlines()
# find the line where 'Data Points' starts
data_start_index = -1
for i, line in enumerate(lines):
if "Data Points" in line:
data_start_index = i + 1 # the data starts on the next line
break
if data_start_index == -1:
res = "ERROR: could not find the data record section"
return res
# parse the nm and Data columns
data = []
for line in lines[data_start_index:]:
parts = line.split()
if len(parts) == 2: # each row should have 2 columns: nm and Data
try:
nm = float(parts[0])
data_value = float(parts[1])
data.append({'nm': nm, 'Data': data_value})
except ValueError:
print(f"Skipping unparsable line: {line}")
if not data:
res = "ERROR: no valid data was parsed"
return res
# build a DataFrame
df = pd.DataFrame(data)
# find the row with the maximum Data value
max_data_row = df.loc[df['Data'].idxmax()]
# nm corresponding to the maximum Data value
max_data_nm = max_data_row['nm']
res = f"PL peak position for this experiment: {max_data_nm} nm"
print(res)
return res
if __name__ == "__main__":
# res = web_searcher("How to synthesize CsPbBr3 nanocubes at room temperature?")
# print(res)
# res = retrieval_from_knowledge_base("How to synthesize CsPbBr3 nanocubes at room temperature?", 3)
# print(res)
# res = get_max_uv_wavelength_from_txt()
# print(res)
# res = get_max_pl_peak_from_txt()
# print(res)
# res = retrieval_from_graphrag("How to synthesize CsPbBr3 nanocubes at room temperature?")
# print(res)
pass

@@ -1,599 +0,0 @@
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
import requests # used to call the AutoGen API
from django.http import StreamingHttpResponse
from contextlib import asynccontextmanager
from autogen.code_utils import create_virtual_env
from autogen.coding import LocalCommandLineCodeExecutor
from autogen.agentchat.contrib.capabilities.teachability import Teachability
from autogen.agentchat.contrib.capabilities.vision_capability import VisionCapability
from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
from pathlib import Path
import websockets
import autogen
import os
from .constant import config_list, STREAM, SILENT, WORK_DIR
from .utils import load_agent_configs
from .retrieval_group import init_retrieval_group
from .generate_group import init_generate_group
from .converter_group import init_converter_group
from .executor_group import init_executor_group
from .optimize_group import init_optimize_group
import io
import sys
from contextlib import redirect_stdout
from websockets.sync.client import connect as ws_connect
from autogen.io.websockets import IOWebsockets
import json
import asyncio
from channels.generic.websocket import AsyncWebsocketConsumer
import threading

venv_context = create_virtual_env(WORK_DIR)
llm_config = {"config_list": config_list, "stream": True}
class ChatConsumer(AsyncWebsocketConsumer):
async def connect(self):
await self.accept()
async def disconnect(self, close_code):
pass
    async def receive(self, text_data):
        # Handle the incoming frontend message here
        print(f"Received message: {text_data}")
        # Kick off the WebSocket connection and processing asynchronously
        await self.handle_websocket_connection(text_data)

    async def handle_websocket_connection(self, text_data):
        # Run the synchronous server setup in a background thread
        threading.Thread(target=self.run_websocket_server, args=(text_data,), daemon=True).start()

    def run_websocket_server(self, text_data):
        """
        Start the IOWebsockets server, then connect to it and relay messages
        between the agent chat and the frontend client.
        """
        with IOWebsockets.run_server_in_thread(on_connect=self.handle_message, port=8765) as uri:
            print(f"Starting WebSocket server at {uri}")
            asyncio.run(self.connect_to_server(uri, text_data))
    async def connect_to_server(self, uri, text_data):
        async with websockets.connect(uri) as websocket:
            print(f" - Connected to server on {uri}", flush=True)
            print(" - Sending message to server.", flush=True)
            print(text_data)
            # The frontend sends a double-encoded JSON payload, hence the nested loads
            json_data = json.loads(json.loads(text_data))
            chat_id = json_data['chat_id']
            await websocket.send(json_data['message'])
            import re
            current_agent = "User"
            while True:
                message = await websocket.recv()
                message = message.decode("utf-8") if isinstance(message, bytes) else message
                print(message, end="", flush=True)
                # Strip ANSI escape sequences and autogen console noise
                cleaned_string = re.sub(r'\x1b\[[0-?]*[ -/]*[@-~]', '', message)
                cleaned_string = cleaned_string.replace('\n>>>>>>>> USING AUTO REPLY...', '').replace('\n>>>>>>>> ', '')
                if "Next speaker" in cleaned_string:
                    match = re.search(r"Next\s+speaker:\s+(\w+)", cleaned_string)
                    current_agent = match.group(1)
                else:
                    # Drop separator lines and other console chrome
                    if cleaned_string == "\n--------------------------------------------------------------------------------\n":
                        continue
                    if cleaned_string == "\n********************************************************************************\n":
                        continue
                    if cleaned_string == "Starting a new chat....\n":
                        continue
                    if cleaned_string == "\n>>>>>>>> USING AUTO REPLY...\n":
                        continue
                    match = re.findall(r"(\w+)\s*\(to\s+(\w+)\)", cleaned_string)
                    if len(match) == 1:
                        continue
                    if current_agent in ['Outer_Retrieval_Admin', 'Outer_Generate_Admin', 'Outer_Converter_Admin']:
                        current_agent = current_agent.replace('Outer_', '')
                    if current_agent in ['vector_code_executor']:
                        continue
                    # Map the speaking agent to its group label for the frontend;
                    # the default covers 'User' and any unmatched speaker
                    group_name = 'Planner'
                    if current_agent in ['vector_searcher', 'vector_code_executor', 'graphrag_searcher', 'graphrag_code_executor', 'web_searcher', 'web_summary', 'Retrieval_Admin', 'Outer_Retrieval_Admin']:
                        group_name = 'Retrieval'
                    elif current_agent in ['structure_scientist', 'property_scientist', 'application_scientist', 'synthesis_scientist', 'scheme_critic', 'Generate_Admin', 'Outer_Generate_Admin']:
                        group_name = 'Generator'
                    elif current_agent in ['scheme_converter', 'converter_critic', 'mergrid_ploter', 'scheme_code_writer', 'scheme_code_critic', 'Converter_Admin', 'Outer_Converter_Admin']:
                        group_name = 'Converter'
                    elif current_agent in ['experiment_executor', 'expriment_code_writer', 'data_collector', 'collector_code_writer', 'Outer_Executor_Admin']:
                        group_name = 'Executor'
                    elif current_agent in ['analysis_executor', 'analysis_pl_uv', 'analysis_picturer', 'Experiment_Optimizer', 'optimizer_critic', 'Outer_Analysis_Admin']:
                        group_name = 'Optimizer'
                    if 'TERMINATE' in cleaned_string or (group_name != 'Retrieval' and len(cleaned_string) > 100):
                        continue
                    content = {"group_name": group_name, "agent_name": current_agent.replace("_", " ").title(), "content": cleaned_string}
                    await self.send(text_data=json.dumps(content))
def handle_message(self, iostream: IOWebsockets):
initial_msg = iostream.input()
agent_configs = load_agent_configs(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "config/plan_group.yaml"))
user = autogen.UserProxyAgent(
name="User",
chat_messages=None,
human_input_mode="ALWAYS",
code_execution_config={
"work_dir": WORK_DIR,
"use_docker": False,
},
is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
description="User",
)
inner_retrieval_admin, outer_retrieval_agent = init_retrieval_group(WORK_DIR, venv_context)
inner_generate_admin, outer_generate_agent = init_generate_group(outer_retrieval_agent, inner_retrieval_admin)
inner_converter_admin, outer_converter_agent = init_converter_group()
inner_executor_admin, outer_executor_agent = init_executor_group(WORK_DIR, venv_context)
inner_analysis_admin, outer_analysis_agent, optimizer = init_optimize_group(WORK_DIR, venv_context)
def state_transition(last_speaker, groupchat):
messages = groupchat.messages
if last_speaker is user:
if len(messages) <= 1:
return outer_generate_agent
else:
return "auto"
elif last_speaker is outer_generate_agent:
if "synthesis" in messages[-1]["content"].lower():
return outer_converter_agent
else:
return user
elif last_speaker is outer_converter_agent:
return outer_executor_agent
elif last_speaker is outer_executor_agent:
return outer_analysis_agent
elif last_speaker is outer_analysis_agent:
return optimizer
else:
return user
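        # state_transition pins the pipeline to a fixed order:
        # User -> Generator -> Converter -> Executor -> Analysis -> Optimizer -> User.
        # The Converter step is only entered when the generated scheme mentions
        # "synthesis"; otherwise control returns to the user.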
matagent_group = autogen.GroupChat(
agents=[user, outer_generate_agent, outer_converter_agent, outer_executor_agent, outer_analysis_agent,
optimizer],
messages=[],
speaker_selection_method=state_transition,
max_round=60,
)
matagent_admin_name = "Planer"
matagent_admin = autogen.GroupChatManager(
name=matagent_admin_name,
groupchat=matagent_group,
# is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
llm_config=llm_config,
system_message=agent_configs[matagent_admin_name]['system_message'],
description=matagent_admin_name
)
outer_generate_agent.register_nested_chats(
[
{"recipient": inner_generate_admin, "max_turn": 1, "summary_method": "last_msg", "silent": SILENT},
],
trigger=matagent_admin,
#reply_func_from_nested_chats =
)
outer_converter_agent.register_nested_chats(
[
{"recipient": inner_converter_admin, "max_turn": 1, "summary_method": "last_msg", "silent": SILENT},
],
trigger=matagent_admin,
)
outer_executor_agent.register_nested_chats(
[
{"recipient": inner_executor_admin, "max_turn": 1, "summary_method": "last_msg", "silent": SILENT},
],
trigger=matagent_admin,
)
outer_analysis_agent.register_nested_chats(
[
{"recipient": inner_analysis_admin, "max_turn": 1, "summary_method": "last_msg", "silent": SILENT},
],
trigger=matagent_admin,
)
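        # Each outer agent delegates to its inner group chat: when the manager
        # addresses an outer agent, a one-turn nested chat runs inside that group
        # and its last message is returned as the outer agent's reply.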
        user.initiate_chat(
            matagent_admin,
            message=initial_msg,  # e.g. "How to prepare CsPbBr3 nanocubes at room temperature"
            cache=False
        )
return iostream
class UserView(APIView):
    def post(self, request, *args, **kwargs):
        # Read the credentials from the request
        user_name = request.data.get('user_name')
        pass_word = request.data.get('pass_word')
        # Validate against the hard-coded test account
        if user_name == 'test' and pass_word == '111111':
            return Response({"token": "token_test"}, status=status.HTTP_200_OK)
        else:
            return Response({"error": "Incorrect username or password"}, status=status.HTTP_401_UNAUTHORIZED)
class ModelView(APIView):
    def get(self, request, *args, **kwargs):
        token = request.headers.get("token", None)
        if not token:
            return Response({"error": "Please log in and try again"}, status=status.HTTP_401_UNAUTHORIZED)
        model_lists = {
            "count": "2",
            "data":
                [
                    {"model_name": "model1", "model_des": "model1"},
                    {"model_name": "model2", "model_des": "model2"}
                ]
        }
        return Response(model_lists, status=status.HTTP_200_OK)
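
# A minimal client sketch against the two views above (the /api/login/ and
# /api/models/ routes are assumptions; the actual paths live in urls.py,
# which is not part of this diff):
#
#     import requests
#     resp = requests.post("http://localhost:8000/api/login/",
#                          data={"user_name": "test", "pass_word": "111111"})
#     token = resp.json()["token"]
#     models = requests.get("http://localhost:8000/api/models/",
#                           headers={"token": token})
#     print(models.json())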

View File

@@ -1,16 +0,0 @@
"""
WSGI config for matagent project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
application = get_wsgi_application()