From 99301d7b3d20a3a3b8ce389235b56512edaffd80 Mon Sep 17 00:00:00 2001
From: Yutang Li
Date: Mon, 30 Dec 2024 19:45:48 +0800
Subject: [PATCH] Backend: add cache; frontend: update agents
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 backend/.coding/functions.py                  | 163 ++++++------------
 ...p_code_13e7e54147f77b0bfaecbf7c920ef7dd.py |  18 --
 ...p_code_2ad9eda61379dc378928cf732508c890.py |  66 -------
 ...p_code_2c3931dbbd9beafb9ec2777cebf789fe.py |  61 -------
 ...p_code_3890c47cfc390b061ca90ccfcabe61d1.py |  18 --
 ...p_code_54d6b7617799bd780ee3d5e3e5edd994.py |  14 --
 ...p_code_ba7b17ddef8656ae723efc3dbaf752c1.py |  28 ---
 backend/constant.py                           |   3 +-
 backend/converter_group.py                    |   4 +-
 backend/executor_group.py                     |  12 +-
 backend/generate_group.py                     |   4 +-
 backend/matagent_main.py                      |   4 +-
 backend/optimize_group.py                     |   4 +-
 backend/retrieval_group.py                    |   4 +-
 backend/utils.py                              |  10 +-
 backend/views.py                              |   3 -
 frontend/.env.development                     |   2 +-
 frontend/.env.prod                            |   2 +-
 frontend/src/utils/agent.ts                   |  18 +-
 19 files changed, 90 insertions(+), 348 deletions(-)
 delete mode 100644 backend/.coding/tmp_code_13e7e54147f77b0bfaecbf7c920ef7dd.py
 delete mode 100644 backend/.coding/tmp_code_2ad9eda61379dc378928cf732508c890.py
 delete mode 100644 backend/.coding/tmp_code_2c3931dbbd9beafb9ec2777cebf789fe.py
 delete mode 100644 backend/.coding/tmp_code_3890c47cfc390b061ca90ccfcabe61d1.py
 delete mode 100644 backend/.coding/tmp_code_54d6b7617799bd780ee3d5e3e5edd994.py
 delete mode 100644 backend/.coding/tmp_code_ba7b17ddef8656ae723efc3dbaf752c1.py

diff --git a/backend/.coding/functions.py b/backend/.coding/functions.py
index 4e1558e..cf83d19 100644
--- a/backend/.coding/functions.py
+++ b/backend/.coding/functions.py
@@ -1,116 +1,61 @@
-import os
-import pandas
-import glob
+import requests
 
-def get_max_uv_wavelength_from_txt(latest_file_path: str):
-    import pandas as pd
-    import os
-    # Check that the file exists
-    if not os.path.isfile(latest_file_path):
-        res = "ERROR: the specified file does not exist"
-        return res
+def retrieval_from_knowledge_base(
+    query: str,
+    topk: int
+    ) -> str:
+    """
+    Retrieves knowledge from the knowledge base based on the specified query and returns the topk results.
+
+    Parameters:
+        query (str): The query for knowledge retrieval.
+        topk (int): The number of top results to return, default is 3.
+
+    Returns:
+        str: The result of the knowledge retrieval in JSON format.
+    """
+    url = 'http://127.0.0.1:7080/v1/chat-messages'
+    headers = {
+        'Authorization': f'Bearer app-uJgo3TQKcS1O9PMCDHko71Fp',
+        'Content-Type': 'application/json'
+    }
+    data = {
+        "inputs": {"topK": topk},
+        "query": query,
+        "response_mode": "blocking",
+        "user": "tangger",
+        "files": []
+    }
 
-    # Open and read the latest file
-    with open(latest_file_path, 'r') as file:
-        lines = file.readlines()
+    response = requests.post(url, headers=headers, json=data)
 
-    # Find the line where the data section starts
-    data_start_index = -1
-    for i, line in enumerate(lines):
-        if "Wavelength Scan Data Record" in line:
-            data_start_index = i + 2  # the data starts two lines below this one
-            break
+    if response.status_code == 524:
+        print("Server is not responding. Please try again later. Maybe GPU was down in the container.")
+        return None
 
-    if data_start_index == -1:
-        res = "ERROR: could not find the data record section"
-        return res
+    try:
+        result = response.json()
+    except ValueError:
+        return [{"error": "Response is not in JSON format"}]
 
-    # Parse the data and build the table
-    data = []
-    for line in lines[data_start_index:]:
-        parts = line.split()
-        if len(parts) == 7:  # make sure each row has 7 columns
-            no, wavelength, abs_value, trans, energy, energy_100, energy_0 = parts
-            try:
-                data.append({
-                    'No': int(no),
-                    'Wavelength(nm)': float(wavelength),
-                    'Abs': float(abs_value),
-                    'Trans(%T)': float(trans),
-                    'Energy': float(energy),
-                    'Energy(100%T)': float(energy_100),
-                    'Energy(0%T)': float(energy_0)
-                })
-            except ValueError:
-                print(f"Skipping unparsable line: {line}")
-
-    if not data:
-        res = "ERROR: no valid data was parsed"
-        return res
-
-    # Build the DataFrame
-    df = pd.DataFrame(data)
-
-    # Find the row with the maximum Abs value
-    max_abs_row = df.loc[df['Abs'].idxmax()]
-
-    # Get the wavelength corresponding to the maximum Abs value
-    max_abs_wavelength = max_abs_row['Wavelength(nm)']
-    res = f"The UV wavelength of this experiment is: {max_abs_wavelength} nm"
-    print(res)
-    return res
-
-
-def get_max_pl_peak_from_txt(latest_file_path: str):
-    import pandas as pd
-    import os
-    # Check that the file exists
-    if not os.path.isfile(latest_file_path):
-        res = "ERROR: the specified file does not exist"
-        return res
-
-    # Open and read the latest file
-    with open(latest_file_path, 'r') as file:
-        lines = file.readlines()
-
-    # Find the line where the 'Data Points' section starts
-    data_start_index = -1
-    for i, line in enumerate(lines):
-        if "Data Points" in line:
-            data_start_index = i + 1  # the data starts on the next line
-            break
-
-    if data_start_index == -1:
-        res = "ERROR: could not find the data record section"
-        return res
-
-    # Parse the nm and Data columns
-    data = []
-    for line in lines[data_start_index:]:
-        parts = line.split()
-        if len(parts) == 2:  # each row should have 2 columns: nm and Data
-            try:
-                nm = float(parts[0])
-                data_value = float(parts[1])
-                data.append({'nm': nm, 'Data': data_value})
-            except ValueError:
-                print(f"Skipping unparsable line: {line}")
-
-    if not data:
-        res = "ERROR: no valid data was parsed"
-        return res
-
-    # Build the DataFrame
-    df = pd.DataFrame(data)
-
-    # Find the row with the maximum Data value
-    max_data_row = df.loc[df['Data'].idxmax()]
-
-    # Get the nm corresponding to the maximum Data value
-    max_data_nm = max_data_row['nm']
-
-    res = f"The PL peak of this experiment is at: {max_data_nm} nm"
-    print(res)
-    return res
+
+    useful_results = []
+    try:
+        answer = eval(result.get("answer", "[]"))
+        for item in answer:
+            metadata = item.get("metadata", {})
+            useful_info = {
+                "id": metadata.get("document_id"),
+                "title": item.get("title"),
+                "content": item.get("content"),
+                "metadata": None,
+                "embedding": None,
+                "score": metadata.get("score")
+            }
+            useful_results.append(useful_info)
+    except Exception as e:
+        return [{"error": f"Error processing result: {e}", "status": "TERMINATE"}]
+    if useful_results == []:
+        useful_results = "NULL"
+    return str(useful_results)
diff --git a/backend/.coding/tmp_code_13e7e54147f77b0bfaecbf7c920ef7dd.py b/backend/.coding/tmp_code_13e7e54147f77b0bfaecbf7c920ef7dd.py
deleted file mode 100644
index b737616..0000000
--- a/backend/.coding/tmp_code_13e7e54147f77b0bfaecbf7c920ef7dd.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import urllib.parse
-import requests
-
-def web_searcher(query: str):
-    from urllib.parse import quote
-    encoded_query = quote(query)
-    url = 'https://s.jina.ai/' + encoded_query
-    headers = {
-        'Authorization': 'Bearer jina_8fc99db105ed48d7ab6a76ecd117e8e9GkNoWHY3tbmIMYSwQ6GAgzmr6c7z',
-        'X-Return-Format': 'markdown'
-    }
-
-    response = requests.get(url, headers=headers)
-
-    # print(response.text)
-    return str(response.text)
-
-
diff --git a/backend/.coding/tmp_code_2ad9eda61379dc378928cf732508c890.py b/backend/.coding/tmp_code_2ad9eda61379dc378928cf732508c890.py
deleted file mode 100644
index 771c16a..0000000
--- a/backend/.coding/tmp_code_2ad9eda61379dc378928cf732508c890.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import requests
-
-def retrieval_from_knowledge_base(
-    query: str,
-    topk: int
-    ) -> str:
-    """
-    Retrieval for knowledge from the knowledge base based on the specified query and returns the topk results.
-
-    Parameters:
-        query (str): The query for knowledge retrieval.
-        topk (int): The number of top results to return, default is 3.
-
-    Returns:
-        str: The result of the knowledge retrieval in JSON format.
-    """
-    # url = 'https://dify.siat-mic.com/v1/chat-messages'
-    # headers = {
-    #     'Authorization': f'Bearer app-uJgo3TQKcS1O9PMCDHko71Fp',
-    #     'Content-Type': 'application/json'
-    # }
-    url = f'{DIFY_BASE_URL}/chat-messages'
-    headers = {
-        'Authorization': f'{DIFY_API_KEY}',
-        'Content-Type': 'application/json'
-    }
-    data = {
-        "inputs": {"topK": topk},
-        "query": query,
-        "response_mode": "blocking",
-        "user": "tangger",
-        "files": []
-    }
-
-    response = requests.post(url, headers=headers, json=data)
-
-    if response.status_code == 524:
-        print("Server is not responding. Please try again later. Maybe GPU was down in the container.")
-        return None
-
-    try:
-        result = response.json()
-    except ValueError:
-        return [{"error": "Response is not in JSON format"}]
-
-    useful_results = []
-    try:
-        answer = eval(result.get("answer", "[]"))
-        for item in answer:
-            metadata = item.get("metadata", {})
-            useful_info = {
-                "id": metadata.get("document_id"),
-                "title": item.get("title"),
-                "content": item.get("content"),
-                "metadata": None,
-                "embedding": None,
-                "score": metadata.get("score")
-            }
-            useful_results.append(useful_info)
-    except Exception as e:
-        return [{"error": f"Error processing result: {e}", "status": "TERMINATE"}]
-    if useful_results == []:
-        useful_results = "NULL"
-    return str(useful_results)
-
-
diff --git a/backend/.coding/tmp_code_2c3931dbbd9beafb9ec2777cebf789fe.py b/backend/.coding/tmp_code_2c3931dbbd9beafb9ec2777cebf789fe.py
deleted file mode 100644
index ea33d76..0000000
--- a/backend/.coding/tmp_code_2c3931dbbd9beafb9ec2777cebf789fe.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import requests
-
-def retrieval_from_knowledge_base(
-    query: str,
-    topk: int
-    ) -> str:
-    """
-    Retrieval for knowledge from the knowledge base based on the specified query and returns the topk results.
-
-    Parameters:
-        query (str): The query for knowledge retrieval.
-        topk (int): The number of top results to return, default is 3.
-
-    Returns:
-        str: The result of the knowledge retrieval in JSON format.
-    """
-    url = 'https://dify.siat-mic.com/v1/chat-messages'
-    headers = {
-        'Authorization': f'Bearer app-uJgo3TQKcS1O9PMCDHko71Fp',
-        'Content-Type': 'application/json'
-    }
-    data = {
-        "inputs": {"topK": topk},
-        "query": query,
-        "response_mode": "blocking",
-        "user": "tangger",
-        "files": []
-    }
-
-    response = requests.post(url, headers=headers, json=data)
-
-    if response.status_code == 524:
-        print("Server is not responding. Please try again later. Maybe GPU was down in the container.")
-        return None
-
-    try:
-        result = response.json()
-    except ValueError:
-        return [{"error": "Response is not in JSON format"}]
-
-    useful_results = []
-    try:
-        answer = eval(result.get("answer", "[]"))
-        for item in answer:
-            metadata = item.get("metadata", {})
-            useful_info = {
-                "id": metadata.get("document_id"),
-                "title": item.get("title"),
-                "content": item.get("content"),
-                "metadata": None,
-                "embedding": None,
-                "score": metadata.get("score")
-            }
-            useful_results.append(useful_info)
-    except Exception as e:
-        return [{"error": f"Error processing result: {e}", "status": "TERMINATE"}]
-    if useful_results == []:
-        useful_results = "NULL"
-    return str(useful_results)
-
-
diff --git a/backend/.coding/tmp_code_3890c47cfc390b061ca90ccfcabe61d1.py b/backend/.coding/tmp_code_3890c47cfc390b061ca90ccfcabe61d1.py
deleted file mode 100644
index f303801..0000000
--- a/backend/.coding/tmp_code_3890c47cfc390b061ca90ccfcabe61d1.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import requests
-import urllib.parse
-
-def web_searcher(query: str):
-    from urllib.parse import quote
-    encoded_query = quote(query)
-    url = 'https://s.jina.ai/' + encoded_query
-    headers = {
-        'Authorization': 'Bearer jina_8fc99db105ed48d7ab6a76ecd117e8e9GkNoWHY3tbmIMYSwQ6GAgzmr6c7z',
-        'X-Return-Format': 'markdown'
-    }
-
-    response = requests.get(url, headers=headers)
-
-    # print(response.text)
-    return str(response.text)
-
-
diff --git a/backend/.coding/tmp_code_54d6b7617799bd780ee3d5e3e5edd994.py b/backend/.coding/tmp_code_54d6b7617799bd780ee3d5e3e5edd994.py
deleted file mode 100644
index e7cbd7c..0000000
--- a/backend/.coding/tmp_code_54d6b7617799bd780ee3d5e3e5edd994.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from functions import retrieval_from_knowledge_base
-
-# Define the query for the task
-query = "How to prepare CsPbBr3 nanocubes at room temperature"
-
-# Specify the number of relevant top results to return
-top_k_results = 3
-
-# Retrieve information from the knowledge base
-results = retrieval_from_knowledge_base(query, top_k_results)
-
-# Display the retrieved results
-print("Relevant Information:")
-print(results)
\ No newline at end of file
diff --git a/backend/.coding/tmp_code_ba7b17ddef8656ae723efc3dbaf752c1.py b/backend/.coding/tmp_code_ba7b17ddef8656ae723efc3dbaf752c1.py
deleted file mode 100644
index 7d4776c..0000000
--- a/backend/.coding/tmp_code_ba7b17ddef8656ae723efc3dbaf752c1.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from functions import retrieval_from_knowledge_base
-
-def retrieve_nanocube_preparation(query, topk=3):
-    """
-    Retrieves information on the preparation of CsPbBr3 nanocubes at room temperature.
-
-    Parameters:
-        query (str): The query string about the preparation process.
-        topk (int): The number of top results to return.
-
-    Returns:
-        str: The relevant information in JSON format.
-    """
-    try:
-        # Execute retrieval function with the query and topk value
-        result = retrieval_from_knowledge_base(query, topk)
-        return result
-    except Exception as e:
-        return f"An error occurred while retrieving information: {str(e)}"
-
-# Define the query
-query = "Room-temperature synthesis of CsPbBr3 nanocubes"
-
-# Perform retrieval
-retrieval_result = retrieve_nanocube_preparation(query)
-
-# Print the result
-print(retrieval_result)
\ No newline at end of file
diff --git a/backend/constant.py b/backend/constant.py
index 985d384..a152683 100644
--- a/backend/constant.py
+++ b/backend/constant.py
@@ -6,7 +6,7 @@ import os
 # OPENAI_API_KEY = "sk-QQq3pi7xjy24YuY207Cc9fD9Af51479fA2B584058a7593Fa"
 OPENAI_API_KEY = "sk-urFGAQRThR6pysea0aC93bD27fA34bA69811A9254aAaD8B2"
 # OPENAI_API_KEY = "sk-FTmEt110USDrY7py99Af4a3eAbF6432f97Aa1103Ba9cCdE8"
-OPENAI_BASE_URL = "https://vip.apiyi.com/v1"
+OPENAI_BASE_URL = "http://8.218.238.241:17935/v1"
 # OPENAI_BASE_URL = "https://pro.aiskt.com/v1"
 
 MODEL = "gpt-4o"
@@ -16,6 +16,7 @@ config_list = [{"model": MODEL, "api_key": OPENAI_API_KEY, "base_url": OPENAI_BA
 
 SILENT = False  # turn off the nested agents' output
 STREAM = True  # stream on console
+CACHE = None  # None turns the cache off; 41 is the default seed that turns it on
 
 current_path = os.path.dirname(os.path.abspath(__file__))
 WORK_DIR = Path(current_path, ".coding")
diff --git a/backend/converter_group.py b/backend/converter_group.py
index 166564b..59013fa 100644
--- a/backend/converter_group.py
+++ b/backend/converter_group.py
@@ -8,13 +8,13 @@ import autogen
 from .utils import load_agent_configs
 import os
 from .retrieval_group import init_retrieval_group
-from .constant import config_list, STREAM, SILENT, WORK_DIR
+from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE
 
 agent_configs = load_agent_configs(os.path.join(os.path.dirname(os.path.abspath(__file__)), "config/converter_group.yaml"))
 
 venv_context = create_virtual_env(WORK_DIR)
 
-llm_config = {"config_list": config_list, "stream": STREAM}
+llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}
 
 
 def init_converter_group():
diff --git a/backend/executor_group.py b/backend/executor_group.py
index 9318485..9e2652f 100644
--- a/backend/executor_group.py
+++ b/backend/executor_group.py
@@ -1,7 +1,8 @@
 from autogen.coding import LocalCommandLineCodeExecutor
 import autogen
 from .utils import send_instruction_to_robot, get_uv_latest_file, get_pl_latest_file
-from .constant import config_list
+from .constant import config_list, STREAM, CACHE
+llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}
 
 
 def init_executor_group(work_dir, venv_context):
@@ -47,7 +48,8 @@
         name="expriment_code_writer",
         human_input_mode="NEVER",
         system_message=expriment_coder_system_message,
-        llm_config={"config_list": config_list},
+        # llm_config={"config_list": config_list},
+        llm_config=llm_config,
         # is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
         # human_input_mode="TERMINATE",
         description="expriment_code_writer",
@@ -77,7 +79,8 @@
         name="collector_code_writer",
         human_input_mode="NEVER",
         system_message=collector_coder_system_message,
-        llm_config={"config_list": config_list},
+        # llm_config={"config_list": config_list},
+        llm_config=llm_config,
        # is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
         # human_input_mode="TERMINATE",
         description="collector_code_writer",
@@ -96,7 +99,8 @@
         name="Inner_Executor_Admin",
         groupchat=executor_group,
         # is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
-        llm_config={"config_list": config_list},
+        # llm_config={"config_list": config_list},
+        llm_config=llm_config,
         human_input_mode="NEVER",
         description="Inner_Executor_Admin",
     )
diff --git a/backend/generate_group.py b/backend/generate_group.py
index bcf14f9..03e4c5c 100644
--- a/backend/generate_group.py
+++ b/backend/generate_group.py
@@ -8,13 +8,13 @@ import autogen
 from .utils import load_agent_configs
 import os
 from .retrieval_group import init_retrieval_group
-from .constant import config_list, STREAM, SILENT, WORK_DIR
+from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE
 
 agent_configs = load_agent_configs(os.path.join(os.path.dirname(os.path.abspath(__file__)), "config/generate_group.yaml"))
 
 venv_context = create_virtual_env(WORK_DIR)
 
-llm_config = {"config_list": config_list, "stream": STREAM}
+llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}
 
 
 def init_generate_group(outer_retrieval_agent, inner_retrieval_admin):
diff --git a/backend/matagent_main.py b/backend/matagent_main.py
index 1e63f55..a896a12 100644
--- a/backend/matagent_main.py
+++ b/backend/matagent_main.py
@@ -6,7 +6,7 @@ from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalCon
 from pathlib import Path
 import autogen
 import os
-from .constant import config_list, STREAM, SILENT, WORK_DIR
+from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE
 from .utils import load_agent_configs
 from .retrieval_group import init_retrieval_group
 from .generate_group import init_generate_group
@@ -16,7 +16,7 @@ from .optimize_group import init_optimize_group
 
 venv_context = create_virtual_env(WORK_DIR)
 
-llm_config = {"config_list": config_list, "stream": STREAM}
+llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}
 
 
 def main():
diff --git a/backend/optimize_group.py b/backend/optimize_group.py
index d86ba96..5ba88ee 100644
--- a/backend/optimize_group.py
+++ b/backend/optimize_group.py
@@ -2,9 +2,9 @@ from autogen.coding import LocalCommandLineCodeExecutor
 from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
 import autogen
 from .utils import get_max_uv_wavelength_from_txt, get_max_pl_peak_from_txt
-from .constant import config_list, STREAM, SILENT, WORK_DIR
+from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE
 
-llm_config = {"config_list": config_list, "stream": STREAM}
+llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}
 
 def init_optimize_group(work_dir, venv_context):
     optimize_env = LocalCommandLineCodeExecutor(
diff --git a/backend/retrieval_group.py b/backend/retrieval_group.py
index 00bdb87..c4a85b1 100644
--- a/backend/retrieval_group.py
+++ b/backend/retrieval_group.py
@@ -1,13 +1,13 @@
 from autogen.coding import LocalCommandLineCodeExecutor
 import autogen
 import os
-from .constant import config_list, STREAM, SILENT, WORK_DIR
+from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE
 from .utils import load_agent_configs,retrieval_from_knowledge_base, web_searcher, retrieval_from_graphrag
 
 
 agent_configs = load_agent_configs(os.path.join(os.path.dirname(os.path.abspath(__file__)), "config/retrieval_group.yaml"))
 
-llm_config = {"config_list": config_list, "stream": STREAM}
+llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}
 
 
 def init_retrieval_group(work_dir, venv_context):
diff --git a/backend/utils.py b/backend/utils.py
index 850ce27..5f4ffdf 100644
--- a/backend/utils.py
+++ b/backend/utils.py
@@ -283,14 +283,14 @@ def get_uv_latest_file():
     import os
     import glob
     # Path of the UV data cache folder (change this to the actual folder path)
-    current_folder = os.path.dirname(os.path.abspath(__file__))
+    current_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
     folder_path = os.path.join(current_folder, 'data/UV/')
 
     # Find all .txt files in the folder
     uv_files = sorted(glob.glob(os.path.join(folder_path, '*.[Tt][Xx][Tt]')))
 
     if not uv_files:
-        res = "ERROR: no UV files were found in the cache folder"
+        res = f"ERROR: no UV files were found in the cache folder {current_folder}"
         return res
 
     # Find the most recently modified file
@@ -366,14 +366,14 @@ def get_pl_latest_file():
     import glob
 
     # Path of the PL data cache folder (change this to the actual folder path)
-    current_folder = os.getcwd()
-    folder_path = os.path.join(current_folder, 'data/UV/')
+    current_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+    folder_path = os.path.join(current_folder, 'data/PL/')
 
     # Find all .txt or .TXT files in the folder
     pl_files = sorted(glob.glob(os.path.join(folder_path, '*.[Tt][Xx][Tt]')))
 
     if not pl_files:
-        res = "ERROR: no PL files were found in the cache folder"
+        res = f"ERROR: no PL files were found in the cache folder {current_folder}"
         return res
 
     # Find the most recently modified file
diff --git a/backend/views.py b/backend/views.py
index 6dba80f..9c46f68 100644
--- a/backend/views.py
+++ b/backend/views.py
@@ -86,9 +86,6 @@ class ChatConsumer(AsyncWebsocketConsumer):
             #         break
 
 
-
-
-
     def handle_message(self, iostream: IOWebsockets):
         initial_msg = iostream.input()
         agent_configs = load_agent_configs(
diff --git a/frontend/.env.development b/frontend/.env.development
index a491a21..b047a66 100644
--- a/frontend/.env.development
+++ b/frontend/.env.development
@@ -3,4 +3,4 @@ VITE_BASE_URL = /
 VITE_IS_REQUEST_PROXY = true
 VITE_API_URL = http://159.75.91.126
 VITE_API_URL_PREFIX = /matagent
-VITE_WB_BASE_URL = ws://159.75.91.126:8000/matagent/chat
\ No newline at end of file
+VITE_WB_BASE_URL = ws://47.121.220.134:8000/matagent/chat
\ No newline at end of file
diff --git a/frontend/.env.prod b/frontend/.env.prod
index a491a21..b047a66 100644
--- a/frontend/.env.prod
+++ b/frontend/.env.prod
@@ -3,4 +3,4 @@ VITE_BASE_URL = /
 VITE_IS_REQUEST_PROXY = true
 VITE_API_URL = http://159.75.91.126
 VITE_API_URL_PREFIX = /matagent
-VITE_WB_BASE_URL = ws://159.75.91.126:8000/matagent/chat
\ No newline at end of file
+VITE_WB_BASE_URL = ws://47.121.220.134:8000/matagent/chat
\ No newline at end of file
diff --git a/frontend/src/utils/agent.ts b/frontend/src/utils/agent.ts
index b1bc687..f546056 100644
--- a/frontend/src/utils/agent.ts
+++ b/frontend/src/utils/agent.ts
@@ -34,16 +34,16 @@ const Agent: ViolationBOType = reactive({
   Analysis_Group_Admin: "Optimize: Analysis_Group_Admin",
 
   //
-  Outer_Retrieval_Admin: "Retrieval: Outer_Retrieval_Admin",
-  Outer_Converter_Admin: "Converter: Outer_Converter_Admin",
-  Outer_Executor_Admin: "Executor: Outer_Executor_Admin",
+  // Outer_Retrieval_Admin: "Retrieval: Outer_Retrieval_Admin",
+  // Outer_Converter_Admin: "Converter: Outer_Converter_Admin",
+  // Outer_Executor_Admin: "Executor: Outer_Executor_Admin",
   experiment_executor: "Executor: experiment_executor",
-  Outer_Generate_Admin: "Generate: Outer_Generate_Admin",
-  Outer_Analysis_Admin: "Optimize: Outer_Analysis_Admin",
-  vector_code_executor: "Retrieval: vector_code_executor",
-  graphrag_code_executor: "Retrieval: graphrag_code_executor",
-  web_code_executor: "Retrieval: web_code_executor",
-  web_summary: "Retrieval: web_summary",
+  // Outer_Generate_Admin: "Generate: Outer_Generate_Admin",
+  // Outer_Analysis_Admin: "Optimize: Outer_Analysis_Admin",
+  // vector_code_executor: "Retrieval: vector_code_executor",
+  // graphrag_code_executor: "Retrieval: graphrag_code_executor",
+  // web_code_executor: "Retrieval: web_code_executor",
+  // web_summary: "Retrieval: web_summary",
 
   // 5
   Inner_Retrieval_Admin: "Retrieval: Inner_Retrieval_Admin",