Merge branch 'main' of https://git.siat-mic.com/tangger/matagent
@@ -1,116 +1,61 @@
-import os
-import pandas
-import glob
-
-
-def get_max_uv_wavelength_from_txt(latest_file_path: str):
-    import pandas as pd
-    import os
-    # Check that the file exists
-    if not os.path.isfile(latest_file_path):
-        res = "ERROR: the specified file does not exist"
-        return res
-
-    # Open and read the latest file
-    with open(latest_file_path, 'r') as file:
-        lines = file.readlines()
-
-    # Find the line where the data section starts
-    data_start_index = -1
-    for i, line in enumerate(lines):
-        if "Wavelength Scan Data Record" in line:
-            data_start_index = i + 2  # the data starts two lines below this one
-            break
-
-    if data_start_index == -1:
-        res = "ERROR: could not find the data record section"
-        return res
-
-    # Parse the data and build a table
-    data = []
-    for line in lines[data_start_index:]:
-        parts = line.split()
-        if len(parts) == 7:  # ensure each row has 7 columns
-            no, wavelength, abs_value, trans, energy, energy_100, energy_0 = parts
-            try:
-                data.append({
-                    'No': int(no),
-                    'Wavelength(nm)': float(wavelength),
-                    'Abs': float(abs_value),
-                    'Trans(%T)': float(trans),
-                    'Energy': float(energy),
-                    'Energy(100%T)': float(energy_100),
-                    'Energy(0%T)': float(energy_0)
-                })
-            except ValueError:
-                print(f"Skipping unparsable line: {line}")
-
-    if not data:
-        res = "ERROR: no valid data was parsed"
-        return res
-
-    # Build the DataFrame
-    df = pd.DataFrame(data)
-
-    # Find the row with the largest Abs value
-    max_abs_row = df.loc[df['Abs'].idxmax()]
-
-    # Get the wavelength corresponding to the largest Abs value
-    max_abs_wavelength = max_abs_row['Wavelength(nm)']
-    res = f"The UV wavelength for this experiment is: {max_abs_wavelength} nm"
-    print(res)
-    return res
-
-
-def get_max_pl_peak_from_txt(latest_file_path: str):
-    import pandas as pd
-    import os
-    # Check that the file exists
-    if not os.path.isfile(latest_file_path):
-        res = "ERROR: the specified file does not exist"
-        return res
-
-    # Open and read the latest file
-    with open(latest_file_path, 'r') as file:
-        lines = file.readlines()
-
-    # Find the line where 'Data Points' starts
-    data_start_index = -1
-    for i, line in enumerate(lines):
-        if "Data Points" in line:
-            data_start_index = i + 1  # the data starts on the next line
-            break
-
-    if data_start_index == -1:
-        res = "ERROR: could not find the data record section"
-        return res
-
-    # Parse the nm and Data columns
-    data = []
-    for line in lines[data_start_index:]:
-        parts = line.split()
-        if len(parts) == 2:  # each row should have 2 columns: nm and Data
-            try:
-                nm = float(parts[0])
-                data_value = float(parts[1])
-                data.append({'nm': nm, 'Data': data_value})
-            except ValueError:
-                print(f"Skipping unparsable line: {line}")
-
-    if not data:
-        res = "ERROR: no valid data was parsed"
-        return res
-
-    # Build the DataFrame
-    df = pd.DataFrame(data)
-
-    # Find the row with the largest Data value
-    max_data_row = df.loc[df['Data'].idxmax()]
-
-    # Get the nm value corresponding to the largest Data value
-    max_data_nm = max_data_row['nm']
-
-    res = f"The PL peak position for this experiment is: {max_data_nm} nm"
-    print(res)
-    return res
+import requests
+
+
+def retrieval_from_knowledge_base(
+    query: str,
+    topk: int
+) -> str:
+    """
+    Retrieves knowledge from the knowledge base based on the specified query and returns the topk results.
+
+    Parameters:
+    query (str): The query for knowledge retrieval.
+    topk (int): The number of top results to return, default is 3.
+
+    Returns:
+    str: The result of the knowledge retrieval in JSON format.
+    """
+    url = 'http://127.0.0.1:7080/v1/chat-messages'
+    headers = {
+        'Authorization': f'Bearer app-uJgo3TQKcS1O9PMCDHko71Fp',
+        'Content-Type': 'application/json'
+    }
+    data = {
+        "inputs": {"topK": topk},
+        "query": query,
+        "response_mode": "blocking",
+        "user": "tangger",
+        "files": []
+    }
+
+    response = requests.post(url, headers=headers, json=data)
+
+    if response.status_code == 524:
+        print("Server is not responding. Please try again later. Maybe the GPU was down in the container.")
+        return None
+
+    try:
+        result = response.json()
+    except ValueError:
+        return [{"error": "Response is not in JSON format"}]
+
+    useful_results = []
+    try:
+        answer = eval(result.get("answer", "[]"))
+        for item in answer:
+            metadata = item.get("metadata", {})
+            useful_info = {
+                "id": metadata.get("document_id"),
+                "title": item.get("title"),
+                "content": item.get("content"),
+                "metadata": None,
+                "embedding": None,
+                "score": metadata.get("score")
+            }
+            useful_results.append(useful_info)
+    except Exception as e:
+        return [{"error": f"Error processing result: {e}", "status": "TERMINATE"}]
+    if useful_results == []:
+        useful_results = "NULL"
+    return str(useful_results)
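In the new file above, retrieval_from_knowledge_base deserializes the service's "answer" string with eval(), which will execute arbitrary code if the knowledge-base endpoint ever returns malicious or malformed text. A safer drop-in is sketched below, assuming the answer is a JSON array or a Python-literal list; the helper name parse_answer is illustrative, not part of the repository:

    import ast
    import json

    def parse_answer(raw: str):
        # Prefer strict JSON, then fall back to Python literals; neither
        # path executes code the way eval() does.
        try:
            return json.loads(raw)
        except ValueError:
            pass
        try:
            return ast.literal_eval(raw)
        except (ValueError, SyntaxError):
            return []

    # usage inside retrieval_from_knowledge_base:
    # answer = parse_answer(result.get("answer", "[]"))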
@@ -1,18 +0,0 @@
-import urllib.parse
-import requests
-
-
-def web_searcher(query: str):
-    from urllib.parse import quote
-    encoded_query = quote(query)
-    url = 'https://s.jina.ai/' + encoded_query
-    headers = {
-        'Authorization': 'Bearer jina_8fc99db105ed48d7ab6a76ecd117e8e9GkNoWHY3tbmIMYSwQ6GAgzmr6c7z',
-        'X-Return-Format': 'markdown'
-    }
-
-    response = requests.get(url, headers=headers)
-
-    # print(response.text)
-    return str(response.text)
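For reference, the deleted web_searcher builds its request by percent-encoding the query and appending it to the s.jina.ai endpoint; a quick sketch of the resulting URL (the query text here is illustrative):

    from urllib.parse import quote

    query = "CsPbBr3 nanocube synthesis"
    url = 'https://s.jina.ai/' + quote(query)
    print(url)  # https://s.jina.ai/CsPbBr3%20nanocube%20synthesis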
@@ -1,66 +0,0 @@
-import requests
-
-
-def retrieval_from_knowledge_base(
-    query: str,
-    topk: int
-) -> str:
-    """
-    Retrieves knowledge from the knowledge base based on the specified query and returns the topk results.
-
-    Parameters:
-    query (str): The query for knowledge retrieval.
-    topk (int): The number of top results to return, default is 3.
-
-    Returns:
-    str: The result of the knowledge retrieval in JSON format.
-    """
-    # url = 'https://dify.siat-mic.com/v1/chat-messages'
-    # headers = {
-    #     'Authorization': f'Bearer app-uJgo3TQKcS1O9PMCDHko71Fp',
-    #     'Content-Type': 'application/json'
-    # }
-    url = f'{DIFY_BASE_URL}/chat-messages'
-    headers = {
-        'Authorization': f'{DIFY_API_KEY}',
-        'Content-Type': 'application/json'
-    }
-    data = {
-        "inputs": {"topK": topk},
-        "query": query,
-        "response_mode": "blocking",
-        "user": "tangger",
-        "files": []
-    }
-
-    response = requests.post(url, headers=headers, json=data)
-
-    if response.status_code == 524:
-        print("Server is not responding. Please try again later. Maybe the GPU was down in the container.")
-        return None
-
-    try:
-        result = response.json()
-    except ValueError:
-        return [{"error": "Response is not in JSON format"}]
-
-    useful_results = []
-    try:
-        answer = eval(result.get("answer", "[]"))
-        for item in answer:
-            metadata = item.get("metadata", {})
-            useful_info = {
-                "id": metadata.get("document_id"),
-                "title": item.get("title"),
-                "content": item.get("content"),
-                "metadata": None,
-                "embedding": None,
-                "score": metadata.get("score")
-            }
-            useful_results.append(useful_info)
-    except Exception as e:
-        return [{"error": f"Error processing result: {e}", "status": "TERMINATE"}]
-    if useful_results == []:
-        useful_results = "NULL"
-    return str(useful_results)
@@ -1,61 +0,0 @@
-import requests
-
-
-def retrieval_from_knowledge_base(
-    query: str,
-    topk: int
-) -> str:
-    """
-    Retrieves knowledge from the knowledge base based on the specified query and returns the topk results.
-
-    Parameters:
-    query (str): The query for knowledge retrieval.
-    topk (int): The number of top results to return, default is 3.
-
-    Returns:
-    str: The result of the knowledge retrieval in JSON format.
-    """
-    url = 'https://dify.siat-mic.com/v1/chat-messages'
-    headers = {
-        'Authorization': f'Bearer app-uJgo3TQKcS1O9PMCDHko71Fp',
-        'Content-Type': 'application/json'
-    }
-    data = {
-        "inputs": {"topK": topk},
-        "query": query,
-        "response_mode": "blocking",
-        "user": "tangger",
-        "files": []
-    }
-
-    response = requests.post(url, headers=headers, json=data)
-
-    if response.status_code == 524:
-        print("Server is not responding. Please try again later. Maybe the GPU was down in the container.")
-        return None
-
-    try:
-        result = response.json()
-    except ValueError:
-        return [{"error": "Response is not in JSON format"}]
-
-    useful_results = []
-    try:
-        answer = eval(result.get("answer", "[]"))
-        for item in answer:
-            metadata = item.get("metadata", {})
-            useful_info = {
-                "id": metadata.get("document_id"),
-                "title": item.get("title"),
-                "content": item.get("content"),
-                "metadata": None,
-                "embedding": None,
-                "score": metadata.get("score")
-            }
-            useful_results.append(useful_info)
-    except Exception as e:
-        return [{"error": f"Error processing result: {e}", "status": "TERMINATE"}]
-    if useful_results == []:
-        useful_results = "NULL"
-    return str(useful_results)
@@ -1,18 +0,0 @@
-import requests
-import urllib.parse
-
-
-def web_searcher(query: str):
-    from urllib.parse import quote
-    encoded_query = quote(query)
-    url = 'https://s.jina.ai/' + encoded_query
-    headers = {
-        'Authorization': 'Bearer jina_8fc99db105ed48d7ab6a76ecd117e8e9GkNoWHY3tbmIMYSwQ6GAgzmr6c7z',
-        'X-Return-Format': 'markdown'
-    }
-
-    response = requests.get(url, headers=headers)
-
-    # print(response.text)
-    return str(response.text)
@@ -1,14 +0,0 @@
-from functions import retrieval_from_knowledge_base
-
-# Define the query for the task
-query = "How to prepare CsPbBr3 nanocubes at room temperature"
-
-# Specify the number of relevant top results to return
-top_k_results = 3
-
-# Retrieve information from the knowledge base
-results = retrieval_from_knowledge_base(query, top_k_results)
-
-# Display the retrieved results
-print("Relevant Information:")
-print(results)
@@ -1,28 +0,0 @@
-from functions import retrieval_from_knowledge_base
-
-
-def retrieve_nanocube_preparation(query, topk=3):
-    """
-    Retrieves information on the preparation of CsPbBr3 nanocubes at room temperature.
-
-    Parameters:
-    query (str): The query string about the preparation process.
-    topk (int): The number of top results to return.
-
-    Returns:
-    str: The relevant information in JSON format.
-    """
-    try:
-        # Execute the retrieval function with the query and topk value
-        result = retrieval_from_knowledge_base(query, topk)
-        return result
-    except Exception as e:
-        return f"An error occurred while retrieving information: {str(e)}"
-
-
-# Define the query
-query = "Room-temperature synthesis of CsPbBr3 nanocubes"
-
-# Perform retrieval
-retrieval_result = retrieve_nanocube_preparation(query)
-
-# Print the result
-print(retrieval_result)
@@ -6,7 +6,7 @@ import os
 # OPENAI_API_KEY = "sk-QQq3pi7xjy24YuY207Cc9fD9Af51479fA2B584058a7593Fa"
 OPENAI_API_KEY = "sk-urFGAQRThR6pysea0aC93bD27fA34bA69811A9254aAaD8B2"
 # OPENAI_API_KEY = "sk-FTmEt110USDrY7py99Af4a3eAbF6432f97Aa1103Ba9cCdE8"
-OPENAI_BASE_URL = "https://vip.apiyi.com/v1"
+OPENAI_BASE_URL = "http://8.218.238.241:17935/v1"
 # OPENAI_BASE_URL = "https://pro.aiskt.com/v1"

 MODEL = "gpt-4o"
@@ -16,6 +16,7 @@ config_list = [{"model": MODEL, "api_key": OPENAI_API_KEY, "base_url": OPENAI_BA

 SILENT = False  # silence the nested agents' output
 STREAM = True  # stream on console
+CACHE = None  # None disables caching; 41 is the default value that enables it

 current_path = os.path.dirname(os.path.abspath(__file__))
 WORK_DIR = Path(current_path, ".coding")
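The new CACHE constant feeds AutoGen's cache_seed option: None disables the on-disk response cache, while any integer (41 is AutoGen's historical default) enables deterministic caching under that seed. Every group module changed below repeats the same wiring, roughly this sketch (a summary of the recurring pattern, not a new file):

    from .constant import config_list, STREAM, CACHE

    # cache_seed=None turns the LLM response cache off; an int turns it on.
    llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}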
@@ -8,13 +8,13 @@ import autogen
 from .utils import load_agent_configs
 import os
 from .retrieval_group import init_retrieval_group
-from .constant import config_list, STREAM, SILENT, WORK_DIR
+from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE


 agent_configs = load_agent_configs(os.path.join(os.path.dirname(os.path.abspath(__file__)), "config/converter_group.yaml"))
 venv_context = create_virtual_env(WORK_DIR)
-llm_config = {"config_list": config_list, "stream": STREAM}
+llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}


 def init_converter_group():
@@ -1,7 +1,8 @@
 from autogen.coding import LocalCommandLineCodeExecutor
 import autogen
 from .utils import send_instruction_to_robot, get_uv_latest_file, get_pl_latest_file
-from .constant import config_list
+from .constant import config_list, STREAM, CACHE
+llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}


 def init_executor_group(work_dir, venv_context):
@@ -47,7 +48,8 @@ def init_executor_group(work_dir, venv_context):
         name="expriment_code_writer",
         human_input_mode="NEVER",
         system_message=expriment_coder_system_message,
-        llm_config={"config_list": config_list},
+        # llm_config={"config_list": config_list},
+        llm_config=llm_config,
         # is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
         # human_input_mode="TERMINATE",
         description="expriment_code_writer",
@@ -77,7 +79,8 @@ def init_executor_group(work_dir, venv_context):
         name="collector_code_writer",
         human_input_mode="NEVER",
         system_message=collector_coder_system_message,
-        llm_config={"config_list": config_list},
+        # llm_config={"config_list": config_list},
+        llm_config=llm_config,
         # is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
         # human_input_mode="TERMINATE",
         description="collector_code_writer",
@@ -96,7 +99,8 @@ def init_executor_group(work_dir, venv_context):
         name="Inner_Executor_Admin",
         groupchat=executor_group,
         # is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
-        llm_config={"config_list": config_list},
+        # llm_config={"config_list": config_list},
+        llm_config=llm_config,
         human_input_mode="NEVER",
         description="Inner_Executor_Admin",
     )
@@ -8,13 +8,13 @@ import autogen
 from .utils import load_agent_configs
 import os
 from .retrieval_group import init_retrieval_group
-from .constant import config_list, STREAM, SILENT, WORK_DIR
+from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE


 agent_configs = load_agent_configs(os.path.join(os.path.dirname(os.path.abspath(__file__)), "config/generate_group.yaml"))
 venv_context = create_virtual_env(WORK_DIR)
-llm_config = {"config_list": config_list, "stream": STREAM}
+llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}


 def init_generate_group(outer_retrieval_agent, inner_retrieval_admin):
@@ -6,7 +6,7 @@ from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalCon
 from pathlib import Path
 import autogen
 import os
-from .constant import config_list, STREAM, SILENT, WORK_DIR
+from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE
 from .utils import load_agent_configs
 from .retrieval_group import init_retrieval_group
 from .generate_group import init_generate_group
@@ -16,7 +16,7 @@ from .optimize_group import init_optimize_group


 venv_context = create_virtual_env(WORK_DIR)
-llm_config = {"config_list": config_list, "stream": STREAM}
+llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}


 def main():
@@ -2,9 +2,9 @@ from autogen.coding import LocalCommandLineCodeExecutor
 from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
 import autogen
 from .utils import get_max_uv_wavelength_from_txt, get_max_pl_peak_from_txt
-from .constant import config_list, STREAM, SILENT, WORK_DIR
+from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE

-llm_config = {"config_list": config_list, "stream": STREAM}
+llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}

 def init_optimize_group(work_dir, venv_context):
     optimize_env = LocalCommandLineCodeExecutor(
@@ -1,13 +1,13 @@
 from autogen.coding import LocalCommandLineCodeExecutor
 import autogen
 import os
-from .constant import config_list, STREAM, SILENT, WORK_DIR
+from .constant import config_list, STREAM, SILENT, WORK_DIR, CACHE
 from .utils import load_agent_configs, retrieval_from_knowledge_base, web_searcher, retrieval_from_graphrag


 agent_configs = load_agent_configs(os.path.join(os.path.dirname(os.path.abspath(__file__)), "config/retrieval_group.yaml"))
-llm_config = {"config_list": config_list, "stream": STREAM}
+llm_config = {"config_list": config_list, "stream": STREAM, "cache_seed": CACHE}


 def init_retrieval_group(work_dir, venv_context):
@@ -283,14 +283,14 @@ def get_uv_latest_file():
     import os
     import glob
     # Path to the UV data cache folder (change this to the actual folder path)
-    current_folder = os.path.dirname(os.path.abspath(__file__))
+    current_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
     folder_path = os.path.join(current_folder, 'data/UV/')

     # Find all .txt files in the folder (case-insensitive)
     uv_files = sorted(glob.glob(os.path.join(folder_path, '*.[Tt][Xx][Tt]')))

     if not uv_files:
-        res = "ERROR: no UV files were found in the cache folder"
+        res = f"ERROR: no UV files were found in the cache folder {current_folder}"
         return res

     # Find the most recently modified file
@@ -366,14 +366,14 @@ def get_pl_latest_file():
     import glob

     # Path to the PL data cache folder (change this to the actual folder path)
-    current_folder = os.getcwd()
-    folder_path = os.path.join(current_folder, 'data/UV/')
+    current_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+    folder_path = os.path.join(current_folder, 'data/PL/')

     # Find all .txt or .TXT files in the folder
     pl_files = sorted(glob.glob(os.path.join(folder_path, '*.[Tt][Xx][Tt]')))

     if not pl_files:
-        res = "ERROR: no PL files were found in the cache folder"
+        res = f"ERROR: no PL files were found in the cache folder {current_folder}"
         return res

     # Find the most recently modified file
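Both helpers end, just past this hunk's context, by picking the most recently modified file. A typical implementation of that step looks like the sketch below (an illustration, not necessarily the repository's exact code):

    import glob
    import os

    def latest_file(folder_path: str) -> str:
        # Match .txt in any casing, then take the file with the newest mtime;
        # the empty-folder case is already handled by the checks above.
        files = glob.glob(os.path.join(folder_path, '*.[Tt][Xx][Tt]'))
        return max(files, key=os.path.getmtime)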
@@ -86,9 +86,6 @@ class ChatConsumer(AsyncWebsocketConsumer):
             # break


-
-
-
     def handle_message(self, iostream: IOWebsockets):
        initial_msg = iostream.input()
         agent_configs = load_agent_configs(
@@ -3,4 +3,4 @@ VITE_BASE_URL = /
 VITE_IS_REQUEST_PROXY = true
 VITE_API_URL = http://159.75.91.126
 VITE_API_URL_PREFIX = /matagent
-VITE_WB_BASE_URL = ws://159.75.91.126:8000/matagent/chat
+VITE_WB_BASE_URL = ws://47.121.220.134:8000/matagent/chat
@@ -3,4 +3,4 @@ VITE_BASE_URL = /
 VITE_IS_REQUEST_PROXY = true
 VITE_API_URL = http://159.75.91.126
 VITE_API_URL_PREFIX = /matagent
-VITE_WB_BASE_URL = ws://159.75.91.126:8000/matagent/chat
+VITE_WB_BASE_URL = ws://47.121.220.134:8000/matagent/chat
@@ -34,16 +34,16 @@ const Agent: ViolationBOType = reactive({
   Analysis_Group_Admin: "Optimize: Analysis_Group_Admin",

   //
-  Outer_Retrieval_Admin: "Retrieval: Outer_Retrieval_Admin",
-  Outer_Converter_Admin: "Converter: Outer_Converter_Admin",
-  Outer_Executor_Admin: "Executor: Outer_Executor_Admin",
+  // Outer_Retrieval_Admin: "Retrieval: Outer_Retrieval_Admin",
+  // Outer_Converter_Admin: "Converter: Outer_Converter_Admin",
+  // Outer_Executor_Admin: "Executor: Outer_Executor_Admin",
   experiment_executor: "Executor: experiment_executor",
-  Outer_Generate_Admin: "Generate: Outer_Generate_Admin",
-  Outer_Analysis_Admin: "Optimize: Outer_Analysis_Admin",
-  vector_code_executor: "Retrieval: vector_code_executor",
-  graphrag_code_executor: "Retrieval: graphrag_code_executor",
-  web_code_executor: "Retrieval: web_code_executor",
-  web_summary: "Retrieval: web_summary",
+  // Outer_Generate_Admin: "Generate: Outer_Generate_Admin",
+  // Outer_Analysis_Admin: "Optimize: Outer_Analysis_Admin",
+  // vector_code_executor: "Retrieval: vector_code_executor",
+  // graphrag_code_executor: "Retrieval: graphrag_code_executor",
+  // web_code_executor: "Retrieval: web_code_executor",
+  // web_summary: "Retrieval: web_summary",

   // 5
   Inner_Retrieval_Admin: "Retrieval: Inner_Retrieval_Admin",