Update claude endpoint

Timothyxxx
2024-03-18 14:56:23 +08:00
parent 4067572af7
commit 204a2b949f

@@ -15,7 +15,6 @@ import dashscope
 import google.generativeai as genai
 import openai
 import requests
-import wandb
 from PIL import Image
 from google.api_core.exceptions import InvalidArgument
@@ -523,20 +522,30 @@ class PromptAgent:
                 claude_messages[1]['content'].insert(0, claude_system_message_item)
                 claude_messages.pop(0)
+            # headers = {
+            #     "x-api-key": os.environ["ANTHROPIC_API_KEY"],
+            #     "anthropic-version": "2023-06-01",
+            #     "content-type": "application/json"
+            # }
             headers = {
-                "x-api-key": os.environ["ANTHROPIC_API_KEY"],
-                "anthropic-version": "2023-06-01",
-                "content-type": "application/json"
+                "Accept": "application/json",
+                "Authorization": "Bearer " + os.environ["ANTHROPIC_API_KEY"],
+                "User-Agent": "Apifox/1.0.0 (https://apifox.com)",
+                "Content-Type": "application/json"
             }
             payload = {
                 "model": self.model,
                 "max_tokens": max_tokens,
-                "messages": claude_messages
+                "messages": claude_messages,
+                "temperature": temperature,
+                "top_p": top_p
             }
             response = requests.post(
-                "https://api.anthropic.com/v1/messages",
+                # "https://chat.claude.com/v1/chat/completions",
+                "https://api.aigcbest.top/v1/chat/completions",
                 headers=headers,
                 json=payload
             )
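Note on this hunk: the request moves from Anthropic's native Messages API, which authenticates via an "x-api-key" header, to an OpenAI-compatible relay that expects a Bearer token and accepts sampling parameters in the JSON body. A minimal side-by-side sketch of the two request shapes; the relay URL and Apifox User-Agent come from the diff, while the helper names are illustrative:

import os
import requests

def call_native_anthropic(model, claude_messages, max_tokens):
    # Old path: Anthropic's native Messages endpoint, keyed via x-api-key.
    headers = {
        "x-api-key": os.environ["ANTHROPIC_API_KEY"],
        "anthropic-version": "2023-06-01",
        "content-type": "application/json",
    }
    payload = {"model": model, "max_tokens": max_tokens, "messages": claude_messages}
    return requests.post("https://api.anthropic.com/v1/messages",
                         headers=headers, json=payload)

def call_openai_compatible_relay(model, claude_messages, max_tokens, temperature, top_p):
    # New path: an OpenAI-style /v1/chat/completions relay; auth is a Bearer
    # token and temperature/top_p travel in the JSON body like any OpenAI request.
    headers = {
        "Authorization": "Bearer " + os.environ["ANTHROPIC_API_KEY"],
        "Content-Type": "application/json",
    }
    payload = {
        "model": model,
        "max_tokens": max_tokens,
        "messages": claude_messages,
        "temperature": temperature,
        "top_p": top_p,
    }
    return requests.post("https://api.aigcbest.top/v1/chat/completions",
                         headers=headers, json=payload)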
@@ -546,8 +555,10 @@ class PromptAgent:
                 logger.error("Failed to call LLM: " + response.text)
                 time.sleep(5)
                 return ""
+            # else:
+            #     return response.json()['content'][0]['text']
             else:
-                return response.json()['content'][0]['text']
+                return response.json()['choices'][0]['message']['content']

         elif self.model.startswith("mistral"):
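Note: the return path changes in step with the endpoint, because the two APIs nest the completion text differently. A small sketch of both shapes; the helper name is illustrative:

def extract_text(response_json, openai_compatible=True):
    # OpenAI-style endpoints (including the relay above) nest the text under
    # choices -> [0] -> message -> content; Anthropic's native Messages API
    # nests it under content -> [0] -> text.
    if openai_compatible:
        return response_json['choices'][0]['message']['content']
    return response_json['content'][0]['text']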
@@ -556,7 +567,7 @@ class PromptAgent:
             max_tokens = payload["max_tokens"]
             top_p = payload["top_p"]
             temperature = payload["temperature"]

             mistral_messages = []
             for i, message in enumerate(messages):
@@ -567,36 +578,34 @@ class PromptAgent:
                     for part in message["content"]:
                         mistral_message['content'] = part['text'] if part['type'] == "text" else ""
                 mistral_messages.append(mistral_message)
-            # openai.api_base = "http://localhost:8000/v1"
-            # response = openai.ChatCompletion.create(
-            #     messages=mistral_messages,
-            #     model="Mixtral-8x7B-Instruct-v0.1"
-            # )
+            from openai import OpenAI
+            client = OpenAI(api_key=os.environ["TOGETHER_API_KEY"],
+                            base_url='https://api.together.xyz',
+                            )
+            logger.info("Generating content with Mistral model: %s", self.model)
             response = client.chat.completions.create(
                 messages=mistral_messages,
                 model=self.model,
                 max_tokens=max_tokens
             )
             try:
                 return response.choices[0].message.content
             except Exception as e:
                 print("Failed to call LLM: " + str(e))
                 return ""

         elif self.model.startswith("THUDM"):
             # THUDM/cogagent-chat-hf
             print("Call CogAgent")
@@ -604,9 +613,9 @@ class PromptAgent:
             max_tokens = payload["max_tokens"]
             top_p = payload["top_p"]
             temperature = payload["temperature"]

             cog_messages = []
             for i, message in enumerate(messages):
                 cog_message = {
                     "role": message["role"],
@@ -615,11 +624,12 @@ class PromptAgent:
                 for part in message["content"]:
                     if part['type'] == "image_url":
-                        cog_message['content'].append({"type": "image_url", "image_url": {"url": part['image_url']['url'] } })
+                        cog_message['content'].append(
+                            {"type": "image_url", "image_url": {"url": part['image_url']['url']}})
                     if part['type'] == "text":
                         cog_message['content'].append({"type": "text", "text": part['text']})
                 cog_messages.append(cog_message)
             # the cogagent endpoint does not support system messages, so we concatenate the system message into the first user message
@@ -627,7 +637,7 @@ class PromptAgent:
                 cog_system_message_item = cog_messages[0]['content'][0]
                 cog_messages[1]['content'].insert(0, cog_system_message_item)
                 cog_messages.pop(0)

             payload = {
                 "model": self.model,
                 "max_tokens": max_tokens,
@@ -635,7 +645,7 @@ class PromptAgent:
             }

             base_url = "http://127.0.0.1:8000"
             response = requests.post(f"{base_url}/v1/chat/completions", json=payload, stream=False)
             if response.status_code == 200:
                 decoded_line = response.json()
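Note: the CogAgent branch posts to a locally served OpenAI-compatible endpoint. A self-contained sketch of the round trip, assuming the local server mirrors the OpenAI chat-completions response schema; the payload contents are illustrative:

import requests

base_url = "http://127.0.0.1:8000"
payload = {
    "model": "THUDM/cogagent-chat-hf",
    "max_tokens": 256,
    "messages": [{
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe the screenshot."},
            {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},  # placeholder image
        ],
    }],
}
response = requests.post(f"{base_url}/v1/chat/completions", json=payload, stream=False)
if response.status_code == 200:
    decoded_line = response.json()
    # Assuming an OpenAI-style schema on the local server:
    print(decoded_line['choices'][0]['message']['content'])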