add Mixtral and CogAgent support
@@ -549,55 +549,101 @@ class PromptAgent:
            return response.json()['content'][0]['text']

        # elif self.model.startswith("mistral"):
        #     print("Call mistral")
        #     messages = payload["messages"]
        #     max_tokens = payload["max_tokens"]
        #
        #     mistral_messages = []
        #
        #     for i, message in enumerate(messages):
        #         mistral_message = {
        #             "role": message["role"],
        #             "content": []
        #         }
        #
        #         for part in message["content"]:
        #             mistral_message['content'] = part['text'] if part['type'] == "text" else None
        #
        #         mistral_messages.append(mistral_message)
        #
        #     # our Mistral endpoint does not support a system message, so we concatenate it into the first user message
        #     if mistral_messages[0]['role'] == "system":
        #         mistral_messages[1]['content'] = mistral_messages[0]['content'] + "\n" + mistral_messages[1]['content']
        #         mistral_messages.pop(0)
        #
        #     # openai.api_base = "http://localhost:8000/v1"
        #     # openai.api_key = "test"
        #     # response = openai.ChatCompletion.create(
        #     #     messages=mistral_messages,
        #     #     model="Mixtral-8x7B-Instruct-v0.1"
        #     # )
        #
        #     from openai import OpenAI
        #     TOGETHER_API_KEY = "<redacted>"
        #
        #     client = OpenAI(api_key=TOGETHER_API_KEY,
        #                     base_url='https://api.together.xyz',
        #                     )
        #     logger.info("Generating content with Mistral model: %s", self.model)
        #     response = client.chat.completions.create(
        #         messages=mistral_messages,
        #         model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        #         max_tokens=1024
        #     )
        #
        #     try:
        #         # return response['choices'][0]['message']['content']
        #         return response.choices[0].message.content
        #     except Exception as e:
        #         print("Failed to call LLM: " + str(e))
        #         return ""
elif self.model.startswith("mistral"):
|
||||
print("Call mistral")
|
||||
messages = payload["messages"]
|
||||
max_tokens = payload["max_tokens"]
|
||||
top_p = payload["top_p"]
|
||||
temperature = payload["temperature"]
|
||||
|
||||
misrtal_messages = []
|
||||
|
||||
for i, message in enumerate(messages):
|
||||
mistral_message = {
|
||||
"role": message["role"],
|
||||
"content": ""
|
||||
}
|
||||
|
||||
for part in message["content"]:
|
||||
mistral_message['content'] = part['text'] if part['type'] == "text" else ""
|
||||
|
||||
|
||||
misrtal_messages.append(mistral_message)
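            # NOTE: only the text part of each message is forwarded; image parts are dropped
            # because the Mixtral chat models are text-only.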
            # openai.api_base = "http://localhost:8000/v1"
            # response = openai.ChatCompletion.create(
            #     messages=mistral_messages,
            #     model="Mixtral-8x7B-Instruct-v0.1"
            # )

            from openai import OpenAI

            client = OpenAI(api_key=os.environ["TOGETHER_API_KEY"],
                            base_url='https://api.together.xyz',
                            )
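            # Assumption: TOGETHER_API_KEY is exported in the environment; Together AI serves an
            # OpenAI-compatible chat completions API at this base_url, so the OpenAI client is reused.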
            logger.info("Generating content with Mistral model: %s", self.model)

            response = client.chat.completions.create(
                messages=mistral_messages,
                model=self.model,
                max_tokens=max_tokens
            )

            try:
                return response.choices[0].message.content
            except Exception as e:
                print("Failed to call LLM: " + str(e))
                return ""

elif self.model.startswith("THUDM"):
|
||||
# THUDM/cogagent-chat-hf
|
||||
print("Call CogAgent")
|
||||
messages = payload["messages"]
|
||||
max_tokens = payload["max_tokens"]
|
||||
top_p = payload["top_p"]
|
||||
temperature = payload["temperature"]
|
||||
|
||||
cog_messages = []
|
||||
|
||||
for i, message in enumerate(messages):
|
||||
cog_message = {
|
||||
"role": message["role"],
|
||||
"content": []
|
||||
}
|
||||
|
||||
for part in message["content"]:
|
||||
if part['type'] == "image_url":
|
||||
cog_message['content'].append({"type": "image_url", "image_url": {"url": part['image_url']['url'] } })
|
||||
|
||||
if part['type'] == "text":
|
||||
cog_message['content'].append({"type": "text", "text": part['text']})
|
||||
|
||||
cog_messages.append(cog_message)
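            # CogAgent is multimodal, so both the screenshot (image_url) and text parts are forwarded.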
            # our CogAgent endpoint does not support a system message, so we prepend it to the first user message
            if cog_messages[0]['role'] == "system":
                cog_system_message_item = cog_messages[0]['content'][0]
                cog_messages[1]['content'].insert(0, cog_system_message_item)
                cog_messages.pop(0)

            payload = {
                "model": self.model,
                "max_tokens": max_tokens,
                "messages": cog_messages
            }

            base_url = "http://127.0.0.1:8000"

            response = requests.post(f"{base_url}/v1/chat/completions", json=payload, stream=False)
            if response.status_code == 200:
                decoded_line = response.json()
                content = decoded_line.get("choices", [{}])[0].get("message", {}).get("content", "")
                return content
            else:
                print("Failed to call LLM: ", response.status_code)
                return ""

        elif self.model.startswith("gemini"):
            def encoded_img_to_pil_img(data_str):