add mixtral cogagent
@@ -549,55 +549,101 @@ class PromptAgent:
             return response.json()['content'][0]['text']
 
-        # elif self.model.startswith("mistral"):
-        #     print("Call mistral")
-        #     messages = payload["messages"]
-        #     max_tokens = payload["max_tokens"]
-        #
-        #     misrtal_messages = []
-        #
-        #     for i, message in enumerate(messages):
-        #         mistral_message = {
-        #             "role": message["role"],
-        #             "content": []
-        #         }
-        #
-        #         for part in message["content"]:
-        #             mistral_message['content'] = part['text'] if part['type'] == "text" else None
-        #
-        #         misrtal_messages.append(mistral_message)
-        #
-        #     # the mistral not support system message in our endpoint, so we concatenate it at the first user message
-        #     if misrtal_messages[0]['role'] == "system":
-        #         misrtal_messages[1]['content'] = misrtal_messages[0]['content'] + "\n" + misrtal_messages[1]['content']
-        #         misrtal_messages.pop(0)
-        #
-        #     # openai.api_base = "http://localhost:8000/v1"
-        #     # openai.api_key = "test"
-        #     # response = openai.ChatCompletion.create(
-        #     #     messages=misrtal_messages,
-        #     #     model="Mixtral-8x7B-Instruct-v0.1"
-        #     # )
-        #
-        #     from openai import OpenAI
-        #     TOGETHER_API_KEY = "<redacted>"
-        #
-        #     client = OpenAI(api_key=TOGETHER_API_KEY,
-        #                     base_url='https://api.together.xyz',
-        #                     )
-        #     logger.info("Generating content with Mistral model: %s", self.model)
-        #     response = client.chat.completions.create(
-        #         messages=misrtal_messages,
-        #         model="mistralai/Mixtral-8x7B-Instruct-v0.1",
-        #         max_tokens=1024
-        #     )
-        #
-        #     try:
-        #         # return response['choices'][0]['message']['content']
-        #         return response.choices[0].message.content
-        #     except Exception as e:
-        #         print("Failed to call LLM: " + str(e))
-        #         return ""
+        elif self.model.startswith("mistral"):
+            print("Call mistral")
+            messages = payload["messages"]
+            max_tokens = payload["max_tokens"]
+            top_p = payload["top_p"]
+            temperature = payload["temperature"]
+
+            mistral_messages = []
+
+            for i, message in enumerate(messages):
+                mistral_message = {
+                    "role": message["role"],
+                    "content": ""
+                }
+
+                # Flatten multimodal parts to a single string: the text-only
+                # endpoint below cannot accept image parts, so they are dropped.
+                for part in message["content"]:
+                    if part['type'] == "text":
+                        mistral_message['content'] = part['text']
+
+                mistral_messages.append(mistral_message)
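+
+            # Illustrative shape of mistral_messages after flattening; the
+            # values here are hypothetical, not taken from a real payload:
+            #   [{"role": "system", "content": "You are an agent..."},
+            #    {"role": "user", "content": "Observation: ..."}]
+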
+            from openai import OpenAI
+
+            # The Together endpoint is OpenAI-compatible; the key is read from
+            # the TOGETHER_API_KEY environment variable.
+            client = OpenAI(api_key=os.environ["TOGETHER_API_KEY"],
+                            base_url='https://api.together.xyz',
+                            )
+            logger.info("Generating content with Mistral model: %s", self.model)
+
+            response = client.chat.completions.create(
+                messages=mistral_messages,
+                model=self.model,
+                max_tokens=max_tokens,
+                top_p=top_p,
+                temperature=temperature
+            )
+
+            try:
+                return response.choices[0].message.content
+            except Exception as e:
+                print("Failed to call LLM: " + str(e))
+                return ""
+
+        elif self.model.startswith("THUDM"):
+            # THUDM/cogagent-chat-hf
+            print("Call CogAgent")
+            messages = payload["messages"]
+            max_tokens = payload["max_tokens"]
+            top_p = payload["top_p"]
+            temperature = payload["temperature"]
+
+            cog_messages = []
+
+            for i, message in enumerate(messages):
+                cog_message = {
+                    "role": message["role"],
+                    "content": []
+                }
+
+                for part in message["content"]:
+                    if part['type'] == "image_url":
+                        cog_message['content'].append({"type": "image_url", "image_url": {"url": part['image_url']['url']}})
+                    if part['type'] == "text":
+                        cog_message['content'].append({"type": "text", "text": part['text']})
+
+                cog_messages.append(cog_message)
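+
+            # Illustrative cog_message for one user turn (hypothetical values),
+            # assuming a screenshot part followed by an instruction part:
+            #   {"role": "user",
+            #    "content": [{"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
+            #                {"type": "text", "text": "Click the search box"}]}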
+
+            # CogAgent does not support system messages on our endpoint, so we
+            # prepend the system content to the first user message.
+            if cog_messages[0]['role'] == "system":
+                cog_system_message_item = cog_messages[0]['content'][0]
+                cog_messages[1]['content'].insert(0, cog_system_message_item)
+                cog_messages.pop(0)
+
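+            # NOTE: top_p and temperature are read from the payload above but
+            # are not included in the request body sent below.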
+            payload = {
+                "model": self.model,
+                "max_tokens": max_tokens,
+                "messages": cog_messages
+            }
+
+            base_url = "http://127.0.0.1:8000"
+
+            response = requests.post(f"{base_url}/v1/chat/completions", json=payload, stream=False)
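+            # Expected success body, assuming the local server mirrors the
+            # OpenAI chat-completions schema:
+            #   {"choices": [{"message": {"role": "assistant", "content": "..."}}]}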
+            if response.status_code == 200:
+                decoded_line = response.json()
+                content = decoded_line.get("choices", [{}])[0].get("message", {}).get("content", "")
+                return content
+            else:
+                print("Failed to call LLM: ", response.status_code)
+                return ""
+
         elif self.model.startswith("gemini"):
             def encoded_img_to_pil_img(data_str):