Add support for Qwen models from the DashScope API (qwen-max, etc.); improve the robustness of getting the observation
@@ -943,8 +943,7 @@ class PromptAgent:
             messages = payload["messages"]
             max_tokens = payload["max_tokens"]
             top_p = payload["top_p"]
-            if payload["temperature"]:
-                logger.warning("Qwen model does not support temperature parameter, it will be ignored.")
+            temperature = payload["temperature"]
 
             qwen_messages = []
 
@@ -961,23 +960,42 @@ class PromptAgent:
 
                 qwen_messages.append(qwen_message)
 
-            response = dashscope.MultiModalConversation.call(
-                model='qwen-vl-plus',
-                messages=messages,
-                max_length=max_tokens,
-                top_p=top_p,
-            )
-            # The response status_code is HTTPStatus.OK indicate success,
-            # otherwise indicate request is failed, you can get error code
-            # and message from code and message.
-            if response.status_code == HTTPStatus.OK:
-                try:
-                    return response.json()['output']['choices'][0]['message']['content']
-                except Exception:
-                    return ""
-            else:
-                print(response.code)  # The error code.
-                print(response.message)  # The error message.
+            flag = 0
+            while True:
+                try:
+                    if flag > 20:
+                        break
+                    logger.info("Generating content with model: %s", self.model)
+                    response = dashscope.Generation.call(
+                        model=self.model,
+                        messages=qwen_messages,
+                        result_format="message",
+                        max_length=max_tokens,
+                        top_p=top_p,
+                        temperature=temperature
+                    )
+
+                    if response.status_code == HTTPStatus.OK:
+                        break
+                    else:
+                        logger.error('Request id: %s, Status code: %s, error code: %s, error message: %s' % (
+                            response.request_id, response.status_code,
+                            response.code, response.message
+                        ))
+                        raise Exception("Failed to call LLM: " + response.message)
+                except:
+                    if flag == 0:
+                        qwen_messages = [qwen_messages[0]] + qwen_messages[-1:]
+                    else:
+                        for i in range(len(qwen_messages[-1]["content"])):
+                            if "text" in qwen_messages[-1]["content"][i]:
+                                qwen_messages[-1]["content"][i]["text"] = ' '.join(qwen_messages[-1]["content"][i]["text"].split()[:-500])
+                    flag = flag + 1
+
+            try:
+                return response['output']['choices'][0]['message']['content']
+            except Exception as e:
+                print("Failed to call LLM: " + str(e))
+                return ""
 
         else:
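
For context, here is a minimal, self-contained sketch (not part of the commit) of the retry-and-truncate pattern the new code wraps around dashscope.Generation.call. The model name "qwen-max", the hard-coded sampling values, and reading the key from the DASHSCOPE_API_KEY environment variable are assumptions for illustration; the agent itself passes self.model and the values taken from its payload, and its message contents are multimodal lists rather than plain strings.

# Minimal sketch of the retry-and-truncate pattern around dashscope.Generation.call.
# Assumptions: model name "qwen-max", API key in the DASHSCOPE_API_KEY env var,
# plain-string message contents (the agent builds multimodal content lists instead).
import os
from http import HTTPStatus

import dashscope

dashscope.api_key = os.environ["DASHSCOPE_API_KEY"]

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize the plan in one sentence."},
]

attempts = 0
response = None
while attempts <= 20:  # same cut-off as the commit's `flag > 20` check
    response = dashscope.Generation.call(
        model="qwen-max",          # the agent passes self.model here
        messages=messages,
        result_format="message",   # chat-style output, as in the commit
        max_length=1500,
        top_p=0.9,
    )
    if response.status_code == HTTPStatus.OK:
        break
    # On failure the commit first drops the middle of the conversation, then
    # starts trimming text from the last message; this sketch only does the first step.
    if len(messages) > 2:
        messages = [messages[0]] + messages[-1:]
    attempts += 1

if response is not None and response.status_code == HTTPStatus.OK:
    print(response["output"]["choices"][0]["message"]["content"])
else:
    print("Failed to call LLM")

Trimming the conversation before retrying mirrors the commit's fallback of keeping only the system prompt and the latest message when a call is rejected, which is the usual remedy when the request exceeds the model's context limit.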