feat: 增强科研软件的 a11y tree 支持

- 扩展 heuristic_retrieve.py 白名单以覆盖科研软件 GUI 框架:
  - 新增 prefix 规则: sunawt (Java Swing), qt5q/qt6q (Qt), ovito, pymol,
    contentspanel, wx (wxWidgets), afx (MFC), thunderrt (VB6)
  - 新增 endswith 规则: edit, widget, box, dialog, view, frame, menuitem,
    menubar, toolbar, tabitem, treeitem, window
  - 新增 Qt 控件和 Win32 控件的精确匹配
- 在 agent.py 中添加原始 a11y tree 的调试日志
- 修复 run.py 中 agent 初始化缺少 platform='windows' 的问题
- 添加 NO_PROXY 绕过本地/VM IP (兼容 Clash 全局代理)
- lib_run_single.py 中应用启动等待时间增加到 15 秒
- 新增 test_each_domain_a11y_tree.json (每个域一个任务用于 a11y 验证)
This commit is contained in:
2026-02-26 15:04:28 +08:00
parent 9899d4a0c7
commit 07e66490dd
5 changed files with 118 additions and 45 deletions

View File

@@ -0,0 +1,9 @@
{
"avogadro": ["building-organic-molecules_task1"],
"imagej": ["user-guide_task1"],
"jade": ["MDIJade6.5使用手册_task1"],
"origin": ["Origin_User_Guide_2025b_E_task1"],
"ovito": ["animation_task3"],
"pymol": ["MovieSchool_1_task1"],
"vesta": ["VESTA_Manual_task1"]
}

View File

@@ -22,7 +22,7 @@ def run_single_example(agent, env, example, max_steps, instruction, args, exampl
# except Exception as e:
# agent.reset(vm_ip=env.vm_ip)
# time.sleep(10) # Wait for the environment to be ready
time.sleep(15) # Wait for the environment to be ready (apps like Avogadro need time to fully load)
# get initial observation
logger.info("Getting initial observation...")
@@ -40,17 +40,17 @@ def run_single_example(agent, env, example, max_steps, instruction, args, exampl
)
logger.info(f"Response: {response}")
logger.info(f"Actions: {actions}")
logger.info(f"Executing actions...")
for action in actions:
# Capture the timestamp before executing the action
action_timestamp = datetime.datetime.now().strftime("%Y%m%d@%H%M%S%f")
logger.info("Step %d: %s", step_idx + 1, action)
logger.info("执行动作中...")
obs, reward, done, info = env.step(action, args.sleep_after_execution)
logger.info("动作执行完成。")
logger.info("Reward: %.2f", reward)
logger.info("Done: %s", done)
# Save screenshot and trajectory information
@@ -78,10 +78,10 @@ def run_single_example(agent, env, example, max_steps, instruction, args, exampl
scores.append(result)
with open(os.path.join(example_result_dir, "result.txt"), "w", encoding="utf-8") as f:
f.write(f"{result}\n")
# Log task completion to results.json
log_task_completion(example, result, example_result_dir, args)
if getattr(args, 'enable_recording', False):
env.controller.end_recording(os.path.join(example_result_dir, "recording.mp4"))
@@ -97,11 +97,11 @@ def run_single_example_human(env, example, max_steps, instruction, args, example
env.reset(task_config=example)
time.sleep(60) # Wait for the environment to be ready
obs = env._get_obs() # Get the initial observation
# Save initial screenshot
with open(os.path.join(example_result_dir, "initial_state.png"), "wb") as _f:
_f.write(obs['screenshot'])
# Save trajectory information
with open(os.path.join(example_result_dir, "traj.jsonl"), "a") as f:
f.write(json.dumps({
@@ -109,7 +109,7 @@ def run_single_example_human(env, example, max_steps, instruction, args, example
"initial_state": "initial_state.png"
}))
f.write("\n")
# Evaluate the result
result = env.evaluate(result_dir=example_result_dir)
logger.info("Result: %.2f", result)
@@ -254,14 +254,14 @@ def run_single_example_opencua(agent, env, example, max_steps, instruction, args
logger.info(f"Got Action: {actions}")
# Break if no actions
if not actions or len(actions)==0 or actions[0]=="" or actions[0].lower().startswith("error"):
if not actions or len(actions)==0 or actions[0]=="" or actions[0].lower().startswith("error"):
break
for action in actions:
# Capture the timestamp before executing the action
action_timestamp = datetime.datetime.now().strftime("%Y%m%d@%H%M%S")
logger.info("Step %d: %s", step_idx + 1, action)
obs, reward, done, info = env.step(action, args.sleep_after_execution)
logger.info(f"Action {action} executed, reward: {reward}, done: {done}")
@@ -304,7 +304,7 @@ def run_single_example_autoglm(agent, env, example, max_steps, instruction, args
agent.reset()
env.reset(task_config=example)
time.sleep(60) # Wait for the environment to be ready
obs = env._get_obs() # Get the initial observation
done = False
@@ -339,20 +339,20 @@ def run_single_example_autoglm(agent, env, example, max_steps, instruction, args
"screenshot_file": f"step_{step_idx + 1}_{action_timestamp}.png"
}))
f.write("\n")
if done:
logger.info("The episode is done.")
break
# Invalid Action
if not actions:
obs = env._get_obs() # update observation
step_idx += 1
if not done: # not completed the task yet
env.action_history.append('FAIL')
result = env.evaluate()
logger.info("Result: %.2f", result)
scores.append(result)
@@ -369,7 +369,7 @@ def run_single_example_mano(agent, env, example, max_steps, instruction, args, e
done = False
step_idx = 0
env.controller.start_recording()
with open(os.path.join(example_result_dir, f"step_0.png"),
"wb") as _f:
_f.write(obs['screenshot'])
@@ -379,12 +379,12 @@ def run_single_example_mano(agent, env, example, max_steps, instruction, args, e
obs
)
if len(actions) > 1:
if (("pyautogui.hotkey('shift')" in actions[0] or "pyautogui.hotkey('ctrl')" in actions[0])
if (("pyautogui.hotkey('shift')" in actions[0] or "pyautogui.hotkey('ctrl')" in actions[0])
and "pyautogui.click" in actions[1]):
hotkey_type = 'shift' if "shift" in actions[0] else 'ctrl'
action = f"pyautogui.keyDown('{hotkey_type}')\n{actions[1]}\npyautogui.keyUp('{hotkey_type}')"
actions = [action]
actions = [action]
for action in actions:
# Capture the timestamp before executing the action
action_timestamp = datetime.datetime.now().strftime("%Y%m%d@%H%M%S")
@@ -419,7 +419,7 @@ def run_single_example_mano(agent, env, example, max_steps, instruction, args, e
with open(os.path.join(example_result_dir, "result.txt"), "w", encoding="utf-8") as f:
f.write(f"{result}\n")
env.controller.end_recording(os.path.join(example_result_dir, "recording.mp4"))
def run_single_example_uipath(agent, env, example, max_steps, instruction, args, example_result_dir, scores):
runtime_logger = setup_logger(example, example_result_dir)
try:
@@ -485,7 +485,7 @@ logger = logging.getLogger("desktopenv.experiment")
def run_single_example_os_symphony(agent, env, example, max_steps, instruction, args, example_result_dir, scores):
set_current_result_dir(example_result_dir)
agent.reset(result_dir=example_result_dir)
env.reset(task_config=example)
time.sleep(30) # Wait for the environment to be ready
@@ -507,14 +507,14 @@ def run_single_example_os_symphony(agent, env, example, max_steps, instruction,
img_name = f"step_{step_idx + 1}_milestone.png"
else:
img_name = f"step_{step_idx + 1}.png"
with open(os.path.join(example_result_dir, img_name),
"wb") as _f:
_f.write(obs['screenshot'])
if "coordinates" in response and response["coordinates"]:
draw_coordinates(
image_bytes=obs['screenshot'],
coordinates=response["coordinates"],
image_bytes=obs['screenshot'],
coordinates=response["coordinates"],
save_path=os.path.join(example_result_dir, img_name[:-4] + "_draw.png")
)
@@ -563,10 +563,10 @@ def run_single_example_evocua(agent, env, example, max_steps, instruction, args,
Unified run function for EvoCUAAgent (supporting both S1 and S2 modes).
"""
runtime_logger = setup_logger(example, example_result_dir)
# Reset Environment
env.reset(task_config=example)
# Reset Agent
# Handle agent reset signature differences if any
try:
@@ -587,7 +587,7 @@ def run_single_example_evocua(agent, env, example, max_steps, instruction, args,
# EvoCUAAgent.predict unified signature: returns (response, actions)
# It handles both modes internally.
predict_res = agent.predict(instruction, obs)
# Check return signature logic
if len(predict_res) == 3:
# Compatibility with S1 original signature if agent was updated to match
@@ -597,7 +597,7 @@ def run_single_example_evocua(agent, env, example, max_steps, instruction, args,
info_dict = {}
logger.info(f"Step {step_idx + 1} Actions: {actions}")
# Break if no actions (fail-safe)
if not actions or (len(actions) == 1 and (actions[0] == "" or "error" in actions[0].lower())):
# Allow "FAIL" or "DONE" to process through execution loop if agent outputs them as actions
@@ -608,18 +608,18 @@ def run_single_example_evocua(agent, env, example, max_steps, instruction, args,
for action in actions:
action_timestamp = datetime.datetime.now().strftime("%Y%m%d@%H%M%S%f")
logger.info("Executing action: %s", action)
# Execute
obs, reward, done, info = env.step(action, args.sleep_after_execution)
logger.info("Reward: %.2f", reward)
logger.info("Done: %s", done)
# Save screenshot
screenshot_file = f"step_{step_idx + 1}_{action_timestamp}.png"
with open(os.path.join(example_result_dir, screenshot_file), "wb") as _f:
_f.write(obs['screenshot'])
# Log Trajectory
log_entry = {
"step_num": step_idx + 1,
@@ -634,25 +634,25 @@ def run_single_example_evocua(agent, env, example, max_steps, instruction, args,
# Add natural language info if available (S1 style)
if info_dict:
log_entry["natural_language_action"] = info_dict.get("action")
with open(os.path.join(example_result_dir, "traj.jsonl"), "a", encoding="utf-8") as f:
f.write(json.dumps(log_entry, ensure_ascii=False))
f.write("\n")
if done:
logger.info("The episode is done.")
break
step_idx += 1
time.sleep(20) # Wait for environment to settle
result = env.evaluate(result_dir=example_result_dir)
logger.info("Result: %.2f", result)
scores.append(result)
with open(os.path.join(example_result_dir, "result.txt"), "w", encoding="utf-8") as f:
f.write(f"{result}\n")
log_task_completion(example, result, example_result_dir, args)
env.controller.end_recording(os.path.join(example_result_dir, "recording.mp4"))

View File

@@ -46,6 +46,15 @@ def judge_node(node: ET, platform="ubuntu", check_image=False) -> bool:
raise ValueError("Invalid platform, must be 'ubuntu' or 'windows'")
keeps: bool = node.tag.startswith("document") \
or node.tag.startswith("sunawt") \
or node.tag.startswith("qt5q") \
or node.tag.startswith("qt6q") \
or node.tag.startswith("ovito") \
or node.tag.startswith("pymol") \
or node.tag.startswith("contentspanel") \
or node.tag.startswith("wx") \
or node.tag.startswith("afx") \
or node.tag.startswith("thunderrt") \
or node.tag.endswith("item") \
or node.tag.endswith("button") \
or node.tag.endswith("heading") \
@@ -58,6 +67,18 @@ def judge_node(node: ET, platform="ubuntu", check_image=False) -> bool:
or node.tag.endswith("textfield") \
or node.tag.endswith("textarea") \
or node.tag.endswith("menu") \
or node.tag.endswith("menuitem") \
or node.tag.endswith("menubar") \
or node.tag.endswith("toolbar") \
or node.tag.endswith("tabitem") \
or node.tag.endswith("treeitem") \
or node.tag.endswith("window") \
or node.tag.endswith("edit") \
or node.tag.endswith("widget") \
or node.tag.endswith("box") \
or node.tag.endswith("dialog") \
or node.tag.endswith("view") \
or node.tag.endswith("frame") \
or node.tag in {"alert", "canvas", "check-box"
, "combo-box", "entry", "icon"
, "image", "paragraph", "scroll-bar"
@@ -66,6 +87,16 @@ def judge_node(node: ET, platform="ubuntu", check_image=False) -> bool:
, "netuiribbontab", "start", "trayclockwclass"
, "traydummysearchcontrol", "uiimage", "uiproperty"
, "uiribboncommandbar"
, "qt5qwindowicon", "textblock", "listview"
, "chrome_widgetwin_1", "chrome_renderwidgethosthwnd"
, "unknown", "pane", "tree", "tab"
, "datagrid", "dataitem", "group"
, "statusbar", "titlebar", "tooltip"
, "toolbarwindow32", "richedit50w"
, "msctls_statusbar32", "qaction"
, "qsplitter", "qsplitterhandle"
, "qtoolbarseparator", "qtextbrowser"
, "qtabbar", "qopenglwidget"
}
keeps = keeps and (
platform == "ubuntu"

View File

@@ -329,7 +329,7 @@ class PromptAgent:
raise ValueError("Invalid action space: " + action_space)
else:
raise ValueError("Invalid experiment type: " + observation_type)
self.system_message = self.system_message.format(CLIENT_PASSWORD=self.client_password, SCREEN_WIDTH=self.screen_width, SCREEN_HEIGHT=self.screen_height)
def predict(self, instruction: str, obs: Dict) -> List:
@@ -502,9 +502,30 @@ class PromptAgent:
]
})
elif self.observation_type == "a11y_tree":
# Debug: log raw a11y tree XML to help diagnose missing elements
raw_tree = obs["accessibility_tree"]
if raw_tree:
# Log first 2000 chars of raw XML and count total nodes
root = ET.fromstring(raw_tree)
all_tags = set()
total_nodes = 0
for node in root.iter():
all_tags.add(node.tag)
total_nodes += 1
logger.info("Raw a11y tree: %d total nodes, unique tags: %s", total_nodes, all_tags)
logger.debug("Raw a11y tree XML (first 2000 chars): %s", raw_tree[:2000])
# Also log nodes containing 'avogadro' or 'qt5' in their attributes
for node in root.iter():
node_str = ET.tostring(node, encoding="unicode")
if 'avogadro' in node_str.lower() or 'qt5' in node_str.lower():
logger.info("Avogadro/Qt5 node: tag=%s, name=%s, visible=%s, enabled=%s",
node.tag, node.get("name", ""),
node.get("{https://accessibility.windows.example.org/ns/state}visible", "?"),
node.get("{https://accessibility.windows.example.org/ns/state}enabled", "?"))
linearized_accessibility_tree = linearize_accessibility_tree(accessibility_tree=obs["accessibility_tree"],
platform=self.platform)
logger.debug("LINEAR AT: %s", linearized_accessibility_tree)
logger.info("Linearized a11y tree lines: %d", len(linearized_accessibility_tree.split('\n')) if linearized_accessibility_tree else 0)
if linearized_accessibility_tree:
linearized_accessibility_tree = trim_accessibility_tree(linearized_accessibility_tree,
@@ -670,7 +691,7 @@ class PromptAgent:
return response.json()['choices'][0]['message']['content']
elif self.model.startswith("gpt"):
# Support custom OpenAI base URL via environment variable
base_url = os.environ.get('OPENAI_BASE_URL', 'https://api.openai.com')
base_url = os.environ.get('OPENAI_BASE_URL', os.environ.get('OPENAI_API_BASE', 'https://api.openai.com'))
# Smart handling: avoid duplicate /v1 if base_url already ends with /v1
api_url = f"{base_url}/chat/completions" if base_url.endswith('/v1') else f"{base_url}/v1/chat/completions"
headers = {
@@ -1153,7 +1174,7 @@ class PromptAgent:
except Exception as e:
print("Failed to call LLM: " + str(e))
return ""
else:
raise ValueError("Invalid model: " + self.model)
@@ -1192,4 +1213,4 @@ class PromptAgent:
self.thoughts = []
self.actions = []
self.observations = []
self.observations = []

14
run.py
View File

@@ -142,6 +142,7 @@ def test(args: argparse.Namespace, test_all_meta: dict) -> None:
}
agent = PromptAgent(
platform="windows",
model=args.model,
max_tokens=args.max_tokens,
top_p=args.top_p,
@@ -321,8 +322,19 @@ def get_result(action_space, use_model, observation_type, result_dir, total_file
if __name__ == "__main__":
####### The complete version of the list of examples #######
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Bypass proxy for local/VM IPs so Clash global mode doesn't break VM communication
no_proxy = os.environ.get("NO_PROXY", os.environ.get("no_proxy", ""))
extra = "localhost,127.0.0.1,192.168.*,10.*,172.16.*,172.17.*,172.18.*,172.19.*,172.20.*,172.21.*,172.22.*,172.23.*,172.24.*,172.25.*,172.26.*,172.27.*,172.28.*,172.29.*,172.30.*,172.31.*"
if no_proxy:
no_proxy = f"{no_proxy},{extra}"
else:
no_proxy = extra
os.environ["NO_PROXY"] = no_proxy
os.environ["no_proxy"] = no_proxy
args = config()
# save args to json in result_dir/action_space/observation_type/model/args.json
path_to_args = os.path.join(
args.result_dir,