feat: 增强科研软件的 a11y tree 支持
- 扩展 heuristic_retrieve.py 白名单以覆盖科研软件 GUI 框架:
- 新增 prefix 规则: sunawt (Java Swing), qt5q/qt6q (Qt), ovito, pymol,
contentspanel, wx (wxWidgets), afx (MFC), thunderrt (VB6)
- 新增 endswith 规则: edit, widget, box, dialog, view, frame, menuitem,
menubar, toolbar, tabitem, treeitem, window
- 新增 Qt 控件和 Win32 控件的精确匹配
- 在 agent.py 中添加原始 a11y tree 的调试日志
- 修复 run.py 中 agent 初始化缺少 platform='windows' 的问题
- 添加 NO_PROXY 绕过本地/VM IP (兼容 Clash 全局代理)
- lib_run_single.py 中应用启动等待时间增加到 15 秒
- 新增 test_each_domain_a11y_tree.json (每个域一个任务用于 a11y 验证)
This commit is contained in:
@@ -22,7 +22,7 @@ def run_single_example(agent, env, example, max_steps, instruction, args, exampl
|
||||
# except Exception as e:
|
||||
# agent.reset(vm_ip=env.vm_ip)
|
||||
|
||||
# time.sleep(10) # Wait for the environment to be ready
|
||||
time.sleep(15) # Wait for the environment to be ready (apps like Avogadro need time to fully load)
|
||||
|
||||
# get initial observation
|
||||
logger.info("Getting initial observation...")
|
||||
@@ -40,17 +40,17 @@ def run_single_example(agent, env, example, max_steps, instruction, args, exampl
|
||||
)
|
||||
logger.info(f"Response: {response}")
|
||||
logger.info(f"Actions: {actions}")
|
||||
|
||||
|
||||
logger.info(f"Executing actions...")
|
||||
for action in actions:
|
||||
# Capture the timestamp before executing the action
|
||||
action_timestamp = datetime.datetime.now().strftime("%Y%m%d@%H%M%S%f")
|
||||
logger.info("Step %d: %s", step_idx + 1, action)
|
||||
|
||||
|
||||
logger.info("执行动作中...")
|
||||
obs, reward, done, info = env.step(action, args.sleep_after_execution)
|
||||
logger.info("动作执行完成。")
|
||||
|
||||
|
||||
logger.info("Reward: %.2f", reward)
|
||||
logger.info("Done: %s", done)
|
||||
# Save screenshot and trajectory information
|
||||
@@ -78,10 +78,10 @@ def run_single_example(agent, env, example, max_steps, instruction, args, exampl
|
||||
scores.append(result)
|
||||
with open(os.path.join(example_result_dir, "result.txt"), "w", encoding="utf-8") as f:
|
||||
f.write(f"{result}\n")
|
||||
|
||||
|
||||
# Log task completion to results.json
|
||||
log_task_completion(example, result, example_result_dir, args)
|
||||
|
||||
|
||||
if getattr(args, 'enable_recording', False):
|
||||
env.controller.end_recording(os.path.join(example_result_dir, "recording.mp4"))
|
||||
|
||||
@@ -97,11 +97,11 @@ def run_single_example_human(env, example, max_steps, instruction, args, example
|
||||
env.reset(task_config=example)
|
||||
time.sleep(60) # Wait for the environment to be ready
|
||||
obs = env._get_obs() # Get the initial observation
|
||||
|
||||
|
||||
# Save initial screenshot
|
||||
with open(os.path.join(example_result_dir, "initial_state.png"), "wb") as _f:
|
||||
_f.write(obs['screenshot'])
|
||||
|
||||
|
||||
# Save trajectory information
|
||||
with open(os.path.join(example_result_dir, "traj.jsonl"), "a") as f:
|
||||
f.write(json.dumps({
|
||||
@@ -109,7 +109,7 @@ def run_single_example_human(env, example, max_steps, instruction, args, example
|
||||
"initial_state": "initial_state.png"
|
||||
}))
|
||||
f.write("\n")
|
||||
|
||||
|
||||
# Evaluate the result
|
||||
result = env.evaluate(result_dir=example_result_dir)
|
||||
logger.info("Result: %.2f", result)
|
||||
@@ -254,14 +254,14 @@ def run_single_example_opencua(agent, env, example, max_steps, instruction, args
|
||||
|
||||
logger.info(f"Got Action: {actions}")
|
||||
# Break if no actions
|
||||
if not actions or len(actions)==0 or actions[0]=="" or actions[0].lower().startswith("error"):
|
||||
if not actions or len(actions)==0 or actions[0]=="" or actions[0].lower().startswith("error"):
|
||||
break
|
||||
|
||||
for action in actions:
|
||||
# Capture the timestamp before executing the action
|
||||
action_timestamp = datetime.datetime.now().strftime("%Y%m%d@%H%M%S")
|
||||
logger.info("Step %d: %s", step_idx + 1, action)
|
||||
|
||||
|
||||
obs, reward, done, info = env.step(action, args.sleep_after_execution)
|
||||
|
||||
logger.info(f"Action {action} executed, reward: {reward}, done: {done}")
|
||||
@@ -304,7 +304,7 @@ def run_single_example_autoglm(agent, env, example, max_steps, instruction, args
|
||||
agent.reset()
|
||||
|
||||
env.reset(task_config=example)
|
||||
|
||||
|
||||
time.sleep(60) # Wait for the environment to be ready
|
||||
obs = env._get_obs() # Get the initial observation
|
||||
done = False
|
||||
@@ -339,20 +339,20 @@ def run_single_example_autoglm(agent, env, example, max_steps, instruction, args
|
||||
"screenshot_file": f"step_{step_idx + 1}_{action_timestamp}.png"
|
||||
}))
|
||||
f.write("\n")
|
||||
|
||||
|
||||
if done:
|
||||
logger.info("The episode is done.")
|
||||
break
|
||||
|
||||
|
||||
# Invalid Action
|
||||
if not actions:
|
||||
obs = env._get_obs() # update observation
|
||||
|
||||
|
||||
step_idx += 1
|
||||
|
||||
|
||||
if not done: # not completed the task yet
|
||||
env.action_history.append('FAIL')
|
||||
|
||||
|
||||
result = env.evaluate()
|
||||
logger.info("Result: %.2f", result)
|
||||
scores.append(result)
|
||||
@@ -369,7 +369,7 @@ def run_single_example_mano(agent, env, example, max_steps, instruction, args, e
|
||||
done = False
|
||||
step_idx = 0
|
||||
env.controller.start_recording()
|
||||
|
||||
|
||||
with open(os.path.join(example_result_dir, f"step_0.png"),
|
||||
"wb") as _f:
|
||||
_f.write(obs['screenshot'])
|
||||
@@ -379,12 +379,12 @@ def run_single_example_mano(agent, env, example, max_steps, instruction, args, e
|
||||
obs
|
||||
)
|
||||
if len(actions) > 1:
|
||||
if (("pyautogui.hotkey('shift')" in actions[0] or "pyautogui.hotkey('ctrl')" in actions[0])
|
||||
if (("pyautogui.hotkey('shift')" in actions[0] or "pyautogui.hotkey('ctrl')" in actions[0])
|
||||
and "pyautogui.click" in actions[1]):
|
||||
hotkey_type = 'shift' if "shift" in actions[0] else 'ctrl'
|
||||
action = f"pyautogui.keyDown('{hotkey_type}')\n{actions[1]}\npyautogui.keyUp('{hotkey_type}')"
|
||||
actions = [action]
|
||||
|
||||
actions = [action]
|
||||
|
||||
for action in actions:
|
||||
# Capture the timestamp before executing the action
|
||||
action_timestamp = datetime.datetime.now().strftime("%Y%m%d@%H%M%S")
|
||||
@@ -419,7 +419,7 @@ def run_single_example_mano(agent, env, example, max_steps, instruction, args, e
|
||||
with open(os.path.join(example_result_dir, "result.txt"), "w", encoding="utf-8") as f:
|
||||
f.write(f"{result}\n")
|
||||
env.controller.end_recording(os.path.join(example_result_dir, "recording.mp4"))
|
||||
|
||||
|
||||
def run_single_example_uipath(agent, env, example, max_steps, instruction, args, example_result_dir, scores):
|
||||
runtime_logger = setup_logger(example, example_result_dir)
|
||||
try:
|
||||
@@ -485,7 +485,7 @@ logger = logging.getLogger("desktopenv.experiment")
|
||||
|
||||
def run_single_example_os_symphony(agent, env, example, max_steps, instruction, args, example_result_dir, scores):
|
||||
set_current_result_dir(example_result_dir)
|
||||
|
||||
|
||||
agent.reset(result_dir=example_result_dir)
|
||||
env.reset(task_config=example)
|
||||
time.sleep(30) # Wait for the environment to be ready
|
||||
@@ -507,14 +507,14 @@ def run_single_example_os_symphony(agent, env, example, max_steps, instruction,
|
||||
img_name = f"step_{step_idx + 1}_milestone.png"
|
||||
else:
|
||||
img_name = f"step_{step_idx + 1}.png"
|
||||
|
||||
|
||||
with open(os.path.join(example_result_dir, img_name),
|
||||
"wb") as _f:
|
||||
_f.write(obs['screenshot'])
|
||||
if "coordinates" in response and response["coordinates"]:
|
||||
draw_coordinates(
|
||||
image_bytes=obs['screenshot'],
|
||||
coordinates=response["coordinates"],
|
||||
image_bytes=obs['screenshot'],
|
||||
coordinates=response["coordinates"],
|
||||
save_path=os.path.join(example_result_dir, img_name[:-4] + "_draw.png")
|
||||
)
|
||||
|
||||
@@ -563,10 +563,10 @@ def run_single_example_evocua(agent, env, example, max_steps, instruction, args,
|
||||
Unified run function for EvoCUAAgent (supporting both S1 and S2 modes).
|
||||
"""
|
||||
runtime_logger = setup_logger(example, example_result_dir)
|
||||
|
||||
|
||||
# Reset Environment
|
||||
env.reset(task_config=example)
|
||||
|
||||
|
||||
# Reset Agent
|
||||
# Handle agent reset signature differences if any
|
||||
try:
|
||||
@@ -587,7 +587,7 @@ def run_single_example_evocua(agent, env, example, max_steps, instruction, args,
|
||||
# EvoCUAAgent.predict unified signature: returns (response, actions)
|
||||
# It handles both modes internally.
|
||||
predict_res = agent.predict(instruction, obs)
|
||||
|
||||
|
||||
# Check return signature logic
|
||||
if len(predict_res) == 3:
|
||||
# Compatibility with S1 original signature if agent was updated to match
|
||||
@@ -597,7 +597,7 @@ def run_single_example_evocua(agent, env, example, max_steps, instruction, args,
|
||||
info_dict = {}
|
||||
|
||||
logger.info(f"Step {step_idx + 1} Actions: {actions}")
|
||||
|
||||
|
||||
# Break if no actions (fail-safe)
|
||||
if not actions or (len(actions) == 1 and (actions[0] == "" or "error" in actions[0].lower())):
|
||||
# Allow "FAIL" or "DONE" to process through execution loop if agent outputs them as actions
|
||||
@@ -608,18 +608,18 @@ def run_single_example_evocua(agent, env, example, max_steps, instruction, args,
|
||||
for action in actions:
|
||||
action_timestamp = datetime.datetime.now().strftime("%Y%m%d@%H%M%S%f")
|
||||
logger.info("Executing action: %s", action)
|
||||
|
||||
|
||||
# Execute
|
||||
obs, reward, done, info = env.step(action, args.sleep_after_execution)
|
||||
|
||||
|
||||
logger.info("Reward: %.2f", reward)
|
||||
logger.info("Done: %s", done)
|
||||
|
||||
|
||||
# Save screenshot
|
||||
screenshot_file = f"step_{step_idx + 1}_{action_timestamp}.png"
|
||||
with open(os.path.join(example_result_dir, screenshot_file), "wb") as _f:
|
||||
_f.write(obs['screenshot'])
|
||||
|
||||
|
||||
# Log Trajectory
|
||||
log_entry = {
|
||||
"step_num": step_idx + 1,
|
||||
@@ -634,25 +634,25 @@ def run_single_example_evocua(agent, env, example, max_steps, instruction, args,
|
||||
# Add natural language info if available (S1 style)
|
||||
if info_dict:
|
||||
log_entry["natural_language_action"] = info_dict.get("action")
|
||||
|
||||
|
||||
with open(os.path.join(example_result_dir, "traj.jsonl"), "a", encoding="utf-8") as f:
|
||||
f.write(json.dumps(log_entry, ensure_ascii=False))
|
||||
f.write("\n")
|
||||
|
||||
|
||||
if done:
|
||||
logger.info("The episode is done.")
|
||||
break
|
||||
|
||||
|
||||
step_idx += 1
|
||||
|
||||
|
||||
time.sleep(20) # Wait for environment to settle
|
||||
result = env.evaluate(result_dir=example_result_dir)
|
||||
logger.info("Result: %.2f", result)
|
||||
scores.append(result)
|
||||
|
||||
|
||||
with open(os.path.join(example_result_dir, "result.txt"), "w", encoding="utf-8") as f:
|
||||
f.write(f"{result}\n")
|
||||
|
||||
|
||||
log_task_completion(example, result, example_result_dir, args)
|
||||
|
||||
env.controller.end_recording(os.path.join(example_result_dir, "recording.mp4"))
|
||||
|
||||
Reference in New Issue
Block a user