sci-gui-agent-benchmark/main.py

import json

from desktop_env.envs.desktop_env import DesktopEnv


def human_agent():
    """
    Runs the desktop Gym environment, replaying a hard-coded demo trajectory
    (the human-input hook is left commented out in the action loop).
    """
    with open("evaluation_examples/examples/f9584479-3d0d-4c79-affa-9ad7afdd8850.json", "r") as f:
        example = json.load(f)

    # Alternative VM images used during development:
    #     path_to_vm="/home/yuri/vmware/Windows 10 x64/Windows 10 x64.vmx",
    #     path_to_vm="/home/yuri/vmware/Ubuntu 64-bit/Ubuntu 64-bit.vmx",
    env = DesktopEnv(
        path_to_vm="/home/david/vmware/KUbuntu 64-bit/KUbuntu 64-bit.vmx",
        action_space="computer_13",
        # snapshot_path="base_setup",
        snapshot_path="Init6",
        instruction=example["instruction"],
        config=example["config"],
        evaluator=example["evaluator"],
    )

    # Reset the environment to the configured snapshot.
    observation = env.reset()
    done = False

    # Hard-coded demo trajectory: move the cursor, then right-click.
    trajectory = [
        {
            "action_type": "MOVE_TO",
            "parameters": {
                "x": 754,
                "y": 1057
            }
        },
        {"action_type": "CLICK", "parameters": {"button": "right", "num_clicks": 1}}
    ]

    for action in trajectory:
        # action = get_human_action()  # interactive input, currently disabled
        print(action)
        observation, reward, done, info = env.step(action, pause=5)
        print("Observation:", observation)
        print("Reward:", reward)
        print("Info:", info)
        print("================================\n")

        if done:
            print("The episode is done.")
            break

    result = env.evaluate()
    print("Result:", result)

    env.close()
    print("Environment closed.")


if __name__ == "__main__":
    human_agent()
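

# Note (not part of the original file): the action loop references a
# commented-out get_human_action() helper that main.py never defines. Below is
# a minimal, hypothetical sketch of such a helper, assuming actions are typed
# in as JSON dicts matching the "computer_13" format used in `trajectory`
# above; if actually used, it should be defined before human_agent() is called.
def get_human_action():
    """Prompt the operator for one action, e.g.
    {"action_type": "CLICK", "parameters": {"button": "left", "num_clicks": 1}}
    """
    return json.loads(input("Enter action as JSON: "))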