Files
sci-gui-agent-benchmark/desktop_env/evaluators/metrics/vscode.py
2024-01-20 19:44:52 +08:00

107 lines
2.2 KiB
Python

from typing import Dict
import json
def check_json_keybindings(actual: str, expected: str, **options) -> float:
    """
    Check whether the expected keybinding entry is present in the JSON file.

    Args:
        actual (str): path to result text file
        expected (str): expected dict{}
    Return:
        float: the score
    """
    with open(actual) as fp:
        entries = json.load(fp)
    # Membership test: the loaded JSON (a list of keybinding dicts) must
    # contain an entry equal to `expected`.
    return 1.0 if expected in entries else 0.0
def check_json_settings(actual: str, expected: str, **options) -> float:
    """
    Check that every expected key/value pair appears in the settings JSON.

    Args:
        actual (str): path to the settings JSON file produced by the agent
        expected (str): dict of key/value pairs that must all be present
    Return:
        float: 1.0 if every expected pair matches, else 0.0

    Fix: the previous implementation built ``set(data.items())``, which
    shadowed the imported ``json`` module and raised ``TypeError`` for any
    unhashable value (lists and nested dicts are common in VS Code
    settings.json). A direct per-key comparison handles all JSON value types.
    """
    with open(actual) as f:
        data = json.load(f)
    # Subset check that tolerates unhashable values (lists, nested dicts).
    matches = all(key in data and data[key] == value
                  for key, value in expected.items())
    return 1.0 if matches else 0.0
def compare_text_file(actual: str, expected: str, **options) -> float:
    """
    Score 1.0 when the result file's content is byte-identical to the gold file.

    Args:
        actual (str): path to result text file
        expected (str): path to gold text file
    Return:
        float: the score
    """
    # An empty/missing result path scores zero without touching the filesystem.
    if not actual:
        return 0.
    with open(actual) as result_file, open(expected) as gold_file:
        same = result_file.read() == gold_file.read()
    return float(same)
def compare_config(actual: str, rules: Dict, **options) -> float:
    """
    Score 1.0 when the file at `actual` exactly matches rules['expect'].

    Args:
        actual (str): path to the config file produced by the agent
        rules (Dict): carries the gold content under the 'expect' key
    Return:
        float: the score
    """
    if not actual:
        return 0.
    with open(actual) as src:
        content = src.read()
    return 1.0 if content == rules['expect'] else 0.0
def compare_answer(actual: str, rules: Dict, **options) -> float:
    """
    Exact-match comparison of a result string against the gold answer.

    Args:
        actual (str): result string
        rules (Dict): carries the gold string under the 'expect' key
    Return:
        float: the score
    """
    if not actual:
        return 0.
    # TODO: can use text embedding to get non-zero return
    return 1.0 if actual == rules['expect'] else 0.0
def is_extension_installed(actual: str, rules: Dict, **options) -> float:
    """
    Check the presence (or absence) of an extension in the extension list.

    Args:
        actual (str): the extension-list output to search in
        rules (Dict): {'type': 'contain' | 'not_contain',
                       'expected': substring to look for in `actual`}
    Return:
        float: 1.0 on success, 0.0 on failure

    Raises:
        NotImplementedError: for any other rules['type'] value.

    Fix: added the ``-> float`` return annotation for consistency with the
    other metric functions in this module.
    """
    # NOTE(review): this function reads rules['expected'] while the sibling
    # metrics use rules['expect']; the key is kept as-is since callers'
    # rule data depends on it — confirm against the task configs.
    check_type = rules['type']
    found = rules['expected'] in actual
    if check_type == 'contain':
        return 1.0 if found else 0.0
    if check_type == 'not_contain':
        return 0.0 if found else 1.0
    raise NotImplementedError