xiaochuan corrected his bugs in the multiapp examples; you can try them again now
@@ -86,7 +86,6 @@ def get_vm_file(env, config: Dict[str, Any]) -> Union[Optional[str], List[Option
     if not config.get("multi", False):
         paths: List[str] = [config["path"]]
         dests: List[str] = [config["dest"]]
-        print(config)
         if "time_suffix" in config.keys() and config["time_suffix"]:
             if "time_format" in config.keys():
                 time_format = config["time_format"]
@@ -96,8 +95,6 @@ def get_vm_file(env, config: Dict[str, Any]) -> Union[Optional[str], List[Option
     else:
         paths: List[str] = config["path"]
         dests: List[str] = config["dest"]
-        print(paths)
-        print(dests)

     cache_paths: List[str] = []

@@ -121,9 +118,6 @@ def get_vm_file(env, config: Dict[str, Any]) -> Union[Optional[str], List[Option
         cache_paths.append(_path)
         with open(_path, "wb") as f:
             f.write(file)
-    # debug
-    print("cache_paths")
-    print(cache_paths)
     return cache_paths[0] if len(cache_paths)==1 else cache_paths

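Note: get_vm_file copies one or more files out of the VM into the local cache before the evaluator functions run; the removed print calls were debug output only. A minimal sketch of the two config shapes the branch above distinguishes (the concrete values are illustrative, not taken from this commit):

# single-file form: "path" and "dest" are plain strings
single_cfg = {
    "type": "vm_file",
    "path": "/home/user/Desktop/System_Resources_Report.txt",
    "dest": "System_Resources_Report.txt",
}

# multi-file form: "multi" is set and "path"/"dest" are parallel lists
multi_cfg = {
    "type": "vm_file",
    "multi": True,
    "path": ["/home/user/Desktop/a.txt", "/home/user/Desktop/b.txt"],
    "dest": ["a.txt", "b.txt"],
}
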
@@ -68,7 +68,7 @@ from .general import (
     compare_time_in_speedtest_results,
     is_included_all_json_objects,
     is_gold_text_included_in_pdf,
-    check_csv_line_number,
+    check_line_number,
     file_contains,
     compare_terminal_and_txt,
     fuzzy_place_math,

@@ -1,4 +1,5 @@
 import csv
+import os
 import datetime
 import difflib
 import functools

@@ -91,23 +92,24 @@ def fuzzy_match(result, rules) -> float:


 def fuzzy_place_math(result_file_path, rules) -> float:
+    if result_file_path is None:
+        return 0.
     expect = rules["expected"]  # a list of possible answers
     # read list.docx, and get all texts out, overlook blank lines, remove blanks before and after each line
     doc = Document(result_file_path)
     words_list = []
     for para in doc.paragraphs:
         words_list.extend(para.text.split())
-    # Print out the list of extracted words
-    print("Your Answers: ")
-    print(words_list)
     fuzzy_score_list = []
     for word in words_list:
         max_score = 0
         for ans in expect:
-            score = fuzz.ratio(word, ans)
+            score = fuzz.ratio(word, ans)/100
             max_score = max(max_score, score)
         fuzzy_score_list.append(max_score)
-    return sum(fuzzy_score_list) / len(fuzzy_score_list)
+    if len(fuzzy_score_list) != 3:
+        return 0.
+    return sum(fuzzy_score_list) / 3


 def check_csv(result: str, rules: Dict[str, List[Dict[str, str]]]) -> float:
     """
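Two behavior changes here: fuzz.ratio returns an integer in 0..100, so dividing by 100 keeps each word's score in the usual 0..1 range, and the task is now scored only when exactly three words are extracted from the document. A small sketch of the per-word scoring, assuming the thefuzz-style fuzz.ratio this module already uses and purely hypothetical answers:

from thefuzz import fuzz  # assumption: the same fuzz module general.py already imports

expect = ["Paris", "Lyon", "Nice"]            # hypothetical list of acceptable answers
words_list = ["Paris", "Lyons", "Marseille"]  # hypothetical words parsed from the .docx

fuzzy_score_list = []
for word in words_list:
    # best similarity of this word against any acceptable answer, normalized to 0..1
    fuzzy_score_list.append(max(fuzz.ratio(word, ans) / 100 for ans in expect))

score = sum(fuzzy_score_list) / 3 if len(fuzzy_score_list) == 3 else 0.
print(score)
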
@@ -304,29 +306,40 @@ def check_direct_json_object(result, rules) -> float:
     result = result.replace("'", '"')
     # load json object
     result = json.loads(result)
-    print("result: ")
-    print(result)
-    print("expected: ")
-    print(rules["expected"])
     if result is None:
         return 0.
+    try:
+        expect_in_result = rules.get("expect_in_result", False)
+        if not expect_in_result:
+            expected_json = rules["expected"]
+            for key in expected_json.keys():
+                expected_value = expected_json.get(key)
+                if expected_value != result.get(key):
+                    return 0.
+            return 1.0
+        else:
+            expected_json = rules["expected"]

-    expect_in_result = rules.get("expect_in_result", False)
-    if not expect_in_result:
-        expected_json = rules["expected"]
-        for key in expected_json.keys():
-            expected_value = expected_json.get(key)
-            if expected_value != result.get(key):
-                return 0.
-        return 1.0
-    else:
-        expected_json = rules["expected"]
-        for key in expected_json.keys():
-            expected_value = expected_json.get(key)
-            if expected_value not in result.get(key):
-                return 0.
-        return 1.0
+            for key in expected_json.keys():
+                if isinstance(expected_json.get(key), List):
+                    flag = 0
+                    expected_value_list = expected_json.get(key)
+                    for each_expected_value in expected_value_list:
+                        if each_expected_value in result.get(key):
+                            flag = 1
+                            break
+                    if flag == 0:
+                        return 0.
+                elif isinstance(expected_json.get(key), str):
+                    if expected_json.get(key) not in result.get(key):
+                        return 0.
+                else:
+                    logger.debug("check_direct_json_object: expected value type not supported")
+                    return 0.
+            return 1.0
+    except:
+        logger.debug("check_direct_json_object: result is not a valid json object")
+        return 0.

 def compare_time_in_speedtest_results(speedtest_result_path, time_diff):
     if not speedtest_result_path:
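The rewritten branch means a rule can now list several acceptable answers per key when expect_in_result is set, and the check passes if any of them occurs as a substring of the agent's value; plain string rules keep the old containment behavior, and any other value type is rejected with a debug log instead of raising. A rough usage sketch, assuming check_direct_json_object is imported from this module (keys and values are illustrative):

result = '{"q1": "Mount Kilimanjaro", "q2": "The Himalayas"}'  # hypothetical agent output

# exact-match form: every expected key must equal the parsed value
exact_rules = {"expected": {"q1": "Mount Kilimanjaro", "q2": "The Himalayas"}}

# containment form: each key maps to a list of acceptable substrings
containment_rules = {
    "expected": {"q1": ["Kilimanjaro"], "q2": ["Himalayas", "Everest", "Sagarmatha"]},
    "expect_in_result": True,
}

assert check_direct_json_object(result, exact_rules) == 1.0
assert check_direct_json_object(result, containment_rules) == 1.0
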
@@ -397,24 +410,37 @@ def is_gold_text_included_in_pdf(pdf_file_path, gold_text_path):
 def file_contains(file_path, config):
     # file_path ends with .txt
     if not file_path:
-        return 1
-    with open(file_path, 'r') as f:
-        file_text = f.read()
-    for text in config["expected"]:
-        if text not in file_text:
-            return 0
-    return 1
+        return 0.
+    try:
+        with open(file_path, 'r') as f:
+            file_text = f.read()
+        for text in config["expected"]:
+            if text not in file_text:
+                logger.debug(f"file_contains: {text} not found in {file_path}")
+                return 0.
+    except:
+        logger.debug("file_contains: file not found or not readable")
+        return 0.
+    return 1.


-def check_csv_line_number(file_path, line_number):
-    # check file_path suffix
-    if not file_path.endswith('.csv'):
-        return 0
-    # check line number
-    with open(file_path, 'r') as f:
-        reader = csv.reader(f)
-        line_count = sum(1 for row in reader)
-    return 1 if line_count == int(line_number["expected"]) else 0
+def check_line_number(file_path, line_number):
+    # check if file_path exists
+    if file_path is None or not os.path.isfile(file_path):
+        return 0.
+    timeRegex = "([01]\d|2[0-3]):[0-5]\d:[0-5]\d"
+    # check if the string that matches the timeRegex in this txt file equals to line_number["expected"]
+    try:
+        with open(file_path, 'r') as f:
+            line_count = 0
+            for line in f:
+                if re.search(timeRegex, line):
+                    line_count += 1
+            # if line_count equals to line_number["expected"], return 1, else return 0
+            return 1 if line_count == int(line_number["expected"]) else 0
+    except:
+        logger.debug("check_line_number: file not found or not readable")
+        return 0.


 def compare_terminal_and_txt(txt_file_path, terminal_output):
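check_line_number now counts the report lines that carry an HH:MM:SS timestamp rather than counting CSV rows, which matches the task no longer producing a .csv at all. A small illustration of that matching (the sample lines are made up and only roughly shaped like sar output):

import re

time_regex = r"([01]\d|2[0-3]):[0-5]\d:[0-5]\d"

lines = [
    "12:00:01        CPU     %user     %nice   %system   %iowait    %steal     %idle",
    "12:00:02        all      1.23      0.00      0.45      0.02      0.00     98.30",
    "Average:        all      1.10      0.00      0.40      0.02      0.00     98.48",
]

# only timestamped lines count; summary rows such as "Average:" do not
line_count = sum(1 for line in lines if re.search(time_regex, line))
print(line_count)  # 2
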
@@ -1,7 +1,7 @@
 {
     "id": "2373b66a-092d-44cb-bfd7-82e86e7a3b4d",
     "snapshot": "multiapps",
-    "instruction": "I want to understand the resource usage of my Ubuntu system under normal workloads. Please use the `sar` command in the `sysstat` toolkit to monitor the system CPU usage, evaluate the status once every second for 30 seconds, output the results to \"System_Resources_Report.txt\" under Desktop, and convert the txt to csv file with the same name.",
+    "instruction": "I want to understand the resource usage of my Ubuntu system under normal workloads. Please use the `sar` command in the `sysstat` toolkit to monitor system activity, evaluate the status once every second for 30 seconds, output the results to \"System_Resources_Report.txt\" under Desktop.",
     "source": "author",
     "config": [
         {
@@ -17,7 +17,7 @@
         "os", "calc"
     ],
     "evaluator": {
-        "func": ["file_contains", "check_csv_line_number"],
+        "func": ["file_contains", "check_line_number"],
         "result":
         [
             {
@@ -27,8 +27,8 @@
             },
             {
                 "type": "vm_file",
-                "path": "/home/user/Desktop/System_Resources_Report.csv",
-                "dest": "System_Resources_Report.csv"
+                "path": "/home/user/Desktop/System_Resources_Report.txt",
+                "dest": "System_Resources_Report.txt"
             }
         ],
         "expected":
@@ -36,13 +36,13 @@
             {
                 "type": "rule",
                 "rules" :{
-                    "expected": ["tps", "rtps", "wtps", "dtps", "bread/s", "bwrtn/s", "bdscd/s", "Average", "Linux"]
+                    "expected": ["CPU", "%user","%nice","%system", "%iowait", "%steal", "%idle"]
                 }
            },
            {
                "type": "rule",
                "rules": {
-                    "expected": "33"
+                    "expected": "31"
                }
            }]
    }
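With two functions listed, the evaluator presumably applies each one to the matching result getter and expected rule: file_contains checks that the CPU-usage header fields appear in the report, and check_line_number checks the number of timestamped sample lines. A rough sketch of that pairing (an illustration of the config, not the harness's actual loop; the local path is hypothetical):

report_txt = "cache/System_Resources_Report.txt"  # hypothetical local copy fetched by the vm_file getters

funcs = [file_contains, check_line_number]
results = [report_txt, report_txt]
rules = [
    {"expected": ["CPU", "%user", "%nice", "%system", "%iowait", "%steal", "%idle"]},
    {"expected": "31"},
]

scores = [func(result, rule) for func, result, rule in zip(funcs, results, rules)]
print(scores)  # e.g. [1.0, 1] when the report looks right
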
@@ -33,6 +33,24 @@
         "chrome", "os", "writer"
     ],
     "evaluator": {
+        "postconfig":[
+            {
+                "type": "activate_window",
+                "parameters": {
+                    "window_name": "AllLocations.docx - LibreOffice Writer"
+                }
+            },
+            {
+                "type": "execute",
+                "parameters": {
+                    "command": [
+                        "python",
+                        "-c",
+                        "import pyautogui; import time; pyautogui.hotkey(\"ctrl\", \"s\"); time.sleep(0.5); pyautogui.press(\"enter\");"
+                    ]
+                }
+            }
+        ],
         "func": "fuzzy_place_math",
         "result": {
             "type": "vm_file",
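The new postconfig makes sure the agent's edits are actually on disk before fuzzy_place_math opens AllLocations.docx: it focuses the Writer window and then runs the inline python one-liner above, which expands to roughly the following (the trailing Enter is presumably confirming LibreOffice's keep-format dialog):

import time

import pyautogui

pyautogui.hotkey("ctrl", "s")  # trigger Save in the focused LibreOffice Writer window
time.sleep(0.5)                # give the save/format dialog a moment to appear
pyautogui.press("enter")       # accept the default button so the .docx is written
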
@@ -20,7 +20,7 @@
         "parameters": {
             "files": [
                 {
-                    "url": "https://drive.usercontent.google.com/download?id=1iquZNI4ktx2915srQ1MMlhcwCuXnmq5T&export=download&authuser=0&confirm=t&uuid=e0bfd86f-a0f1-4a9e-8344-0f645eb379dd&at=APZUnTWQnCd7bNtf8ZN4KfkQUaJ9:1709972645590",
+                    "url": "https://drive.usercontent.google.com/download?id=1XLBhnopqQrTw3bxRWOyfTCslfawFyOaZ&export=download&authuser=0&confirm=t&uuid=f41a09c1-8319-4a50-b8c3-4fc58ba51c00&at=APZUnTV3uiv7wKiSvoCekJG_aVo3:1710051227101",
                     "path": "/home/user/Pictures/picture2.jpg"

                }
@@ -68,9 +68,9 @@
             "type": "rule",
             "rules":{
                 "expected":{
-                    "ec076282f61ba74642e94b5a6a1250c6988204d59d9b02936606b6b8ef1e4433": "Kilimanjaro",
-                    "999957c8de835bfa420d198270e7a6b079ee20ff53a3f214491e8134768a7c0b": "Himalayas",
-                    "79f45d40d8413d4e81f1b9734ea39e58622cafd79e12bab32959643fc245147c": "Hua"
+                    "ec076282f61ba74642e94b5a6a1250c6988204d59d9b02936606b6b8ef1e4433": ["Kilimanjaro"],
+                    "6ed4239ecc2be3ec15ad65a78c5c823b9004d640b8cc83a6a7af5930f354de91": ["Himalayas", "Everest", "Sagarmatha"],
+                    "79f45d40d8413d4e81f1b9734ea39e58622cafd79e12bab32959643fc245147c": ["Hua", "hua"]
                 },
                 "expect_in_result": true
             }
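The 64-character hex keys look like hashes identifying each question, and their values are now lists of acceptable answers, which is exactly the case the new isinstance(..., List) branch in check_direct_json_object handles when "expect_in_result" is true. For instance, with this rule a hypothetical answer for the second key passes as long as any accepted name appears in it:

accepted = ["Himalayas", "Everest", "Sagarmatha"]
answer = "Mount Everest"  # hypothetical value the agent stored for this key
print(any(name in answer for name in accepted))  # True, so this key passes
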
@@ -1,7 +1,7 @@
 {
     "id": "da922383-bfa4-4cd3-bbad-6bebab3d7742",
     "snapshot": "multiapps",
-    "instruction": "I browsed a lot of interesting blog articles today. I hope to store these articles in my local designated folder just like zotero stores papers. Please download the blogs you are opening now in pdf format to /home/user/Documents/Blogs.",
+    "instruction": "I browsed a lot of interesting blog articles today. I hope to store these articles in my local designated folder just like zotero stores papers. Please download the blogs opening now in pdf format and save them in their tile to /home/user/Documents/Blogs.",
     "source": "authors",
     "config": [
         {