Initialize evaluation protocols and examples; Implement one kind of eval; Update requirements
5  desktop_env/evaluators/__init__.py  Normal file
@@ -0,0 +1,5 @@
from .table import compare_table

eval_funcs = {
    "compare_table(expected, actual)": compare_table
}
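__init__.py registers the new evaluator under a signature-style key, so a caller can resolve a metric by name. A minimal dispatch sketch, assuming the desktop_env package is importable; the spreadsheet paths are placeholders, not part of the commit:

from desktop_env.evaluators import eval_funcs

# Resolve the evaluator by its registered signature string, then call it.
metric = eval_funcs["compare_table(expected, actual)"]
score = metric("expected.xlsx", "actual.xlsx")  # 1 if the tables match, else 0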
0  desktop_env/evaluators/replay.py  Normal file
14  desktop_env/evaluators/table.py  Normal file
@@ -0,0 +1,14 @@
def compare_table(expected, actual):
    import pandas as pd
    df1 = pd.read_excel(expected)
    df2 = pd.read_excel(actual)

    # Compare the DataFrames
    return 1 if df1.equals(df2) else 0


if __name__ == '__main__':
    path1 = ""
    path2 = ""

    print(compare_table(path1, path2))
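A short end-to-end check of compare_table, assuming pandas and an Excel engine such as openpyxl are installed; the temporary paths and the sample DataFrame are illustrative only, not part of the commit:

import pandas as pd

from desktop_env.evaluators import compare_table

# Write the same frame to two workbooks; identical tables should score 1.
df = pd.DataFrame({"task": ["open_file", "rename_sheet"], "score": [1, 0]})
df.to_excel("/tmp/expected.xlsx", index=False)
df.to_excel("/tmp/actual.xlsx", index=False)

print(compare_table("/tmp/expected.xlsx", "/tmp/actual.xlsx"))  # -> 1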