ver Jan31st
merged from main to zdy
@@ -63,7 +63,8 @@ class PythonController:
Executes a python command on the server.
It can be used to execute pyautogui commands, or any other Python command.
"""
command_list = ["python", "-c", self.pkgs_prefix.format(command=command)]
# command_list = ["python", "-c", self.pkgs_prefix.format(command=command)]
command_list = ["python3", "-c", self.pkgs_prefix.format(command=command)]
payload = json.dumps({"command": command_list, "shell": False})
headers = {
'Content-Type': 'application/json'
@@ -1,10 +1,20 @@
# Setup Instructions

## LibreOffice
For LibreOffice, open the application first, then enable the setting that suppresses the format-warning pop-up shown on 'Ctrl + S'.
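
If you prefer to script this rather than clicking through Tools > Options, the sketch below is one possible approach, assuming the default user-profile path and the `WarnAlienFormat` configuration item (which controls the "keep current format" warning); run it while LibreOffice is closed.

```python
# Hedged sketch: turn off the "keep current format" warning that pops up on Ctrl + S.
from pathlib import Path

cfg = Path.home() / ".config/libreoffice/4/user/registrymodifications.xcu"
item = ('<item oor:path="/org.openoffice.Office.Common/Save/Document">'
        '<prop oor:name="WarnAlienFormat" oor:op="fuse"><value>false</value></prop></item>')
text = cfg.read_text()
if "WarnAlienFormat" not in text:
    cfg.write_text(text.replace("</oor:items>", item + "</oor:items>"))
```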

## LibreOffice Impress
### Setting Up the python-pptx Library
```shell
pip install python-pptx
```
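
A quick way to confirm the install (a minimal sketch; the output filename is arbitrary):

```python
# Sanity check for python-pptx: create a one-slide deck and save it.
from pptx import Presentation

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[0])  # title-slide layout
slide.shapes.title.text = "python-pptx works"
prs.save("pptx_smoke_test.pptx")
```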

## LibreOffice Writer

### Setting Up the python-docx Library
### Setting Up the python-docx and odfpy Library
```shell
pip install python-docx
pip install odfpy
```
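
Similarly, a minimal sketch to confirm both libraries import and can write a file (filenames are arbitrary):

```python
# Sanity check for python-docx and odfpy.
from docx import Document
from odf.opendocument import OpenDocumentText
from odf.text import P

doc = Document()
doc.add_paragraph("python-docx works")
doc.save("docx_smoke_test.docx")

odt = OpenDocumentText()
odt.text.addElement(P(text="odfpy works"))
odt.save("odt_smoke_test.odt")
```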

## LibreOffice Calc
@@ -1,13 +1,25 @@
from .chrome import get_default_search_engine, get_cookie_data, get_bookmarks, get_open_tabs_info, get_pdf_from_url, \
get_shortcuts_on_desktop, get_history, get_enabled_experiments, get_chrome_language, get_chrome_font_size, \
get_profile_name, get_number_of_search_results, get_googledrive_file, get_active_tab_info
from .chrome import (
get_default_search_engine,
get_cookie_data,
get_bookmarks,
get_open_tabs_info,
get_pdf_from_url,
get_shortcuts_on_desktop,
get_history,
get_enabled_experiments,
get_chrome_language,
get_chrome_font_size,
get_profile_name,
get_number_of_search_results,
get_googledrive_file,
get_active_tab_info
)
from .file import get_cloud_file, get_vm_file, get_cache_file
from .general import get_vm_command_line, get_vm_terminal_output
from .gimp import get_gimp_config_file
from .impress import get_audio_in_slide
from .info import get_vm_screen_size, get_vm_window_size, get_vm_wallpaper, get_list_directory
from .misc import get_rule, get_accessibility_tree
from .replay import get_replay
from .vlc import get_vlc_playing_info, get_vlc_config
from .vlc import get_vlc_playing_info, get_vlc_config, get_default_video_player
from .vscode import get_vscode_config
from .impress import get_audio_in_slide

desktop_env/evaluators/getters/gimp.py (new file, 33 lines)
@@ -0,0 +1,33 @@
import logging
import os
from typing import Dict

logger = logging.getLogger("desktopenv.getters.gimp")


def get_gimp_config_file(env, config: Dict[str, str]):
"""
Gets the config setting of GIMP.
"""

os_type = env.vm_platform
print(os_type)

if os_type == "Linux":
config_path = \
env.controller.execute_python_command(f"import os; print("
f"os"
f".path.expanduser("
f"'~/.config/GIMP/2.10/"
f"{config['file_name']}'))")[
'output'].strip()
# TODO: Add support for macOS and Windows
else:
raise Exception("Unsupported operating system", os_type)

_path = os.path.join(env.cache_dir, config["dest"])
content = env.controller.get_file(config_path)
with open(_path, "wb") as f:
f.write(content)

return _path
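
For reference, a hedged example of the `config` dict this getter expects (keys are taken from the code above; the values are only illustrative):

```python
# "file_name" is the GIMP rc file to fetch from inside the VM (~/.config/GIMP/2.10/<file_name>),
# "dest" is the filename to store the downloaded copy under in the local cache directory.
config = {"file_name": "sessionrc", "dest": "sessionrc_test"}
# local_path = get_gimp_config_file(env, config)
```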
@@ -1,7 +1,8 @@
import logging
import os
from typing import Dict

from collections import Counter
from .general import get_vm_command_line
import requests

logger = logging.getLogger("desktopenv.getters.vlc")
@@ -58,3 +59,28 @@ def get_vlc_config(env, config: Dict[str, str]):
f.write(content)

return _path


def get_default_video_player(env, config: dict):
""" Gets the default application for a category or file extension.
"""

os_type = env.vm_platform

if os_type == "Linux":
extensions = ['3gp', '3gp', '3gpp', '3gpp', '3gpp2', '3gpp2', 'avi', 'avi', 'divx', 'divx', 'dv', 'dv', 'fli', 'fli', 'flv', 'flv', 'mp2t', 'mp2t', 'mp4', 'mp4', 'mp4v-es', 'mp4v-es', 'mpeg', 'mpeg', 'mpeg-system', 'mpeg-system', 'msvideo', 'msvideo', 'ogg', 'ogg', 'quicktime', 'quicktime', 'vnd.divx', 'vnd.divx', 'vnd.mpegurl', 'vnd.mpegurl', 'vnd.rn-realvideo', 'vnd.rn-realvideo', 'webm', 'webm', 'x-anim', 'x-anim', 'x-avi', 'x-avi', 'x-flc', 'x-flc', 'x-fli', 'x-fli', 'x-flv', 'x-flv', 'x-m4v', 'x-m4v', 'x-matroska', 'x-matroska', 'x-mpeg', 'x-mpeg', 'x-mpeg-system', 'x-mpeg-system', 'x-mpeg2', 'x-mpeg2', 'x-ms-asf', 'x-ms-asf', 'x-ms-asf-plugin', 'x-ms-asf-plugin', 'x-ms-asx', 'x-ms-asx', 'x-ms-wm', 'x-ms-wm', 'x-ms-wmv', 'x-ms-wmv', 'x-ms-wmx', 'x-ms-wmx', 'x-ms-wvx', 'x-ms-wvx', 'x-msvideo', 'x-msvideo', 'x-nsv', 'x-nsv', 'x-ogm', 'x-ogm', 'x-ogm+ogg', 'x-theora', 'x-theora', 'x-theora+ogg', 'x-theora+ogg']
apps = []
for ext in extensions:
app = get_vm_command_line(env, {"command": ["xdg-mime", "query", "default", f"video/{ext}"]})
if app:
apps.append(app)
if len(apps) == 0:
return 'unknown'
else:
return Counter(apps).most_common(1)[0][0]
elif os_type == "Darwin":
raise Exception("Unsupported operating system", os_type)
elif os_type == "Windows":
raise Exception("Unsupported operating system", os_type)
else:
raise Exception("Unsupported operating system", os_type)
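
For reference, each per-extension query above boils down to an `xdg-mime` lookup. A hedged local equivalent (Linux only, requires xdg-utils; `video/mp4` is just a representative MIME type):

```python
# Ask xdg-mime for the default handler of a video MIME type; prints e.g. "vlc.desktop".
import subprocess

handler = subprocess.run(
    ["xdg-mime", "query", "default", "video/mp4"],
    capture_output=True, text=True, check=False,
).stdout.strip()
print(handler or "unknown")
```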
@@ -1,6 +1,6 @@
import logging
from typing import Any, Dict

import time
from .file import get_vm_file
from .replay import get_replay

@@ -27,6 +27,7 @@ def get_vscode_config(env, config: Dict[str, Any]) -> str:
]

get_replay(env, trajectory)
time.sleep(1.0)

return get_vm_file(env, {
"path": config["path"],
@@ -1,36 +1,118 @@
|
||||
from .chrome import is_expected_tabs, is_expected_bookmarks, compare_pdfs, is_cookie_deleted, is_shortcut_on_desktop, \
|
||||
check_font_size, \
|
||||
check_enabled_experiments, check_history_deleted, is_expected_search_query
|
||||
from .docs import compare_font_names, compare_subscript_contains, has_page_numbers_in_footers, compare_docx_lines
|
||||
from .docs import evaluate_colored_words_in_tables, check_highlighted_words, evaluate_strike_through_last_paragraph, \
|
||||
evaluate_conversion, evaluate_spacing, check_italic_font_size_14, evaluate_alignment, get_unique_train_ids, \
|
||||
check_no_duplicates, compare_init_lines
|
||||
from .general import exact_match, fuzzy_match
|
||||
|
||||
from .docs import find_default_font, contains_page_break, compare_docx_files, compare_docx_tables, compare_line_spacing, \
|
||||
compare_insert_equation, compare_highlighted_text
|
||||
from .docs import is_first_line_centered, check_file_exists, compare_contains_image
|
||||
from .general import check_csv, check_accessibility_tree, run_sqlite3, check_json, check_list
|
||||
from .general import exact_match, fuzzy_match, check_include_exclude
|
||||
from .gimp import increase_saturation, decrease_brightness, check_file_exists, compare_triangle_positions
|
||||
from .gimp import compare_images
|
||||
from .basic_os import (
|
||||
check_gnome_favorite_apps,
|
||||
is_utc_0,
|
||||
check_text_enlarged,
|
||||
check_moved_jpgs
|
||||
)
|
||||
from .chrome import (
|
||||
is_expected_tabs,
|
||||
is_expected_bookmarks,
|
||||
compare_pdfs,
|
||||
is_cookie_deleted,
|
||||
is_shortcut_on_desktop,
|
||||
check_font_size,
|
||||
check_enabled_experiments,
|
||||
check_history_deleted,
|
||||
is_expected_search_query
|
||||
)
|
||||
from .docs import (
|
||||
compare_font_names,
|
||||
compare_subscript_contains,
|
||||
has_page_numbers_in_footers,
|
||||
compare_docx_lines,
|
||||
evaluate_colored_words_in_tables,
|
||||
check_highlighted_words,
|
||||
evaluate_strike_through_last_paragraph,
|
||||
evaluate_conversion,
|
||||
evaluate_spacing,
|
||||
check_italic_font_size_14,
|
||||
evaluate_alignment,
|
||||
get_unique_train_ids,
|
||||
check_no_duplicates,
|
||||
compare_init_lines
|
||||
)
|
||||
from .docs import (
|
||||
find_default_font,
|
||||
contains_page_break,
|
||||
compare_docx_files,
|
||||
compare_docx_tables,
|
||||
compare_line_spacing,
|
||||
compare_insert_equation,
|
||||
compare_highlighted_text,
|
||||
is_first_line_centered,
|
||||
check_file_exists,
|
||||
compare_contains_image
|
||||
)
|
||||
from .general import (
|
||||
check_csv,
|
||||
check_accessibility_tree,
|
||||
run_sqlite3,
|
||||
check_json,
|
||||
check_list,
|
||||
exact_match,
|
||||
fuzzy_match,
|
||||
check_include_exclude
|
||||
)
|
||||
from .gimp import (
|
||||
check_brightness_decrease_and_structure_sim,
|
||||
check_contrast_increase_and_structure_sim,
|
||||
check_saturation_increase_and_structure_sim,
|
||||
check_image_size_and_structure_sim,
|
||||
check_image_mirror,
|
||||
check_palette_and_structure_sim,
|
||||
check_textbox_on_leftside,
|
||||
check_green_background,
|
||||
check_file_exists_and_structure_sim,
|
||||
check_triangle_position,
|
||||
check_structure_sim,
|
||||
check_config_status,
|
||||
compare_image_list,
|
||||
increase_saturation,
|
||||
decrease_brightness,
|
||||
check_file_exists,
|
||||
compare_triangle_positions
|
||||
)
|
||||
from .libreoffice import check_libre_locale
|
||||
from .pdf import check_pdf_pages
|
||||
from .slides import check_presenter_console_disable, check_image_stretch_and_center, check_slide_numbers_color, \
|
||||
compare_pptx_files, check_strikethrough, \
|
||||
check_slide_orientation_Portrait, evaluate_presentation_fill_to_rgb_distance, check_left_panel
|
||||
# from .table import check_sheet_list, check_xlsx_freeze, check_xlsx_zoom, check_data_validations
|
||||
from .table import compare_table, compare_csv
|
||||
from .thunderbird import check_thunderbird_prefs, check_thunderbird_filter
|
||||
|
||||
from .vlc import is_vlc_playing, is_vlc_recordings_folder, is_vlc_fullscreen, compare_images, compare_audios, \
|
||||
compare_videos
|
||||
from .thunderbird import check_thunderbird_prefs, check_thunderbird_filter
|
||||
from .vlc import is_vlc_playing, is_vlc_recordings_folder, is_vlc_fullscreen, compare_images, compare_audios, \
|
||||
compare_videos, check_qt_bgcone, check_one_instance_when_started_from_file, check_qt_minimal_view, \
|
||||
check_qt_max_volume, \
|
||||
check_qt_slider_colours, check_global_key_play_pause
|
||||
from .vscode import compare_text_file, compare_config, compare_answer, is_extension_installed, check_json_settings, \
|
||||
from .slides import (
|
||||
check_presenter_console_disable,
|
||||
check_image_stretch_and_center,
|
||||
check_slide_numbers_color,
|
||||
compare_pptx_files,
|
||||
check_strikethrough,
|
||||
check_slide_orientation_Portrait,
|
||||
evaluate_presentation_fill_to_rgb_distance,
|
||||
check_left_panel,
|
||||
check_transition,
|
||||
check_page_number_colors
|
||||
)
|
||||
from .table import (
|
||||
compare_table,
|
||||
compare_csv
|
||||
)
|
||||
from .thunderbird import (
|
||||
check_thunderbird_prefs,
|
||||
check_thunderbird_filter
|
||||
)
|
||||
from .vlc import (
|
||||
is_vlc_playing,
|
||||
is_vlc_recordings_folder,
|
||||
is_vlc_fullscreen,
|
||||
compare_images,
|
||||
compare_audios,
|
||||
compare_videos,
|
||||
check_qt_bgcone,
|
||||
check_one_instance_when_started_from_file,
|
||||
check_qt_minimal_view,
|
||||
check_qt_max_volume,
|
||||
check_qt_slider_colours,
|
||||
check_global_key_play_pause
|
||||
)
|
||||
from .vscode import (
|
||||
compare_text_file,
|
||||
compare_config,
|
||||
compare_answer,
|
||||
is_extension_installed,
|
||||
check_json_settings,
|
||||
check_json_keybindings
|
||||
from .os import check_gnome_favorite_apps, is_utc_0, check_text_enlarged, check_moved_jpgs
|
||||
|
||||
)
|
||||
|
||||
@@ -1,13 +1,16 @@
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import xml.etree.ElementTree as ET
|
||||
import zipfile
|
||||
import re
|
||||
from typing import List, Dict, Any
|
||||
|
||||
from docx import Document
|
||||
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
|
||||
from docx.shared import RGBColor
|
||||
from odf.opendocument import load
|
||||
from odf.text import P
|
||||
from odf.text import Span
|
||||
|
||||
logger = logging.getLogger("desktopenv.metric.docs")
|
||||
|
||||
@@ -51,12 +54,39 @@ def contains_page_break(docx_file):
|
||||
|
||||
|
||||
def compare_docx_files(file1, file2, ignore_blanks=True):
|
||||
doc1 = Document(file1)
|
||||
doc2 = Document(file2)
|
||||
def get_paragraph_texts_odt(document):
|
||||
paragraphs = document.getElementsByType(P)
|
||||
paragraph_texts = []
|
||||
for paragraph in paragraphs:
|
||||
text_parts = []
|
||||
for node in paragraph.childNodes:
|
||||
if node.nodeType == node.TEXT_NODE:
|
||||
text_parts.append(node.data)
|
||||
elif node.nodeType == node.ELEMENT_NODE and node.tagName == 'text:span':
|
||||
# Assuming direct text content in <text:span>, for simplicity
|
||||
for child in node.childNodes:
|
||||
if child.nodeType == child.TEXT_NODE:
|
||||
text_parts.append(child.data)
|
||||
paragraph_texts.append(''.join(text_parts))
|
||||
return paragraph_texts
|
||||
|
||||
doc1_paragraphs = [p.text for p in doc1.paragraphs]
|
||||
doc2_paragraphs = [p.text for p in doc2.paragraphs]
|
||||
# Determine file types and load documents
|
||||
if file1.endswith('.docx') and file2.endswith('.docx'):
|
||||
doc1 = Document(file1)
|
||||
doc2 = Document(file2)
|
||||
doc1_paragraphs = [p.text for p in doc1.paragraphs]
|
||||
doc2_paragraphs = [p.text for p in doc2.paragraphs]
|
||||
elif file1.endswith('.odt') and file2.endswith('.odt'):
|
||||
doc1 = load(file1)
|
||||
doc2 = load(file2)
|
||||
doc1_paragraphs = get_paragraph_texts_odt(doc1)
|
||||
doc2_paragraphs = get_paragraph_texts_odt(doc2)
|
||||
else:
|
||||
# Unsupported file types or mismatch
|
||||
print("Unsupported file types or mismatch between file types.")
|
||||
return 0
|
||||
|
||||
# Process and compare documents
|
||||
if ignore_blanks:
|
||||
text1 = re.sub(r'\s+', ' ', '\n'.join(doc1_paragraphs)).strip()
|
||||
text2 = re.sub(r'\s+', ' ', '\n'.join(doc2_paragraphs)).strip()
|
||||
@@ -64,19 +94,16 @@ def compare_docx_files(file1, file2, ignore_blanks=True):
|
||||
return 0
|
||||
else:
|
||||
if len(doc1_paragraphs) != len(doc2_paragraphs):
|
||||
# print(len(doc1_paragraphs))
|
||||
# print(len(doc2_paragraphs))
|
||||
return 0
|
||||
|
||||
# Compare each paragraph
|
||||
for p1, p2 in zip(doc1_paragraphs, doc2_paragraphs):
|
||||
if p1 != p2:
|
||||
# print(p1)
|
||||
# print(p2)
|
||||
return 0
|
||||
|
||||
return 1
|
||||
|
||||
|
||||
def compare_init_lines(file1, file2):
|
||||
doc1 = Document(file1)
|
||||
doc2 = Document(file2)
|
||||
@@ -93,6 +120,7 @@ def compare_init_lines(file1, file2):
|
||||
|
||||
return 1
|
||||
|
||||
|
||||
def compare_docx_tables(docx_file1, docx_file2):
|
||||
doc1 = Document(docx_file1)
|
||||
doc2 = Document(docx_file2)
|
||||
@@ -217,6 +245,7 @@ def compare_contains_image(docx_file1, docx_file2):
|
||||
return 0
|
||||
return 1
|
||||
|
||||
|
||||
# file1 = 'path/to/file1.docx'
|
||||
# file2 = 'path/to/file2.docx'
|
||||
|
||||
@@ -255,24 +284,23 @@ def evaluate_colored_words_in_tables(file_path1, file_path2):
|
||||
def check_highlighted_words(file_path1, file_path2):
|
||||
if not compare_docx_files(file_path1, file_path2):
|
||||
return 0
|
||||
|
||||
# Extract content.xml from the .odt file
|
||||
extract_dir = file_path1 + "_extracted"
|
||||
with zipfile.ZipFile(file_path1, 'r') as zip_ref:
|
||||
zip_ref.extractall(extract_dir)
|
||||
content_xml_path = os.path.join(extract_dir, 'content.xml')
|
||||
with open(content_xml_path, 'r') as file:
|
||||
content_xml = file.read()
|
||||
|
||||
# Check for yellow highlights in the content.xml
|
||||
yellow_highlight_pattern = re.compile(r'(.{0,50}background-color="#ffff00"[^>]*>.{0,50})')
|
||||
yellow_highlight_matches = yellow_highlight_pattern.findall(content_xml)
|
||||
doc = load(file_path1)
|
||||
highlighted = False
|
||||
|
||||
# Return True if yellow highlights are NOT found, otherwise True
|
||||
if yellow_highlight_matches:
|
||||
return 0
|
||||
else:
|
||||
return 1
|
||||
for span in doc.getElementsByType(Span):
|
||||
style_name = span.getAttribute('stylename')
|
||||
if style_name:
|
||||
for automatic_style in doc.automaticstyles.childNodes:
|
||||
if automatic_style.getAttribute('name') == style_name:
|
||||
for property in automatic_style.childNodes:
|
||||
if property.getAttribute('backgroundcolor') == '#ffff00':
|
||||
highlighted = True
|
||||
break
|
||||
if highlighted:
|
||||
break
|
||||
|
||||
return 0 if highlighted else 1
|
||||
|
||||
|
||||
def evaluate_strike_through_last_paragraph(file_path1, file_path2):
|
||||
@@ -318,9 +346,9 @@ def evaluate_spacing(file_path):
|
||||
body_spacing = document.paragraphs[1].paragraph_format.line_spacing
|
||||
conclusion_spacing = document.paragraphs[2].paragraph_format.line_spacing
|
||||
if (introduction_spacing == 1.0 and body_spacing == 2.0 and conclusion_spacing == 1.5):
|
||||
return 1
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
return 0
|
||||
|
||||
|
||||
def check_italic_font_size_14(path1, path2):
|
||||
@@ -358,13 +386,14 @@ def evaluate_alignment(docx_path):
|
||||
second_part = ' '.join(words[3:])
|
||||
|
||||
# Check if the sentence structure matches the pattern: first part + large space/tab + second part
|
||||
if not (first_part in sentence and second_part in sentence and sentence.find(first_part) < sentence.find(second_part)):
|
||||
if not (first_part in sentence and second_part in sentence and sentence.find(first_part) < sentence.find(
|
||||
second_part)):
|
||||
return 0 # The sentence does not meet the alignment criteria
|
||||
|
||||
return 1 # All sentences meet the alignment criteria
|
||||
|
||||
|
||||
def get_unique_train_ids(initial_file): #fixed standard
|
||||
def get_unique_train_ids(initial_file): # fixed standard
|
||||
doc = Document(initial_file)
|
||||
train_ids = set()
|
||||
processed_lines = 0
|
||||
@@ -374,11 +403,12 @@ def get_unique_train_ids(initial_file): #fixed standard
|
||||
if len(line_parts) == 4:
|
||||
train_id = line_parts[1].strip()
|
||||
if train_id not in train_ids:
|
||||
train_ids.add(train_id)
|
||||
processed_lines += 1
|
||||
train_ids.add(train_id)
|
||||
processed_lines += 1
|
||||
|
||||
return train_ids, processed_lines
|
||||
|
||||
|
||||
def check_no_duplicates(initial_file, processed_file):
|
||||
# Open the document
|
||||
train_ids_ini, ini_lines = get_unique_train_ids(initial_file)
|
||||
@@ -405,6 +435,7 @@ def check_no_duplicates(initial_file, processed_file):
|
||||
# No duplicates found and at least one valid line was processed
|
||||
return 1
|
||||
|
||||
|
||||
def compare_docx_lines(file1, file2):
|
||||
# Read the text of the document, line by line
|
||||
doc1 = Document(file1)
|
||||
@@ -416,21 +447,46 @@ def compare_docx_lines(file1, file2):
|
||||
# print(doc2_lines)
|
||||
|
||||
# Convert the list of lines to sets and compare
|
||||
return set(doc1_lines) == set(doc2_lines)
|
||||
if set(doc1_lines) == set(doc2_lines):
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
# Deprecated: .docx files saved on Ubuntu cannot be compared for highlights with this function (reason unknown)
def compare_highlighted_text(file1, file2):
|
||||
def extract_highlighted_text(doc):
|
||||
highlighted_text = []
|
||||
# Iterate through each run in each paragraph to check for highlight
|
||||
for paragraph in doc.paragraphs:
|
||||
for run in paragraph.runs:
|
||||
if run.font.highlight_color: # Checks if the run is highlighted
|
||||
highlighted_text.append(run.text.strip())
|
||||
return highlighted_text
|
||||
def extract_highlighted_text(file_path):
|
||||
highlighted_texts = []
|
||||
|
||||
# Open the .docx file as a zip file and read the document.xml
|
||||
with zipfile.ZipFile(file_path, 'r') as docx:
|
||||
with docx.open('word/document.xml') as document_xml:
|
||||
tree = ET.parse(document_xml)
|
||||
root = tree.getroot()
|
||||
|
||||
# Define the namespaces
|
||||
namespaces = {
|
||||
'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
|
||||
}
|
||||
|
||||
# Find all runs with highlight property
|
||||
for run in root.findall('.//w:r', namespaces):
|
||||
highlight = run.find('.//w:highlight', namespaces)
|
||||
if highlight is not None and highlight.get(
|
||||
'{http://schemas.openxmlformats.org/wordprocessingml/2006/main}val') != 'none':
|
||||
text = run.find('.//w:t', namespaces)
|
||||
if text is not None:
|
||||
highlighted_texts.append(text.text)
|
||||
|
||||
return highlighted_texts
|
||||
|
||||
# Read the highlighted text from both documents
|
||||
doc1_highlighted = extract_highlighted_text(Document(file1))
|
||||
doc2_highlighted = extract_highlighted_text(Document(file2))
|
||||
doc1_highlighted = extract_highlighted_text(file1)
|
||||
doc2_highlighted = extract_highlighted_text(file2)
|
||||
|
||||
# Compare the sets of highlighted text to check if they are the same
|
||||
return set(doc1_highlighted) == set(doc2_highlighted)
|
||||
if set(doc1_highlighted) == set(doc2_highlighted):
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
|
||||
@@ -17,10 +17,16 @@ from .utils import _match_record, _match_value_to_rule


def check_include_exclude(result: str, rules: Dict[str, List[str]]) -> float:
if result is None:
return 0.

print(result, rules)
include = rules.get("include", [])
exclude = rules.get("exclude", [])
return all(r in result for r in include) and all(r not in result for r in exclude)
if all(r in result for r in include) and all(r not in result for r in exclude):
return 1.
else:
return 0.


def exact_match(result, rules) -> float:
@@ -1,8 +1,11 @@
|
||||
import os
|
||||
from typing import List, Union
|
||||
from skimage.metrics import structural_similarity as ssim
|
||||
from PIL import Image, ImageChops, ImageStat
|
||||
|
||||
def compare_images(pred_img_path_list: Union[str, List[str]], gold_img_path_list: Union[str, List[str]]) -> float:
|
||||
|
||||
def compare_image_list(pred_img_path_list: Union[str, List[str]],
|
||||
gold_img_path_list: Union[str, List[str]]) -> float:
|
||||
""" Compare two image lists, only if all images are the same, return 1.0, otherwise return 0.0
|
||||
"""
|
||||
if type(pred_img_path_list) != list:
|
||||
@@ -36,11 +39,13 @@ def get_gimp_export_path():
|
||||
# Handle the case where the configuration file is not found
|
||||
print("GIMP configuration file not found")
|
||||
return False
|
||||
|
||||
|
||||
|
||||
def check_file_exists(directory, filename):
|
||||
file_path = os.path.join(directory, filename)
|
||||
return 1 if os.path.isfile(file_path) else 0
|
||||
|
||||
|
||||
def increase_saturation(image1_path: str, image2_path: str) -> float:
|
||||
def calculate_saturation(image):
|
||||
# convert the image to HSV mode
|
||||
@@ -63,6 +68,7 @@ def increase_saturation(image1_path: str, image2_path: str) -> float:
|
||||
|
||||
return 1 if saturation1 < saturation2 else 0
|
||||
|
||||
|
||||
def decrease_brightness(image1_path: str, image2_path: str) -> float:
|
||||
def calculate_brightness(image):
|
||||
# Convert the image to grayscale mode
|
||||
@@ -81,9 +87,12 @@ def decrease_brightness(image1_path: str, image2_path: str) -> float:
|
||||
brightness2 = calculate_brightness(image2)
|
||||
|
||||
return 1 if brightness1 > brightness2 else 0
|
||||
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
|
||||
def find_yellow_triangle(image):
|
||||
# Convert the image to RGBA
|
||||
rgba = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
|
||||
@@ -111,6 +120,7 @@ def find_yellow_triangle(image):
|
||||
|
||||
return cx, cy
|
||||
|
||||
|
||||
def compare_triangle_positions(image1, image2):
|
||||
image1 = cv2.imread(image1, cv2.IMREAD_COLOR)
|
||||
image2 = cv2.imread(image2, cv2.IMREAD_COLOR)
|
||||
@@ -119,16 +129,419 @@ def compare_triangle_positions(image1, image2):
|
||||
cx2, cy2 = find_yellow_triangle(image2)
|
||||
|
||||
# calculate the distance between the center of the triangle and the center of the image
|
||||
center_distance1 = np.sqrt((cx1 - image1.shape[1] // 2)**2 + (cy1 - image1.shape[0] // 2)**2)
|
||||
center_distance2 = np.sqrt((cx2 - image2.shape[1] // 2)**2 + (cy2 - image2.shape[0] // 2)**2)
|
||||
center_distance1 = np.sqrt(
|
||||
(cx1 - image1.shape[1] // 2) ** 2 + (cy1 - image1.shape[0] // 2) ** 2)
|
||||
center_distance2 = np.sqrt(
|
||||
(cx2 - image2.shape[1] // 2) ** 2 + (cy2 - image2.shape[0] // 2) ** 2)
|
||||
|
||||
return 1 if center_distance1 > center_distance2 else 0
|
||||
|
||||
|
||||
# Functions for the GIMP evaluator
|
||||
def calculate_brightness(image):
|
||||
"""Calculate the average brightness of an image"""
|
||||
grayscale = image.convert('L')
|
||||
stat = ImageStat.Stat(grayscale)
|
||||
return stat.mean[0]
|
||||
|
||||
|
||||
def normalize_brightness(image, target_brightness):
|
||||
"""Normalize the brightness of an image to a target brightness in [0, 1]"""
|
||||
current_brightness = calculate_brightness(image)
|
||||
factor = target_brightness / current_brightness
|
||||
|
||||
# Apply a point transform to each pixel
|
||||
def point_transform(x):
|
||||
return min(255, max(0, int(x * factor)))
|
||||
|
||||
return image.point(point_transform)
|
||||
|
||||
|
||||
def measure_saturation(hsv_image):
|
||||
"""Measure the average saturation of an image"""
|
||||
# Split into H, S, V channels
|
||||
_, s, _ = hsv_image.split()
|
||||
# Convert the saturation channel to a numpy array
|
||||
s_array = np.array(s)
|
||||
# Calculate the average saturation
|
||||
avg_saturation = np.mean(s_array)
|
||||
return avg_saturation
|
||||
|
||||
|
||||
def calculate_contrast(image):
|
||||
"""Calculate the contrast of an image as the standard deviation of the pixel
|
||||
values."""
|
||||
pixels = np.asarray(image, dtype=np.float32)
|
||||
return np.std(pixels)
|
||||
|
||||
|
||||
def structure_check_by_mse(img1, img2, threshold=0.03):
|
||||
"""Check if two images are approximately the same by MSE"""
|
||||
mse = np.mean(
|
||||
(np.array(img1, dtype=np.float32) / 255
|
||||
- np.array(img2, dtype=np.float32) / 255) ** 2)
|
||||
structure_same = True if mse < threshold else False
|
||||
print("MSE: ", mse)
|
||||
return structure_same
|
||||
|
||||
|
||||
def structure_check_by_ssim(img1, img2, threshold=0.9):
|
||||
"""Check if two images are approximately the same by SSIM"""
|
||||
similarity = ssim(np.array(img1), np.array(img2), multichannel=True)
|
||||
print("SSIM: ", similarity)
|
||||
return similarity >= threshold
|
||||
|
||||
|
||||
def check_brightness_decrease_and_structure_sim(src_path, tgt_path):
|
||||
"""
|
||||
Check the brightness of src is lower than tgt and the structures are similar
|
||||
gimp:7a4deb26-d57d-4ea9-9a73-630f66a7b568
|
||||
"""
|
||||
if src_path is None or tgt_path is None:
|
||||
return 0.
|
||||
|
||||
img_src = Image.open(src_path)
|
||||
img_tgt = Image.open(tgt_path)
|
||||
|
||||
# Brightness comparison
|
||||
brightness_src = calculate_brightness(img_src)
|
||||
brightness_tgt = calculate_brightness(img_tgt)
|
||||
brightness_reduced = brightness_tgt > brightness_src
|
||||
|
||||
# Normalize and compare images
|
||||
target_brightness = 128
|
||||
img_src_normalized = normalize_brightness(img_src, target_brightness)
|
||||
img_tgt_normalized = normalize_brightness(img_tgt, target_brightness)
|
||||
|
||||
structure_same = structure_check_by_mse(img_src_normalized, img_tgt_normalized)
|
||||
if brightness_reduced and structure_same:
|
||||
return 1.
|
||||
else:
|
||||
return 0.
|
||||
|
||||
|
||||
def check_saturation_increase_and_structure_sim(src_path, tgt_path):
|
||||
"""
|
||||
Check the saturation of src is higher than tgt and the structures are similar
|
||||
gimp:554785e9-4523-4e7a-b8e1-8016f565f56a
|
||||
"""
|
||||
if src_path is None or tgt_path is None:
|
||||
return 0.
|
||||
|
||||
img_src = Image.open(src_path)
|
||||
hsv_img_src = img_src.convert('HSV')
|
||||
img_tgt = Image.open(tgt_path)
|
||||
hsv_img_tgt = img_tgt.convert('HSV')
|
||||
|
||||
# Saturation comparison
|
||||
src_saturation = measure_saturation(hsv_img_src)
|
||||
tgt_saturation = measure_saturation(hsv_img_tgt)
|
||||
|
||||
saturation_increased = tgt_saturation < src_saturation
|
||||
|
||||
# Structure comparison
|
||||
h1, s1, v1 = hsv_img_src.split()
|
||||
h2, s2, v2 = hsv_img_tgt.split()
|
||||
h_same = structure_check_by_ssim(h1, h2)
|
||||
v_same = structure_check_by_ssim(v1, v2)
|
||||
if h_same and v_same:
|
||||
structure_same = True
|
||||
else:
|
||||
structure_same = False
|
||||
|
||||
if saturation_increased and structure_same:
|
||||
return 1.
|
||||
else:
|
||||
return 0.
|
||||
|
||||
|
||||
def check_file_exists_and_structure_sim(src_path, tgt_path):
|
||||
"""
|
||||
Check if the image has been exported to the desktop
|
||||
gimp:77b8ab4d-994f-43ac-8930-8ca087d7c4b4
|
||||
"""
|
||||
if src_path is None or tgt_path is None:
|
||||
return 0.
|
||||
|
||||
# Check if the file exists
|
||||
export_file_exists = os.path.isfile(src_path)
|
||||
if not export_file_exists:
|
||||
return 0.
|
||||
|
||||
# Check whether the target image is the same as the source image
|
||||
img_src = Image.open(src_path)
|
||||
img_tgt = Image.open(tgt_path)
|
||||
structure_same = structure_check_by_ssim(img_src, img_tgt)
|
||||
|
||||
if structure_same:
|
||||
return 1.
|
||||
else:
|
||||
return 0.
|
||||
|
||||
|
||||
def check_triangle_position(tgt_path):
|
||||
"""
|
||||
Check if the triangle is in the middle of the image.
|
||||
gimp:f4aec372-4fb0-4df5-a52b-79e0e2a5d6ce
|
||||
"""
|
||||
if tgt_path is None:
|
||||
return 0.
|
||||
|
||||
# Load the image
|
||||
img = Image.open(tgt_path)
|
||||
img_array = np.array(img)
|
||||
|
||||
# We assume the triangle is a different color from the background
|
||||
# Find the unique colors
|
||||
unique_colors, counts = np.unique(img_array.reshape(-1, img_array.shape[2]), axis=0, return_counts=True)
|
||||
unique_colors_sorted = unique_colors[np.argsort(counts)]
|
||||
|
||||
# Assuming the background is the most common color and the triangle is a different color
|
||||
triangle_color = unique_colors_sorted[1]
|
||||
|
||||
# Create a mask where the triangle pixels are True
|
||||
triangle_mask = np.all(img_array == triangle_color, axis=2)
|
||||
|
||||
# Get the coordinates of the triangle pixels
|
||||
triangle_coords = np.argwhere(triangle_mask)
|
||||
|
||||
# Calculate the centroid of the triangle
|
||||
centroid = triangle_coords.mean(axis=0)
|
||||
|
||||
# Check if the centroid is approximately in the middle of the image
|
||||
image_center = np.array(img_array.shape[:2]) / 2
|
||||
|
||||
# We will consider the triangle to be in the middle if the centroid is within 5% of the image's center
|
||||
tolerance = 0.05 * np.array(img_array.shape[:2])
|
||||
middle = np.all(np.abs(centroid - image_center) < tolerance)
|
||||
|
||||
if bool(middle):
|
||||
return 1.
|
||||
else:
|
||||
return 0.
|
||||
|
||||
|
||||
def check_structure_sim(src_path, tgt_path):
|
||||
"""
|
||||
Check if the structure of the two images are similar
|
||||
gimp:2a729ded-3296-423d-aec4-7dd55ed5fbb3
|
||||
"""
|
||||
img_src = Image.open(src_path)
|
||||
img_tgt = Image.open(tgt_path)
|
||||
structure_same = structure_check_by_ssim(img_src, img_tgt)
|
||||
return structure_same
|
||||
|
||||
|
||||
def check_contrast_increase_and_structure_sim(src_path, tgt_path):
|
||||
"""
|
||||
Check if the src image has higher contrast than the tgt image and the structures are similar
|
||||
gimp:f723c744-e62c-4ae6-98d1-750d3cd7d79d
|
||||
"""
|
||||
if src_path is None or tgt_path is None:
|
||||
return 0.
|
||||
|
||||
# Load images
|
||||
source_image = Image.open(src_path)
|
||||
target_image = Image.open(tgt_path)
|
||||
|
||||
# Calculate contrast
|
||||
source_contrast = calculate_contrast(source_image)
|
||||
target_contrast = calculate_contrast(target_image)
|
||||
higher_contrast = target_contrast < source_contrast
|
||||
|
||||
# Check structure
|
||||
structure_same = structure_check_by_ssim(source_image, target_image, threshold=0.65)
|
||||
|
||||
if higher_contrast and structure_same:
|
||||
return 1.
|
||||
else:
|
||||
return 0.
|
||||
|
||||
|
||||
def check_config_status(actual_config_path, rule):
|
||||
"""
|
||||
Check if the GIMP status is as expected
|
||||
"""
|
||||
if actual_config_path is None:
|
||||
return 0.
|
||||
|
||||
with open(actual_config_path, 'r') as f:
|
||||
content = f.readlines()
|
||||
|
||||
for line in content:
|
||||
if line.startswith('#') or line == '\n':
|
||||
continue
|
||||
items = line.strip().lstrip('(').rstrip(')\n').split()
|
||||
if isinstance(rule["key"], str):
|
||||
if items[0] == rule["key"] and items[-1] == rule["value"]:
|
||||
return 1.
|
||||
elif isinstance(rule["key"], list) and len(rule["key"]) == 2:
|
||||
if items[0] == rule["key"][0] \
|
||||
and items[1] == rule["key"][1] \
|
||||
and items[-1] == rule["value"]:
|
||||
return 1.
|
||||
return 0.
|
||||
|
||||
|
||||
def check_image_size_and_structure_sim(src_path, tgt_path, height=512, width=None):
|
||||
"""
|
||||
Check if the size of the src image is correct and the structure of the two images are similar.
|
||||
gimp:d16c99dc-2a1e-46f2-b350-d97c86c85c15
|
||||
"""
|
||||
|
||||
if src_path is None or tgt_path is None:
|
||||
return 0.
|
||||
|
||||
# Load images
|
||||
source_image = Image.open(src_path)
|
||||
target_image = Image.open(tgt_path)
|
||||
|
||||
# Check size
|
||||
if width is not None:
|
||||
width_same = source_image.size[0] == width
|
||||
else:
|
||||
width_same = True
|
||||
if height is not None:
|
||||
height_same = source_image.size[1] == height
|
||||
else:
|
||||
height_same = True
|
||||
|
||||
# Check structure
|
||||
resized_target_image = target_image.resize(source_image.size)
|
||||
structure_same = structure_check_by_ssim(source_image, resized_target_image)
|
||||
|
||||
if width_same and height_same and structure_same:
|
||||
return 1.
|
||||
else:
|
||||
return 0.
|
||||
|
||||
|
||||
def check_palette_and_structure_sim(src_path, tgt_path):
|
||||
"""
|
||||
Check if the src image is palette-based and the structure of the two images are similar
|
||||
gimp:06ca5602-62ca-47f6-ad4f-da151cde54cc
|
||||
"""
|
||||
if src_path is None or tgt_path is None:
|
||||
return 0.
|
||||
|
||||
# Check if the source image is palette-based
|
||||
source_image = Image.open(src_path)
|
||||
palette_based = source_image.mode == 'P'
|
||||
|
||||
# Check structure
|
||||
target_image = Image.open(tgt_path)
|
||||
source_image = source_image.convert('RGB')
|
||||
structure_same = structure_check_by_ssim(source_image, target_image)
|
||||
if palette_based and structure_same:
|
||||
return 1.
|
||||
else:
|
||||
return 0.
|
||||
|
||||
|
||||
def check_textbox_on_leftside(src_path):
|
||||
"""
|
||||
Check if the textbox is on the left side of the image.
|
||||
gimp:e2dd0213-26db-4349-abe5-d5667bfd725c
|
||||
"""
|
||||
if src_path is None:
|
||||
return 0.
|
||||
|
||||
source_image = Image.open(src_path)
|
||||
gray_image = source_image.convert("L")
|
||||
width, height = source_image.size
|
||||
|
||||
# Find the bounds of the black text
|
||||
left_most_dark_pixel = width # Start with the farthest possible left position
|
||||
for y in range(height):
|
||||
for x in range(width):
|
||||
# If the pixel is dark, consider it as part of the text
|
||||
if gray_image.getpixel((x, y)) < 128: # Arbitrary threshold for "dark"
|
||||
left_most_dark_pixel = min(left_most_dark_pixel, x)
|
||||
break # Stop after finding the first dark pixel in this row
|
||||
|
||||
# Here we define "almost" on the left side as being within the left 5% of the image
|
||||
if left_most_dark_pixel < width * 0.05:
|
||||
return 1.
|
||||
else:
|
||||
return 0.
|
||||
|
||||
|
||||
def check_image_mirror(src_path, tgt_path):
|
||||
"""
|
||||
Check if the image is mirrored
|
||||
gimp:72f83cdc-bf76-4531-9a1b-eb893a13f8aa
|
||||
"""
|
||||
if src_path is None or tgt_path is None:
|
||||
return 0.
|
||||
|
||||
# Load images
|
||||
source_image = Image.open(src_path)
|
||||
target_image = Image.open(tgt_path)
|
||||
|
||||
# Check if the image is mirrored
|
||||
transposed_image = source_image.transpose(Image.FLIP_LEFT_RIGHT)
|
||||
# Use 0.99 because the image may not be exactly mirrored by gimp
|
||||
mirrored = structure_check_by_ssim(transposed_image, target_image, 0.99)
|
||||
if mirrored:
|
||||
return 1.
|
||||
else:
|
||||
return 0.
|
||||
|
||||
|
||||
def check_green_background(src_path, tgt_path):
|
||||
"""
|
||||
Check if the background of the source image is green.
|
||||
gimp:734d6579-c07d-47a8-9ae2-13339795476b
|
||||
"""
|
||||
if src_path is None or tgt_path is None:
|
||||
return 0.
|
||||
|
||||
# Load images
|
||||
source_image = Image.open(src_path)
|
||||
target_image = Image.open(tgt_path)
|
||||
|
||||
source_pixels = np.array(source_image)
|
||||
target_pixels = np.array(target_image)
|
||||
|
||||
for x in range(target_image.width):
|
||||
for y in range(target_image.height):
|
||||
# Identify background pixel in target image (not black)
|
||||
if tuple(target_pixels[x, y][:3]) != (0, 0, 0):
|
||||
# Check if corresponding pixel in source image is green
|
||||
# Here, "green" means more green than red or blue
|
||||
r, g, b = source_pixels[x, y][:3]
|
||||
if not (g > r and g > b):
|
||||
return 0.
|
||||
|
||||
return 1.
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
image1_path = "../Downloads/1.png"
|
||||
image2_path = "../Downloads/edited_darker.png"
|
||||
actual_config_path = "../../../cache/sessionrc_test"
|
||||
rule = {
|
||||
"key": "hide-docks",
|
||||
"value": "no"
|
||||
}
|
||||
print(check_config_status(actual_config_path, rule))
|
||||
|
||||
decrease_brightness(image1_path, image2_path)
|
||||
actual_config_path = "../../../cache/action-history_test"
|
||||
rule = {
|
||||
"key": ["history-item", "\"filters-vignette\""],
|
||||
"value": "1"
|
||||
}
|
||||
print(check_config_status(actual_config_path, rule))
|
||||
|
||||
actual_config_path = "../../../cache/gimprc_test"
|
||||
rule = {
|
||||
"key": "undo-levels",
|
||||
"value": "100"
|
||||
}
|
||||
print(check_config_status(actual_config_path, rule))
|
||||
|
||||
src_path = "../../../cache/734d6579-c07d-47a8-9ae2-13339795476b/green_background_with_object.png"
|
||||
tgt_path = "../../../cache/734d6579-c07d-47a8-9ae2-13339795476b/white_background_with_object.png"
|
||||
print(check_green_background(src_path, tgt_path))
|
||||
|
||||
tgt_path = "../../../cache/f4aec372-4fb0-4df5-a52b-79e0e2a5d6ce/Triangle_In_The_Middle.png"
|
||||
print(check_triangle_position(tgt_path))
|
||||
|
||||
increase_saturation(image1_path, image2_path)
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import logging
|
||||
import xml.etree.ElementTree as ET
|
||||
import zipfile
|
||||
from math import sqrt
|
||||
|
||||
from pptx import Presentation
|
||||
@@ -96,6 +97,7 @@ def check_slide_numbers_color(pptx_file_path):
|
||||
print(font_color)
|
||||
return 1 if font_color is not None and is_red_color(font_color) else 0
|
||||
|
||||
|
||||
# import numpy as np
|
||||
# from PIL import Image
|
||||
# from skimage.metrics import structural_similarity as ssim
|
||||
@@ -143,6 +145,7 @@ def compare_pptx_files(file1_path, file2_path, **options):
|
||||
examine_font_italic = options.get("examine_font_italic", True)
|
||||
examine_color_rgb = options.get("examine_color_rgb", True)
|
||||
examine_font_underline = options.get("examine_font_underline", True)
|
||||
examine_strike_through = options.get("examine_strike_through", True)
|
||||
|
||||
# compare the number of slides
|
||||
if len(prs1.slides) != len(prs2.slides) and examine_number_of_slides:
|
||||
@@ -156,7 +159,7 @@ def compare_pptx_files(file1_path, file2_path, **options):
|
||||
if (
|
||||
shape1.left != shape2.left or shape1.top != shape2.top or shape1.width != shape2.width or shape1.height != shape2.height) and examine_shape:
|
||||
return 0
|
||||
|
||||
|
||||
if hasattr(shape1, "text") and hasattr(shape2, "text"):
|
||||
if shape1.text != shape2.text and examine_text:
|
||||
return 0
|
||||
@@ -187,6 +190,10 @@ def compare_pptx_files(file1_path, file2_path, **options):
|
||||
if run1.font.underline != run2.font.underline and examine_font_underline:
|
||||
return 0
|
||||
|
||||
if ('strike' in run1.font._element.attrib) != (
|
||||
'strike' in run2.font._element.attrib) and examine_strike_through:
|
||||
return 0
|
||||
|
||||
# fixme: Actually there are more properties to be compared, but we cannot get them through pptx
|
||||
|
||||
return 1
|
||||
@@ -200,21 +207,27 @@ def check_strikethrough(pptx_path, rules):
|
||||
shape_index_s = rules["shape_index_s"]
|
||||
paragraph_index_s = rules["paragraph_index_s"]
|
||||
|
||||
for slide_index in slide_index_s:
|
||||
# Get the slide
|
||||
slide = presentation.slides[slide_index]
|
||||
try:
|
||||
for slide_index in slide_index_s:
|
||||
# Get the slide
|
||||
slide = presentation.slides[slide_index]
|
||||
|
||||
for shape_index in shape_index_s:
|
||||
# Get the text box
|
||||
paragraphs = slide.shapes[shape_index].text_frame.paragraphs
|
||||
for shape_index in shape_index_s:
|
||||
# Get the text box
|
||||
paragraphs = slide.shapes[shape_index].text_frame.paragraphs
|
||||
|
||||
for paragraph_index in paragraph_index_s:
|
||||
paragraph = paragraphs[paragraph_index]
|
||||
run = paragraph.runs[0]
|
||||
if 'strike' not in run.font._element.attrib:
|
||||
return False
|
||||
for paragraph_index in paragraph_index_s:
|
||||
paragraph = paragraphs[paragraph_index]
|
||||
run = paragraph.runs[0]
|
||||
if 'strike' not in run.font._element.attrib:
|
||||
return 0
|
||||
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error: {e}")
|
||||
return 0
|
||||
|
||||
return 1
|
||||
|
||||
|
||||
def check_slide_orientation_Portrait(pptx_path):
|
||||
@@ -253,20 +266,100 @@ def check_left_panel(accessibility_tree):
|
||||
|
||||
root = ET.fromstring(accessibility_tree)
|
||||
|
||||
for root_pane in root.iter('root-pane'):
|
||||
for split_pane in root_pane.iter('split-pane'):
|
||||
for panel in split_pane.iter('panel'):
|
||||
for scroll_panel in panel.iter('scroll-pane'):
|
||||
for document_frame in scroll_panel.iter('document-frame'):
|
||||
# Get the left panel
|
||||
panel_name = document_frame.get("name")
|
||||
# visible = scroll_bar.attrib.get(f"{{{namespaces['st']}}}visible")
|
||||
if panel_name == "Slides View":
|
||||
# Left panel is open
|
||||
return 1.0
|
||||
for root_pane in root.iter('root-pane'):
|
||||
for panel in root_pane.iter('panel'):
|
||||
for split_pane in panel.iter('split-pane'):
|
||||
# Get the left panel
|
||||
if split_pane.attrib.get("{{{}}}parentcoord".format(namespaces['cp'])) == "(0, 0)":
|
||||
# Get the visible attribute
|
||||
visible = split_pane.attrib.get("{{{}}}visible".format(namespaces['st']))
|
||||
if visible:
|
||||
# decide if it is left panel
|
||||
return 1.
|
||||
|
||||
# Left panel is not open
|
||||
return 0.0
|
||||
return 0.
|
||||
|
||||
|
||||
# print(compare_pptx_files("D:\\NJU\\HKUNLP\\Desktop-Env\\cache\\bf4e9888-f10f-47af-8dba-76413038b73c\\4.3-Template_4.29.2016.pptx", "D:\\NJU\HKUNLP\\Desktop-Env\\cache\\bf4e9888-f10f-47af-8dba-76413038b73c\\4.3-Template_4.29.2016_Gold.pptx"))
|
||||
def check_transition(pptx_file, rules):
|
||||
slide_idx = rules['slide_idx']
|
||||
transition_type = rules['transition_type']
|
||||
|
||||
# Use the zipfile module to open the .pptx file
|
||||
with zipfile.ZipFile(pptx_file, 'r') as zip_ref:
|
||||
# Get the slide XML file
|
||||
slide_name = 'ppt/slides/slide{}.xml'.format(slide_idx + 1)
|
||||
try:
|
||||
zip_ref.getinfo(slide_name)
|
||||
except KeyError:
|
||||
# Slide does not exist
|
||||
return 0.
|
||||
|
||||
with zip_ref.open(slide_name) as slide_file:
# Parse the slide XML
tree = ET.parse(slide_file)
|
||||
root = tree.getroot()
|
||||
|
||||
# XML namespace
|
||||
namespaces = {
|
||||
'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
|
||||
'p': 'http://schemas.openxmlformats.org/presentationml/2006/main',
|
||||
}
|
||||
|
||||
# Search for the transition element
|
||||
transition = root.find('.//p:transition', namespaces)
|
||||
if transition is not None:
|
||||
# Check if the transition is an expected transition
|
||||
dissolve = transition.find('.//p:{}'.format(transition_type), namespaces)
|
||||
if dissolve is not None:
|
||||
return 1.
|
||||
else:
|
||||
return 0.
|
||||
else:
|
||||
return 0.
|
||||
|
||||
|
||||
def check_page_number_colors(pptx_file, rules):
|
||||
color = rules["color"]
|
||||
|
||||
def is_red(rgb_str, threshold=50):
|
||||
r, g, b = int(rgb_str[1:3], 16), int(rgb_str[3:5], 16), int(rgb_str[5:7], 16)
|
||||
return r > g + threshold and r > b + threshold
|
||||
|
||||
def is_blue(rgb_str, threshold=50):
|
||||
r, g, b = int(rgb_str[1:3], 16), int(rgb_str[3:5], 16), int(rgb_str[5:7], 16)
|
||||
return b > g + threshold and b > r + threshold
|
||||
|
||||
def is_green(rgb_str, threshold=50):
|
||||
r, g, b = int(rgb_str[1:3], 16), int(rgb_str[3:5], 16), int(rgb_str[5:7], 16)
|
||||
return g > r + threshold and g > b + threshold
|
||||
|
||||
def is_black(rgb_str, threshold=50):
|
||||
r, g, b = int(rgb_str[1:3], 16), int(rgb_str[3:5], 16), int(rgb_str[5:7], 16)
|
||||
return r < threshold and g < threshold and b < threshold
|
||||
|
||||
with zipfile.ZipFile(pptx_file, 'r') as zip_ref:
|
||||
slide_master_name = 'ppt/slideMasters/slideMaster1.xml'
|
||||
with zip_ref.open(slide_master_name) as slide_master_file:
|
||||
tree = ET.parse(slide_master_file)
|
||||
root = tree.getroot()
|
||||
|
||||
namespaces = {
|
||||
'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
|
||||
'p': 'http://schemas.openxmlformats.org/presentationml/2006/main',
|
||||
}
|
||||
|
||||
color_elems = root.findall('.//a:solidFill//a:srgbClr', namespaces)
|
||||
slides_color_val = color_elems[-2].get('val')
|
||||
|
||||
if slides_color_val is None:
|
||||
return 0
|
||||
elif color == "red" and not is_red(slides_color_val):
|
||||
return 0
|
||||
elif color == "blue" and not is_blue(slides_color_val):
|
||||
return 0
|
||||
elif color == "green" and not is_green(slides_color_val):
|
||||
return 0
|
||||
elif color == "black" and not is_black(slides_color_val):
|
||||
return 0
|
||||
|
||||
return 1
|
||||
|
||||
@@ -56,6 +56,12 @@ def execute_command():
if isinstance(command, str) and not shell:
command = shlex.split(command)

# WARNING: auto expansion should be done by setting `shell=True` and passing `command` as a str
# Expand user directory
for i, arg in enumerate(command):
if arg.startswith("~/"):
command[i] = os.path.expanduser(arg)

# Execute the command without any safety checks.
try:
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, text=True)
@@ -93,6 +99,12 @@ def launch_app():
if isinstance(command, str) and not shell:
command = shlex.split(command)

# WARNING: auto expansion should be done by setting `shell=True` and passing `command` as a str
# Expand user directory
for i, arg in enumerate(command):
if arg.startswith("~/"):
command[i] = os.path.expanduser(arg)

try:
if 'google-chrome' in command and _get_machine_architecture() == 'arm':
index = command.index('google-chrome')
@@ -177,8 +189,9 @@ def get_terminal_output():
# raise NotImplementedError
return "Currently not implemented for platform {:}.".format(platform.platform()), 500
return jsonify({"output": output, "status": "success"})
except:
return jsonify({"output": None, "status": "error"})
except Exception as e:
logger.error("Failed to get terminal output. Error: %s", e)
return jsonify({"status": "error", "message": str(e)}), 500


_accessibility_ns_map = { "st": "uri:deskat:state.at-spi.gnome.org"
@@ -806,6 +819,8 @@ def download_file():
data = request.json
url = data.get('url', None)
path = data.get('path', None)
print(url, path)
print("*" * 100)

if not url or not path:
return "Path or URL not supplied!", 400