Add DuckTrack as initial annotation tool; Initial multimodal test
48  annotation/experiments/delays/delay.py  Normal file
@@ -0,0 +1,48 @@
import glob

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.stats import sem, t


def calculate_confidence_interval(data, confidence=0.95):
    n = len(data)
    m = np.mean(data)
    std_err = sem(data)
    h = std_err * t.ppf((1 + confidence) / 2, n - 1)
    return m, m - h, m + h


runs = glob.glob("run*.txt")
TOTAL_EVENTS = 22509
percent_delays = []
all_delays = []

for run in runs:
    with open(run, "r") as f:
        delays = [float(line.split()[3]) for line in f if float(line.split()[3]) > 0]  # consider only positive delays
        percent_delays.append((len(delays) / TOTAL_EVENTS) * 100)
        all_delays.extend(delays)

average_percent_delays = np.mean(percent_delays)
confidence_interval_percent_delays = calculate_confidence_interval(percent_delays)
print(f"Average percentage of delayed events across all runs: {average_percent_delays:.2f}%")
print(f"95% Confidence interval: ({confidence_interval_percent_delays[1]:.2f}%, {confidence_interval_percent_delays[2]:.2f}%)")

if all_delays:
    mean_delay = np.mean(all_delays)
    confidence_interval_delays = calculate_confidence_interval(all_delays)
    print(f"Mean delay time: {mean_delay:.2f}")
    print(f"95% Confidence interval for delay time: ({confidence_interval_delays[1]:.2f}, {confidence_interval_delays[2]:.2f})")
else:
    print("No delay data available for calculation.")

sns.histplot(all_delays, bins=30, kde=False)
plt.xlabel('Delay Time (ms)')
plt.ylabel('Frequency')
plt.yscale('log')
plt.title('Histogram of Delay Times (macOS)')

plt.savefig('delays.png', dpi=300)

plt.show()
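For a quick smoke test of delay.py, a minimal illustrative sketch follows (not part of the commit; the file name, the placeholder fields, and the values are assumptions inferred only from the use of line.split()[3]). It writes a synthetic run file in the whitespace-separated format the script expects, with the signed delay in the fourth field.

import numpy as np

rng = np.random.default_rng(42)

with open("run1.txt", "w") as f:
    for i in range(1000):
        # first three fields are placeholders; the fourth is the signed delay value
        # (TOTAL_EVENTS in delay.py refers to the real recordings, so the percentage
        # it reports for this synthetic file is meaningless)
        delay = rng.normal(loc=0.5, scale=2.0)
        f.write(f"{i} keyboard press {delay:.4f}\n")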
110  annotation/experiments/drawing/drawing.py  Normal file
@@ -0,0 +1,110 @@
import glob
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from skimage.metrics import structural_similarity as ssim
from tqdm import tqdm

# use this: https://sketch.io


def calculate_rmse(imageA, imageB):
    err = np.sum((imageA - imageB) ** 2)
    err /= float(imageA.shape[0] * imageA.shape[1])
    return np.sqrt(err)


def compare_images(ground_truth_path, sample_paths):
    results = []
    gt_image = cv2.imread(ground_truth_path, cv2.IMREAD_GRAYSCALE)

    if gt_image is None:
        raise ValueError("Ground truth image could not be read. Please check the file path.")

    gt_image = gt_image.astype("float") / 255.0

    for path in tqdm(sample_paths):
        sample_image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)

        if sample_image is None:
            print(f"WARNING: Sample image at path {path} could not be read. Skipping this image.")
            continue

        sample_image = sample_image.astype("float") / 255.0

        rmse_value = calculate_rmse(gt_image, sample_image)
        ssim_value, _ = ssim(gt_image, sample_image, full=True, data_range=1)

        diff_mask = cv2.absdiff(gt_image, sample_image)

        # plt.imshow(diff_mask * 255, cmap='gray')
        # plt.title(f'Difference Mask for {os.path.basename(path)}\nRMSE: {rmse_value:.5f} - SSIM: {ssim_value:.5f}')
        # plt.show()

        results.append({
            'path': path,
            'rmse': rmse_value,
            'ssim': ssim_value,
            'diff_mask': diff_mask
        })

    return results


ground_truth = 'ground_truth.png'
sample_images = glob.glob("samples/*.png")

results = compare_images(ground_truth, sample_images)

for res in results:
    print(f"Image: {res['path']} - RMSE: {res['rmse']} - SSIM: {res['ssim']}")


def calculate_confidence_interval(data, confidence_level=0.95):
    mean = np.mean(data)
    sem = stats.sem(data)
    df = len(data) - 1
    me = sem * stats.t.ppf((1 + confidence_level) / 2, df)
    return mean - me, mean + me


rmse_values = [res['rmse'] for res in results]
ssim_values = [res['ssim'] for res in results]

rmse_mean = np.mean(rmse_values)
rmse_median = np.median(rmse_values)
rmse_stdev = np.std(rmse_values, ddof=1)

ssim_mean = np.mean(ssim_values)
ssim_median = np.median(ssim_values)
ssim_stdev = np.std(ssim_values, ddof=1)

rmse_ci = calculate_confidence_interval(rmse_values)
ssim_ci = calculate_confidence_interval(ssim_values)

print(f"\nRMSE - Mean: {rmse_mean}, Median: {rmse_median}, Std Dev: {rmse_stdev}, 95% CI: {rmse_ci}")
print(f"SSIM - Mean: {ssim_mean}, Median: {ssim_median}, Std Dev: {ssim_stdev}, 95% CI: {ssim_ci}")

print(f"RMSE: {rmse_mean} ± {rmse_ci[1] - rmse_mean}")
print(f"SSIM: {ssim_mean} ± {ssim_ci[1] - ssim_mean}")


def save_average_diff_map(results, save_path='average_diff_map.png'):
    if not results:
        print("No results available to create an average diff map.")
        return

    avg_diff_map = None

    for res in results:
        if avg_diff_map is None:
            avg_diff_map = np.zeros_like(res['diff_mask'])

        avg_diff_map += res['diff_mask']

    avg_diff_map /= len(results)

    avg_diff_map = (avg_diff_map * 255).astype(np.uint8)

    cv2.imwrite(save_path, avg_diff_map)


# Usage
save_average_diff_map(results)
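A minimal setup sketch for exercising drawing.py locally (illustrative only; the real ground truth and samples are exported from https://sketch.io, and every file name and value below is an assumption). It writes a synthetic ground_truth.png and a few slightly perturbed samples so the RMSE/SSIM comparison has input to work on.

import os

import cv2
import numpy as np

os.makedirs("samples", exist_ok=True)

# a white canvas with a single stroke stands in for the sketch.io ground truth
canvas = np.full((256, 256), 255, dtype=np.uint8)
cv2.line(canvas, (20, 20), (230, 230), color=0, thickness=3)
cv2.imwrite("ground_truth.png", canvas)

# noisy copies stand in for replayed drawings (several, so the std/CI stats are defined)
rng = np.random.default_rng(0)
for k in range(1, 4):
    noise = rng.integers(0, 20, size=canvas.shape, dtype=np.uint8)
    cv2.imwrite(f"samples/sample_{k:02d}.png", cv2.subtract(canvas, noise))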
4  annotation/experiments/recaptcha/recaptcha.py  Normal file
@@ -0,0 +1,4 @@
# reCAPTCHA experiment: 10 successful attempts out of 10
success = 10
total = 10

print(success / total)
48  annotation/experiments/sleep_testing/calc_errors.py  Normal file
@@ -0,0 +1,48 @@
import csv
import time

import numpy as np
from tqdm import tqdm


def check_sleep(duration, sleep_function):
    start = time.perf_counter()
    sleep_function(duration)
    end = time.perf_counter()
    elapsed = end - start
    return abs(elapsed - duration)


def busy_sleep(duration):
    end_time = time.perf_counter() + duration
    while time.perf_counter() < end_time:
        pass


def measure_accuracy(sleep_function, durations, iterations=100):
    average_errors = []
    for duration in tqdm(durations):
        errors = [check_sleep(duration, sleep_function) for _ in range(iterations)]
        average_error = np.mean(errors)
        average_errors.append(average_error)
    return average_errors


durations = np.arange(0.001, 0.101, 0.001)  # From 1ms to 100ms in 1ms increments
iterations = 100

sleep_errors = measure_accuracy(time.sleep, durations, iterations)
busy_sleep_errors = measure_accuracy(busy_sleep, durations, iterations)


def save_to_csv(filename, durations, sleep_errors, busy_sleep_errors):
    with open(filename, 'w', newline='') as csvfile:
        fieldnames = ['duration', 'sleep_error', 'busy_sleep_error']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

        writer.writeheader()
        for duration, sleep_error, busy_sleep_error in zip(durations, sleep_errors, busy_sleep_errors):
            writer.writerow({
                'duration': duration,
                'sleep_error': sleep_error,
                'busy_sleep_error': busy_sleep_error
            })
    print("Data saved to", filename)


save_to_csv('sleep_data.csv', durations * 1000, np.array(sleep_errors) * 1000, np.array(busy_sleep_errors) * 1000)
33  annotation/experiments/sleep_testing/plot_errors.py  Normal file
@@ -0,0 +1,33 @@
import csv

import matplotlib.pyplot as plt


def plot_from_csv(filename, save_plot=False):
    durations = []
    sleep_errors = []
    busy_sleep_errors = []

    with open(filename, 'r') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            durations.append(float(row['duration']))
            sleep_errors.append(float(row['sleep_error']))
            busy_sleep_errors.append(float(row['busy_sleep_error']))

    plt.figure(figsize=(10, 5))
    plt.plot(durations, sleep_errors, label='time.sleep()', marker='o')
    plt.plot(durations, busy_sleep_errors, label='busy_sleep()', marker='x')
    plt.xlabel('Desired Delay (ms)')
    plt.ylabel('Average Error (ms)')
    plt.title('Sleep Accuracy: time.sleep() vs Busy-Wait Loop (macOS)')
    plt.legend()
    plt.grid(True)

    if save_plot:
        plt.savefig('sleep_accuracy_plot.png', dpi=300)
        print("Plot saved as sleep_accuracy_plot.png")

    plt.show()


plot_from_csv('sleep_data.csv', save_plot=True)
110  annotation/experiments/stopwatch/stopwatch.py  Normal file
@@ -0,0 +1,110 @@
import glob

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import seaborn as sns

# use this: https://www.estopwatch.net/


def read_file(file_path):
    df = pd.read_csv(file_path)
    df['Elapsed time'] = pd.to_datetime(df['Elapsed time'], errors='coerce')
    return df


def analyze_new_error(run_df, groundtruth_df):
    # cumulative drift of the recorded run relative to the ground-truth stopwatch
    cumulative_errors = run_df['Elapsed time'] - groundtruth_df['Elapsed time']
    cumulative_errors_in_seconds = cumulative_errors.dt.total_seconds()

    # the "new" error at each lap is the change in cumulative drift since the previous lap
    new_errors_in_seconds = cumulative_errors_in_seconds.diff().fillna(cumulative_errors_in_seconds[0])
    new_error_points = new_errors_in_seconds[new_errors_in_seconds != 0].index.tolist()

    return new_errors_in_seconds[new_error_points]


def calculate_statistics(errors):
    if len(errors) == 0:
        return {
            'mean_error': 0,
            'median_error': 0,
            'stddev_error': 0,
            'rmse_error': 0,
            'confidence_interval': (0, 0),
            'error_frequency': 0
        }

    mean_error = np.mean(errors)
    median_error = np.median(errors)
    stddev_error = np.std(errors)
    rmse_error = np.sqrt(np.mean(np.square(errors)))

    ci_low, ci_high = stats.t.interval(
        confidence=0.95,
        df=len(errors) - 1,
        loc=mean_error,
        scale=stats.sem(errors) if len(errors) > 1 else 0
    )

    return {
        'mean_error': mean_error,
        'median_error': median_error,
        'stddev_error': stddev_error,
        'rmse_error': rmse_error,
        'confidence_interval': (ci_low, ci_high),
    }


def main():
    groundtruth_file = 'groundtruth.csv'
    run_files = glob.glob('runs/*.csv')

    groundtruth_df = read_file(groundtruth_file)
    run_dfs = {f'run{i+1}': read_file(file) for i, file in enumerate(run_files)}

    total_errors = []
    total_points = 0
    all_errors = []

    for run, df in run_dfs.items():
        errors = analyze_new_error(df, groundtruth_df)
        total_errors.extend(errors)
        all_errors.extend(errors)
        total_points += len(df)

        results = calculate_statistics(errors)
        error_frequency = len(errors) / len(df)

        print(f"Results for {run}:")
        print(f"Mean New Error: {results['mean_error']:.5f} seconds")
        print(f"Median New Error: {results['median_error']:.5f} seconds")
        print(f"Standard Deviation of New Error: {results['stddev_error']:.5f} seconds")
        print(f"RMSE of New Error: {results['rmse_error']:.5f} seconds")
        print(f"95% Confidence Interval of New Error: ({results['confidence_interval'][0]:.5f}, {results['confidence_interval'][1]:.5f}) seconds")
        print(f"New Error Frequency: {error_frequency*100:.5f} %")
        print('-----------------------------------------')

    total_results = calculate_statistics(total_errors)
    total_error_frequency = len(total_errors) / total_points

    print("Total Statistics:")
    print(f"Mean New Error: {total_results['mean_error']:.5f} seconds")
    print(f"Median New Error: {total_results['median_error']:.5f} seconds")
    print(f"Standard Deviation of New Error: {total_results['stddev_error']:.5f} seconds")
    print(f"RMSE of New Error: {total_results['rmse_error']:.5f} seconds")
    print(f"95% Confidence Interval of New Error: ({total_results['confidence_interval'][0]:.5f}, {total_results['confidence_interval'][1]:.5f}) seconds")
    print(f"New Error Frequency: {total_error_frequency*100:.5f} %")

    # report as mean ± half-width of the 95% confidence interval
    print(f"New Error: {total_results['mean_error']:.5f} ± {total_results['confidence_interval'][1] - total_results['mean_error']:.5f} seconds")

    plt.figure(figsize=(10, 5))
    sns.histplot(all_errors, bins=12, kde=False)
    plt.title('Distribution of Newly Introduced Errors (macOS)')
    plt.xlabel('Error Duration (seconds)')
    plt.ylabel('Frequency')
    plt.savefig('error_dist.png', dpi=300)
    plt.show()


if __name__ == "__main__":
    main()
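To try stopwatch.py without real exports, here is a purely illustrative setup sketch. The real CSVs come from https://www.estopwatch.net/, whose exact export format is not shown in this commit; the only assumption taken from the code is an 'Elapsed time' column that pandas can parse. It writes a groundtruth.csv and one drifted run into runs/.

import os

import pandas as pd

os.makedirs("runs", exist_ok=True)

# ground truth: one lap per second for 30 seconds
truth = pd.DataFrame({"Elapsed time": [f"00:00:{s:02d}.000" for s in range(1, 31)]})
truth.to_csv("groundtruth.csv", index=False)

# a run that picks up 100 ms of error at lap 10 and keeps it from then on
laps = [f"00:00:{s:02d}.100" if s >= 10 else f"00:00:{s:02d}.000" for s in range(1, 31)]
pd.DataFrame({"Elapsed time": laps}).to_csv("runs/run1.csv", index=False)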