Add DuckTrack as initial annotation tool; Initial multimodal test
48 annotation/experiments/sleep_testing/calc_errors.py Normal file
@@ -0,0 +1,48 @@
import csv
import time

import numpy as np
from tqdm import tqdm


def check_sleep(duration, sleep_function):
    # Measure how far the actual pause deviates from the requested duration.
    start = time.perf_counter()
    sleep_function(duration)
    end = time.perf_counter()
    elapsed = end - start
    return abs(elapsed - duration)


def busy_sleep(duration):
    # Spin in a tight loop until the deadline; trades CPU time for timing precision.
    end_time = time.perf_counter() + duration
    while time.perf_counter() < end_time:
        pass


def measure_accuracy(sleep_function, durations, iterations=100):
    # Average the timing error over `iterations` runs for each requested duration.
    average_errors = []
    for duration in tqdm(durations):
        errors = [check_sleep(duration, sleep_function) for _ in range(iterations)]
        average_error = np.mean(errors)
        average_errors.append(average_error)
    return average_errors


durations = np.arange(0.001, 0.101, 0.001)  # From 1ms to 100ms in 1ms increments
iterations = 100

sleep_errors = measure_accuracy(time.sleep, durations, iterations)
busy_sleep_errors = measure_accuracy(busy_sleep, durations, iterations)


def save_to_csv(filename, durations, sleep_errors, busy_sleep_errors):
    # Write one row per requested duration with the average error of each strategy.
    with open(filename, 'w', newline='') as csvfile:
        fieldnames = ['duration', 'sleep_error', 'busy_sleep_error']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

        writer.writeheader()
        for duration, sleep_error, busy_sleep_error in zip(durations, sleep_errors, busy_sleep_errors):
            writer.writerow({
                'duration': duration,
                'sleep_error': sleep_error,
                'busy_sleep_error': busy_sleep_error
            })
    print("Data saved to", filename)


# Convert seconds to milliseconds before saving.
save_to_csv('sleep_data.csv', durations * 1000, np.array(sleep_errors) * 1000, np.array(busy_sleep_errors) * 1000)
33 annotation/experiments/sleep_testing/plot_errors.py Normal file
@@ -0,0 +1,33 @@
import csv

import matplotlib.pyplot as plt


def plot_from_csv(filename, save_plot=False):
    # Read the per-duration errors written by calc_errors.py and plot both strategies.
    durations = []
    sleep_errors = []
    busy_sleep_errors = []

    with open(filename, 'r') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            durations.append(float(row['duration']))
            sleep_errors.append(float(row['sleep_error']))
            busy_sleep_errors.append(float(row['busy_sleep_error']))

    plt.figure(figsize=(10, 5))
    plt.plot(durations, sleep_errors, label='time.sleep()', marker='o')
    plt.plot(durations, busy_sleep_errors, label='busy_sleep()', marker='x')
    plt.xlabel('Desired Delay (ms)')
    plt.ylabel('Average Error (ms)')
    plt.title('Sleep Accuracy: time.sleep() vs Busy-Wait Loop (macOS)')
    plt.legend()
    plt.grid(True)

    if save_plot:
        plt.savefig('sleep_accuracy_plot.png', dpi=300)
        print("Plot saved as sleep_accuracy_plot.png")

    plt.show()


plot_from_csv('sleep_data.csv', save_plot=True)