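"""Goal-1 performance benchmarks.

Exercises the dispatcher, RBAC engine, and SQLite adapter against the
architectural guardian thresholds checked below: 500ms average dispatch,
256MB peak memory, 100ms RBAC checks, 50ms inserts, and 20ms queries.
"""
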
import random
import string
import time
import timeit

from memory_profiler import memory_usage

from orchestrator.core.dispatcher import Dispatcher
from security.rbac_engine import RBACEngine
from storage.adapters.sqlite_adapter import SQLiteAdapter


def generate_random_string(length=32):
    """Return a random alphanumeric string of the given length."""
    return ''.join(random.choices(string.ascii_letters + string.digits, k=length))


class MockTask:
    """Minimal stand-in for a real task: an id plus a small random payload."""

    def __init__(self, task_id):
        self.task_id = task_id
        self.payload = {
            "data": generate_random_string(100),
            "priority": random.randint(1, 3),
        }


class DispatcherBenchmarks:
    def __init__(self):
        self.dispatcher = Dispatcher()

    def benchmark_dispatch(self, num_tasks=1000):
        """Test dispatch performance against the 500ms response time guardian."""
        def single_dispatch():
            task = MockTask(generate_random_string())
            self.dispatcher.dispatch(task)

        # Measure single dispatch time
        single_time = timeit.timeit(single_dispatch, number=1)
        print(f"Single dispatch request: {single_time * 1000:.2f}ms")

        # Measure batch performance (perf_counter is monotonic and
        # high-resolution, so it is better suited to timing than time.time)
        start = time.perf_counter()
        for i in range(num_tasks):
            task = MockTask(f"task_{i}")
            self.dispatcher.dispatch(task)
        elapsed = time.perf_counter() - start
        avg_time = elapsed / num_tasks * 1000
        print(f"Average dispatch time ({num_tasks} tasks): {avg_time:.2f}ms")

        # Verify against architectural guardian
        if avg_time > 500:
            print("WARNING: Exceeds 500ms dispatch time guardian")
        return avg_time

    def benchmark_memory_usage(self):
        """Test memory usage against the 256MB footprint guardian."""
        def operation_wrapper():
            for i in range(1000):
                task = MockTask(f"mem_test_{i}")
                self.dispatcher.dispatch(task)

        # max_usage=True returns the peak memory of the wrapped call
        # rather than a full time series of samples
        mem_usage = memory_usage((operation_wrapper,), max_usage=True)
        print(f"Peak memory usage: {mem_usage:.2f} MB")

        # Verify against architectural guardian
        if mem_usage > 256:
            print("WARNING: Exceeds 256MB memory footprint guardian")
        return mem_usage

    def run_dispatcher_benchmarks(self):
        print("\n=== Dispatcher Benchmarks ===")
        print("1. Dispatch Performance")
        self.benchmark_dispatch(1000)
        self.benchmark_dispatch(5000)

        print("\n2. Memory Usage")
        self.benchmark_memory_usage()


class RBACBenchmarks:
    def __init__(self):
        self.rbac = RBACEngine()
        # Set up test roles and permissions ("*" grants every permission)
        self.rbac.create_role("admin", ["*"])
        self.rbac.create_role("editor", ["read", "write"])
        self.rbac.create_role("viewer", ["read"])

    def benchmark_evaluation(self, num_checks=1000):
        """Test RBAC evaluation performance against the 100ms check guardian."""
        def single_check():
            self.rbac.check_permission("editor", "write")

        # Measure single check time
        single_time = timeit.timeit(single_check, number=1)
        print(f"Single RBAC check: {single_time * 1000:.2f}ms")

        # Measure batch performance over randomized role/permission pairs
        start = time.perf_counter()
        for _ in range(num_checks):
            role = random.choice(["admin", "editor", "viewer"])
            permission = random.choice(["read", "write", "delete"])
            self.rbac.check_permission(role, permission)
        elapsed = time.perf_counter() - start
        avg_time = elapsed / num_checks * 1000
        print(f"Average RBAC check time ({num_checks} checks): {avg_time:.2f}ms")

        # Verify against architectural guardian
        if avg_time > 100:
            print("WARNING: Exceeds 100ms RBAC check time guardian")
        return avg_time

    def run_rbac_benchmarks(self):
        print("\n=== RBAC Benchmarks ===")
        print("1. Permission Evaluation")
        self.benchmark_evaluation(1000)
        self.benchmark_evaluation(5000)


class SQLiteBenchmarks:
    def __init__(self):
        self.adapter = SQLiteAdapter(":memory:")
        # Offset for generated row ids, so repeated insert runs never
        # collide on the PRIMARY KEY
        self.rows_inserted = 0
        # Create test table
        self.adapter.execute("""
            CREATE TABLE benchmark_data (
                id TEXT PRIMARY KEY,
                value TEXT,
                timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
            )
        """)

    def benchmark_insert(self, num_rows=1000):
        """Test SQLite insert performance against the 50ms insert guardian."""
        def insert_row():
            self.adapter.execute(
                "INSERT INTO benchmark_data (id, value) VALUES (?, ?)",
                (generate_random_string(), generate_random_string(100))
            )

        # Measure single insert time
        single_time = timeit.timeit(insert_row, number=1)
        print(f"Single insert: {single_time * 1000:.2f}ms")

        # Measure batch performance; offset the row ids so back-to-back
        # runs do not violate the PRIMARY KEY constraint
        offset = self.rows_inserted
        start = time.perf_counter()
        for i in range(num_rows):
            self.adapter.execute(
                "INSERT INTO benchmark_data (id, value) VALUES (?, ?)",
                (f"row_{offset + i}", generate_random_string(100))
            )
        elapsed = time.perf_counter() - start
        self.rows_inserted += num_rows
        avg_time = elapsed / num_rows * 1000
        print(f"Average insert time ({num_rows} rows): {avg_time:.2f}ms")

        # Verify against architectural guardian
        if avg_time > 50:
            print("WARNING: Exceeds 50ms insert time guardian")
        return avg_time

    def benchmark_query(self, num_queries=1000):
        """Test SQLite query performance against the 20ms query guardian."""
        # First ensure we have data to query against
        if self.adapter.fetch_one("SELECT COUNT(*) FROM benchmark_data")[0] < 1000:
            self.benchmark_insert(1000)

        def single_query():
            self.adapter.fetch_one("SELECT * FROM benchmark_data LIMIT 1")

        # Measure single query time
        single_time = timeit.timeit(single_query, number=1)
        print(f"Single query: {single_time * 1000:.2f}ms")

        # Measure batch performance with point lookups on the primary key
        start = time.perf_counter()
        for i in range(num_queries):
            self.adapter.fetch_one(f"SELECT * FROM benchmark_data WHERE id = 'row_{i % 1000}'")
        elapsed = time.perf_counter() - start
        avg_time = elapsed / num_queries * 1000
        print(f"Average query time ({num_queries} queries): {avg_time:.2f}ms")

        # Verify against architectural guardian
        if avg_time > 20:
            print("WARNING: Exceeds 20ms query time guardian")
        return avg_time

    def run_sqlite_benchmarks(self):
        print("\n=== SQLite Benchmarks ===")
        print("1. Insert Performance")
        self.benchmark_insert(1000)
        self.benchmark_insert(5000)

        print("\n2. Query Performance")
        self.benchmark_query(1000)
        self.benchmark_query(5000)


if __name__ == "__main__":
    print("Running Goal-1 performance benchmarks...")

    # Run dispatcher benchmarks
    dispatch_bench = DispatcherBenchmarks()
    dispatch_bench.run_dispatcher_benchmarks()

    # Run RBAC benchmarks
    rbac_bench = RBACBenchmarks()
    rbac_bench.run_rbac_benchmarks()

    # Run SQLite benchmarks
    sqlite_bench = SQLiteBenchmarks()
    sqlite_bench.run_sqlite_benchmarks()
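    # Each benchmark method also returns its measured average, so a CI wrapper
    # could assert on the guardians directly instead of scraping the WARNING
    # lines. A minimal sketch (the thresholds mirror the checks above):
    #
    #   assert dispatch_bench.benchmark_dispatch(1000) <= 500
    #   assert rbac_bench.benchmark_evaluation(1000) <= 100
    #   assert sqlite_bench.benchmark_insert(1000) <= 50
    #   assert sqlite_bench.benchmark_query(1000) <= 20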