ai-agent/tests/performance/web_benchmark.py

108 lines
No EOL
3.2 KiB
Python

"""Web interface performance benchmarks."""
import time
import threading
import requests
import json
from typing import Dict, List
BASE_URL = "http://localhost:8000"
def measure_endpoint(endpoint: str, method: str = "GET",
data: dict = None, iterations: int = 100) -> Dict:
"""Measure execution time of a web endpoint."""
times = []
for _ in range(iterations):
start = time.time()
if method == "GET":
requests.get(f"{BASE_URL}{endpoint}")
elif method == "POST":
requests.post(f"{BASE_URL}{endpoint}", json=data)
times.append(time.time() - start)
return {
"endpoint": endpoint,
"method": method,
"iterations": iterations,
"avg_time": sum(times) / iterations,
"min_time": min(times),
"max_time": max(times),
"throughput": iterations / sum(times)
}
def benchmark_with_security(endpoint: str, method: str = "GET",
data: dict = None) -> Dict:
"""Measure performance with security overhead."""
results = {}
# Baseline without security
results["baseline"] = measure_endpoint(endpoint, method, data)
# With RBAC checks
results["rbac"] = measure_endpoint(
f"{endpoint}?rbac_enforce=true", method, data)
# With TLS 1.3 (simulated)
results["tls"] = measure_endpoint(
f"{endpoint}?tls_simulated=true", method, data)
# With both
results["full_security"] = measure_endpoint(
f"{endpoint}?rbac_enforce=true&tls_simulated=true", method, data)
return results
def concurrent_benchmark(endpoint: str, method: str = "GET",
data: dict = None, threads: int = 10) -> Dict:
"""Measure concurrent endpoint access."""
results = []
lock = threading.Lock()
def worker():
res = measure_endpoint(endpoint, method, data, iterations=10)
with lock:
results.append(res)
threads = [threading.Thread(target=worker) for _ in range(threads)]
start = time.time()
for t in threads:
t.start()
for t in threads:
t.join()
elapsed = time.time() - start
return {
"endpoint": endpoint,
"method": method,
"threads": threads,
"total_time": elapsed,
"throughput": (threads * 10) / elapsed,
"individual_results": results
}
if __name__ == "__main__":
# Example endpoints to benchmark
endpoints = [
("/api/tasks", "GET"),
("/api/events", "POST", {"event": "test"}),
("/api/status", "GET")
]
results = {}
for endpoint in endpoints:
if len(endpoint) == 2:
path, method = endpoint
data = None
else:
path, method, data = endpoint
print(f"\nBenchmarking: {method} {path}")
results[f"{method} {path}"] = {
"single_thread": benchmark_with_security(path, method, data),
"concurrent": concurrent_benchmark(path, method, data)
}
# Save results
with open("performance_logs.json", "w") as f:
json.dump(results, f, indent=2)
print("\nBenchmark results saved to performance_logs.json")