"""CLI interface performance benchmarks."""

import json
import subprocess
import threading
import time
from typing import Dict


def measure_command(command: str, iterations: int = 100) -> Dict:
    """Measure execution time of a CLI command."""
    times = []
    for _ in range(iterations):
        # Use a monotonic high-resolution clock for benchmark timing.
        start = time.perf_counter()
        subprocess.run(command, shell=True, check=True,
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        times.append(time.perf_counter() - start)

    return {
        "command": command,
        "iterations": iterations,
        "avg_time": sum(times) / iterations,
        "min_time": min(times),
        "max_time": max(times),
        "throughput": iterations / sum(times)
    }


def benchmark_with_security(command: str) -> Dict:
    """Measure performance with security overhead."""
    results = {}

    # Baseline without security
    results["baseline"] = measure_command(command)

    # With RBAC checks
    results["rbac"] = measure_command(f"{command} --rbac-enforce")

    # With TLS 1.3
    results["tls"] = measure_command(f"{command} --tls-1.3")

    # With both
    results["full_security"] = measure_command(
        f"{command} --rbac-enforce --tls-1.3")

    return results


def concurrent_benchmark(command: str, threads: int = 10) -> Dict:
    """Measure concurrent command execution."""
    results = []
    lock = threading.Lock()

    def worker():
        res = measure_command(command, iterations=10)
        with lock:
            results.append(res)

    # Keep the thread objects under a separate name so the `threads` count
    # stays available for the summary below.
    workers = [threading.Thread(target=worker) for _ in range(threads)]
    start = time.perf_counter()
    for t in workers:
        t.start()
    for t in workers:
        t.join()
    elapsed = time.perf_counter() - start

    return {
        "command": command,
        "threads": threads,
        "total_time": elapsed,
        "throughput": (threads * 10) / elapsed,
        "individual_results": results
    }


if __name__ == "__main__":
    # Example commands to benchmark
    commands = [
        "python cli_interface.py task list",
        "python cli_interface.py event trigger test",
        "python cli_interface.py status check"
    ]

    results = {}
    for cmd in commands:
        print(f"\nBenchmarking: {cmd}")
        results[cmd] = {
            "single_thread": benchmark_with_security(cmd),
            "concurrent": concurrent_benchmark(cmd)
        }

    # Save results
    with open("performance_logs.json", "w") as f:
        json.dump(results, f, indent=2)

    print("\nBenchmark results saved to performance_logs.json")