Solomon7890 committed
Commit f07a1a9 · verified · 1 Parent(s): 36c80e0

Add Performance Module

Files changed (1)
  1. performance_optimizer.py +166 -0
performance_optimizer.py ADDED
@@ -0,0 +1,166 @@
+ """
+ Performance Optimization Module for ProVerBs Ultimate Brain
+ - Response caching with TTL expiry
+ - Bounded cache size with oldest-entry eviction
+ - Async caching decorator
+ - Performance metrics tracking
+ """
+
+ import functools
+ import hashlib
+ import time
+ from typing import Any, Dict, Optional
+ from datetime import datetime, timedelta
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+ class PerformanceCache:
+     """In-memory cache with TTL for responses"""
+
+     def __init__(self, max_size: int = 1000, ttl_seconds: int = 3600):
+         self.cache: Dict[str, Dict[str, Any]] = {}
+         self.max_size = max_size
+         self.ttl_seconds = ttl_seconds
+
+     def _generate_key(self, query: str, mode: str, ai_provider: str) -> str:
+         """Generate cache key from query parameters"""
+         # MD5 is used here as a fast, stable key hash, not as a security measure
+         content = f"{query}:{mode}:{ai_provider}"
+         return hashlib.md5(content.encode()).hexdigest()
+
+     def get(self, query: str, mode: str, ai_provider: str) -> Optional[Any]:
+         """Get cached response if available and not expired"""
+         key = self._generate_key(query, mode, ai_provider)
+
+         if key in self.cache:
+             entry = self.cache[key]
+             if datetime.now() < entry['expires']:
+                 logger.info(f"Cache HIT for query: {query[:50]}...")
+                 return entry['response']
+             else:
+                 del self.cache[key]
+                 logger.info(f"Cache EXPIRED for query: {query[:50]}...")
+
+         logger.info(f"Cache MISS for query: {query[:50]}...")
+         return None
+
+     def set(self, query: str, mode: str, ai_provider: str, response: Any):
+         """Cache a response with TTL"""
+         # If the cache is full, evict the oldest entry
+         if len(self.cache) >= self.max_size:
+             oldest_key = min(self.cache.keys(), key=lambda k: self.cache[k]['timestamp'])
+             del self.cache[oldest_key]
+
+         key = self._generate_key(query, mode, ai_provider)
+         self.cache[key] = {
+             'response': response,
+             'timestamp': datetime.now(),
+             'expires': datetime.now() + timedelta(seconds=self.ttl_seconds)
+         }
+         logger.info(f"Cached response for query: {query[:50]}...")
+
+     def clear(self):
+         """Clear all cache entries"""
+         self.cache.clear()
+         logger.info("Cache cleared")
+
+     def get_stats(self) -> Dict[str, Any]:
+         """Get cache statistics"""
+         return {
+             "size": len(self.cache),
+             "max_size": self.max_size,
+             "ttl_seconds": self.ttl_seconds,
+             "oldest_entry": min(e['timestamp'] for e in self.cache.values()) if self.cache else None
+         }
+
+
+ class PerformanceMonitor:
+     """Monitor and log performance metrics"""
+
+     def __init__(self):
+         self.metrics = {
+             "total_requests": 0,
+             "cache_hits": 0,
+             "cache_misses": 0,
+             "avg_response_time": 0.0,
+             "total_response_time": 0.0,
+             "errors": 0
+         }
+
+     def record_request(self, response_time: float, cached: bool = False, error: bool = False):
+         """Record request metrics"""
+         self.metrics["total_requests"] += 1
+
+         if cached:
+             self.metrics["cache_hits"] += 1
+         else:
+             self.metrics["cache_misses"] += 1
+
+         if error:
+             self.metrics["errors"] += 1
+         else:
+             self.metrics["total_response_time"] += response_time
+             self.metrics["avg_response_time"] = (
+                 self.metrics["total_response_time"] /
+                 (self.metrics["total_requests"] - self.metrics["errors"])
+             )
+
+     def get_metrics(self) -> Dict[str, Any]:
+         """Get current metrics"""
+         cache_hit_rate = 0.0
+         if self.metrics["total_requests"] > 0:
+             cache_hit_rate = self.metrics["cache_hits"] / self.metrics["total_requests"] * 100
+
+         return {
+             **self.metrics,
+             "cache_hit_rate": f"{cache_hit_rate:.2f}%"
+         }
+
+     def reset(self):
+         """Reset metrics"""
+         self.metrics = {
+             "total_requests": 0,
+             "cache_hits": 0,
+             "cache_misses": 0,
+             "avg_response_time": 0.0,
+             "total_response_time": 0.0,
+             "errors": 0
+         }
+
+
+ # Global instances
+ performance_cache = PerformanceCache(max_size=500, ttl_seconds=1800)  # 30 min TTL
+ performance_monitor = PerformanceMonitor()
+
+
+ def with_caching(func):
+     """Decorator to add caching to async functions"""
+     @functools.wraps(func)
+     async def wrapper(query: str, mode: str, ai_provider: str, *args, **kwargs):
+         start_time = time.time()
+
+         # Try the cache first
+         cached_response = performance_cache.get(query, mode, ai_provider)
+         if cached_response is not None:
+             response_time = time.time() - start_time
+             performance_monitor.record_request(response_time, cached=True)
+             return cached_response
+
+         # Execute the wrapped function; only successful responses are cached
+         try:
+             response = await func(query, mode, ai_provider, *args, **kwargs)
+
+             # Cache the successful response
+             performance_cache.set(query, mode, ai_provider, response)
+
+             response_time = time.time() - start_time
+             performance_monitor.record_request(response_time, cached=False)
+
+             return response
+         except Exception:
+             response_time = time.time() - start_time
+             performance_monitor.record_request(response_time, cached=False, error=True)
+             raise  # re-raise with the original traceback intact
+
+     return wrapper
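
For reference, a minimal usage sketch (not part of the commit). The coroutine name fetch_ai_response and its body are hypothetical; the real constraint, visible in the wrapper's signature above, is that a decorated coroutine must accept query, mode, and ai_provider as its first three positional arguments, since those three values form the cache key.

    import asyncio

    from performance_optimizer import performance_cache, performance_monitor, with_caching

    @with_caching
    async def fetch_ai_response(query: str, mode: str, ai_provider: str) -> str:
        # Hypothetical stand-in for a real AI provider call
        await asyncio.sleep(0.1)
        return f"[{ai_provider}/{mode}] answer to: {query}"

    async def main():
        # First call misses the cache and runs the coroutine
        await fetch_ai_response("What is a TTL?", "chat", "openai")
        # An identical second call is served from the cache
        await fetch_ai_response("What is a TTL?", "chat", "openai")

        print(performance_cache.get_stats())      # size, max_size, ttl_seconds, oldest_entry
        print(performance_monitor.get_metrics())  # includes "cache_hit_rate": "50.00%"

    asyncio.run(main())

Note that the module-level globals make the cache process-local: separate worker processes will each hold their own independent cache and metrics.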