WanIrfan committed on
Commit e495386 · verified · 1 Parent(s): 9cfe3dd

Upload 6 files

Files changed (6)
  1. __init__.py +5 -0
  2. app_1.py +518 -0
  3. doc_qa.py +751 -0
  4. docker +34 -0
  5. medical_swarm.py +149 -0
  6. metrics_tracker.py +206 -0
__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ from .docparser import DocParser
2
+ from .chunkers import Chunker, SemanticChunker, AgenticChunker
3
+ from .imageprocessing import ImageProcessor
4
+ from .doc_qa import AgenticQA
5
+ from src.indexing import indexing
app_1.py ADDED
@@ -0,0 +1,518 @@
1
+ from flask import Flask, request, render_template, session, url_for, redirect, jsonify
2
+ from flask_session import Session
3
+ from langchain_core.messages import HumanMessage, AIMessage
4
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
5
+ import os
6
+ import logging
7
+ import re
8
+ import traceback
9
+ import base64
10
+ import shutil
11
+ import zipfile
12
+ from dotenv import load_dotenv
13
+ from huggingface_hub import hf_hub_download
14
+ from PIL import Image
15
+
16
+ # --- Core Application Imports ---
17
+ # Make sure your 'src' folder contains an __init__.py file so it can be imported as a package
18
+ from api import api_bp
19
+ from src.medical_swarm import run_medical_swarm
20
+ from src.utils import load_rag_system, standardize_query, get_standalone_question, parse_agent_response, markdown_bold_to_html
21
+ from langchain_google_genai import ChatGoogleGenerativeAI
22
+
23
+ # Setup logging
24
+ logging.basicConfig(level=logging.DEBUG)
25
+ logger = logging.getLogger(__name__)
26
+
27
+ # Load environment variables
28
+ load_dotenv()
29
+
30
+ # --- 1. DATABASE SETUP FUNCTION (For Deployment) ---
31
+ def setup_database():
32
+ """Downloads and unzips the ChromaDB folder from Hugging Face Datasets."""
33
+
34
+ # --- !!! IMPORTANT !!! ---
35
+ # YOU MUST CHANGE THIS to your Hugging Face Dataset repo ID
36
+ # For example: "your_username/your_database_repo_name"
37
+ DATASET_REPO_ID = "WanIrfan/atlast-db"
38
+ # -------------------------
39
+
40
+ ZIP_FILENAME = "chroma_db.zip"
41
+ DB_DIR = "chroma_db"
42
+
43
+ if os.path.exists(DB_DIR) and os.listdir(DB_DIR):
44
+ logger.info("✅ Database directory already exists. Skipping download.")
45
+ return
46
+
47
+ logger.info(f"📥 Downloading database from HF Hub: {DATASET_REPO_ID}")
48
+ try:
49
+ zip_path = hf_hub_download(
50
+ repo_id=DATASET_REPO_ID,
51
+ filename=ZIP_FILENAME,
52
+ repo_type="dataset",
53
+ # You might need to add your HF token to secrets if the dataset is private
54
+ # token=os.getenv("HF_TOKEN")
55
+ )
56
+
57
+ logger.info(f"📦 Unzipping database from {zip_path}...")
58
+ with zipfile.ZipFile(zip_path, 'r') as zip_ref:
59
+ zip_ref.extractall(".") # Extracts to the root, creating ./chroma_db
60
+
61
+ logger.info("✅ Database setup complete!")
62
+
63
+ # Clean up the downloaded zip file to save space
64
+ if os.path.exists(zip_path):
65
+ os.remove(zip_path)
66
+
67
+ except Exception as e:
68
+ logger.error(f"❌ CRITICAL ERROR setting up database: {e}", exc_info=True)
69
+ # This will likely cause the RAG system to fail loading, which is expected
70
+ # if the database isn't available.
71
+
72
+ # --- RUN DATABASE SETUP *BEFORE* INITIALIZING THE APP ---
73
+ setup_database()
74
+
75
+
76
+ # --- STANDARD FLASK APP INITIALIZATION ---
77
+ app = Flask(__name__)
78
+ app.secret_key = os.urandom(24) # Set a secret key for session signing
79
+
80
+ # --- CONFIGURE SERVER-SIDE SESSIONS ---
81
+ app.config["SESSION_PERMANENT"] = False
82
+ app.config["SESSION_TYPE"] = "filesystem"
83
+ Session(app)
84
+
85
+ google_api_key = os.getenv("GOOGLE_API_KEY")
86
+ if not google_api_key:
87
+ logger.warning("⚠️ GOOGLE_API_KEY not found in environment variables. LLM calls will fail.")
88
+ else:
89
+ logger.info("GOOGLE_API_KEY loaded successfully.")
90
+
91
+ # Initialize LLM
92
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=0.05, google_api_key=google_api_key)
93
+
94
+ # --- LOAD RAG SYSTEMS (AFTER DB SETUP) ---
95
+ logger.info("🌟 Starting Multi-Domain AI Assistant...")
96
+ try:
97
+ rag_systems = {
98
+ 'medical': load_rag_system(collection_name="medical_csv_Agentic_retrieval", domain="medical"),
99
+ 'islamic': load_rag_system(collection_name="islamic_texts_Agentic_retrieval", domain="islamic"),
100
+ 'insurance': load_rag_system(collection_name="etiqa_Agentic_retrieval", domain="insurance")
101
+ }
102
+ except Exception as e:
103
+ logger.error(f"❌ FAILED to load RAG systems. Check database path and permissions. Error: {e}", exc_info=True)
104
+ rag_systems = {'medical': None, 'islamic': None, 'insurance': None}
105
+
106
+ # Store systems and LLM on the app for blueprints
107
+ app.rag_systems = rag_systems
108
+ app.llm = llm
109
+
110
+ # Register the API blueprint
111
+ app.register_blueprint(api_bp)
112
+ logger.info(f"✅ API Blueprint registered. API endpoints are now available under /api")
113
+
114
+ # Check initialization status
115
+ logger.info("\n📊 SYSTEM STATUS:")
116
+ for domain, system in rag_systems.items():
117
+ status = "✅ Ready" if system else "❌ Failed (DB missing?)"
118
+ logger.info(f" {domain}: {status}")
119
+
120
+
121
+ # --- FLASK ROUTES ---
122
+
123
+ @app.route("/")
124
+ def homePage():
125
+ # Clear all session history when visiting the home page
126
+ session.pop('medical_history', None)
127
+ session.pop('islamic_history', None)
128
+ session.pop('insurance_history', None)
129
+ session.pop('current_medical_document', None)
130
+ return render_template("homePage.html")
131
+
132
+
133
+ @app.route("/medical", methods=["GET", "POST"])
134
+ def medical_page():
135
+ # Use session for history and document context
136
+ if request.method == "GET":
137
+ # Load all latest data from session (or default to empty if not found)
138
+ latest_response = session.pop('latest_medical_response', {}) # POP to clear it after one display
139
+
140
+ answer = latest_response.get('answer', "")
141
+ thoughts = latest_response.get('thoughts', "")
142
+ validation = latest_response.get('validation', "")
143
+ source = latest_response.get('source', "")
144
+
145
+ # Clear history only when a user first navigates (not on redirect)
146
+ if not latest_response and 'medical_history' not in session:
147
+ session.pop('current_medical_document', None)
148
+
149
+ return render_template("medical_page.html",
150
+ history=session.get('medical_history', []),
151
+ answer=answer,
152
+ thoughts=thoughts,
153
+ validation=validation,
154
+ source=source)
155
+
156
+ # POST Request Logic
157
+ answer, thoughts, validation, source = "", "", "", ""
158
+ history = session.get('medical_history', [])
159
+ current_medical_document = session.get('current_medical_document', "")
160
+
161
+
162
+ try:
163
+ query=standardize_query(request.form.get("query", ""))
164
+ has_image = 'image' in request.files and request.files['image'].filename
165
+ has_document = 'document' in request.files and request.files['document'].filename
166
+ has_query = request.form.get("query") or request.form.get("question", "")
167
+
168
+ logger.info(f"POST request received: has_image={has_image}, has_document={has_document}, has_query={has_query}")
169
+
170
+ if has_document:
171
+ # Scenario 3: Query + Document
172
+ logger.info("Processing Scenario 3: Query + Document with Medical Swarm")
173
+ file = request.files['document']
174
+ try:
175
+ # Store the new document text in the session
176
+ document_text = file.read().decode("utf-8")
177
+ session['current_medical_document'] = document_text
178
+ current_medical_document = document_text # Use the new document for this turn
179
+ except UnicodeDecodeError:
180
+ answer = "Error: Could not decode the uploaded document. Please ensure it is a valid text or PDF file."
181
+ logger.error("Scenario 3: Document decode error")
182
+ thoughts = traceback.format_exc()
183
+
184
+ swarm_answer = run_medical_swarm(current_medical_document, query)
185
+ answer = markdown_bold_to_html(swarm_answer)
186
+
187
+ history.append(HumanMessage(content=f"[Document Uploaded] Query: '{query}'"))
188
+ history.append(AIMessage(content=swarm_answer))
189
+ thoughts = "Swarm analysis complete. The process is orchestrated and does not use the ReAct thought process. You can now ask follow-up questions."
190
+ source= "Medical Swarm"
191
+ validation = (True, "Swarm output generated.") # Swarm has its own validation logic
192
+
193
+ elif has_image :
194
+ #Scenario 1
195
+ logger.info("Processing Multimodal RAG: Query + Image")
196
+ # --- Step 1 & 2: Image Setup & Vision Analysis ---
197
+ file = request.files['image']
198
+ upload_dir = "Uploads"
199
+ os.makedirs(upload_dir, exist_ok=True)
200
+ image_path = os.path.join(upload_dir, file.filename)
201
+
202
+ try:
203
+ file.save(image_path)
204
+ file.close()
205
+
206
+ with open(image_path, "rb") as img_file:
207
+ img_data = base64.b64encode(img_file.read()).decode("utf-8")
208
+
209
+
210
+ vision_prompt = f"Analyze this image and identify the main subject in a single, concise sentence. The user's query is: '{query}'"
211
+ message = HumanMessage(content=[
212
+ {"type": "text", "text": vision_prompt},
213
+ {"type": "image_url", "image_url": f"data:image/jpeg;base64,{img_data}"}
214
+ ])
215
+ vision_response = llm.invoke([message])
216
+ visual_prediction = vision_response.content
217
+ logger.info(f"Vision Prediction: {visual_prediction}")
218
+
219
+ # --- Create an Enhanced Query ---
220
+ enhanced_query = (
221
+ f'User Query: "{query}" '
222
+ f'Context from an image provided by the LLM: "{visual_prediction}" '
223
+ 'Based on the user\'s query and the context from LLM, provide a comprehensive answer.'
224
+ )
225
+ logger.info(f"Enhanced query : {enhanced_query}")
226
+
227
+ agent = rag_systems['medical']
228
+ if not agent: raise Exception("Medical RAG system is not loaded.")
229
+ response_dict = agent.answer(enhanced_query, chat_history=history)
230
+ answer, thoughts, validation, source = parse_agent_response(response_dict)
231
+ history.append(HumanMessage(content=query))
232
+ history.append(AIMessage(content=answer))
233
+
234
+ finally:
235
+ if os.path.exists(image_path):
236
+ try:
237
+ os.remove(image_path)
238
+ logger.info(f"Successfully deleted temporary image file: {image_path}")
239
+ except PermissionError as e:
240
+ logger.warning(f"Could not remove {image_path} after processing. "
241
+ f"File may be locked by another process. Error: {e}")
242
+
243
+ elif query:
244
+ # --- SCENARIO 2: TEXT-ONLY QUERY OR SWARM FOLLOW-UP ---
245
+ history_for_agent = history
246
+ if current_medical_document:
247
+ logger.info("Processing Follow-up Query for Document")
248
+ history_for_agent = [HumanMessage(content=f"We are discussing this document:\n{current_medical_document}")] + history
249
+ else:
250
+ logger.info("Processing Text RAG query for Medical domain")
251
+
252
+ logger.info(f"Original Query: '{query}'")
253
+ print(f"📚 Using chat history with {len(history)} previous messages to create standalone query")
254
+ standalone_query = get_standalone_question(query, history_for_agent,llm)
255
+ logger.info(f"Standalone Query: '{standalone_query}'")
256
+
257
+ agent = rag_systems['medical']
258
+ if not agent: raise Exception("Medical RAG system is not loaded.")
259
+ response_dict = agent.answer(standalone_query, chat_history=history_for_agent)
260
+ answer, thoughts, validation, source = parse_agent_response(response_dict)
261
+
262
+ history.append(HumanMessage(content=query))
263
+ history.append(AIMessage(content=answer))
264
+
265
+ else:
266
+ raise ValueError("No query or file provided.")
267
+ except Exception as e:
268
+ logger.error(f"Error on /medical page: {e}", exc_info=True)
269
+ answer = f"An error occurred: {e}"
270
+ thoughts = traceback.format_exc()
271
+
272
+ # Save updated history and LATEST RESPONSE DATA back to the session
273
+ session['medical_history'] = history
274
+ session['latest_medical_response'] = {
275
+ 'answer': answer,
276
+ 'thoughts': thoughts,
277
+ 'validation': validation,
278
+ 'source': source
279
+ }
280
+ session.modified = True
281
+
282
+ logger.debug(f"Redirecting after saving latest response.")
283
+ return redirect(url_for('medical_page'))
284
+
285
+ @app.route("/medical/clear")
286
+ def clear_medical_chat():
287
+ session.pop('medical_history', None)
288
+ session.pop('current_medical_document', None)
289
+ logger.info("Medical chat history cleared.")
290
+ return redirect(url_for('medical_page'))
291
+
292
+ @app.route("/islamic", methods=["GET", "POST"])
293
+ def islamic_page():
294
+ #Use session
295
+
296
+ if request.method == "GET":
297
+ # Load all latest data from session (or default to empty if not found)
298
+ latest_response = session.pop('latest_islamic_response', {}) # POP to clear it after one display
299
+
300
+ answer = latest_response.get('answer', "")
301
+ thoughts = latest_response.get('thoughts', "")
302
+ validation = latest_response.get('validation', "")
303
+ source = latest_response.get('source', "")
304
+
305
+ # Clear history only when a user first navigates (no latest_response and no current history)
306
+ if not latest_response and 'islamic_history' not in session:
307
+ session.pop('islamic_history', None)
308
+
309
+ return render_template("islamic_page.html",
310
+ history=session.get('islamic_history', []),
311
+ answer=answer,
312
+ thoughts=thoughts,
313
+ validation=validation,
314
+ source=source)
315
+
316
+ # POST Request Logic
317
+ answer, thoughts, validation, source = "", "", "", ""
318
+ history = session.get('islamic_history', [])
319
+
320
+ # This try/except block wraps the ENTIRE POST logic
321
+ try:
322
+ query = standardize_query(request.form.get("query", ""))
323
+ has_image = 'image' in request.files and request.files['image'].filename
324
+
325
+ final_query = query # Default to the original query
326
+
327
+ if has_image:
328
+ logger.info("Processing Multimodal RAG query for Islamic domain")
329
+
330
+ file = request.files['image']
331
+
332
+ upload_dir = "Uploads"
333
+ os.makedirs(upload_dir, exist_ok=True)
334
+ image_path = os.path.join(upload_dir, file.filename)
335
+
336
+ try:
337
+ file.save(image_path)
338
+ file.close()
339
+
340
+ with open(image_path, "rb") as img_file:
341
+ img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
342
+
343
+ vision_prompt = f"Analyze this image's main subject. User's query is: '{query}'"
344
+ message = HumanMessage(content=[{"type": "text", "text": vision_prompt}, {"type": "image_url", "image_url": f"data:image/jpeg;base64,{img_base64}"}])
345
+ visual_prediction = llm.invoke([message]).content
346
+
347
+ enhanced_query = (
348
+ f'User Query: "{query}" '
349
+ f'Context from an image provided by the LLM: "{visual_prediction}" '
350
+ 'Based on the user\'s query and the context from LLM, provide a comprehensive answer.'
351
+ )
352
+ logger.info(f"Create enchanced query : {enhanced_query}")
353
+
354
+ final_query = enhanced_query
355
+
356
+ finally:
357
+ if os.path.exists(image_path):
358
+ try:
359
+ os.remove(image_path)
360
+ logger.info(f"Successfully cleaned up {image_path}")
361
+ except PermissionError as e:
362
+ logger.warning(f"Could not remove {image_path} after processing. "
363
+ f"File may be locked. Error: {e}")
364
+
365
+ elif query: # Only run text logic if there's a query and no image
366
+ logger.info("Processing Text RAG query for Islamic domain")
367
+ standalone_query = get_standalone_question(query, history,llm)
368
+ logger.info(f"Original Query: '{query}'")
369
+ print(f"📚 Using chat history with {len(history)} previous messages to create standalone query")
370
+ logger.info(f"Standalone Query: '{standalone_query}'")
371
+ final_query = standalone_query
372
+
373
+ if not final_query:
374
+ raise ValueError("No query or file provided.")
375
+
376
+ agent = rag_systems['islamic']
377
+ if not agent: raise Exception("Islamic RAG system is not loaded.")
378
+ response_dict = agent.answer(final_query, chat_history=history)
379
+ answer, thoughts , validation, source = parse_agent_response(response_dict)
380
+ history.append(HumanMessage(content=query))
381
+ history.append(AIMessage(content=answer))
382
+
383
+ except Exception as e:
384
+ logger.error(f"Error on /islamic page: {e}", exc_info=True)
385
+ answer = f"An error occurred: {e}"
386
+ thoughts = traceback.format_exc()
387
+
388
+ # Save updated history and LATEST RESPONSE DATA back to the session
389
+ session['islamic_history'] = history
390
+ session['latest_islamic_response'] = {
391
+ 'answer': answer,
392
+ 'thoughts': thoughts,
393
+ 'validation': validation,
394
+ 'source': source
395
+ }
396
+ session.modified = True
397
+
398
+ logger.debug(f"Redirecting after saving latest response.")
399
+ return redirect(url_for('islamic_page'))
400
+
401
+ @app.route("/islamic/clear")
402
+ def clear_islamic_chat():
403
+ session.pop('islamic_history', None)
404
+ logger.info("Islamic chat history cleared.")
405
+ return redirect(url_for('islamic_page'))
406
+
407
+ @app.route("/insurance", methods=["GET", "POST"])
408
+ def insurance_page():
409
+ if request.method == "GET" :
410
+ latest_response = session.pop('latest_insurance_response',{})
411
+
412
+ answer = latest_response.get('answer', "")
413
+ thoughts = latest_response.get('thoughts', "")
414
+ validation = latest_response.get('validation', "")
415
+ source = latest_response.get('source', "")
416
+
417
+ if not latest_response and 'insurance_history' not in session:
418
+ session.pop('insurance_history', None)
419
+
420
+ return render_template("insurance_page.html", # You will need to create this HTML file
421
+ history=session.get('insurance_history', []),
422
+ answer=answer,
423
+ thoughts=thoughts,
424
+ validation=validation,
425
+ source=source)
426
+
427
+ # POST Request Logic
428
+ answer, thoughts, validation, source = "", "", "", ""
429
+ history = session.get('insurance_history', [])
430
+
431
+ try:
432
+ query = standardize_query(request.form.get("query", ""))
433
+
434
+ if query:
435
+ logger.info("Processing Text RAG query for Insurance domain")
436
+ standalone_query = get_standalone_question(query, history, llm)
437
+ logger.info(f"Original Query: '{query}'")
438
+ logger.info(f"Standalone Query: '{standalone_query}'")
439
+
440
+ agent = rag_systems['insurance']
441
+ if not agent: raise Exception("Insurance RAG system is not loaded.")
442
+ response_dict = agent.answer(standalone_query, chat_history=history)
443
+ answer, thoughts, validation, source = parse_agent_response(response_dict)
444
+
445
+ history.append(HumanMessage(content=query))
446
+ history.append(AIMessage(content=answer))
447
+ else:
448
+ raise ValueError("No query provided.")
449
+
450
+ except Exception as e:
451
+ logger.error(f"Error on /insurance page: {e}", exc_info=True)
452
+ answer = f"An error occurred: {e}"
453
+ thoughts = traceback.format_exc()
454
+
455
+ session['insurance_history'] = history
456
+ session['latest_insurance_response'] = {
457
+ 'answer': answer,
458
+ 'thoughts': thoughts,
459
+ 'validation': validation,
460
+ 'source': source
461
+ }
462
+ session.modified = True
463
+
464
+ logger.debug(f"Redirecting after saving latest response.")
465
+ return redirect(url_for('insurance_page'))
466
+
467
+ @app.route("/insurance/clear")
468
+ def clear_insurance_chat():
469
+ session.pop('insurance_history', None)
470
+ logger.info("Insurance chat history cleared.")
471
+ return redirect(url_for('insurance_page'))
472
+
473
+ @app.route("/about", methods=["GET"])
474
+ def about():
475
+ return render_template("about.html")
476
+
477
+ @app.route('/metrics/<domain>')
478
+ def get_metrics(domain):
479
+ """API endpoint to get metrics for a specific domain."""
480
+ try:
481
+ if domain == "medical" and rag_systems['medical']:
482
+ stats = rag_systems['medical'].metrics_tracker.get_stats()
483
+ elif domain == "islamic" and rag_systems['islamic']:
484
+ stats = rag_systems['islamic'].metrics_tracker.get_stats()
485
+ elif domain == "insurance" and rag_systems['insurance']:
486
+ stats = rag_systems['insurance'].metrics_tracker.get_stats()
487
+ elif not rag_systems.get(domain):
488
+ return jsonify({"error": f"{domain} RAG system not loaded"}), 500
489
+ else:
490
+ return jsonify({"error": "Invalid domain"}), 400
491
+
492
+ return jsonify(stats)
493
+ except Exception as e:
494
+ return jsonify({"error": str(e)}), 500
495
+
496
+ @app.route('/metrics/reset/<domain>', methods=['POST'])
497
+ def reset_metrics(domain):
498
+ """Reset metrics for a domain (useful for testing)."""
499
+ try:
500
+ if domain == "medical" and rag_systems['medical']:
501
+ rag_systems['medical'].metrics_tracker.reset_metrics()
502
+ elif domain == "islamic" and rag_systems['islamic']:
503
+ rag_systems['islamic'].metrics_tracker.reset_metrics()
504
+ elif domain == "insurance" and rag_systems['insurance']:
505
+ rag_systems['insurance'].metrics_tracker.reset_metrics()
506
+ elif not rag_systems.get(domain):
507
+ return jsonify({"error": f"{domain} RAG system not loaded"}), 500
508
+ else:
509
+ return jsonify({"error": "Invalid domain"}), 400
510
+
511
+ return jsonify({"success": True, "message": f"Metrics reset for {domain}"})
512
+ except Exception as e:
513
+ return jsonify({"error": str(e)}), 500
514
+
515
+ if __name__ == "__main__":
516
+ logger.info("Starting Flask app for deployment testing...")
517
+ # This port 7860 is what Hugging Face Spaces expects by default
518
+ app.run(host="0.0.0.0", port=7860, debug=False)
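For reference, a minimal client-side sketch of how the routes above can be exercised once the app is running. It assumes the app is served locally on port 7860 (the default in app.run above), that the `requests` package is installed, and that the form field name `query` matches the one read by the /medical route; it is a sketch, not part of the committed files.

```python
# Sketch only: exercise the Flask routes defined in app_1.py from a client.
# Assumes the app is running locally on port 7860 and `requests` is installed.
import requests

BASE = "http://localhost:7860"

# Scenario 2 (text-only query): the route stores the result in the server-side
# session and redirects back to GET /medical, which requests follows automatically.
resp = requests.post(f"{BASE}/medical", data={"query": "What are common symptoms of dengue?"})
print(resp.status_code)  # 200 once the redirect to the rendered page is followed

# Aggregated metrics exposed by /metrics/<domain> (JSON from the domain's metrics tracker).
print(requests.get(f"{BASE}/metrics/medical").json())

# Reset metrics for a domain (POST, as defined by /metrics/reset/<domain>).
print(requests.post(f"{BASE}/metrics/reset/medical").json())
```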
doc_qa.py ADDED
@@ -0,0 +1,751 @@
1
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
2
+ from langchain_classic import hub
3
+ from langchain_google_genai import ChatGoogleGenerativeAI
4
+ from langchain_classic.chains.combine_documents import create_stuff_documents_chain
5
+ from langchain_core.tools import Tool
6
+ from langchain_community.tools.tavily_search import TavilySearchResults
7
+ from langchain_community.retrievers import BM25Retriever
8
+ from concurrent.futures import ThreadPoolExecutor, as_completed
9
+ from langchain_core.output_parsers import JsonOutputParser
10
+ from langchain_classic.agents import AgentExecutor, create_react_agent
11
+ from langchain_core.documents import Document
12
+ from langchain_core.messages import AIMessage, HumanMessage
13
+ from langchain_chroma import Chroma
14
+ from langchain_core.agents import AgentAction
15
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings
16
+ from flashrank import Ranker, RerankRequest
17
+ from src.metrics_tracker import MetricsTracker
18
+ import logging
19
+
20
+
21
+ # Setup logging
22
+ logging.basicConfig(level=logging.DEBUG)
23
+ logger = logging.getLogger(__name__)
24
+
25
+ class ContextRetriever:
26
+ def __init__(self, retriever):
27
+ self.retriever = retriever
28
+
29
+ def deduplicate_context(self, context_list):
30
+ """Deduplicate context entries to avoid repetition."""
31
+ seen = set()
32
+ deduped = []
33
+ for item in context_list:
34
+ if item not in seen:
35
+ seen.add(item)
36
+ deduped.append(item)
37
+ return "\n".join(deduped) if deduped else "No relevant context found."
38
+
39
+ def retrieve(self, query, top_k=5):
40
+ """
41
+ Retrieve the top-k relevant contexts from ChromaDB based on the query.
42
+
43
+ Args:
44
+ query (str): The query or prediction to search for.
45
+ top_k (int): Number of top results to return (default: 5).
46
+
47
+ Returns:
48
+ str: Deduplicated context string from the top-k results.
49
+ """
50
+ logger.info(f"Retrieving for query: {query}")
51
+ try:
52
+ # Perform similarity search using ChromaDB retriever
53
+ results = self.retriever.invoke(query, k=top_k)
54
+ logger.info(f"Retrieved documents: {[doc.metadata.get('source') for doc in results]}")
55
+
56
+ # Extract the page content (context) from each result
57
+ contexts = [doc.page_content for doc in results]
58
+ logger.info(f"Context : {contexts}")
59
+
60
+ # Deduplicate the contexts
61
+ deduped_context = self.deduplicate_context(contexts)
62
+ logger.info(f"Deduplicated context: {deduped_context}")
63
+
64
+ return deduped_context
65
+ except Exception as e:
66
+ logger.error(f"Retrieval error: {str(e)}")
67
+ return "Retrieval failed due to error."
68
+
69
+ class LLMComplexityAnalyzer:
70
+ """
71
+ Analyzes query complexity using an LLM to make a "managerial" decision
72
+ on the optimal retrieval strategy.
73
+ """
74
+
75
+ def __init__(self, domain: str, llm: ChatGoogleGenerativeAI):
76
+ self.domain = domain
77
+ self.llm = llm
78
+
79
+ self.system_prompt = (
80
+ "You are a 'Complexity Analyzer' manager for a RAG (Retrieval-Augmented Generation) system. "
81
+ "Your domain of expertise is: **{domain}**."
82
+ "\n"
83
+ "Your task is to analyze the user's query and determine its complexity. Based on this, "
84
+ "you will decide how many documents (k) to retrieve. More complex queries require "
85
+ "more documents to synthesize a good answer."
86
+ "\n"
87
+ "Here are the retrieval strategies:"
88
+ "1. **simple**: For simple, direct fact-finding queries. (e.g., 'What is takaful?') "
89
+ " - Set k = 5"
90
+ "2. **moderate**: For queries that require explanation, some comparison, or have multiple parts. "
91
+ " (e.g., 'What is the difference between madhab Shafi'i and Maliki on prayer?') "
92
+ " - Set k = 10"
93
+ "3. **complex**: For deep, nuanced, multi-step, or highly comparative/synthetic queries. "
94
+ " (e.g., 'Explain in detail the treatment options for type 2 diabetes, comparing "
95
+ " their side effects and suitability for elderly patients.')"
96
+ " - Set k = 15"
97
+ "\n"
98
+ "Analyze the following query and provide your reasoning."
99
+ "\n"
100
+ "**IMPORTANT**: You MUST respond ONLY with a single, valid JSON object. Do not add any "
101
+ "other text. The JSON object must have these three keys:"
102
+ "- `complexity`: (string) Must be one of 'simple', 'moderate', or 'complex'."
103
+ "- `k`: (integer) Must be 5, 10, or 15, corresponding to the complexity."
104
+ "- `reasoning`: (string) A brief 1-sentence explanation for your decision."
105
+ )
106
+
107
+ self.prompt_template = ChatPromptTemplate.from_messages([
108
+ ("system", self.system_prompt.format(domain=self.domain)),
109
+ ("human", "{query}")
110
+ ])
111
+
112
+ self.output_parser = JsonOutputParser()
113
+
114
+ # This chain will output a parsed dictionary
115
+ self.chain = self.prompt_template | self.llm | self.output_parser
116
+
117
+ logger.info(f"🧠 LLMComplexityAnalyzer initialized for '{self.domain}'")
118
+
119
+ def analyze(self, query: str) -> dict:
120
+ """
121
+ Analyzes query complexity using an LLM and returns the retrieval strategy.
122
+ """
123
+ logger.info(f"🧠 LLMComplexityAnalyzer: Analyzing query...")
124
+
125
+ try:
126
+ # Invoke the chain to get the structured JSON output
127
+ result = self.chain.invoke({"query": query})
128
+
129
+ # Add a 'score' field for compatibility
130
+ score_map = {"simple": 2, "moderate": 4, "complex": 6}
131
+ result['score'] = score_map.get(result.get('complexity'), 0)
132
+
133
+ logger.info(f"🧠 LLM Decision: {result.get('complexity').upper()} (k={result.get('k')})")
134
+ logger.info(f" Reasoning: {result.get('reasoning')}")
135
+
136
+ return result
137
+
138
+ except Exception as e:
139
+ # Fallback in case the LLM fails or returns bad JSON
140
+ logger.error(f"❌ LLMComplexityAnalyzer failed: {e}. Defaulting to 'moderate' strategy.")
141
+ return {
142
+ "complexity": "moderate",
143
+ "k": 12,
144
+ "score": 4,
145
+ "reasoning": "Fallback: LLM analysis or JSON parsing failed."
146
+ }
147
+
148
+
149
+ class SwarmRetriever:
150
+ """
151
+ Multi-retriever swarm that executes parallel retrieval strategies.
152
+ Worker component that takes orders from LLMComplexityAnalyzer.
153
+ """
154
+
155
+ def __init__(self, chroma_retriever, documents):
156
+ self.dense_retriever = chroma_retriever # Semantic search
157
+ self.bm25_retriever = BM25Retriever.from_documents(documents) # Keyword search
158
+ self.bm25_retriever.k = 20 # Set high, will be limited by k parameter
159
+ logger.info("✅ SwarmRetriever initialized (Dense + BM25 workers)")
160
+
161
+ def retrieve_with_swarm(self, query: str, k: int) -> list:
162
+ """
163
+ Execute multi-retriever swarm with parallel workers.
164
+ """
165
+ logger.info(f"🐝 Swarm deployment: {2} workers, target k={k}")
166
+
167
+ # Define worker tasks
168
+ retrieval_tasks = {
169
+ "dense_semantic": lambda: self.dense_retriever.invoke(query, k=k),
170
+ "bm25_keyword": lambda: self.bm25_retriever.invoke(query)[:k],
171
+ }
172
+
173
+ # Execute workers in parallel
174
+ swarm_results = {}
175
+ with ThreadPoolExecutor(max_workers=2) as executor:
176
+ futures = {
177
+ executor.submit(task): name
178
+ for name, task in retrieval_tasks.items()
179
+ }
180
+
181
+ for future in as_completed(futures):
182
+ worker_name = futures[future]
183
+ try:
184
+ results = future.result()
185
+ swarm_results[worker_name] = results
186
+ logger.info(f" ✅ Worker '{worker_name}': {len(results)} docs")
187
+ except Exception as e:
188
+ logger.error(f" ❌ Worker '{worker_name}' failed: {e}")
189
+ swarm_results[worker_name] = []
190
+
191
+ # Combine and deduplicate documents
192
+ combined_docs = self._combine_and_deduplicate(swarm_results)
193
+
194
+ return combined_docs
195
+
196
+ def _combine_and_deduplicate(self, swarm_results: dict) -> list:
197
+ """Combine results from all workers and remove duplicates."""
198
+ all_docs = []
199
+ seen_content = set()
200
+ worker_contributions = {}
201
+
202
+ for worker_name, docs in swarm_results.items():
203
+ for doc in docs:
204
+ # Use first 200 chars as hash to detect duplicates
205
+ content_hash = hash(doc.page_content[:200])
206
+
207
+ if content_hash not in seen_content:
208
+ seen_content.add(content_hash)
209
+
210
+ # Tag document with source worker
211
+ doc.metadata['swarm_worker'] = worker_name
212
+ all_docs.append(doc)
213
+
214
+ # Track contributions
215
+ worker_contributions[worker_name] = \
216
+ worker_contributions.get(worker_name, 0) + 1
217
+
218
+ logger.info(f"🐝 Swarm combined: {len(all_docs)} unique docs")
219
+ logger.info(f" Worker contributions: {worker_contributions}")
220
+
221
+ return all_docs
222
+
223
+ class AgenticQA:
224
+ def __init__(self, config=None):
225
+ logger.info("Initializing AgenticQA...")
226
+
227
+ # Load a small, fast reranker model. This runs locally.
228
+ try:
229
+ self.reranker = Ranker(model_name="ms-marco-MiniLM-L-12-v2")
230
+ logger.info("FlashRank Reranker loaded successfully.")
231
+ except Exception as e:
232
+ logger.error(f"Failed to load FlashRank reranker: {e}")
233
+ self.reranker = None
234
+
235
+ self.contextualize_q_system_prompt = (
236
+ "Given a chat history and the latest user question which might reference context in the chat history, "
237
+ "formulate a standalone question which can be understood without the chat history. "
238
+ "IMPORTANT: DO NOT provide any answers or explanations. ONLY rephrase the question if needed. "
239
+ "If the question is already clear and standalone, return it exactly as is. "
240
+ "Output ONLY the reformulated question, nothing else."
241
+ )
242
+
243
+ self.contextualize_q_prompt = ChatPromptTemplate.from_messages(
244
+ [("system", self.contextualize_q_system_prompt),
245
+ MessagesPlaceholder("chat_history"),
246
+ ("human", "{input}")]
247
+ )
248
+ self.qa_system_prompt = (
249
+ "You are an assistant that answers questions in a specific domain for citizens mainly in Malaysia, "
250
+ "depending on the context. "
251
+ "You will receive:\n"
252
+ " • domain = {domain} (either 'medical', 'islamic' , or 'insurance')\n"
253
+ " • context = relevant retrieved passages\n"
254
+ " • user question\n\n"
255
+ "If the context does not contain the answer, **YOU MUST SAY 'I do not know'** or 'I cannot find that information in the provided documents.' Do not use your general knowledge.\n\n"
256
+ "Instructions based on domain:\n"
257
+ "1. If domain = 'medical' :\n"
258
+ " - Answer the question in clear, simple layperson language, "
259
+ " - Citing your sources (e.g. article name, section)."
260
+ " - Add a medical disclaimer: “I am not a doctor…”.\n"
261
+ "2. If domain = 'islamic':\n"
262
+ " - **ALWAYS present both Shafi'i AND Maliki perspectives** if the question is about fiqh/rulings\n"
263
+ " - **Cite specific sources**: Always mention the book name (e.g., 'According to Muwatta Imam Malik...', 'Minhaj al-Talibin states...', 'Umdat al-Salik explains...')\n"
264
+ " - **Structure answer as**:\n"
265
+ " - Shafi'i view (from Umdat al-Salik/Minhaj): [ruling with citation]\n"
266
+ " - Maliki view (from Muwatta): [ruling with citation]\n"
267
+ " - If they agree: mention the consensus\n"
268
+ " - If they differ: present both views objectively without favoring one\n"
269
+ " - **For hadith questions**: provide the narration text, source (book name, hadith number)\n "
270
+ " - - **If ruling has EXCEPTIONS** (like 'except for...', 'unless...'), YOU MUST include them. "
271
+ " If context doesn't show exceptions but the ruling seems absolute, indicate this uncertainty.\n"
272
+ " - If the context does not contain relevant information from BOTH madhabs, acknowledge which sources you have "
273
+ " (e.g., 'Based on Shafi'i sources only...') and suggest consulting additional madhab resources.\n"
274
+ " - **Always end with**: 'This is not a fatwa. Consult a local scholar for guidance specific to your situation.'\n"
275
+ " - Always include hadith narration or quran verse as evidence (if it exists) in the final response "
276
+ " - Keep answers concise but comprehensive enough to show different scholarly views.\n\n"
277
+
278
+ "3. If domain = 'insurance':\n"
279
+ " - Your knowledge is STRICTLY limited to Etiqa Takaful (Motor and Car policies).\n"
280
+ " - First, try to answer ONLY using the provided <context>.\n"
281
+ " - **If the answer is not in the context, YOU MUST SAY 'I do not have information on that specific topic.'** Do not make up an answer.\n"
282
+ " - If the user asks about other Etiqa products (e.g., medical, travel), you MUST use the 'EtiqaWebSearch' tool.\n"
283
+ " - If the user asks about another insurance company (e.g., 'Prudential', 'Takaful Ikhlas'), state that you can only answer about Etiqa Takaful.\n"
284
+ " - If the user asks a general insurance question (e.g., 'What is takaful?', 'What is an excess?'), use the 'GeneralWebSearch' tool.\n"
285
+
286
+ "4. For ALL domains: If the context does not contain the answer, do not make one up. Be honest.\n\n"
287
+ "Context:\n"
288
+ "{context}"
289
+ )
290
+
291
+ self.qa_prompt = ChatPromptTemplate.from_messages(
292
+ [("system", self.qa_system_prompt),
293
+ MessagesPlaceholder("chat_history"),
294
+ ("human", "{input}")]
295
+ )
296
+ self.llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash",temperature=0.05)
297
+ # --- START: NEW QUERY REFINER ---
298
+ self.refiner_system_prompt = (
299
+ "You are an expert search query refiner. Your task is to take a user's question "
300
+ "and rewrite it to be a perfect, concise search query for a database. "
301
+ "Remove all conversational fluff, emotion, and filler words. "
302
+ "Distill the query to its core semantic intent. "
303
+ "For example:"
304
+ "- 'Hi, I was wondering if I can touch a dog if I found it is cute?' becomes 'ruling on touching a dog in islam'"
305
+ "- 'What are the treatments for, like, a common cold?' becomes 'common cold treatment options'"
306
+ "- 'Tell me about diabetes' becomes 'what is diabetes'"
307
+ "Output ONLY the refined query, nothing else."
308
+ )
309
+
310
+ self.refiner_prompt = ChatPromptTemplate.from_messages([
311
+ ("system", self.refiner_system_prompt),
312
+ ("human", "{query}")
313
+ ])
314
+
315
+ self.refiner_chain = self.refiner_prompt | self.llm
316
+ logger.info("✅ Query Refiner chain initialized.")
317
+ # --- END: NEW QUERY REFINER ---
318
+
319
+ self.react_docstore_prompt = hub.pull("aallali/react_tool_priority")
320
+ self.answer_validator = AnswerValidatorAgent(self.llm)
321
+
322
+ self.retriever = None
323
+ self.agent_executor = None
324
+ self.tools = [] # Initialize the attribute
325
+ self.domain = "general"
326
+ self.answer_validator = None
327
+ self.retrieval_agent = None
328
+
329
+ if config:
330
+ logger.info(f"Configuring AgenticQA with provided config: {config}")
331
+ try:
332
+ collection_name = config["retriever"]["collection_name"]
333
+ persist_directory = config["retriever"]["persist_directory"]
334
+ self.domain = config.get("domain", "general") # Get domain from config
335
+
336
+ # 1. Initialize the embedding function
337
+ embedding_function = GoogleGenerativeAIEmbeddings(model="models/text-embedding-004")
338
+
339
+ # 2. Connect to the persistent ChromaDB
340
+ db_client = Chroma(
341
+ persist_directory=persist_directory,
342
+ embedding_function=embedding_function,
343
+ collection_name=collection_name
344
+ )
345
+
346
+ # 3. Set the retriever for this instance
347
+ self.retriever = db_client.as_retriever()
348
+ logger.info(f"✅ Successfully created retriever for collection '{collection_name}'")
349
+ # --- START: NEW SWARM INITIALIZATION ---
350
+ logger.info("Initializing Swarm components...")
351
+ # Get all documents from Chroma for BM25
352
+ all_docs_data = db_client.get()
353
+ docs_for_bm25 = [
354
+ Document(page_content=content, metadata=meta)
355
+ for content, meta in zip(
356
+ all_docs_data['documents'],
357
+ all_docs_data['metadatas']
358
+ )
359
+ ]
360
+
361
+ # Initialize SwarmRetriever (Workers)
362
+ self.swarm_retriever = SwarmRetriever(self.retriever, docs_for_bm25)
363
+
364
+ # Initialize LLMComplexityAnalyzer (Manager)
365
+ self.complexity_analyzer = LLMComplexityAnalyzer(self.domain, self.llm)
366
+ logger.info("✅ Swarm components (Manager + Workers) initialized.")
367
+ # --- END: NEW SWARM INITIALIZATION ---
368
+ self.metrics_tracker = MetricsTracker(save_path=f"metrics_{self.domain}.json")
369
+ logger.info("✅ Metrics tracker initialized")
370
+ # Initialize validator *after* setting domain
371
+ self.answer_validator = AnswerValidatorAgent(self.llm, self.domain)
372
+ # --- This is the new, simple QA chain that will be used *after* reranking ---
373
+ self.qa_chain = create_stuff_documents_chain(self.llm, self.qa_prompt)
374
+
375
+ self._initialize_agent()
376
+
377
+ except Exception as e:
378
+ logger.error(f"❌ Error during AgenticQA setup for '{self.domain}': {e}", exc_info=True)
379
+ else:
380
+ logger.warning("⚠️ AgenticQA initialized without a config. Retriever will be None.")
381
+
382
+ # --- 5. NEW UPGRADED RAG FUNCTION ---
383
+ # This is our new, smarter "worker" function that includes the reranker.
384
+ def _run_rag_with_reranking(self, query: str, chat_history: list) -> str:
385
+ """
386
+ Enhanced Swarm-RAG pipeline with adaptive retrieval and reranking.
387
+
388
+ Pipeline:
389
+ 1. Contextualize query
390
+ 2. Refine query
391
+ 3. ComplexityAnalyzer (Manager) determines optimal k
392
+ 4. SwarmRetriever (Workers) deploys parallel retrievers with k
393
+ 5. Rerank combined swarm results
394
+ 6. Filter results by threshold
395
+ 7. Generate Answer
396
+ """
397
+ logger.info(f"--- 🐝 SWARM RAG (with Reranker) PIPELINE RUNNING for query: '{query}' ---")
398
+
399
+ if not self.reranker or not self.swarm_retriever or not self.complexity_analyzer:
400
+ logger.error("Swarm components or Reranker not initialized. Cannot perform RAG.")
401
+ return "Error: RAG components are not available."
402
+
403
+ try:
404
+ # 1. Contextualize query
405
+ standalone_query = query
406
+ if chat_history:
407
+ contextualize_chain = self.contextualize_q_prompt | self.llm
408
+ response = contextualize_chain.invoke({"chat_history": chat_history, "input": query})
409
+ standalone_query = response.content
410
+ logger.info(f"Contextualized query: '{standalone_query}'")
411
+
412
+ # 2 - REFINE QUERY ---
413
+ logger.info("Refining query for search...")
414
+ response = self.refiner_chain.invoke({"query": standalone_query})
415
+ refined_query = response.content.strip()
416
+ logger.info(f"Refined query: '{refined_query}'")
417
+
418
+
419
+ # 3. Complexity analysis
420
+ analysis = self.complexity_analyzer.analyze(standalone_query)
421
+ k = analysis['k']
422
+ self._last_complexity_analysis = analysis
423
+ logger.info(f"Query complexity: {analysis['complexity'].upper()} | k={k}")
424
+
425
+ # 4. Retrieve with Swarm (Workers)
426
+ swarm_docs = self.swarm_retriever.retrieve_with_swarm(standalone_query, k=k)
427
+
428
+ if not swarm_docs:
429
+ logger.warning("Swarm Retriever found no documents.")
430
+ return "I do not know the answer to that as it is not in my documents."
431
+
432
+ # 5. Format for Reranker
433
+ passages = [
434
+ {"id": i, "text": doc.page_content, "meta": doc.metadata}
435
+ for i, doc in enumerate(swarm_docs)
436
+ ]
437
+
438
+ # 6. Rerank
439
+ logger.info(f"Reranking {len(passages)} swarm-retrieved documents...")
440
+ rerank_request = RerankRequest(query=standalone_query, passages=passages)
441
+ reranked_results = self.reranker.rerank(rerank_request)
442
+
443
+ top_score = reranked_results[0]['score'] if reranked_results else 0
444
+ logger.info(f"Reranking complete. Top score: {top_score:.3f}")
445
+
446
+ # 7. Filter
447
+ threshold = 0.1
448
+ if self.domain == "islamic":
449
+ threshold = 0.05
450
+ elif self.domain == "medical":
451
+ threshold = 0.15
452
+ else:
453
+ threshold = 0.10
454
+
455
+ logger.info(f"Using threshold={threshold} for {self.domain} domain")
456
+ final_docs = []
457
+ worker_contributions = {}
458
+
459
+ for result in reranked_results:
460
+ if result['score'] > threshold:
461
+ # Re-create the Document object from reranked data
462
+ doc = Document(
463
+ page_content=result['text'],
464
+ metadata=result.get('meta', {})
465
+ )
466
+ final_docs.append(doc)
467
+
468
+ # Track worker contributions in final answer
469
+ worker = result.get('meta', {}).get('swarm_worker', 'unknown')
470
+ worker_contributions[worker] = \
471
+ worker_contributions.get(worker, 0) + 1
472
+
473
+ logger.info(f"Filtered to {len(final_docs)} documents above threshold {threshold}.")
474
+ logger.info(f"Final doc contributions: {worker_contributions}")
475
+
476
+ self.metrics_tracker.log_worker_contribution(worker_contributions)
477
+ # 8. Respond
478
+ if not final_docs:
479
+ logger.warning("No documents passed the reranker threshold. Returning 'I don't know.'")
480
+ return "I do not know the answer to that as my document search found no relevant information."
481
+
482
+ # Call the QA chain with the *reranked, filtered* docs
483
+ response = self.qa_chain.invoke({
484
+ "context": final_docs,
485
+ "chat_history": chat_history,
486
+ "input": query,
487
+ "domain": self.domain
488
+ })
489
+
490
+ logger.info("🐝 Swarm RAG pipeline complete. Returning answer.")
491
+ return response
492
+
493
+ except Exception as e:
494
+ logger.error(f"Error in Swarm RAG pipeline: {e}", exc_info=True)
495
+ return "An error occurred while processing your request."
496
+
497
+ def _initialize_agent(self):
498
+ """Build the ReAct agent"""
499
+ """A helper function to build the agent components."""
500
+
501
+ logger.info(f"Initializing agent for domain: '{self.domain}'")
502
+ self.context_retriever = ContextRetriever(self.retriever)
503
+
504
+ # Store chat_history as instance variable so tools can access it
505
+ self._current_chat_history = []
506
+
507
+ # We need a RAG chain for the tool
508
+ # history_aware_retriever = create_history_aware_retriever(self.llm, self.retriever, self.contextualize_q_prompt)
509
+ # question_answer_chain = create_stuff_documents_chain(self.llm, self.qa_prompt)
510
+ # rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
511
+
512
+ def rag_tool_wrapper(query: str) -> str:
513
+ """Wrapper to pass chat history to RAG pipeline."""
514
+ return self._run_rag_with_reranking(query, self._current_chat_history)
515
+
516
+ self.tools = [
517
+ Tool(
518
+ name="RAG",
519
+ func=rag_tool_wrapper,
520
+ description=(f"Use this tool FIRST to search and answer questions about the {self.domain} domain using internal vector database.")
521
+ )
522
+
523
+ ]
524
+
525
+ # --- DOMAIN-SPECIFIC TOOLS ---
526
+ if self.domain == "insurance":
527
+ # Add a specific tool for searching Etiqa's website
528
+ etiqa_search_tool = TavilySearchResults(max_results=3)
529
+ etiqa_search_tool.description = "Use this tool to search the Etiqa Takaful website for products NOT in the RAG context (e.g., medical, travel)."
530
+ # This is a bit of a "hack" to force Tavily to search a specific site.
531
+ # We modify the function it calls.
532
+ original_etiqa_func = etiqa_search_tool.invoke
533
+ def etiqa_site_search(query):
534
+ return original_etiqa_func(f"site:etiqa.com.my {query}")
535
+
536
+ self.tools.append(Tool(
537
+ name="EtiqaWebSearch",
538
+ func=etiqa_site_search,
539
+ description=etiqa_search_tool.description
540
+ ))
541
+
542
+ # Add a general web search tool
543
+ self.tools.append(Tool(
544
+ name="GeneralWebSearch",
545
+ func=TavilySearchResults(max_results=2).invoke,
546
+ description="Use this tool as a fallback for general, non-Etiqa questions (e.g., 'What is takaful?')."
547
+ ))
548
+ elif self.domain == "islamic":
549
+ # Trusted Islamic sources for Malaysia
550
+ islamic_search = TavilySearchResults(max_results=3)
551
+
552
+ def islamic_trusted_search(query):
553
+ # Search only trusted Malaysian Islamic authorities
554
+ sites = "site:muftiwp.gov.my OR site:zulkiflialbakri.com"
555
+ return islamic_search.invoke(f"{sites} {query}")
556
+
557
+ self.tools.append(Tool(
558
+ name="TrustedIslamicSearch",
559
+ func=islamic_trusted_search,
560
+ description=(
561
+ "Use this tool if RAG has incomplete or no answer. "
562
+ "Searches ONLY trusted Malaysian Islamic sources: "
563
+ "Pejabat Mufti Wilayah Persekutuan (muftiwp.gov.my) and "
564
+ "Dr Zulkifli Mohamad Al Bakri (zulkiflialbakri.com/category/soal-jawab-agama/). "
565
+ "These follow Shafi'i madhab which is official in Malaysia."
566
+ )
567
+ ))
568
+
569
+ # General fallback (last resort)
570
+ self.tools.append(Tool(
571
+ name="GeneralWebSearch",
572
+ func=TavilySearchResults(max_results=2).invoke,
573
+ description="Last resort: Use only for general Islamic terms or definitions not found in RAG or trusted sources."
574
+ ))
575
+ else:
576
+ # Remaining domains (e.g. medical) get only the general web search fallback
577
+ self.tools.append(Tool(
578
+ name="GeneralWebSearch",
579
+ func=TavilySearchResults(max_results=2).invoke,
580
+ description="Use this tool as a fallback if the RAG tool finds no relevant information or if the query is about a general topic."
581
+ ))
582
+
583
+ agent = create_react_agent(llm=self.llm, tools=self.tools, prompt=self.react_docstore_prompt)
584
+
585
+ self.agent_executor = AgentExecutor.from_agent_and_tools(
586
+ agent=agent,
587
+ tools=self.tools,
588
+ handle_parsing_errors=True,
589
+ verbose=True,
590
+ return_intermediate_steps=True,
591
+ max_iterations=5
592
+ )
593
+ logger.info(f"✅ Agent Executor(ReAct Agent) created successfully for '{self.domain}'.")
594
+
595
+
596
+ def answer(self, query, chat_history=None):
597
+ """
598
+ Process a query using the agent and returns a clean dictionary.
599
+
600
+ Args:
601
+ query (str): User's question
602
+ chat_history (list): List of previous messages (AIMessage, HumanMessage)
603
+
604
+ Returns:
605
+ dict: Contains 'answer', 'context', 'validation', 'source', 'thoughts'
606
+ """
607
+ if chat_history is None:
608
+ chat_history = []
609
+ self._current_chat_history = chat_history
610
+ if not self.agent_executor:
611
+ return {"answer": "Error: Agent not initialized.", "context": "", "validation": (False, "Init failed"), "source": "Error"}
612
+ # START TIMING
613
+ start_time = self.metrics_tracker.start_query()
614
+ print(f"\n📝 AGENTIC_QA PROCESSING QUERY: '{query}'")
615
+
616
+ response = self.agent_executor.invoke({
617
+ "input": query,
618
+ "chat_history": chat_history,
619
+ "domain": self.domain, # Pass domain to agent
620
+ "metadata": {
621
+ "domain": self.domain
622
+ }
623
+ })
624
+ thoughts= ""
625
+
626
+ final_answer = response.get("output", "Could not generate an answer")
627
+
628
+ tool_used = None
629
+ if "intermediate_steps" in response:
630
+ thought_log= []
631
+ for step in response["intermediate_steps"]:
632
+ # --- FIX: Unpack the (Action, Observation) tuple first ---
633
+ action, observation = step
634
+
635
+ if isinstance(action, AgentAction) and action.tool:
636
+ tool_used = action.tool #Capture the last tool used
637
+
638
+ # Append Thought, Action, Action Input & Observation
639
+ thought_log.append(action.log)
640
+ thought_log.append(f"\nObservation: {str(observation)}\n---")
641
+
642
+ thoughts = "\n".join(thought_log)
643
+
644
+ # Assign source based on the LAST tool used
645
+ if tool_used == "RAG":
646
+ source = "Etiqa Takaful Database" if self.domain == "insurance" else "Domain Database (RAG)"
647
+ elif tool_used == "EtiqaWebSearch":
648
+ source = "Etiqa Website Search"
649
+ elif tool_used == "TrustedIslamicSearch":
650
+ source = "Mufti WP & Dr Zul Search"
651
+ elif tool_used == "GeneralWebSearch":
652
+ source = "General Web Search"
653
+ else:
654
+ source = "Agent Logic"
655
+
656
+ logger.info(f"Tool used: {tool_used}, Source determined: {source}")
657
+
658
+ # Retrieve context only if the RAG tool was used
659
+ # This call is inefficient (it runs a *second* retrieval), but it
660
+ # respects your architecture and works for logging.
661
+ context = "No RAG context retrieved."
662
+ if source.endswith("(RAG)") or source.startswith("Etiqa Takaful Database"):
663
+ if self.context_retriever:
664
+ context = self.context_retriever.retrieve(query)
665
+ else:
666
+ context = "RAG tool was used, but ContextRetriever not initialized."
667
+ elif "Web" in source:
668
+ context = "Web search results were used. See 'Observation' in thoughts log."
669
+
670
+ validation = self.answer_validator.validate(query, final_answer, source=source)
671
+ # END TIMING
672
+ response_time = self.metrics_tracker.end_query(start_time)
673
+
674
+ complexity_info = getattr(self, '_last_complexity_analysis', None)
675
+
676
+ # LOG METRICS
677
+ self.metrics_tracker.log_query(
678
+ query=query,
679
+ domain=self.domain,
680
+ source=source,
681
+ complexity=complexity_info,
682
+ validation=validation,
683
+ response_time=response_time,
684
+ answer_preview=final_answer
685
+ )
686
+ return {"answer": final_answer, "context": context, "validation": validation, "source": source, "thoughts": thoughts,"response_time": response_time,
687
+ "complexity": complexity_info}
688
+
689
+ class AnswerValidatorAgent:
690
+ def __init__(self, llm, domain="general"):
691
+ self.llm = llm
692
+ self.domain = domain
693
+ self.general_prompt = ChatPromptTemplate.from_messages([
694
+ ("system", (
695
+ "You are an answer validator. Check if the generated answer is factually correct "
696
+ "and relevant to the query. Return 'Valid' if the answer is correct and relevant, "
697
+ "or 'Invalid: [reason]' if not, where [reason] is a brief explanation of the issue."
698
+ )),
699
+ ("human", "Query: {query}\nAnswer: {answer}")
700
+ ])
701
+ self.medical_prompt = ChatPromptTemplate.from_messages([
702
+ ("system", (
703
+ "You are an answer validator. Check if the generated answer is factually correct, "
704
+ "relevant to the query, and consistent with known medical knowledge. "
705
+ "Return 'Valid' if the answer is correct and relevant, or 'Invalid: [reason]' if not, "
706
+ "where [reason] is a brief explanation of the issue. "
707
+ "**Pay close attention to contradictions.** If an answer gives advice and then "
708
+ "contradicts it (e.g., 'switch immediately' and then 'always consult your doctor first'), "
709
+ "it is **Invalid** because it is unsafe and confusing."
710
+ )),
711
+ ("human", "Query: {query}\nAnswer: {answer}")
712
+ ])
713
+ self.islamic_prompt = ChatPromptTemplate.from_messages([
714
+ ("system", (
715
+ "You are an answer validator for Islamic Fiqh or anything related to Islam. Check if the answer correctly addresses "
716
+ "the query based on the provided sources. The answer should be neutral and present "
717
+ "the required perspectives (e.g., Shafi'i and Maliki) if available. "
718
+ "Return 'Valid' if the answer is correct and relevant, or 'Invalid: [reason]' if not."
719
+ )),
720
+ ("human", "Query: {query}\nAnswer: {answer}")
721
+ ])
722
+
723
+ def validate(self, query, answer, source="RAG"):
724
+ if self.domain == "insurance":
725
+ logger.info("Skipping validation for insurance domain.")
726
+ return True, "Validation skipped for insurance domain."
727
+
728
+ try:
729
+ # --- 11. IMPROVED VALIDATOR LOGIC ---
730
+ # Choose the right prompt based on domain and source
731
+ prompt = self.general_prompt # Default
732
+ if source == "RAG" or "Database" in source:
733
+ if self.domain == "medical":
734
+ prompt = self.medical_prompt
735
+ elif self.domain == "islamic":
736
+ prompt = self.islamic_prompt
737
+
738
+ response = self.llm.invoke(prompt.format(query=query, answer=answer))
739
+ validation = response.content.strip()
740
+ logger.info(f"AnswerValidator result for query '{query}': {validation}")
741
+
742
+ if validation.lower().startswith("valid"):
743
+ return True, "Answer is valid and relevant."
744
+ elif validation.lower().startswith("invalid"):
745
+ reason = validation.split(":", 1)[1].strip() if ":" in validation else "No reason provided."
746
+ return False, reason
747
+ else:
748
+ return False, "Validation response format unexpected."
749
+ except Exception as e:
750
+ logger.error(f"AnswerValidator error: {str(e)}")
751
+ return False, "Validation failed due to error."
docker ADDED
@@ -0,0 +1,34 @@
1
+ # Use Python 3.10 (Stable for LangChain & Flashrank)
2
+ FROM python:3.10-slim
3
+
4
+ # Set the working directory to /app
5
+ WORKDIR /app
6
+
7
+ # Install system dependencies (needed for compiling packages like flashrank)
8
+ RUN apt-get update && apt-get install -y \
9
+ build-essential \
10
+ && rm -rf /var/lib/apt/lists/*
11
+
12
+ # Copy requirements first to cache dependencies
13
+ COPY requirements.txt .
14
+
15
+ # Install Python dependencies
16
+ # --no-cache-dir keeps the image size smaller
17
+ RUN pip install --no-cache-dir -r requirements.txt
18
+
19
+ # Copy all your code (app_1.py, src/, templates/, api.py) into the container
20
+ COPY . .
21
+
22
+ # Create writable directories for the database and file uploads
23
+ # The 'chmod -R 777' gives the container permission to write here
24
+ RUN mkdir -p /app/chroma_db && chmod -R 777 /app/chroma_db
25
+ RUN mkdir -p /app/Uploads && chmod -R 777 /app/Uploads
26
+
27
+ # Tell Docker that the container will listen on port 7860
28
+ EXPOSE 7860
29
+
30
+ # --- The Command to Run Your App ---
31
+ # We use Gunicorn, a production-grade server
32
+ # It looks for the 'app_1.py' file and the 'app' object inside it
33
+ # --timeout 300 gives your RAG system 5 minutes to respond before timing out
34
+ CMD ["gunicorn", "-b", "0.0.0.0:7860", "app_1:app", "--timeout", "300"]
medical_swarm.py ADDED
@@ -0,0 +1,149 @@
1
+ import logging
2
+ from dotenv import load_dotenv
3
+ from langchain_google_genai import ChatGoogleGenerativeAI
4
+ import traceback
5
+ import os
6
+
7
+ #1. SETUP
8
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
9
+ logger = logging.getLogger(__name__)
10
+
11
+ # Load env
12
+ load_dotenv()
13
+ google_api_key = os.getenv("GOOGLE_API_KEY")
14
+ if not google_api_key:
15
+ raise ValueError("GOOGLE_API_KEY not found in environment variables. Make sure it is set in the .env file.")
16
+
17
+ # Agent Definition
18
+ NO_SUGGESTIONS_INSTRUCTION = (
19
+ "This is a finalized medical report. Do not provide suggestions or improvements. "
20
+ "Focus strictly on your assigned task based on the provided data."
21
+ )
22
+ class MedicalAgent:
23
+ def __init__(self, llm, name: str, role_prompt: str):
24
+ self.llm = llm
25
+ self.name = name
26
+ self.role_prompt = role_prompt
27
+
28
+ def run(self, input_data: str):
29
+
30
+ try:
31
+ logger.info(f"Agent '{self.name}' is processing")
32
+ full_prompt = f"{NO_SUGGESTIONS_INSTRUCTION}\n\n{self.role_prompt}\n\n{input_data}"
33
+ response = self.llm.invoke(full_prompt)
34
+ logger.info(f"Agent {self.name} finished")
35
+ return response.content
36
+ except Exception as e:
37
+ logger.error(f"Agent {self.name} error: {str(e)}")
38
+ traceback.print_exc()
39
+ return f"Error in agent {self.name}: {str(e)}"
40
+
41
+ # Initialize SWARM
42
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-pro", temperature=0.1, google_api_key=google_api_key)
43
+
44
+ #Define specific roles for each agent in the team
45
+ medical_data_extractor = MedicalAgent(
46
+ llm, "Medical Data Extractor",
47
+ "You are a specialized medical data extraction expert. Your role is to extract relevant medical information, focusing on key clinical indicators, test results, vital signs, and patient history from the provided text."
48
+ )
49
+ diagnostic_specialist = MedicalAgent(
50
+ llm, "Diagnostic Specialist",
51
+ "You are a senior diagnostic physician. Your role is to analyze the provided symptoms, lab results, and clinical findings to develop a diagnostic assessment based solely on the data."
52
+ )
53
+ treatment_planner = MedicalAgent(
54
+ llm, "Treatment Planner",
55
+ "You are an experienced clinical treatment specialist. Your role is to outline a prescribed treatment plan (pharmacological and non-pharmacological interventions) based on the provided diagnosis and data."
56
+ )
57
+ specialist_consultant = MedicalAgent(
58
+ llm, "Specialist Consultant",
59
+ "You are a medical specialist consultant. Your role is to provide deep insights on the existing diagnosis and treatment plan, highlighting any potential complications or considerations based on the record."
60
+ )
61
+ patient_care_coordinator = MedicalAgent(
62
+ llm, "Patient Care Coordinator (Orchestrator)",
63
+ "You are a patient care coordinator specializing in comprehensive healthcare management. Your primary role is to manage a team of specialist agents and synthesize their findings."
64
+ )
65
+
66
+ # ORCHESTRATOR LOGIC
67
+ def run_medical_swarm(document_text: str, initial_query: str, chat_history: list = None):
68
+ """
69
+ Orchestrates a swarm of medical agents to analyze a medical document.
70
+
71
+ Args:
72
+ document_text: The full text of the medical record
73
+ initial_query: The initial question or goal of the analysis
74
+ chat_history: Optional list of prior conversation turns, used to seed the shared workspace
75
+ Returns:
76
+ A string containing the final, synthesized response
77
+ """
78
+ logger.info("--- MEDICAL SWARM INITIATED ---")
79
+
80
+ # If there's a history, use it as the starting point. Otherwise, start fresh.
81
+ workspace = ([str(m) for m in chat_history] if chat_history else []) + [f"Initial Patient Document:\n{document_text}", f"Initial Goal: '{initial_query}'"]
82
+
83
+ # The Patient Care Coordinator is our orchestrator/manager
84
+ orchestrator = patient_care_coordinator
85
+
86
+ # Limit the number of collaboration rounds to prevent infinite loops
87
+ for i in range(5):
88
+ logger.info(f"\n-- Swarm Iteration {i+1} --")
89
+ current_state = "\n\n".join(workspace)
90
+
91
+ # The orchestrator reviews the current state and decides the next action.
92
+ orchestrator_prompt = f"""
93
+ You are the Patient Care Coordinator managing a team of specialist AI agents to analyze a medical case.
94
+ Review the current Case File below and decide the single next best action.
95
+
96
+ Your available specialists are:
97
+ - 'medical_data_extractor': Best for the first step to get raw data from the initial document.
98
+ - 'diagnostic_specialist': Best for forming a diagnosis after key data has been extracted.
99
+ - 'treatment_planner': Best for creating a plan after a clear diagnosis is available.
100
+ - 'specialist_consultant': Best for getting deeper insight on an existing diagnosis or plan.
101
+
102
+ Case File (Summary of work so far):
103
+ ---
104
+ {current_state}
105
+ ---
106
+
107
+ Based on the file, which specialist should be called next? Or, if you have a clear diagnosis, treatment plan, and specialist insight, is it time to write the final summary for the patient?
108
+
109
+ Respond with ONLY one of the following commands:
110
+ - "CALL: medical_data_extractor"
111
+ - "CALL: diagnostic_specialist"
112
+ - "CALL: treatment_planner"
113
+ - "CALL: specialist_consultant"
114
+ - "FINISH"
115
+ """
116
+ command = orchestrator.run(orchestrator_prompt).strip().upper()
117
+ logger.info(f"Orchestrator Command: {command}")
118
+
119
+ if command == "CALL: MEDICAL_DATA_EXTRACTOR":
120
+ report= medical_data_extractor.run(f"Original Document:\n{document_text}")
121
+ workspace.append(f"--- Extractor's Report ---\n{report}")
122
+
123
+ elif command == "CALL: DIAGNOSTIC_SPECIALIST":
124
+ # Other agents get the whole workspace for context
125
+ report = diagnostic_specialist.run(f"Full Case File for Diagnosis:\n{current_state}")
126
+ workspace.append(f"--- Diagnostician's Report ---\n{report}")
127
+
128
+ elif command == "CALL: TREATMENT_PLANNER":
129
+ report = treatment_planner.run(f"Full Case File for Treatment Plan:\n{current_state}")
130
+ workspace.append(f"--- Treatment Planner's Report ---\n{report}")
131
+
132
+ elif command == "CALL: SPECIALIST_CONSULTANT":
133
+ report = specialist_consultant.run(f"Full Case File for Specialist Consultation:\n{current_state}")
134
+ workspace.append(f"--- Specialist Consultant's Report ---\n{report}")
135
+
136
+ elif command == "FINISH":
137
+ logger.info("Orchestrator has decided the work is complete. Generating final summary.")
138
+ final_summary_prompt = f"You are the Patient Care Coordinator. Based on the complete case file below, write a comprehensive, patient-facing summary that coordinates all the findings.\n\nFull Case File:\n{current_state}"
139
+ final_answer = orchestrator.run(final_summary_prompt)
140
+ return final_answer
141
+ else:
142
+ logger.warning(f"Orchestrator gave an unknown command: '{command}'. Ending swarm.")
143
+ break
144
+
145
+ # Fallback if the loop finishes without a "FINISH" command
146
+ logger.warning("Swarm reached max iterations. Finalizing with current data.")
147
+ final_fallback_prompt = f"You are the Patient Care Coordinator. The analysis time has expired. Summarize the findings from the case file below into a cohesive patient-facing report.\n\nFull Case File:\n{current_state}"
148
+ final_answer = orchestrator.run(final_fallback_prompt)
149
+ return final_answer
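A quick usage sketch for the swarm above (the record text and query are fabricated for illustration; the actual output depends on the live Gemini calls):

    sample_record = (
        "Patient: 54-year-old male. BP 165/100 mmHg, HbA1c 8.2%. "
        "Reports chest tightness on exertion for two weeks."
    )
    summary = run_medical_swarm(
        document_text=sample_record,
        initial_query="Summarize this record and outline the recommended next steps.",
    )
    print(summary)  # patient-facing summary synthesized by the coordinator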
metrics_tracker.py ADDED
@@ -0,0 +1,206 @@
1
+ import time
2
+ from datetime import datetime
3
+ from collections import defaultdict
4
+ import json
5
+ import os
6
+
7
+ class MetricsTracker:
8
+ """
9
+ Tracks system performance metrics across queries.
10
+ """
11
+
12
+ def __init__(self, save_path="metrics_data.json"):
13
+ self.save_path = save_path
14
+ self.metrics = {
15
+ "total_queries": 0,
16
+ "rag_success": 0,
17
+ "web_search_fallback": 0,
18
+ "trusted_search_used": 0,
19
+ "general_search_used": 0,
20
+ "complexity_distribution": {
21
+ "simple": 0,
22
+ "moderate": 0,
23
+ "complex": 0
24
+ },
25
+ "domain_usage": {
26
+ "medical": 0,
27
+ "islamic": 0,
28
+ "insurance": 0
29
+ },
30
+ "response_times": [],
31
+ "worker_contributions": {
32
+ "dense_semantic": 0,
33
+ "bm25_keyword": 0
34
+ },
35
+ "validation_stats": {
36
+ "valid": 0,
37
+ "invalid": 0,
38
+ "skipped": 0
39
+ },
40
+ "query_history": [] # Store recent queries for analysis
41
+ }
42
+
43
+ # Load existing metrics if available
44
+ self.load_metrics()
45
+
46
+ def start_query(self):
47
+ """Start timing a query."""
48
+ return time.time()
49
+
50
+ def end_query(self, start_time):
51
+ """Calculate and store query response time."""
52
+ response_time = time.time() - start_time
53
+ self.metrics["response_times"].append(response_time)
54
+ return response_time
55
+
56
+ def log_query(self, query, domain, source, complexity=None,
57
+ validation=None, response_time=None, answer_preview=None):
58
+ """
59
+ Log a complete query with all its metadata.
60
+
61
+ Args:
62
+ query (str): User's query
63
+ domain (str): Domain (medical, islamic, insurance)
64
+ source (str): Where answer came from (RAG, WebSearch, etc.)
65
+ complexity (dict): Complexity analysis result
66
+ validation (tuple): (is_valid, reason)
67
+ response_time (float): Time taken in seconds
68
+ answer_preview (str): First 100 chars of answer
69
+ """
70
+ self.metrics["total_queries"] += 1
71
+
72
+ # Track domain usage
73
+ if domain in self.metrics["domain_usage"]:
74
+ self.metrics["domain_usage"][domain] += 1
75
+
76
+ # Track source usage
77
+ if "RAG" in source or "Database" in source:
78
+ self.metrics["rag_success"] += 1
79
+ elif "Trusted" in source:
80
+ self.metrics["trusted_search_used"] += 1
81
+ self.metrics["web_search_fallback"] += 1
82
+ elif "Etiqa" in source:
83
+ self.metrics["web_search_fallback"] += 1
84
+ elif "Web" in source or "Search" in source:
85
+ self.metrics["general_search_used"] += 1
86
+ self.metrics["web_search_fallback"] += 1
87
+
88
+ # Track complexity distribution
89
+ if complexity and "complexity" in complexity:
90
+ comp_level = complexity["complexity"]
91
+ if comp_level in self.metrics["complexity_distribution"]:
92
+ self.metrics["complexity_distribution"][comp_level] += 1
93
+
94
+ # Track validation
95
+ if validation:
96
+ is_valid, reason = validation
97
+ if "skip" in reason.lower():
98
+ self.metrics["validation_stats"]["skipped"] += 1
99
+ elif is_valid:
100
+ self.metrics["validation_stats"]["valid"] += 1
101
+ else:
102
+ self.metrics["validation_stats"]["invalid"] += 1
103
+
104
+ # Store query history (last 50 queries)
105
+ query_record = {
106
+ "timestamp": datetime.now().isoformat(),
107
+ "query": query[:100], # Truncate long queries
108
+ "domain": domain,
109
+ "source": source,
110
+ "complexity": complexity.get("complexity") if complexity else None,
111
+ "k_used": complexity.get("k") if complexity else None,
112
+ "response_time": round(response_time, 2) if response_time else None,
113
+ "validated": is_valid if validation else None,
114
+ "answer_preview": answer_preview[:100] if answer_preview else None
115
+ }
116
+
117
+ self.metrics["query_history"].append(query_record)
118
+
119
+ # Keep only last 50 queries
120
+ if len(self.metrics["query_history"]) > 50:
121
+ self.metrics["query_history"] = self.metrics["query_history"][-50:]
122
+
123
+ # Auto-save after each query
124
+ self.save_metrics()
125
+
126
+ def log_worker_contribution(self, worker_stats):
127
+ """
128
+ Log which swarm workers contributed to the final answer.
129
+
130
+ Args:
131
+ worker_stats (dict): e.g., {"dense_semantic": 5, "bm25_keyword": 3}
132
+ """
133
+ for worker, count in worker_stats.items():
134
+ if worker in self.metrics["worker_contributions"]:
135
+ self.metrics["worker_contributions"][worker] += count
136
+
137
+ def get_stats(self):
138
+ """Get current statistics."""
139
+ total = self.metrics["total_queries"]
140
+
141
+ if total == 0:
142
+ return {
143
+ "total_queries": 0,
144
+ "rag_success_rate": 0,
145
+ "web_search_rate": 0,
146
+ "avg_response_time": 0,
147
+ "complexity_distribution": self.metrics["complexity_distribution"],
148
+ "domain_usage": self.metrics["domain_usage"]
149
+ }
150
+
151
+ # Calculate averages and percentages
152
+ avg_response_time = (
153
+ sum(self.metrics["response_times"]) / len(self.metrics["response_times"])
154
+ if self.metrics["response_times"] else 0
155
+ )
156
+
157
+ stats = {
158
+ "total_queries": total,
159
+ "rag_success_rate": round((self.metrics["rag_success"] / total) * 100, 1),
160
+ "web_search_rate": round((self.metrics["web_search_fallback"] / total) * 100, 1),
161
+ "trusted_search_rate": round((self.metrics["trusted_search_used"] / total) * 100, 1),
162
+ "general_search_rate": round((self.metrics["general_search_used"] / total) * 100, 1),
163
+ "avg_response_time": round(avg_response_time, 2),
164
+ "median_response_time": self._get_median(self.metrics["response_times"]),
165
+ "complexity_distribution": self.metrics["complexity_distribution"],
166
+ "domain_usage": self.metrics["domain_usage"],
167
+ "worker_contributions": self.metrics["worker_contributions"],
168
+ "validation_stats": self.metrics["validation_stats"],
169
+ "recent_queries": self.metrics["query_history"][-10:] # Last 10 queries
170
+ }
171
+
172
+ return stats
173
+
174
+ def _get_median(self, values):
175
+ """Calculate median of a list."""
176
+ if not values:
177
+ return 0
178
+ sorted_values = sorted(values)
179
+ n = len(sorted_values)
180
+ mid = n // 2
181
+ if n % 2 == 0:
182
+ return round((sorted_values[mid-1] + sorted_values[mid]) / 2, 2)
183
+ return round(sorted_values[mid], 2)
184
+
185
+ def save_metrics(self):
186
+ """Save metrics to JSON file."""
187
+ try:
188
+ with open(self.save_path, 'w') as f:
189
+ json.dump(self.metrics, f, indent=2)
190
+ except Exception as e:
191
+ print(f"Warning: Could not save metrics: {e}")
192
+
193
+ def load_metrics(self):
194
+ """Load metrics from JSON file if it exists."""
195
+ if os.path.exists(self.save_path):
196
+ try:
197
+ with open(self.save_path, 'r') as f:
198
+ self.metrics = json.load(f)
199
+ print(f"✅ Loaded existing metrics from {self.save_path}")
200
+ except Exception as e:
201
+ print(f"Warning: Could not load metrics: {e}")
202
+
203
+ def reset_metrics(self):
204
+ """Reset all metrics (useful for testing)."""
205
+ self.__init__(self.save_path)
206
+ self.save_metrics()
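A short usage sketch for the tracker (the query text and field values are illustrative; the complexity and validation shapes follow log_query above):

    tracker = MetricsTracker(save_path="metrics_data.json")
    start = tracker.start_query()
    # ... run the RAG pipeline for one query here ...
    elapsed = tracker.end_query(start)
    tracker.log_query(
        query="What are the common side effects of metformin?",
        domain="medical",
        source="RAG",
        complexity={"complexity": "simple", "k": 4},
        validation=(True, "Answer is valid and relevant."),
        response_time=elapsed,
        answer_preview="Common side effects include nausea and...",
    )
    print(tracker.get_stats()["total_queries"])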