WanIrfan committed on
Commit
4771fc5
·
verified ·
1 Parent(s): bee9522

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -10
app.py CHANGED
@@ -113,6 +113,27 @@ for domain, system in rag_systems.items():
113
  status = "✅ Ready" if system else "❌ Failed (DB missing?)"
114
  logger.info(f" {domain}: {status}")
115
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
  # --- FLASK ROUTES ---
118
 
@@ -149,9 +170,12 @@ def medical_page():
149
  validation=validation,
150
  source=source)
151
 
152
- # POST Request Logic
153
  answer, thoughts, validation, source = "", "", "", ""
154
- history = session.get('medical_history', [])
 
 
 
155
  current_medical_document = session.get('current_medical_document', "")
156
 
157
 
@@ -180,11 +204,11 @@ def medical_page():
180
  swarm_answer = run_medical_swarm(current_medical_document, query)
181
  answer = markdown_bold_to_html(swarm_answer)
182
 
183
- history.append(HumanMessage(content=f"[Document Uploaded] Query: '{query}'"))
184
- history.append(AIMessage(content=swarm_answer))
185
  thoughts = "Swarm analysis complete. The process is orchestrated and does not use the ReAct thought process. You can now ask follow-up questions."
186
  source= "Medical Swarm"
187
  validation = (True, "Swarm output generated.") # Swarm has its own validation logic
 
 
188
 
189
  elif has_image :
190
  #Scenario 1
@@ -224,8 +248,8 @@ def medical_page():
224
  if not agent: raise Exception("Medical RAG system is not loaded.")
225
  response_dict = agent.answer(enhanced_query, chat_history=history)
226
  answer, thoughts, validation, source = parse_agent_response(response_dict)
227
- history.append(HumanMessage(content=query))
228
- history.append(AIMessage(content=answer))
229
 
230
  finally:
231
  if os.path.exists(image_path):
@@ -255,8 +279,8 @@ def medical_page():
255
  response_dict = agent.answer(standalone_query, chat_history=history_for_agent)
256
  answer, thoughts, validation, source = parse_agent_response(response_dict)
257
 
258
- history.append(HumanMessage(content=query))
259
- history.append(AIMessage(content=answer))
260
 
261
  else:
262
  raise ValueError("No query or file provided.")
@@ -265,8 +289,9 @@ def medical_page():
265
  answer = f"An error occurred: {e}"
266
  thoughts = traceback.format_exc()
267
 
268
- # Save updated history and LATEST RESPONSE DATA back to the session
269
- session['medical_history'] = history
 
270
  session['latest_medical_response'] = {
271
  'answer': answer,
272
  'thoughts': thoughts,
 
113
  status = "✅ Ready" if system else "❌ Failed (DB missing?)"
114
  logger.info(f" {domain}: {status}")
115
 
116
def hydrate_history(raw_history_list: list) -> list:
    """Rebuild LangChain message objects from the plain dicts stored in the session.

    Each dict is expected to carry a 'type' key ('human' or 'ai') and a
    'content' key; entries with any other type are silently skipped, and a
    missing content defaults to an empty string.
    """
    # Map the serialized role tag to the matching LangChain message class.
    role_to_cls = {'human': HumanMessage, 'ai': AIMessage}
    messages = []
    for entry in raw_history_list or []:  # tolerate None / empty session value
        cls = role_to_cls.get(entry.get('type'))
        if cls is not None:
            messages.append(cls(content=entry.get('content', '')))
    return messages
127
+
128
def dehydrate_history(history_messages: list) -> list:
    """Flatten LangChain message objects into JSON-serializable dicts.

    Produces [{'type': 'human'|'ai', 'content': str}, ...] suitable for
    storing in the Flask session. Messages that are neither HumanMessage
    nor AIMessage are not persisted.
    """
    serialized = []
    for message in history_messages:
        if isinstance(message, HumanMessage):
            role = 'human'
        elif isinstance(message, AIMessage):
            role = 'ai'
        else:
            continue  # unknown message types are dropped, as in the loop above
        serialized.append({'type': role, 'content': message.content})
    return serialized
137
 
138
  # --- FLASK ROUTES ---
139
 
 
170
  validation=validation,
171
  source=source)
172
 
173
+ # POST Request
174
  answer, thoughts, validation, source = "", "", "", ""
175
+ # 1. Get raw history (list of dicts) from session
176
+ raw_history_list = session.get('medical_history', [])
177
+ # 2. Hydrate it for the agent
178
+ history_for_agent = hydrate_history(raw_history_list)
179
  current_medical_document = session.get('current_medical_document', "")
180
 
181
 
 
204
  swarm_answer = run_medical_swarm(current_medical_document, query)
205
  answer = markdown_bold_to_html(swarm_answer)
206
 
 
 
207
  thoughts = "Swarm analysis complete. The process is orchestrated and does not use the ReAct thought process. You can now ask follow-up questions."
208
  source= "Medical Swarm"
209
  validation = (True, "Swarm output generated.") # Swarm has its own validation logic
210
+ history_for_agent.append(HumanMessage(content=f"[Document Uploaded] Query: '{query}'"))
211
+ history_for_agent.append(AIMessage(content=answer))
212
 
213
  elif has_image :
214
  #Scenario 1
 
248
  if not agent: raise Exception("Medical RAG system is not loaded.")
249
  response_dict = agent.answer(enhanced_query, chat_history=history)
250
  answer, thoughts, validation, source = parse_agent_response(response_dict)
251
+ history_for_agent.append(HumanMessage(content=query))
252
+ history_for_agent.append(AIMessage(content=answer))
253
 
254
  finally:
255
  if os.path.exists(image_path):
 
279
  response_dict = agent.answer(standalone_query, chat_history=history_for_agent)
280
  answer, thoughts, validation, source = parse_agent_response(response_dict)
281
 
282
+ history_for_agent.append(HumanMessage(content=query))
283
+ history_for_agent.append(AIMessage(content=answer))
284
 
285
  else:
286
  raise ValueError("No query or file provided.")
 
289
  answer = f"An error occurred: {e}"
290
  thoughts = traceback.format_exc()
291
 
292
+ # 4. DEHYDRATE the full history back into dicts for session saving
293
+ session['medical_history'] = dehydrate_history(history_for_agent)
294
+ # This (latest_response) is ALREADY JSON-serializable, so it's fine.
295
  session['latest_medical_response'] = {
296
  'answer': answer,
297
  'thoughts': thoughts,