# AI_Doctors/backend/chat_router.py
# -------------------------------
# πŸ‘‡ HuggingFace OpenAI-Compatible Client
# -------------------------------
from openai import OpenAI
import os
from utils.constants import (
ROUTER_CHAT_URL,
ROUTER_MODEL,
REQUEST_TIMEOUT_SECONDS_DEFAULT,
)
from utils.persona import AI_GYNO_PERSONA_V3
# Force correct model name.
# NOTE(review): this deliberately shadows the ROUTER_MODEL imported from
# utils.constants above — the constants value is ignored from here on.
ROUTER_MODEL = "meta-llama/Llama-3.1-8B-Instruct"
# HF Token — accept either env var name; may be None if neither is set
# (chat() checks for this and returns a warning string instead of calling out).
token = os.getenv("HF_API_TOKEN") or os.getenv("HF_TOKEN")
# HF Router client: OpenAI-compatible endpoint hosted by HuggingFace.
# Constructed at import time; an invalid/missing token only fails at request time.
client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=token,
)
def chat(user_message: str, mode: str = "patient") -> str:
    """
    Send a single-turn chat request through the HuggingFace Router
    (OpenAI-compatible API) and return the assistant's reply.

    Parameters
    ----------
    user_message : str
        The end-user's message.
    mode : str
        "patient" (default) selects simple, reassuring language; any other
        value (e.g. "doctor") selects concise clinical phrasing with
        differentials and next steps.

    Returns
    -------
    str
        The model's reply text, or a human-readable warning/error string.
        This function never raises; failures are returned as strings so the
        caller can display them directly.
    """
    if not token:
        return "⚠ Set HF_API_TOKEN or HF_TOKEN in your environment."
    # Patient-friendly language vs clinical doctor mode.
    style = (
        "Use simple, reassuring language."
        if mode == "patient"
        else "Use concise clinical phrasing with differentials and next steps."
    )
    system_prompt = AI_GYNO_PERSONA_V3 + f"\nMode: {mode}. {style}"
    try:
        completion = client.chat.completions.create(
            model=ROUTER_MODEL,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_message},
            ],
            max_tokens=400,
            temperature=0.2,
            timeout=REQUEST_TIMEOUT_SECONDS_DEFAULT,
        )
        # message.content is Optional[str] in the OpenAI SDK; calling
        # .strip() on None would raise AttributeError and be mislabeled
        # below as an API error, so guard it explicitly.
        content = completion.choices[0].message.content
        return content.strip() if content else ""
    except Exception as e:
        # Broad catch is intentional: surface any SDK/network failure to
        # the UI as a string rather than crashing the request handler.
        return f"❌ Error: {str(e)}"