# Agentic_Rag4_dep_space / helpers_LOCAL.py
# Utility: Get installed Ollama LLMs as a list (for UI)
def get_installed_llms():
"""Returns a list of locally installed Ollama LLMs (model names)."""
import subprocess
try:
result = subprocess.run(["ollama", "list"], capture_output=True, text=True)
lines = result.stdout.splitlines()
models = []
for line in lines:
if line.strip() and not line.startswith("NAME"):
name = line.split()[0]
models.append(name)
return models
except Exception as e:
print(f"Error listing local LLMs: {e}")
return []
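# Note: the parsing above assumes the tabular output of `ollama list`, which looks
# roughly like the following (example rows; exact columns may vary by Ollama version):
#
#   NAME              ID              SIZE      MODIFIED
#   llama3.1:8b       ...             4.7 GB    2 days ago
#   mistral:latest    ...             4.1 GB    3 weeks ago
#
# Only the first whitespace-delimited token (the model name) is kept, and the
# header row is skipped via the "NAME" prefix check.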
# Utility: Display installed Ollama LLMs in terminal (for CLI use)
def display_installed_llms():
"""Prints a list of locally installed Ollama LLMs to the terminal."""
models = get_installed_llms()
if models:
print("Available local LLMs:")
for m in models:
print(f"- {m}")
else:
print("No local LLMs found.")
"""
Local environment (Ollama) specific helpers for LLM operations.
Contains: Ollama/ChatOllama initialization, agent creation, and response generation.
"""
from helpers_SHARED import CONFIG, IS_HF_SPACE, AGENT_TOOLS
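# Assumed shape of CONFIG (defined in helpers_SHARED, not shown here): the
# functions below only rely on two keys, e.g.
#
#   CONFIG = {
#       "ollama_model": "llama3.1",                     # default local model name
#       "ollama_base_url": "http://localhost:11434",    # default Ollama endpoint
#       ...
#   }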
# ============================================================================
# OLLAMA LLM INITIALIZATION
# ============================================================================
ollama_llm = None
LLM_NAME = None
def init_ollama_llm(model_name=None):
"""Initialize Ollama (ChatOllama) for local LLM. Accepts a model name for dynamic selection."""
global ollama_llm, LLM_NAME
if IS_HF_SPACE:
print("ℹ️ Running on HF Space, skipping Ollama initialization")
return None, None
from langchain_ollama import ChatOllama
if model_name is None:
model_name = CONFIG["ollama_model"]
LLM_NAME = model_name
try:
ollama_llm = ChatOllama(model=model_name, base_url=CONFIG["ollama_base_url"])
print(f"βœ“ Ollama (ChatOllama) initialized successfully with {LLM_NAME}")
return ollama_llm, LLM_NAME
except Exception as e:
print(f"βœ— Warning: Ollama not available: {e}")
ollama_llm = None
return None, LLM_NAME
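# Example usage (local environment only; assumes an Ollama server is reachable
# at CONFIG["ollama_base_url"]):
#
#   llm, name = init_ollama_llm()            # use CONFIG["ollama_model"]
#   llm, name = init_ollama_llm("mistral")   # or select a model dynamically,
#                                            # e.g. one returned by get_installed_llms()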
# ============================================================================
# LANGCHAIN AGENT (LOCAL ONLY)
# ============================================================================
agent_executor = None
def create_langchain_agent():
"""Create a LangGraph ReAct agent with the available tools.
Only works in local environment with Ollama.
Returns None on HF Spaces.
"""
global agent_executor, ollama_llm
if IS_HF_SPACE:
print("ℹ️ HF Space detected - using manual tool routing (HF InferenceClient doesn't support LangChain agents)")
return None
if ollama_llm is None:
print("❌ Ollama LLM not initialized, cannot create agent")
return None
from langgraph.prebuilt import create_react_agent # type: ignore
try:
agent_executor = create_react_agent(
model=ollama_llm,
tools=AGENT_TOOLS,
)
print("βœ… LangGraph ReAct Agent created successfully with Ollama")
return agent_executor
except Exception as e:
print(f"❌ Failed to create LangGraph agent: {e}")
return None
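# Example usage (assumes init_ollama_llm() has already succeeded and that
# AGENT_TOOLS in helpers_SHARED is a list of LangChain tool callables):
#
#   init_ollama_llm()
#   agent = create_langchain_agent()
#   if agent is not None:
#       answer = run_agent("What documents mention revenue?")  # run_agent is defined below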
# ============================================================================
# OLLAMA RESPONSE GENERATION
# ============================================================================
def ollama_generate_response(prompt: str, ollama_instance=None) -> str | None:
    """Generate a response using Ollama.

    Args:
        prompt: The prompt to send to the model
        ollama_instance: Optional ChatOllama instance; uses the global instance if not provided

    Returns:
        Generated response string, or None if generation failed
    """
    llm = ollama_instance or ollama_llm
    if llm is None:
        print("❌ Ollama not available")
        return None
    try:
        print(f"🧠 Generating response with Ollama ({LLM_NAME})...")
        response = llm.invoke(prompt)
        print("✓ Agent response generated.")
        # ChatOllama.invoke returns an AIMessage; return its text content as a plain string
        return response.content if hasattr(response, "content") else str(response)
    except Exception as ollama_error:
        print(f"❌ Ollama error: {ollama_error}")
        return None
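# Example usage (plain prompt completion, no tools; uses the global LLM unless
# an explicit ChatOllama instance is passed in):
#
#   text = ollama_generate_response("Summarize the indexed documents in one sentence.")
#   if text is None:
#       ...  # fall back to another backend or surface an error in the UI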
def run_agent(query: str) -> str | None:
    """Run the LangGraph agent on a query.

    Args:
        query: User query to process

    Returns:
        Agent response string, or None if the agent is unavailable or execution failed
    """
    global agent_executor
    if agent_executor is None:
        return None
    try:
        print(f"🤖 Using LangGraph Agent to process: {query}")
        result = agent_executor.invoke({"messages": [{"role": "user", "content": query}]})
        # Extract the last AI message from the agent's message history
        messages = result.get("messages", [])
        for msg in reversed(messages):
            if hasattr(msg, "content") and msg.type == "ai":
                return msg.content
        return "No response from agent"
    except Exception as e:
        print(f"❌ Agent execution failed: {e}, falling back to manual routing")
        return None
def get_ollama_llm():
    """Get the Ollama LLM instance."""
    return ollama_llm

def get_local_llm_name():
    """Get the local LLM name."""
    return LLM_NAME

def get_agent_executor():
    """Get the agent executor instance."""
    return agent_executor
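if __name__ == "__main__":
    # Minimal local smoke test (a sketch, not part of the deployed Space): list
    # installed models, initialize Ollama, and answer one query, preferring the
    # LangGraph agent and falling back to a direct LLM call if it is unavailable.
    display_installed_llms()
    llm, name = init_ollama_llm()
    if llm is not None:
        create_langchain_agent()
        question = "What can you tell me about the indexed documents?"
        answer = run_agent(question) or ollama_generate_response(question)
        print(answer or "No response generated.")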