"""
Local environment (Ollama) specific helpers for LLM operations.
Contains: Ollama/ChatOllama initialization, agent creation, response generation,
and utilities for listing locally installed models.
"""

import subprocess
from typing import Optional

from helpers_SHARED import CONFIG, IS_HF_SPACE, AGENT_TOOLS

# ============================================================================
# LOCAL MODEL DISCOVERY
# ============================================================================

# Utility: Get installed Ollama LLMs as a list (for UI)
def get_installed_llms():
    """Returns a list of locally installed Ollama LLMs (model names)."""
    try:
        result = subprocess.run(["ollama", "list"], capture_output=True, text=True)
        # `ollama list` prints a table with a header row (NAME, ID, SIZE,
        # MODIFIED); the model name is the first whitespace-delimited column.
        models = []
        for line in result.stdout.splitlines():
            if line.strip() and not line.startswith("NAME"):
                models.append(line.split()[0])
        return models
    except Exception as e:
        print(f"Error listing local LLMs: {e}")
        return []

# Utility: Display installed Ollama LLMs in terminal (for CLI use)
def display_installed_llms():
    """Prints a list of locally installed Ollama LLMs to the terminal."""
    models = get_installed_llms()
    if models:
        print("Available local LLMs:")
        for m in models:
            print(f"- {m}")
    else:
        print("No local LLMs found.")
"""
Local environment (Ollama) specific helpers for LLM operations.
Contains: Ollama/ChatOllama initialization, agent creation, and response generation.
"""

from helpers_SHARED import CONFIG, IS_HF_SPACE, AGENT_TOOLS

# ============================================================================
# OLLAMA LLM INITIALIZATION
# ============================================================================

ollama_llm = None
LLM_NAME = None

def init_ollama_llm(model_name=None):
    """Initialize Ollama (ChatOllama) for local LLM. Accepts a model name for dynamic selection."""
    global ollama_llm, LLM_NAME
    
    if IS_HF_SPACE:
        print("ℹ️ Running on HF Space, skipping Ollama initialization")
        return None, None
    
    from langchain_ollama import ChatOllama
    
    if model_name is None:
        model_name = CONFIG["ollama_model"]
    LLM_NAME = model_name
    try:
        ollama_llm = ChatOllama(model=model_name, base_url=CONFIG["ollama_base_url"])
        print(f"βœ“ Ollama (ChatOllama) initialized successfully with {LLM_NAME}")
        return ollama_llm, LLM_NAME
    except Exception as e:
        print(f"βœ— Warning: Ollama not available: {e}")
        ollama_llm = None
        return None, LLM_NAME

# ============================================================================
# LANGCHAIN AGENT (LOCAL ONLY)
# ============================================================================

agent_executor = None

def create_langchain_agent():
    """Create a LangGraph ReAct agent with the available tools.
    
    Only works in local environment with Ollama.
    Returns None on HF Spaces.
    """
    global agent_executor, ollama_llm
    
    if IS_HF_SPACE:
        print("ℹ️ HF Space detected - using manual tool routing (HF InferenceClient doesn't support LangChain agents)")
        return None
    
    if ollama_llm is None:
        print("❌ Ollama LLM not initialized, cannot create agent")
        return None
    
    from langgraph.prebuilt import create_react_agent  # type: ignore
    
    try:
        agent_executor = create_react_agent(
            model=ollama_llm,
            tools=AGENT_TOOLS,
        )
        print("βœ… LangGraph ReAct Agent created successfully with Ollama")
        return agent_executor
    except Exception as e:
        print(f"❌ Failed to create LangGraph agent: {e}")
        return None

# ============================================================================
# OLLAMA RESPONSE GENERATION
# ============================================================================

def ollama_generate_response(prompt: str, ollama_instance=None) -> Optional[str]:
    """Generate a response using Ollama.
    
    Args:
        prompt: The prompt to send to the model
        ollama_instance: Optional Ollama instance; falls back to the module-global instance if omitted
    
    Returns:
        Generated response string or None if failed
    """
    llm = ollama_instance or ollama_llm
    
    if llm is None:
        print("❌ Ollama not available")
        return None
    
    try:
        print(f"🧠 Generating response with Ollama ({LLM_NAME})...")
        response = llm.invoke(prompt)
        print(f"βœ“ Agent response generated.")
        return response
    except Exception as ollama_error:
        print(f"❌ Ollama error: {str(ollama_error)}")
        return None

def run_agent(query: str) -> Optional[str]:
    """Run the LangGraph agent on a query.
    
    Args:
        query: User query to process
    
    Returns:
        Agent response string or None if agent unavailable
    """
    global agent_executor
    
    if agent_executor is None:
        return None
    
    try:
        print(f"πŸ€– Using LangGraph Agent to process: {query}")
        result = agent_executor.invoke({"messages": [{"role": "user", "content": query}]})
        # Extract the last AI message
        messages = result.get("messages", [])
        for msg in reversed(messages):
            if hasattr(msg, 'content') and msg.type == "ai":
                return msg.content
        return "No response from agent"
    except Exception as e:
        print(f"❌ Agent execution failed: {e}, falling back to manual routing")
        return None

def get_ollama_llm():
    """Get the Ollama LLM instance."""
    return ollama_llm

def get_local_llm_name():
    """Get the local LLM name."""
    return LLM_NAME

def get_agent_executor():
    """Get the agent executor instance."""
    return agent_executor
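
# ============================================================================
# EXAMPLE USAGE (SMOKE TEST)
# ============================================================================
# A minimal usage sketch, not required by the rest of the module. It assumes
# an Ollama server is reachable at CONFIG["ollama_base_url"] and that
# helpers_SHARED supplies CONFIG, IS_HF_SPACE, and AGENT_TOOLS as imported
# above; on HF Spaces the initialization and agent calls simply return None.

if __name__ == "__main__":
    display_installed_llms()

    llm, _ = init_ollama_llm()
    if llm is not None:
        # Plain single-shot generation, no tools involved.
        print(ollama_generate_response("Say hello in one short sentence."))

        # Agent path; run_agent returns None if execution fails.
        if create_langchain_agent() is not None:
            answer = run_agent("What tools do you have access to?")
            print(answer or "Agent produced no answer; fall back to manual routing.")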