"""
Gradio interface for the smolagents CodeAgent.
By default this script will NOT launch a local Gradio server. To allow
local runs (for testing) set the environment variable `RUN_LOCAL=1` or pass
`--run-local` on the command line. This prevents accidental local launches
when you intended to deploy to Hugging Face Spaces.
"""
import logging
from typing import List, Dict, Any

from code_agent import run_agent

# (Gradio update/analytics env flags removed per user request)

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

def respond(prompt: str) -> str:
    """Run the agent on a single prompt and return its answer (or an error message)."""
    if not prompt or not prompt.strip():
        return "Please provide a prompt."
    try:
        return run_agent(prompt)
    except Exception as e:
        logger.error("Agent failed: %s", e)
        return f"Agent error: {type(e).__name__}: {str(e)[:200]}"

def extract_prompt_from_question(q: Dict[str, Any]) -> str:
    """Extract the actual question/prompt from a question dict."""
    for key in ("question", "prompt", "input", "text", "task"):
        if key in q and isinstance(q[key], str):
            return q[key]
    return str(q)

def fetch_all_questions() -> List[Dict[str, Any]]:
    """Fetch all questions from the scoring API."""
    try:
        from evaluation_client import ScoringAPIClient
        client = ScoringAPIClient()
        questions = client.get_questions()
        return questions if questions else []
    except Exception as e:
        logger.error("Failed to fetch questions: %s", e)
        return []

def answer_all_questions(questions: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Answer all questions and return answers in submission format.

    Args:
        questions: List of question dicts from the scoring API.

    Returns:
        List of dicts with keys: task_id, submitted_answer.
    """
    answers = []
    total = len(questions)
    for idx, q in enumerate(questions, start=1):
        task_id = q.get("task_id") or q.get("id") or q.get("taskId")
        if not task_id:
            logger.warning("Question %d/%d: Missing task_id, skipping", idx, total)
            continue
        prompt = extract_prompt_from_question(q)
        logger.info("Question %d/%d: task_id=%s, prompt_len=%d", idx, total, task_id, len(prompt))
        try:
            ans = run_agent(prompt)
            ans = ans.strip()
            logger.info(" ✓ Answer: %s", ans[:60])
            answers.append({"task_id": task_id, "submitted_answer": ans})
        except Exception as e:
            logger.error(" ❌ Failed to answer: %s", type(e).__name__)
            answers.append({"task_id": task_id, "submitted_answer": f"(error) {type(e).__name__}"})
    logger.info("✓ Answered %d/%d questions", len(answers), total)
    return answers

def submit_answers(username: str, agent_code_url: str, answers: List[Dict[str, Any]]) -> str:
    """Submit answers to the scoring API."""
    try:
        from evaluation_client import ScoringAPIClient
        client = ScoringAPIClient()
        resp = client.submit(username=username, agent_code=agent_code_url, answers=answers)
        return f"✓ Submission successful! Response: {resp}"
    except Exception as e:
        logger.error("Submission failed: %s", e)
        return f"ERROR: Submission failed: {e}"

_demo = None


def _get_demo():
    """Lazily import gradio and construct the demo to avoid network calls on import."""
    global _demo
    if _demo is not None:
        return _demo
    try:
        import gradio as gr
    except Exception as e:
        logger.error("Failed to import gradio: %s", e)
        raise

    # We'll provide a readonly prompt field populated from the scoring API
    # and buttons to fetch a random task and run the agent on it.
    def fetch_random_task():
        try:
            from evaluation_client import ScoringAPIClient
            client = ScoringAPIClient()
            q = client.get_random_question()
            if not q:
                return "(no question returned)"
            # extract prompt safely
            for key in ("question", "prompt", "input", "text", "task"):
                if key in q and isinstance(q[key], str):
                    return q[key]
            return str(q)
        except Exception as e:
            logger.error("Failed to fetch random question: %s", e)
            return f"(fetch error) {type(e).__name__}: {str(e)[:200]}"

    def run_on_current(prompt_text: str):
        if not prompt_text or not prompt_text.strip():
            return "(no prompt to run on)"
        try:
            return respond(prompt_text)
        except Exception as e:
            logger.error("Agent run failed: %s", e)
            return f"Agent error: {type(e).__name__}: {str(e)[:200]}"

    with gr.Blocks() as demo:
        gr.Markdown("# Agents Course — Final Agent Demo")

        # Single question interface
        with gr.Row():
            prompt_box = gr.Textbox(label="Fetched Prompt (read-only)", lines=6)
        with gr.Row():
            fetch_btn = gr.Button("Fetch Random Task")
            run_btn = gr.Button("Run Agent on Fetched Task")
        single_output = gr.Textbox(label="Agent Response", lines=6)

        # Batch processing interface
        gr.Markdown("## Batch Processing")
        questions_state = gr.State([])
        answers_state = gr.State([])
        status_box = gr.Textbox(label="Status", lines=4)
        with gr.Row():
            fetch_all_btn = gr.Button("Fetch All Questions")
            answer_all_btn = gr.Button("Answer All Questions")
            submit_btn = gr.Button("Submit All Answers")
        with gr.Row():
            username_box = gr.Textbox(label="Hugging Face Username", placeholder="your_hf_username")
            agent_url_box = gr.Textbox(label="Agent Code URL", placeholder="https://huggingface.co/spaces/...")
        batch_output = gr.Textbox(label="Batch Results", lines=12)

        # Wire up the buttons
        fetch_btn.click(fn=fetch_random_task, inputs=[], outputs=[prompt_box])
        run_btn.click(fn=run_on_current, inputs=[prompt_box], outputs=[single_output])

        def fetch_all_questions_wrapper():
            questions = fetch_all_questions()
            status = f"Fetched {len(questions)} questions"
            return questions, [], status

        def answer_all_questions_wrapper(questions):
            if not questions:
                return [], "No questions to answer"
            answers = answer_all_questions(questions)
            status = f"Answered {len(answers)} questions"
            return answers, status

        def submit_answers_wrapper(username, agent_url, answers):
            if not answers:
                return "No answers to submit"
            if not username or not agent_url:
                return "Please provide both username and agent code URL"
            return submit_answers(username, agent_url, answers)
        fetch_all_btn.click(
            fn=fetch_all_questions_wrapper,
            inputs=[],
            outputs=[questions_state, answers_state, status_box]
        )
        answer_all_btn.click(
            fn=answer_all_questions_wrapper,
            inputs=[questions_state],
            outputs=[answers_state, status_box]
        )
        submit_btn.click(
            fn=submit_answers_wrapper,
            inputs=[username_box, agent_url_box, answers_state],
            outputs=[batch_output]
        )

    _demo = demo
    return _demo
if __name__ == "__main__":
# Launch unconditionally when executed as a script.
try:
_get_demo().launch(server_name="0.0.0.0", server_port=7860, share=False)
except Exception as e:
logger.error("Failed to launch Gradio demo: %s", e)