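"""A.R.I.A. (Autonomous Rendering & Imaging Agent).

Gradio Space that transforms an uploaded image in two img2img passes: a structural
style pass (texture & form) followed by an atmospheric pass (mood & lighting),
while streaming a step-by-step process log to the UI.
"""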
import gradio as gr
import numpy as np
import random
import torch
import time
from diffusers import StableDiffusionImg2ImgPipeline
from PIL import Image
import os
# =========================================================================
# 🧬 A.R.I.A. (Autonomous Rendering & Imaging Agent)
# =========================================================================
APP_TITLE = "A.R.I.A."
APP_SUBTITLE = "Autonomous Rendering & Imaging Agent // MCP Pipeline"
APP_DESCRIPTION = """
A.R.I.A. is an AI-powered artistic transformation engine that runs your image through a two-stage synthesis pipeline.
First it applies structural styling (texture & form), then it injects atmospheric elements (mood & lighting) to create a unique artistic rendition.
The process log displays each transformation step as the agent works through your image.
"""
DEFAULT_SIZE = 512
MAX_SEED = np.iinfo(np.int32).max
# -----------------------------
# 1. Configuration & Presets
# -----------------------------
PANEL_V1_PRESETS = {
"Oil Painting": {"prompt": "Oil painting, heavy impasto, visible brushstrokes, masterpiece", "neg": "photorealistic, smooth, digital", "str": 0.80},
"Ink Illustration": {"prompt": "Ink and watercolor sketch, loose lines, artistic, intricate details", "neg": "3d render, solid colors", "str": 0.70},
"Noir Photography": {"prompt": "High contrast black and white photography, film grain, dramatic shadows", "neg": "color, soft, painting", "str": 0.65},
"Watercolor": {"prompt": "Soft watercolor painting, bleeding edges, wet on wet, paper texture", "neg": "harsh lines, digital, opaque", "str": 0.75},
}
PANEL_V2_PRESETS = {
"Scenic Anime": {"prompt": "High quality anime background art, hand-painted style, cumulus clouds, rolling hills, vibrant nature", "neg": "dark, gritty, horror, realism, sketch", "str": 0.60},
"Cyberpunk City": {"prompt": "Cyberpunk atmosphere, neon lights, rain, volumetric fog, futuristic", "neg": "daylight, rustic, vintage", "str": 0.55},
"Deep Forest Mood": {"prompt": "Deep moody forest aesthetic, dark emerald and charcoal tones, subtle volumetric fog, mossy texture, soft sunlight through canopy", "neg": "pink, purple, neon, high contrast, warm light, cartoon", "str": 0.50},
"Ethereal Fantasy": {"prompt": "Dreamy fantasy atmosphere, soft glowing light, magical particles, celestial", "neg": "dark, scary, industrial", "str": 0.60},
}
PROMPT_EXAMPLES = [
"A lone astronaut standing on a purple moon.",
"A cozy kitchen with steam rising from a kettle.",
"A medieval fortress surrounded by fog.",
"A close-up portrait of a mischievous cat."
]
V1_KEYS = list(PANEL_V1_PRESETS.keys())
V2_KEYS = list(PANEL_V2_PRESETS.keys())
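# Each preset bundles a positive prompt, a negative prompt ("neg"), and an img2img denoising
# strength ("str"); the V1 panel drives the structural pass, the V2 panel the atmospheric pass.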
# -----------------------------
# 2. System & Model Setup (CPU Optimized)
# -----------------------------
device = "cpu"
# "runwayml/stable-diffusion-v1-5" has been removed from the Hub; this mirror hosts the same weights.
model_repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
pipe = None
try:
    print(f"⚙️ Loading AI Model on {device.upper()}... This may take a moment.")
    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
        model_repo_id,
        torch_dtype=torch.float32,
        use_safetensors=True,
    ).to(device)
    pipe.enable_attention_slicing()
    print("✅ Model Loaded Successfully.")
except Exception as e:
    print(f"⚠️ CRITICAL ERROR: Model failed to load. {str(e)}")

    # Fallback stub so the UI stays demoable even when the model cannot be loaded:
    # it mimics the pipeline's output shape and returns a flat gray placeholder image.
    class MockPipe:
        def __call__(self, *args, **kwargs):
            time.sleep(1)
            return type('obj', (object,), {'images': [Image.new('RGB', (512, 512), color='gray')]})()

    pipe = MockPipe()
class AriaAgent:
    """Thin wrapper around the img2img pipeline that fixes the working resolution and seeding."""

    def __init__(self, pipe, device):
        self.pipe = pipe
        self.device = device

    def generate(self, image, prompt, neg, strength, steps, guide, seed):
        # The pipeline expects an RGB image; resize to the working resolution before denoising.
        image = image.convert("RGB").resize((DEFAULT_SIZE, DEFAULT_SIZE))
        gen = torch.Generator(device=self.device).manual_seed(int(seed))
        return self.pipe(
            prompt=prompt, image=image, strength=strength, negative_prompt=neg,
            guidance_scale=guide, num_inference_steps=steps, generator=gen
        ).images[0]

agent = AriaAgent(pipe, device)
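# Usage sketch (hypothetical values): given a PIL image `photo`, a single stylisation pass is
#   styled = agent.generate(photo, "Oil painting, heavy impasto", "photorealistic",
#                           strength=0.8, steps=20, guide=7.5, seed=42)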
# -----------------------------
# 3. Dynamic CoT Generation (MOCK LLM)
# -----------------------------
def generate_llm_cot(user_prompt, v1_key, v2_key):
context_intent = user_prompt if user_prompt.strip() else "Artistic Enhancement (Default Mode)"
analysis_cot = (
f"πŸ’­ THOUGHT: Analyzing user request. Core intent is: '{context_intent}'.\n"
f" > Strategy: Two-Stage Synthesis confirmed. Structure first, then Atmosphere.\n"
f" > Structural Focus (Step 1): '{v1_key}' for texture and form."
)
step1_cot = (
f"πŸ’­ THOUGHT: Executing Foundation Layer (Step 1).\n"
f" > Target Tool: '{v1_key}' matrix injection.\n"
f" > Task: Injecting high-frequency details and structural elements based on input image."
)
step2_cot = (
f"πŸ’­ THOUGHT: Executing Atmosphere Injection (Step 2).\n"
f" > Target Tool: '{v2_key}' aesthetic application.\n"
f" > Task: Refining lighting, color composition, and mood to match the final style."
)
return {
"analysis": analysis_cot,
"step1": step1_cot,
"step2": step2_cot
}
# -----------------------------
# 4. UI Streaming Logic (Typewriter Effect)
# -----------------------------
def type_log(current_logs, new_text):
    """Yield the growing log string one character at a time to produce a typewriter effect."""
    for char in new_text:
        current_logs += char
        yield current_logs
        time.sleep(0.04)
    yield current_logs + "\n\n"
def run_ui_stream(
    init_img, v1_select, v2_select, user_p,
    seed, rnd, guide, steps, strength_mult
):
    """Run the two-stage pipeline, streaming (log, gallery, seed, status) updates to the UI."""
    log_content = ""
    gallery_state = []
try:
if init_img is None:
gr.Warning("Please upload an input image first.")
yield "❌ Waiting for image...", [], 0, "πŸ”΄ NO INPUT"
return
if rnd: seed = random.randint(0, MAX_SEED)
gallery_state.append((init_img, "1. Input Signal"))
boot_msg = f"⚑ SYSTEM BOOT | A.R.I.A. v2.0\n>> Connected to {device.upper()} Cluster\n>> Seed: {seed}\n--------------------------------"
for update in type_log(log_content, boot_msg):
log_content = update
yield log_content, gallery_state, seed, "⚑ BOOTING"
dynamic_cot = generate_llm_cot(user_p, v1_select, v2_select)
for update in type_log(log_content, dynamic_cot['analysis']):
log_content = update
            yield log_content, gallery_state, seed, "🤔 ANALYZING"
if v1_select not in PANEL_V1_PRESETS: v1_select = V1_KEYS[0]
p1 = PANEL_V1_PRESETS[v1_select]
final_str_1 = min(1.0, p1['str'] * strength_mult)
for update in type_log(log_content, dynamic_cot['step1']):
log_content = update
yield log_content, gallery_state, seed, "🎨 PAINTING (Step 1)"
img_v1 = agent.generate(init_img, f"{p1['prompt']}, {user_p}", p1['neg'], final_str_1, steps, guide, seed)
gallery_state.append((img_v1, f"2. Base: {v1_select}"))
result_msg = f"βœ… RESULT: Step 1 Complete. Texture latents updated. Strength: {final_str_1:.2f}"
for update in type_log(log_content, result_msg):
log_content = update
            yield log_content, gallery_state, seed, "👀 OBSERVING"
if v2_select not in PANEL_V2_PRESETS: v2_select = V2_KEYS[0]
p2 = PANEL_V2_PRESETS[v2_select]
for update in type_log(log_content, dynamic_cot['step2']):
log_content = update
yield log_content, gallery_state, seed, "🌫️ ATMOSPHERE (Step 2)"
final_image = agent.generate(img_v1, f"{p2['prompt']}, {user_p}", p2['neg'], p2['str'], steps, guide, seed + 1)
gallery_state.append((final_image, f"3. Final: {v2_select}"))
final_msg = "🏁 CONCLUSION: Pipeline finished. Artifact ready for retrieval."
for update in type_log(log_content, final_msg):
log_content = update
            yield log_content, gallery_state, seed, "🚀 COMPLETE"
except Exception as e:
import traceback
error_msg = f"❌ SYSTEM CRASH: {str(e)}"
print(traceback.format_exc())
        yield log_content + "\n" + error_msg, gallery_state, seed, "🔴 CRASHED"
# -----------------------------
# 5. CSS Styling
# -----------------------------
hackathon_css = """
@import url('https://fonts.googleapis.com/css2?family=Rajdhani:wght@400;600;700&family=Space+Mono:wght@400;700&display=swap');
/* Colors - Beautiful Dark Theme (Teal/Gold) */
:root {
--dark-bg-start: #0A1919;
--dark-bg-end: #172F2F;
--dark-group-bg: #142525;
--light-text: #FFFFFF;
--accent-teal: #00A896;
--accent-gold: #C6A765;
--secondary-text: #E0E0E0;
--border-color: rgba(0, 168, 150, 0.5);
--secondary-border: rgba(198, 167, 101, 0.5);
}
/* Page */
body {
background: linear-gradient(135deg, var(--dark-bg-start), var(--dark-bg-end));
color: var(--light-text);
font-family: 'Rajdhani', sans-serif;
}
/* Container */
.gradio-container {
max-width: 1200px !important;
}
/* Header & Title (Main Title - H1) */
h1 {
color: var(--accent-teal);
text-transform: uppercase;
text-shadow: 0 0 15px rgba(0, 168, 150, 0.7);
font-size: 2rem;
}
.subtitle {
font-family: 'Space Mono', monospace;
color: var(--accent-gold);
font-size: 1rem;
font-weight: bold;
margin-bottom: 10px;
display: block;
text-shadow: 0 0 10px rgba(198, 167, 101, 0.6);
}
.description {
color: var(--secondary-text);
font-size: 0.9rem;
line-height: 1;
margin-bottom: 25px;
padding: 15px;
background: rgba(20, 37, 37, 0.5);
border-left: 3px solid var(--accent-teal);
border-radius: 8px;
}
/* Group Containers */
.group-container {
background: var(--dark-group-bg);
border: 1px solid var(--border-color);
border-radius: 16px;
padding: 20px;
box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.2),
0px 0px 0px 1px rgba(0, 168, 150, 0.1);
}
/* Textbox Labels */
.gradio-label {
color: var(--secondary-text) !important;
font-weight: 600 !important;
}
/* Buttons */
#run-btn {
background: linear-gradient(135deg, var(--accent-teal), var(--accent-gold));
border: none;
color: white;
font-weight: bold;
text-transform: uppercase;
box-shadow: 0 0 10px rgba(0, 168, 150, 0.7);
}
#run-btn:hover {
transform: scale(1.01);
box-shadow: 0 0 15px rgba(198, 167, 101, 0.9);
}
/* --- ANIMATION FIXES START --- */
@keyframes blink-border {
from, to { border-right-color: transparent }
50% { border-right-color: var(--accent-gold) }
}
/* Log Output (Typewriter Effect) */
#log-output-box textarea {
background-color: #0a0a1a !important;
color: var(--accent-teal) !important;
font-family: 'Space Mono', monospace !important;
border: 1px solid #30363d !important;
font-size: 12px;
height: 300px !important;
border-right: 2px solid var(--accent-gold) !important;
animation: blink-border 0.75s step-end infinite;
}
@keyframes status-flash {
0% { background-color: rgba(198, 167, 101, 0.2); }
50% { background-color: rgba(198, 167, 101, 0.05); }
100% { background-color: #0a0a1a; }
}
@keyframes glitch-flicker {
0% { color: var(--accent-gold); text-shadow: none; }
50% { color: #FFFFFF; text-shadow: 0 0 5px #FFFFFF; }
100% { color: var(--accent-gold); text-shadow: none; }
}
/* Status Bar */
#status-bar textarea {
background-color: #0a0a1a !important;
border: 1px solid var(--secondary-border) !important;
color: var(--accent-gold) !important;
font-weight: bold;
text-align: right;
font-size: 1.2rem;
padding: 0.5rem 0.75rem !important;
border-radius: 8px;
box-shadow: 0 0 10px rgba(198, 167, 101, 0.4);
animation:
status-flash 0.4s ease-out 1,
glitch-flicker 1.5s steps(2, end) infinite;
}
/* GALLERY CAPTIONS */
.gr-gallery-caption {
color: var(--secondary-text) !important;
font-style: italic;
margin: 0 !important;
padding-top: 5px !important;
padding-bottom: 0 !important;
line-height: 1.2 !important;
font-size: 0.75rem !important;
}
/* GALLERY ITEMS */
.gr-gallery-item {
margin: 0 !important;
padding: 5px 5px 0 5px !important;
border: none !important;
}
/* TIMELINE GALLERY HEIGHT */
.group-container .gr-gallery {
min-height: 0 !important;
max-height: 250px !important;
}
/* ============================================
MULTI-STAGE EXAMPLES SECTION
============================================ */
.gallery-outer-group {
background: radial-gradient(ellipse at top, #0f3a3a 0%, #0a1f1f 50%, #0a1919 100%) !important;
border: 2px solid rgba(0, 168, 150, 0.3) !important;
border-radius: 20px !important;
padding: 30px !important;
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.4);
position: relative;
overflow: hidden;
}
.gallery-outer-group::before {
content: '';
position: absolute;
top: -50%;
left: -50%;
width: 200%;
height: 200%;
background: radial-gradient(circle, rgba(0, 168, 150, 0.03) 0%, transparent 50%);
animation: pulse-glow 8s ease-in-out infinite;
pointer-events: none;
}
@keyframes pulse-glow {
0%,100% { opacity: 0.3; transform: scale(1); }
50% { opacity: 0.6; transform: scale(1.1); }
}
.gallery-header h2 {
color: var(--accent-teal) !important;
text-shadow: 0 0 20px rgba(0,168,150,0.6);
font-size: 1rem !important;
text-transform: uppercase;
}
.example-card {
background: linear-gradient(135deg, #0d2424 0%, #142525 100%) !important;
border: 1px solid rgba(0, 168, 150, 0.4) !important;
border-radius: 12px !important;
padding: 15px !important;
box-shadow: 0 4px 16px rgba(0,0,0,0.3);
transition: all 0.3s ease;
}
.example-card:hover {
transform: translateY(-4px);
border-color: rgba(0,168,150,0.6) !important;
}
/* ================================================================
⭐ STATIC EXAMPLE GALLERIES ⭐
================================================================ */
/* Force the gallery area to a fixed height */
.example-card .gr-gallery {
height: 180px !important;
max-height: 180px !important;
overflow: hidden !important;
}
/* FORCE the internal grid NOT to grow vertically */
.example-card .gr-gallery .grid-container {
height: 100% !important;
max-height: 100% !important;
overflow: hidden !important;
}
/* The scroll host is the placeholder div, not grid-wrap. */
.example-card .gr-gallery .placeholder {
height: 100% !important;
max-height: 100% !important;
min-height: 0 !important;
overflow: hidden !important;
}
/* Avoid auto rows expanding */
.example-card .gr-gallery .grid-wrap {
height: 100% !important;
max-height: 100% !important;
overflow: hidden !important;
display: grid !important;
grid-template-columns: repeat(3, 1fr) !important;
grid-auto-rows: 1fr !important;
}
/* Force gallery-item to fit fixed grid */
.example-card .gr-gallery .gallery-item {
height: 100% !important;
max-height: 100% !important;
overflow: hidden !important;
}
/* The <button class="thumbnail-item"> container must not expand */
.example-card .gr-gallery button.thumbnail-item {
height: 100% !important;
max-height: 100% !important;
padding: 0 !important;
}
/* Finally, force image into the grid cell */
.example-card .gr-gallery img {
width: 100% !important;
height: 100% !important;
object-fit: cover !important;
}
"""
# -----------------------------
# 6. Static Gallery Data (Placeholders)
# -----------------------------
ASSETS_DIR = "assets"
static_gallery_ex1 = [
(os.path.join(ASSETS_DIR, "rabbit2.png"), "1. Input Signal"),
(os.path.join(ASSETS_DIR, "ex1-step2.webp"), "2. Base: Oil Painting"),
(os.path.join(ASSETS_DIR, "ex1-step3.webp"), "3. Final: Scenic Anime"),
]
static_gallery_ex2 = [
(os.path.join(ASSETS_DIR, "rabbit2.png"), "1. Input Signal"),
(os.path.join(ASSETS_DIR, "ex2-step2.webp"), "2. Base: Ink Illustration"),
(os.path.join(ASSETS_DIR, "ex2-step3.webp"), "3. Final: Deep Forest Mood"),
]
static_gallery_ex3 = [
(os.path.join(ASSETS_DIR, "rabbit2.png"), "1. Input Signal"),
(os.path.join(ASSETS_DIR, "ex7-step2.webp"), "2. Base: Oil Painting"),
(os.path.join(ASSETS_DIR, "ex7-step3.webp"), "3. Final: Deep Forest Mood"),
]
static_gallery_ex4 = [
(os.path.join(ASSETS_DIR, "rabbit2.png"), "1. Input Signal"),
(os.path.join(ASSETS_DIR, "ex4-step2.webp"), "2. Base: Noir Photography"),
(os.path.join(ASSETS_DIR, "ex4-step3.webp"), "3. Final: Cyberpunk City"),
]
static_gallery_ex5 = [
(os.path.join(ASSETS_DIR, "rabbit2.png"), "1. Input Signal"),
(os.path.join(ASSETS_DIR, "ex5-step2.webp"), "2. Base: Watercolor"),
(os.path.join(ASSETS_DIR, "ex5-step3.webp"), "3. Final: Ethereal Fantasy"),
]
static_gallery_ex6 = [
(os.path.join(ASSETS_DIR, "rabbit2.png"), "1. Input Signal"),
(os.path.join(ASSETS_DIR, "ex6-step2.webp"), "2. Base: Oil Painting"),
(os.path.join(ASSETS_DIR, "ex6-step3.webp"), "3. Final: Deep Forest Mood"),
]
# -----------------------------
# 7. Main UI Construction
# -----------------------------
theme = gr.themes.Base(
primary_hue="teal",
neutral_hue="slate",
font=[gr.themes.GoogleFont('Rajdhani'), 'ui-sans-serif', 'system-ui'],
)
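# The theme and custom CSS are applied on the Blocks constructor below; launch() does not accept them.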
with gr.Blocks(title="A.R.I.A. Agent", theme=theme, css=hackathon_css) as demo:
with gr.Row(elem_classes="header-row"):
with gr.Column(scale=5):
gr.Markdown(f"# 🧬 {APP_TITLE}")
gr.HTML(f"<span class='subtitle'>{APP_SUBTITLE}</span>")
gr.HTML(f"<div class='description'>{APP_DESCRIPTION}</div>")
with gr.Column(scale=2):
            status_display = gr.Textbox(value="🟢 CPU READY", label="Agent Status", interactive=False, elem_id="status-bar")
    # ==========================================================
    # 🚀 Full-width hero banner below the description & status bar
    # ==========================================================
with gr.Row(elem_classes="hero-row"):
with gr.Column():
with gr.Group(elem_classes="hero-card"):
gr.Gallery(
value=[
(os.path.join(ASSETS_DIR, "rabbit2.png"), "1. Input Signal"),
(os.path.join(ASSETS_DIR, "ex3-step2.webp"), "2. Base: Watercolor"),
(os.path.join(ASSETS_DIR, "ex3-step3.webp"), "3. Final: Ethereal Fantasy")
],
columns=3,
rows=1,
object_fit="cover",
show_label=False,
preview=False,
elem_classes=["hero-gallery"]
)
with gr.Row():
with gr.Column(scale=4):
with gr.Group(elem_classes="group-container"):
gr.Markdown("### πŸ“‘ 1. Input Signal: Source Data")
input_image = gr.Image(label="", type="pil", height=280, sources=['upload', 'clipboard'], elem_id="input-image-box")
with gr.Group(elem_classes="group-container"):
gr.Markdown("### πŸ•ΉοΈ 2. Parameters")
user_prompt = gr.Textbox(placeholder="E.g. 'A soaring dragon at sunrise'...", lines=2, label="User Intent Prompt")
gr.Examples(
examples=PROMPT_EXAMPLES,
inputs=[user_prompt],
label="Clickable Examples"
)
with gr.Row():
v1_select = gr.Dropdown(label="Step 1: Structural Style (Texture)",
choices=V1_KEYS,
value=V1_KEYS[0],
interactive=True,
type="value")
v2_select = gr.Dropdown(label="Step 2: Mood & Lighting (Color)",
choices=V2_KEYS,
value=V2_KEYS[2],
interactive=True,
type="value")
strength_mult_slide = gr.Slider(label="Concept Divergence / Style Intensity",
minimum=0.2, maximum=1.0, value=0.9, step=0.05)
                with gr.Accordion("⚙️ Advanced Config", open=False):
with gr.Row():
seed_input = gr.Number(label="Seed", value=0, precision=0)
rnd_check = gr.Checkbox(label="Randomize", value=True)
with gr.Row():
guide_slide = gr.Slider(1, 15, value=7.5, label="Guidance Scale")
step_slide = gr.Slider(5, 50, value=20, step=5, label="Inference Steps")
            run_btn = gr.Button("🚀 RUN MCP AGENT SEQUENCE", elem_id="run-btn", size="lg")
with gr.Column(scale=6):
with gr.Group(elem_classes="group-container"):
gr.Markdown("### πŸ–₯️ Visual Output Stream: Timeline")
gallery_output = gr.Gallery(
label="",
columns=3,
rows=1,
height=400,
object_fit="contain",
show_label=True,
elem_id="gallery-output-box"
)
with gr.Group(elem_classes="group-container"):
gr.Markdown("### πŸ“Ÿ Agent Internal Monologue")
log_text_output = gr.Textbox(
lines=14,
interactive=False,
elem_id="log-output-box",
label="Thinking Process"
)
    # --- 8. Multi-Stage Synthesis Examples ---
with gr.Group(elem_classes="group-container gallery-outer-group"):
with gr.Group(elem_classes="gallery-header"):
gr.Markdown("### 🌟 Multi-Stage Synthesis Examples")
gr.HTML("<div style='color: var(--secondary-text); font-style: italic; font-size: 0.9rem; margin-bottom: 20px;'>Explore various artistic paths by combining a Structural Style (left) and an Atmospheric Style (right).</div>")
# Row 1 Examples
with gr.Row():
with gr.Column(scale=1):
with gr.Group(elem_classes="example-card"):
gallery_output_static_1 = gr.Gallery(
value=static_gallery_ex1,
columns=3,
rows=1,
height=180,
object_fit="contain",
show_label=True,
preview=False,
label="Example 1: Oil Canvas Structure with Anime Background Mood"
)
with gr.Column(scale=1):
with gr.Group(elem_classes="example-card"):
gallery_output_static_2 = gr.Gallery(
value=static_gallery_ex2,
columns=3,
rows=1,
height=180,
object_fit="contain",
show_label=True,
preview=False,
label="Example 2: Ink Outline Structure with Deep Forest Mood"
)
with gr.Column(scale=1):
with gr.Group(elem_classes="example-card"):
gallery_output_static_3 = gr.Gallery(
value=static_gallery_ex3,
columns=3,
rows=1,
height=180,
object_fit="contain",
show_label=True,
preview=False,
label="Example 3: Oil Canvas Structure with Deep Forest Mood"
)
# Row 2 Examples
with gr.Row():
with gr.Column(scale=1):
with gr.Group(elem_classes="example-card"):
gallery_output_static_4 = gr.Gallery(
value=static_gallery_ex4,
columns=3,
rows=1,
height=180,
object_fit="contain",
show_label=True,
preview=False,
label="Example 4: High-Contrast Noir Structure with Cyberpunk Lighting"
)
with gr.Column(scale=1):
with gr.Group(elem_classes="example-card"):
gallery_output_static_5 = gr.Gallery(
value=static_gallery_ex5,
columns=3,
rows=1,
height=180,
object_fit="contain",
show_label=True,
preview=False,
label="Example 5: Soft Watercolor Structure with Ethereal Fantasy Mood"
)
with gr.Column(scale=1):
with gr.Group(elem_classes="example-card"):
gallery_output_static_6 = gr.Gallery(
value=static_gallery_ex6,
columns=3,
rows=1,
height=180,
object_fit="contain",
show_label=True,
preview=False,
label="Example 6: Oil Canvas Structure with Deep Forest Mood"
)
    def on_upload(img):
        # Mirror a freshly uploaded image into the timeline gallery right away.
        return [(img, "1. Input Signal")] if img else []

    input_image.change(fn=on_upload, inputs=[input_image], outputs=[gallery_output], show_progress=False)
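    # run_ui_stream is a generator, so every yield streams an incremental update to the four
    # outputs below; concurrency_limit=1 keeps CPU-bound generations from overlapping.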
run_btn.click(
fn=run_ui_stream,
inputs=[input_image, v1_select, v2_select, user_prompt, seed_input, rnd_check, guide_slide, step_slide, strength_mult_slide],
outputs=[log_text_output, gallery_output, seed_input, status_display],
concurrency_limit=1
)
# -----------------------------
# 9. App Launch
# -----------------------------
if __name__ == "__main__":
    # mcp_server=True additionally exposes the app's API endpoints as MCP tools.
    demo.queue().launch(
        mcp_server=True,
        allowed_paths=[ASSETS_DIR],
    )