import io

import gradio as gr

# Import your inference function directly
from test_mode import run_inference
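
# Assumed contract for run_inference (defined in test_mode, not shown here):
# it takes raw image bytes and returns a dict shaped like
#     {"prediction": "<label>", "probabilities": [[0.91, 0.09]]}
# or {"error": "<message>"} on failure. The example values are illustrative
# assumptions, not taken from test_mode.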

def classify_meme(image):
    try:
        # Convert a PIL image to PNG bytes if needed; raw bytes pass through unchanged
        if hasattr(image, 'save'):
            buf = io.BytesIO()
            image.save(buf, format='PNG')
            img_bytes = buf.getvalue()
        else:
            img_bytes = image

        result = run_inference(img_bytes)

        if "error" in result:
            return f"Error: {result['error']}"

        prediction = result['prediction']
        confidence = max(result['probabilities'][0]) * 100
        return f"Classification: {prediction}\nConfidence: {confidence:.1f}%"
    except Exception as e:
        return f"Error: {e}"

# Simple Gradio interface with API enabled
iface = gr.Interface(
    fn=classify_meme,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="MemeSenseX Backend",
    description="Meme content classifier",
)

# Launch the app; Gradio exposes its prediction API by default, so no extra flag is needed
iface.launch(share=False)
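
# A minimal client-side sketch (assumptions: this Space is deployed under the
# hypothetical handle "your-username/MemeSenseX" and the gradio_client package
# is installed). Gradio names the auto-generated endpoint for a single-function
# Interface "/predict".
#
#   from gradio_client import Client, handle_file
#
#   client = Client("your-username/MemeSenseX")
#   result = client.predict(handle_file("meme.png"), api_name="/predict")
#   print(result)  # e.g. "Classification: <label>\nConfidence: 93.4%"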