import os
import io
import sys

import gradio as gr
import psutil
import timm  # BiRefNet's remote modeling code depends on timm
import torch
from PIL import Image
from torchvision import transforms
from transformers import AutoModelForImageSegmentation

# Load the BiRefNet segmentation model (remote code pulled from the Hugging Face Hub).
try:
    model = AutoModelForImageSegmentation.from_pretrained(
        "ZhengPeng7/BiRefNet",
        trust_remote_code=True,
    )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()
    print(f"✅ Model loaded successfully! Using device: {device}")
except Exception as e:
    # The app still starts, but process_image will fail until the model is available.
    print(f"❌ Failed to load model: {e}")


def process_image(input_image):
    """Remove the background from a PIL image and return an RGBA result."""
    if input_image is None:
        return None

    # Preprocess: resize to the model's working resolution and normalize with
    # ImageNet statistics.
    image_size = (1024, 1024)
    transform_image = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    # Convert to RGB first so grayscale or RGBA inputs do not break Normalize.
    input_images = transform_image(input_image.convert("RGB")).unsqueeze(0).to(device)

    # BiRefNet returns a list of predictions; the last entry is the final mask.
    with torch.no_grad():
        preds = model(input_images)[-1].sigmoid().cpu()

    # Turn the prediction into a grayscale mask at the original resolution.
    pred = preds[0].squeeze()
    pred_pil = transforms.ToPILImage()(pred)
    mask = pred_pil.resize(input_image.size)

    # Use the mask as the alpha channel of the original image.
    image = input_image.convert("RGBA")
    image.putalpha(mask)
    return image

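# Example of calling process_image outside the UI (a sketch; assumes a local
# file named "input.jpg" exists and the model loaded successfully):
#
#     result = process_image(Image.open("input.jpg"))
#     result.save("output.png")

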
def get_system_stats_api():
    return {
        "cpu": psutil.cpu_percent(interval=1),
        "ram": psutil.virtual_memory().percent,
    }


def get_system_stats_ui():
    cpu = psutil.cpu_percent(interval=1)
    ram = psutil.virtual_memory().percent
    return f"""
## 🖥️ System Status

| Metric | Usage |
|--------|-------|
| **CPU** | {cpu}% |
| **RAM** | {ram}% |
"""


with gr.Blocks(title="Background Removal Service Test") as app:
    gr.Markdown("## ✂️ Background Removal Service Test (BiRefNet)")

    with gr.Tabs():
        with gr.Tab("✂️ Remove Background"):
            with gr.Row():
                img_in = gr.Image(type="pil", label="Input Image")
                img_out = gr.Image(type="pil", label="Result (PNG)", format="png")
            btn = gr.Button("Remove Background", variant="primary")
            btn.click(process_image, inputs=img_in, outputs=img_out)

        with gr.Tab("📊 System Monitor"):
            gr.Markdown("Click the button below to check current server load.")
            stats_output = gr.Markdown("### Status: Waiting...")
            refresh_btn = gr.Button("🔄 Refresh Stats")
            refresh_btn.click(get_system_stats_ui, outputs=stats_output)

    # Populate the stats panel once when the page first loads.
    app.load(get_system_stats_ui, outputs=stats_output)

    # Hidden components that expose the stats function as a named API endpoint
    # ("/status") without adding visible controls to the UI.
    api_status = gr.JSON(visible=False, label="API Response")
    api_btn = gr.Button("API Status", visible=False)
    api_btn.click(get_system_stats_api, outputs=api_status, api_name="status")


if __name__ == "__main__":
    # Bind to all interfaces so the app is reachable from outside a container.
    app.launch(server_name="0.0.0.0", server_port=7860)
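

# Example client call for the hidden "/status" endpoint (a sketch; assumes the
# app is running locally on port 7860 and that the gradio_client package is
# installed):
#
#     from gradio_client import Client
#     client = Client("http://localhost:7860/")
#     print(client.predict(api_name="/status"))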