Spaces: Runtime error
import gradio as gr
import torch
import torch.nn as nn
from PIL import Image
import torchvision.transforms as T
# Define your model class
class UNetClassifier(nn.Module):
    def __init__(self, num_classes=1):
        super().__init__()

        def conv_block(in_c, out_c):
            return nn.Sequential(
                nn.Conv2d(in_c, out_c, 3, padding=1),
                nn.ReLU(),
                nn.Conv2d(out_c, out_c, 3, padding=1),
                nn.ReLU()
            )

        self.enc1 = conv_block(3, 64)
        self.enc2 = conv_block(64, 128)
        self.pool = nn.MaxPool2d(2)
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(128, num_classes)

    def forward(self, x):
        x = self.enc1(x)
        x = self.pool(x)
        x = self.enc2(x)
        x = self.pool(x)
        x = self.global_pool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return torch.sigmoid(x)
# Load model
# model_weights.pth must be committed to the Space repo next to app.py,
# otherwise torch.load() raises FileNotFoundError at startup.
model = UNetClassifier()
model.load_state_dict(torch.load("model_weights.pth", map_location="cpu"))
model.eval()

# Image transform
transform = T.Compose([
    T.Resize((128, 128)),
    T.ToTensor()
])
# Prediction function
def classify_fire(image):
    image = image.convert("RGB")
    img = transform(image).unsqueeze(0)
    with torch.no_grad():
        output = model(img)
    return "🔥 FIRE" if output.item() > 0.5 else "✅ NO FIRE"
# Gradio UI
# The input must be delivered as a PIL image: classify_fire() calls image.convert("RGB"),
# which fails with an AttributeError on the default numpy array input.
gr.Interface(
    fn=classify_fire,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="Fire Classifier"
).launch()
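When a Space shows a runtime error, it helps to exercise the prediction path locally before pushing, separating model/weights problems from UI wiring problems. Below is a minimal sketch under these assumptions: the definitions above are in scope (or importable from app.py), model_weights.pth is present in the working directory, and "sample.jpg" is a hypothetical test image path.

# Minimal local sanity check (a sketch, not part of app.py).
# Assumes UNetClassifier, transform, model and classify_fire are defined as above;
# "sample.jpg" is a placeholder path for any local test image.
from PIL import Image
import torch

img = Image.open("sample.jpg").convert("RGB")
x = transform(img).unsqueeze(0)      # [1, 3, 128, 128] float tensor in [0, 1]
with torch.no_grad():
    prob = model(x).item()           # sigmoid output, interpreted as fire probability
print(f"fire probability: {prob:.3f}")
print(classify_fire(Image.open("sample.jpg")))

If this script runs cleanly but the Space still fails, the remaining suspects are the Space environment itself: missing files in the repo or missing packages (gradio, torch, torchvision, Pillow) in requirements.txt.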