import gradio as gr
import numpy as np
import cv2
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
from PIL import Image
from huggingface_hub import hf_hub_download
from skimage import measure

# -----------------------
# Preprocessing
# -----------------------
def preprocess(img):
    img_gray = np.array(img.convert("L"))           # grayscale
    img_resized = cv2.resize(img_gray, (224, 224))  # resize to 224x224
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img_clahe = clahe.apply(img_resized)            # enhance contrast
    img_norm = img_clahe / 255.0                    # normalize to [0, 1]
    # Add channel dimension
    img_norm = np.expand_dims(img_norm, axis=-1)    # shape: (224, 224, 1)
    return img_norm

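# Illustrative usage ("scan.png" is a hypothetical local file; uncomment to
# run): preprocess() returns a float array ready for the model.
#
#   sample = Image.open("scan.png")
#   x = preprocess(sample)
#   print(x.shape)  # (224, 224, 1), values in [0, 1]
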
# -----------------------
# Post-processing
# -----------------------
def remove_small_blobs(mask, min_size=50):
    # Zero out connected components smaller than min_size pixels to
    # suppress speckle noise in the thresholded prediction
    labels = measure.label(mask)
    for region in measure.regionprops(labels):
        if region.area < min_size:
            mask[labels == region.label] = 0
    return mask

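# Quick sanity check (illustrative; uncomment to run): a 1-pixel speck is
# removed while a 4x4 blob survives.
#
#   m = np.zeros((10, 10), dtype=np.uint8)
#   m[0, 0] = 255        # area 1 < min_size
#   m[5:9, 5:9] = 255    # area 16
#   cleaned = remove_small_blobs(m.copy(), min_size=10)
#   assert cleaned[0, 0] == 0 and cleaned[5, 5] == 255
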
# -----------------------
# Custom metric
# -----------------------
def dice_coef(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersect = K.sum(y_true_f * y_pred_f)
    return (2. * intersect + K.epsilon()) / (K.sum(y_true_f) + K.sum(y_pred_f) + K.epsilon())

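# Quick numeric check (illustrative; uncomment to run): Dice is twice the
# intersection over the sum of both masks. With one overlapping pixel out of
# masks of size 2 and 1, the score is 2/3.
#
#   a = np.array([[1, 1, 0, 0]], dtype="float32")
#   b = np.array([[1, 0, 0, 0]], dtype="float32")
#   print(float(dice_coef(a, b)))  # ~0.667
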
# -----------------------
# Model loading
# -----------------------
model = None

def get_model():
    # Lazily download and cache the Keras model on first use
    global model
    if model is None:
        try:
            # Download model from HF Hub
            model_path = hf_hub_download(
                repo_id="yaraa11/brain-tumor-segmentation",
                filename="CNNSegmentation_model.keras"
            )
            model = load_model(model_path, custom_objects={'dice_coef': dice_coef}, compile=False)
        except Exception as e:
            print("Error loading model:", e)
            raise
    return model

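# Illustrative check (the first call downloads the weights from the Hub;
# later calls reuse the cached object):
#
#   m1 = get_model()
#   m2 = get_model()
#   assert m1 is m2
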
# -----------------------
# Prediction function
# -----------------------
def predict(img):
    model = get_model()
    # Remember the original size so the mask can be mapped back
    orig_size = img.size  # (width, height)
    # Preprocess for the model
    img_processed = preprocess(img)
    x = np.expand_dims(img_processed, 0)  # add batch dimension -> (1, 224, 224, 1)
    # Predict mask
    pred = model.predict(x)[0]
    tumor_present = pred.max() > 0.7
    if tumor_present:
        mask = (pred > 0.7).astype(np.uint8).squeeze() * 255
        mask = remove_small_blobs(mask, min_size=50)
        mask_img = Image.fromarray(mask).convert("L")
        # Resize mask back to original image size (nearest keeps it binary)
        mask_img = mask_img.resize(orig_size, Image.NEAREST)
        # Paint the mask as a red overlay on the original scan
        red_overlay = Image.new("RGB", orig_size, (255, 0, 0))
        overlay = img.convert("RGB")
        overlay.paste(red_overlay, (0, 0), mask_img)
        return "Tumor Detected", overlay
    else:
        return "No Tumor Detected", None

# -----------------------
# Gradio Interface
# -----------------------
with gr.Blocks(title="Brain Tumor Segmentation") as demo:
    gr.Markdown("<h1 style='text-align:center'>Brain Tumor Segmentation</h1>")
    gr.Markdown("Upload a brain MRI scan and the app will report whether a tumor is present. If one is found, the segmented tumor region is shown as a red overlay.")
    with gr.Row():
        with gr.Column():
            img_input = gr.Image(type="pil", label="Upload MRI Scan")
            submit_btn = gr.Button("Analyze Scan")
        with gr.Column():
            output_text = gr.Textbox(label="Tumor Detection Result")
            output_image = gr.Image(label="Tumor Mask / Overlay")
    submit_btn.click(
        fn=predict,
        inputs=img_input,
        outputs=[output_text, output_image]
    )

# Expose 'demo' as the interface for Hugging Face Spaces;
# only launch a local server when run directly as a script.
if __name__ == "__main__":
    demo.launch()